// Validate ensures that the specified values fall within the range of the strategy.
func (s *mustRunAs) Validate(pod *api.Pod, container *api.Container) field.ErrorList {
	allErrs := field.ErrorList{}

	if container.SecurityContext == nil {
		detail := fmt.Sprintf("unable to validate nil security context for %s", container.Name)
		allErrs = append(allErrs, field.Invalid(field.NewPath("securityContext"), container.SecurityContext, detail))
		return allErrs
	}
	if container.SecurityContext.SELinuxOptions == nil {
		detail := fmt.Sprintf("unable to validate nil seLinuxOptions for %s", container.Name)
		allErrs = append(allErrs, field.Invalid(field.NewPath("seLinuxOptions"), container.SecurityContext.SELinuxOptions, detail))
		return allErrs
	}

	seLinuxOptionsPath := field.NewPath("seLinuxOptions")
	seLinux := container.SecurityContext.SELinuxOptions
	if seLinux.Level != s.opts.SELinuxOptions.Level {
		detail := fmt.Sprintf("seLinuxOptions.level on %s does not match required level. Found %s, wanted %s", container.Name, seLinux.Level, s.opts.SELinuxOptions.Level)
		allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("level"), seLinux.Level, detail))
	}
	if seLinux.Role != s.opts.SELinuxOptions.Role {
		detail := fmt.Sprintf("seLinuxOptions.role on %s does not match required role. Found %s, wanted %s", container.Name, seLinux.Role, s.opts.SELinuxOptions.Role)
		allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("role"), seLinux.Role, detail))
	}
	if seLinux.Type != s.opts.SELinuxOptions.Type {
		detail := fmt.Sprintf("seLinuxOptions.type on %s does not match required type. Found %s, wanted %s", container.Name, seLinux.Type, s.opts.SELinuxOptions.Type)
		allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("type"), seLinux.Type, detail))
	}
	if seLinux.User != s.opts.SELinuxOptions.User {
		detail := fmt.Sprintf("seLinuxOptions.user on %s does not match required user. Found %s, wanted %s", container.Name, seLinux.User, s.opts.SELinuxOptions.User)
		allErrs = append(allErrs, field.Invalid(seLinuxOptionsPath.Child("user"), seLinux.User, detail))
	}

	return allErrs
}
// TestValidateAllowedVolumes will test that for every field of VolumeSource we can create
// a pod with that type of volume and deny it, accept it explicitly, or accept it with
// the FSTypeAll wildcard.
func TestValidateAllowedVolumes(t *testing.T) {
	val := reflect.ValueOf(api.VolumeSource{})

	for i := 0; i < val.NumField(); i++ {
		// reflectively create the volume source
		fieldVal := val.Type().Field(i)

		volumeSource := api.VolumeSource{}
		volumeSourceVolume := reflect.New(fieldVal.Type.Elem())
		reflect.ValueOf(&volumeSource).Elem().FieldByName(fieldVal.Name).Set(volumeSourceVolume)

		volume := api.Volume{VolumeSource: volumeSource}

		// sanity check before moving on
		fsType, err := psputil.GetVolumeFSType(volume)
		if err != nil {
			t.Errorf("error getting FSType for %s: %s", fieldVal.Name, err.Error())
			continue
		}

		// add the volume to the pod
		pod := defaultPod()
		pod.Spec.Volumes = []api.Volume{volume}

		// create a PSP that allows no volumes
		psp := defaultPSP()

		provider, err := NewSimpleProvider(psp, "namespace", NewSimpleStrategyFactory())
		if err != nil {
			t.Errorf("error creating provider for %s: %s", fieldVal.Name, err.Error())
			continue
		}

		// expect a denial for this PSP and test the error message to ensure it's related to the volumesource
		errs := provider.ValidateContainerSecurityContext(pod, &pod.Spec.Containers[0], field.NewPath(""))
		if len(errs) != 1 {
			t.Errorf("expected exactly 1 error for %s but got %v", fieldVal.Name, errs)
		} else {
			if !strings.Contains(errs.ToAggregate().Error(), fmt.Sprintf("%s volumes are not allowed to be used", fsType)) {
				t.Errorf("did not find the expected error, received: %v", errs)
			}
		}

		// now add the fstype directly to the psp and it should validate
		psp.Spec.Volumes = []extensions.FSType{fsType}
		errs = provider.ValidateContainerSecurityContext(pod, &pod.Spec.Containers[0], field.NewPath(""))
		if len(errs) != 0 {
			t.Errorf("directly allowing volume expected no errors for %s but got %v", fieldVal.Name, errs)
		}

		// now change the psp to allow any volumes and the pod should still validate
		psp.Spec.Volumes = []extensions.FSType{extensions.All}
		errs = provider.ValidateContainerSecurityContext(pod, &pod.Spec.Containers[0], field.NewPath(""))
		if len(errs) != 0 {
			t.Errorf("wildcard volume expected no errors for %s but got %v", fieldVal.Name, errs)
		}
	}
}
// Validate ensures that the specified values fall within the range of the strategy.
func (s *defaultCapabilities) Validate(pod *api.Pod, container *api.Container) field.ErrorList {
	allErrs := field.ErrorList{}

	// if the security context isn't set then we haven't generated correctly. Shouldn't get here
	// if using the provider correctly
	if container.SecurityContext == nil {
		allErrs = append(allErrs, field.Invalid(field.NewPath("securityContext"), container.SecurityContext, "no security context is set"))
		return allErrs
	}

	if container.SecurityContext.Capabilities == nil {
		// if container.SC.Caps is nil then nothing was defaulted by the strat or requested by the pod author
		// if there are no required caps on the strategy and nothing is requested on the pod
		// then we can safely return here without further validation.
		if len(s.defaultAddCapabilities) == 0 && len(s.requiredDropCapabilities) == 0 {
			return allErrs
		}

		// container has no requested caps but we have required caps. We should have something in
		// at least the drops on the container.
		allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities"), container.SecurityContext.Capabilities, "required capabilities are not set on the securityContext"))
		return allErrs
	}

	// validate that anything being added is in the default or allowed sets
	defaultAdd := makeCapSet(s.defaultAddCapabilities)
	allowedAdd := makeCapSet(s.allowedCaps)
	for _, cap := range container.SecurityContext.Capabilities.Add {
		sCap := string(cap)
		if !defaultAdd.Has(sCap) && !allowedAdd.Has(sCap) {
			allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities", "add"), sCap, "capability may not be added"))
		}
	}

	// validate that anything that is required to be dropped is in the drop set
	containerDrops := makeCapSet(container.SecurityContext.Capabilities.Drop)
	for _, requiredDrop := range s.requiredDropCapabilities {
		sDrop := string(requiredDrop)
		if !containerDrops.Has(sDrop) {
			allErrs = append(allErrs, field.Invalid(field.NewPath("capabilities", "drop"), container.SecurityContext.Capabilities.Drop, fmt.Sprintf("%s is required to be dropped but was not found", sDrop)))
		}
	}

	return allErrs
}
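// Illustrative sketch (not part of the original source): exercising the capability
// validation above against a container that fails to drop a required capability.
// It assumes it compiles into the same package as defaultCapabilities (the package
// clause below is a guess), that the strategy's unexported fields are of type
// []api.Capability, and that the usual "k8s.io/kubernetes/pkg/api" import applies.
package capabilities

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
)

func TestDefaultCapabilitiesValidateSketch(t *testing.T) {
	// strategy that requires KILL to be dropped, adds and allows nothing
	strat := &defaultCapabilities{
		requiredDropCapabilities: []api.Capability{"KILL"},
	}
	container := &api.Container{
		Name: "test",
		SecurityContext: &api.SecurityContext{
			Capabilities: &api.Capabilities{
				Drop: []api.Capability{"MKNOD"}, // KILL is required but not dropped
			},
		},
	}
	// the pod argument is unused by this strategy, so nil is fine for the sketch
	if errs := strat.Validate(nil, container); len(errs) != 1 {
		t.Errorf("expected exactly one 'required to be dropped' error, got %v", errs)
	}
}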
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := sets.String{}
	for i, pod := range pods {
		var errlist field.ErrorList
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				// TODO: when validation becomes versioned, this gets a bit
				// more complicated.
				errlist = append(errlist, field.Duplicate(field.NewPath("metadata", "name"), pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := errlist.ToAggregate()
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
func TestValidateSelfSAR(t *testing.T) {
	successCases := []authorizationapi.SelfSubjectAccessReviewSpec{
		{ResourceAttributes: &authorizationapi.ResourceAttributes{}},
	}
	for _, successCase := range successCases {
		if errs := ValidateSelfSubjectAccessReviewSpec(successCase, field.NewPath("spec")); len(errs) != 0 {
			t.Errorf("expected success: %v", errs)
		}
	}

	errorCases := []struct {
		name string
		obj  authorizationapi.SelfSubjectAccessReviewSpec
		msg  string
	}{
		{
			name: "neither request",
			obj:  authorizationapi.SelfSubjectAccessReviewSpec{},
			msg:  "exactly one of nonResourceAttributes or resourceAttributes must be specified",
		},
		{
			name: "both requests",
			obj: authorizationapi.SelfSubjectAccessReviewSpec{
				ResourceAttributes:    &authorizationapi.ResourceAttributes{},
				NonResourceAttributes: &authorizationapi.NonResourceAttributes{},
			},
			msg: "cannot be specified in combination with resourceAttributes",
		},
	}

	for _, c := range errorCases {
		errs := ValidateSelfSubjectAccessReviewSpec(c.obj, field.NewPath("spec"))
		if len(errs) == 0 {
			t.Errorf("%s: expected failure for %q", c.name, c.msg)
		} else if !strings.Contains(errs[0].Error(), c.msg) {
			t.Errorf("%s: unexpected error: %q, expected: %q", c.name, errs[0], c.msg)
		}

		errs = ValidateSelfSubjectAccessReview(&authorizationapi.SelfSubjectAccessReview{Spec: c.obj})
		if len(errs) == 0 {
			t.Errorf("%s: expected failure for %q", c.name, c.msg)
		} else if !strings.Contains(errs[0].Error(), c.msg) {
			t.Errorf("%s: unexpected error: %q, expected: %q", c.name, errs[0], c.msg)
		}
	}
}
func ValidateScale(scale *autoscaling.Scale) field.ErrorList {
	allErrs := field.ErrorList{}
	allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&scale.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...)

	if scale.Spec.Replicas < 0 {
		allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "replicas"), scale.Spec.Replicas, "must be greater than or equal to 0"))
	}

	return allErrs
}
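// Minimal sketch (not from the original source) showing ValidateScale rejecting a
// negative replica count and accepting a non-negative one. Assumed to live in the
// same validation package as ValidateScale; the package clause and the k8s.io
// import paths below are the conventional ones for this tree and may need adjusting.
package validation

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
)

func TestValidateScaleSketch(t *testing.T) {
	scale := autoscaling.Scale{
		ObjectMeta: api.ObjectMeta{Name: "frontend", Namespace: api.NamespaceDefault},
		Spec:       autoscaling.ScaleSpec{Replicas: -1},
	}
	if errs := ValidateScale(&scale); len(errs) == 0 {
		t.Errorf("expected a validation error for negative spec.replicas")
	}

	scale.Spec.Replicas = 3
	if errs := ValidateScale(&scale); len(errs) != 0 {
		t.Errorf("unexpected errors for a valid Scale: %v", errs)
	}
}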
// Validate ensures that the specified values fall within the range of the strategy.
// Groups are passed in here to allow this strategy to support multiple group fields (fsgroup and
// supplemental groups).
func (s *mustRunAs) Validate(pod *api.Pod, groups []int64) field.ErrorList {
	allErrs := field.ErrorList{}

	if pod.Spec.SecurityContext == nil {
		allErrs = append(allErrs, field.Invalid(field.NewPath("securityContext"), pod.Spec.SecurityContext, "unable to validate nil security context"))
		return allErrs
	}

	if len(groups) == 0 && len(s.ranges) > 0 {
		allErrs = append(allErrs, field.Invalid(field.NewPath(s.field), groups, "unable to validate empty groups against required ranges"))
	}

	for _, group := range groups {
		if !s.isGroupValid(group) {
			detail := fmt.Sprintf("%d is not an allowed group", group)
			allErrs = append(allErrs, field.Invalid(field.NewPath(s.field), groups, detail))
		}
	}

	return allErrs
}
// ParseWatchResourceVersion takes a resource version argument and converts it to
// the etcd version we should pass to helper.Watch(). Because resourceVersion is
// an opaque value, the default watch behavior for non-zero watch is to watch
// the next value (if you pass "1", you will see updates from "2" onwards).
func ParseWatchResourceVersion(resourceVersion string) (uint64, error) {
	if resourceVersion == "" || resourceVersion == "0" {
		return 0, nil
	}
	version, err := strconv.ParseUint(resourceVersion, 10, 64)
	if err != nil {
		return 0, NewInvalidError(field.ErrorList{
			// Validation errors are supposed to return version-specific field
			// paths, but this is probably close enough.
			field.Invalid(field.NewPath("resourceVersion"), resourceVersion, err.Error()),
		})
	}
	return version, nil
}
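// Illustrative sketch (not from the original source) of the parsing behavior documented
// above: "" and "0" both mean "no specific version", numeric strings parse directly, and
// anything else is rejected with an Invalid field error. Assumed to live in the same
// package as ParseWatchResourceVersion; the package clause below is a guess.
package storage

import "testing"

func TestParseWatchResourceVersionSketch(t *testing.T) {
	cases := []struct {
		in      string
		want    uint64
		wantErr bool
	}{
		{in: "", want: 0},
		{in: "0", want: 0},
		{in: "10", want: 10},
		{in: "asdf", wantErr: true},
	}
	for _, c := range cases {
		got, err := ParseWatchResourceVersion(c.in)
		if (err != nil) != c.wantErr {
			t.Errorf("ParseWatchResourceVersion(%q): unexpected error state: %v", c.in, err)
			continue
		}
		if got != c.want {
			t.Errorf("ParseWatchResourceVersion(%q) = %d, want %d", c.in, got, c.want)
		}
	}
}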
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool {
	// Check if the node satisfies the daemon set's node selector.
	nodeSelector := labels.Set(ds.Spec.Template.Spec.NodeSelector).AsSelector()
	if !nodeSelector.Matches(labels.Set(node.Labels)) {
		return false
	}
	// If the daemon set specifies a node name, check that it matches with node.Name.
	if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
		return false
	}

	for _, c := range node.Status.Conditions {
		if c.Type == api.NodeOutOfDisk && c.Status == api.ConditionTrue {
			return false
		}
	}

	newPod := &api.Pod{Spec: ds.Spec.Template.Spec}
	newPod.Spec.NodeName = node.Name
	pods := []*api.Pod{newPod}

	for _, m := range dsc.podStore.Indexer.List() {
		pod := m.(*api.Pod)
		if pod.Spec.NodeName != node.Name {
			continue
		}
		if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
			continue
		}
		// ignore pods that belong to the daemonset when taking into account whether
		// a daemonset should bind to a node.
		if pds := dsc.getPodDaemonSet(pod); pds != nil && ds.Name == pds.Name {
			continue
		}
		pods = append(pods, pod)
	}

	_, notFittingCPU, notFittingMemory, notFittingNvidiaGPU := predicates.CheckPodsExceedingFreeResources(pods, node.Status.Allocatable)
	if len(notFittingCPU)+len(notFittingMemory)+len(notFittingNvidiaGPU) != 0 {
		dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: insufficient free resources", node.ObjectMeta.Name)
		return false
	}

	ports := sets.String{}
	for _, pod := range pods {
		if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
			dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name)
			return false
		}
	}
	return true
}
// Validate ensures that the specified values fall within the range of the strategy. Validation
// of this will pass if either the UID is not set, assuming that the image will provide the UID,
// or if the UID is set and is not root. In order to work properly this assumes that the kubelet
// performs a final check on runAsUser or the image UID when runAsUser is nil.
func (s *nonRoot) Validate(pod *api.Pod, container *api.Container) field.ErrorList {
	allErrs := field.ErrorList{}
	securityContextPath := field.NewPath("securityContext")
	if container.SecurityContext == nil {
		detail := fmt.Sprintf("unable to validate nil security context for container %s", container.Name)
		allErrs = append(allErrs, field.Invalid(securityContextPath, container.SecurityContext, detail))
		return allErrs
	}
	if container.SecurityContext.RunAsUser != nil && *container.SecurityContext.RunAsUser == 0 {
		detail := fmt.Sprintf("running with the root UID is forbidden by the pod security policy %s", container.Name)
		allErrs = append(allErrs, field.Invalid(securityContextPath.Child("runAsUser"), *container.SecurityContext.RunAsUser, detail))
		return allErrs
	}
	return allErrs
}
func TestValidatePodDisruptionBudgetSpec(t *testing.T) {
	successCases := []intstr.IntOrString{
		intstr.FromString("0%"),
		intstr.FromString("1%"),
		intstr.FromString("100%"),
		intstr.FromInt(0),
		intstr.FromInt(1),
		intstr.FromInt(100),
	}
	for _, c := range successCases {
		spec := policy.PodDisruptionBudgetSpec{
			MinAvailable: c,
		}
		errs := ValidatePodDisruptionBudgetSpec(spec, field.NewPath("foo"))
		if len(errs) != 0 {
			t.Errorf("unexpected failure %v for %v", errs, spec)
		}
	}

	failureCases := []intstr.IntOrString{
		intstr.FromString("1.1%"),
		intstr.FromString("nope"),
		intstr.FromString("-1%"),
		intstr.FromString("101%"),
		intstr.FromInt(-1),
	}
	for _, c := range failureCases {
		spec := policy.PodDisruptionBudgetSpec{
			MinAvailable: c,
		}
		errs := ValidatePodDisruptionBudgetSpec(spec, field.NewPath("foo"))
		if len(errs) == 0 {
			t.Errorf("unexpected success for %v", spec)
		}
	}
}
func (s strategy) Export(obj runtime.Object, exact bool) error {
	t, ok := obj.(*api.Secret)
	if !ok {
		// unexpected programmer error
		return fmt.Errorf("unexpected object: %v", obj)
	}
	s.PrepareForCreate(obj)
	if exact {
		return nil
	}
	// secrets that are tied to the UID of a service account cannot be exported anyway
	if t.Type == api.SecretTypeServiceAccountToken || len(t.Annotations[api.ServiceAccountUIDKey]) > 0 {
		errs := []*field.Error{
			field.Invalid(field.NewPath("type"), t, "can not export service account secrets"),
		}
		return errors.NewInvalid(api.Kind("Secret"), t.Name, errs)
	}
	return nil
}
// ValidatePetSetUpdate tests if required fields in the PetSet are set.
func ValidatePetSetUpdate(petSet, oldPetSet *apps.PetSet) field.ErrorList {
	allErrs := field.ErrorList{}

	// TODO: For now we're taking the safe route and disallowing all updates to spec except for Spec.Replicas.
	// Enable on a case by case basis.
	restoreReplicas := petSet.Spec.Replicas
	petSet.Spec.Replicas = oldPetSet.Spec.Replicas

	// The generation changes for this update
	restoreGeneration := petSet.Generation
	petSet.Generation = oldPetSet.Generation

	if !reflect.DeepEqual(petSet, oldPetSet) {
		allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to petset spec for fields other than 'replicas' are forbidden."))
	}
	petSet.Spec.Replicas = restoreReplicas
	petSet.Generation = restoreGeneration

	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(petSet.Spec.Replicas), field.NewPath("spec", "replicas"))...)
	return allErrs
}
// PodConstraintsFunc verifies that all required resources are present on the pod
// In addition, it validates that the resources are valid (i.e. requests < limits)
func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
	pod, ok := object.(*api.Pod)
	if !ok {
		return fmt.Errorf("Unexpected input object %v", object)
	}

	// Pod level resources are often set during admission control
	// As a consequence, we want to verify that resources are valid prior
	// to ever charging quota prematurely in case they are not.
	allErrs := field.ErrorList{}
	fldPath := field.NewPath("spec").Child("containers")
	for i, ctr := range pod.Spec.Containers {
		idxPath := fldPath.Index(i)
		allErrs = append(allErrs, validation.ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...)
	}
	if len(allErrs) > 0 {
		return allErrs.ToAggregate()
	}

	// TODO: fix this when we have pod level cgroups
	// since we do not yet have pod level requests/limits, we need to ensure each
	// container makes an explicit request or limit for a quota tracked resource
	requiredSet := quota.ToSet(required)
	missingSet := sets.NewString()
	for i := range pod.Spec.Containers {
		requests := pod.Spec.Containers[i].Resources.Requests
		limits := pod.Spec.Containers[i].Resources.Limits
		containerUsage := podUsageHelper(requests, limits)
		containerSet := quota.ToSet(quota.ResourceNames(containerUsage))
		if !containerSet.Equal(requiredSet) {
			difference := requiredSet.Difference(containerSet)
			missingSet.Insert(difference.List()...)
		}
	}
	if len(missingSet) == 0 {
		return nil
	}
	return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
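// Minimal sketch (not from the original source) illustrating the constraint above: a pod
// whose container sets no cpu request or limit is rejected when cpu is passed in as a
// required resource. Assumed to compile into the same package as PodConstraintsFunc
// (the package clause is a guess) with the usual "k8s.io/kubernetes/pkg/api" import.
package core

import (
	"strings"
	"testing"

	"k8s.io/kubernetes/pkg/api"
)

func TestPodConstraintsFuncSketch(t *testing.T) {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Name: "a"}, // no requests or limits at all
			},
		},
	}
	err := PodConstraintsFunc([]api.ResourceName{api.ResourceCPU}, pod)
	if err == nil {
		t.Fatalf("expected an error requiring cpu to be specified")
	}
	if !strings.Contains(err.Error(), "must specify") {
		t.Errorf("unexpected error: %v", err)
	}
}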
func ValidateHorizontalPodAutoscaler(autoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList {
	allErrs := apivalidation.ValidateObjectMeta(&autoscaler.ObjectMeta, true, ValidateHorizontalPodAutoscalerName, field.NewPath("metadata"))
	allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(autoscaler.Spec, field.NewPath("spec"))...)
	allErrs = append(allErrs, validateHorizontalPodAutoscalerAnnotations(autoscaler.Annotations, field.NewPath("metadata"))...)
	return allErrs
}
func TestValidateContainerSecurityContextSuccess(t *testing.T) {
	var notPriv bool = false
	defaultPod := func() *api.Pod {
		return &api.Pod{
			Spec: api.PodSpec{
				SecurityContext: &api.PodSecurityContext{},
				Containers: []api.Container{
					{
						SecurityContext: &api.SecurityContext{
							// expected to be set by defaulting mechanisms
							Privileged: &notPriv,
							// fill in the rest for test cases
						},
					},
				},
			},
		}
	}

	// fail user strat
	userPSP := defaultPSP()
	var uid int64 = 999
	userPSP.Spec.RunAsUser = extensions.RunAsUserStrategyOptions{
		Rule:   extensions.RunAsUserStrategyMustRunAs,
		Ranges: []extensions.IDRange{{Min: uid, Max: uid}},
	}
	userPod := defaultPod()
	userPod.Spec.Containers[0].SecurityContext.RunAsUser = &uid

	// fail selinux strat
	seLinuxPSP := defaultPSP()
	seLinuxPSP.Spec.SELinux = extensions.SELinuxStrategyOptions{
		Rule: extensions.SELinuxStrategyMustRunAs,
		SELinuxOptions: &api.SELinuxOptions{
			Level: "foo",
		},
	}
	seLinuxPod := defaultPod()
	seLinuxPod.Spec.Containers[0].SecurityContext.SELinuxOptions = &api.SELinuxOptions{
		Level: "foo",
	}

	privPSP := defaultPSP()
	privPSP.Spec.Privileged = true
	privPod := defaultPod()
	var priv bool = true
	privPod.Spec.Containers[0].SecurityContext.Privileged = &priv

	capsPSP := defaultPSP()
	capsPSP.Spec.AllowedCapabilities = []api.Capability{"foo"}
	capsPod := defaultPod()
	capsPod.Spec.Containers[0].SecurityContext.Capabilities = &api.Capabilities{
		Add: []api.Capability{"foo"},
	}

	// pod should be able to request caps that are in the required set even if not specified in the allowed set
	requiredCapsPSP := defaultPSP()
	requiredCapsPSP.Spec.DefaultAddCapabilities = []api.Capability{"foo"}
	requiredCapsPod := defaultPod()
	requiredCapsPod.Spec.Containers[0].SecurityContext.Capabilities = &api.Capabilities{
		Add: []api.Capability{"foo"},
	}

	hostDirPSP := defaultPSP()
	hostDirPSP.Spec.Volumes = []extensions.FSType{extensions.HostPath}
	hostDirPod := defaultPod()
	hostDirPod.Spec.Volumes = []api.Volume{
		{
			Name: "bad volume",
			VolumeSource: api.VolumeSource{
				HostPath: &api.HostPathVolumeSource{},
			},
		},
	}

	hostPortPSP := defaultPSP()
	hostPortPSP.Spec.HostPorts = []extensions.HostPortRange{{Min: 1, Max: 1}}
	hostPortPod := defaultPod()
	hostPortPod.Spec.Containers[0].Ports = []api.ContainerPort{{HostPort: 1}}

	readOnlyRootFSPodFalse := defaultPod()
	readOnlyRootFSFalse := false
	readOnlyRootFSPodFalse.Spec.Containers[0].SecurityContext.ReadOnlyRootFilesystem = &readOnlyRootFSFalse

	readOnlyRootFSPodTrue := defaultPod()
	readOnlyRootFSTrue := true
	readOnlyRootFSPodTrue.Spec.Containers[0].SecurityContext.ReadOnlyRootFilesystem = &readOnlyRootFSTrue

	errorCases := map[string]struct {
		pod *api.Pod
		psp *extensions.PodSecurityPolicy
	}{
		"pass user must run as PSP": {
			pod: userPod,
			psp: userPSP,
		},
		"pass seLinux must run as PSP": {
			pod: seLinuxPod,
			psp: seLinuxPSP,
		},
		"pass priv validating PSP": {
			pod: privPod,
			psp: privPSP,
		},
		"pass allowed caps validating PSP": {
			pod: capsPod,
			psp: capsPSP,
		},
		"pass required caps validating PSP": {
			pod: requiredCapsPod,
			psp: requiredCapsPSP,
		},
		"pass hostDir validating PSP": {
			pod: hostDirPod,
			psp: hostDirPSP,
		},
		"pass hostPort validating PSP": {
			pod: hostPortPod,
			psp: hostPortPSP,
		},
		"pass read only root fs - nil": {
			pod: defaultPod(),
			psp: defaultPSP(),
		},
		"pass read only root fs - false": {
			pod: readOnlyRootFSPodFalse,
			psp: defaultPSP(),
		},
		"pass read only root fs - true": {
			pod: readOnlyRootFSPodTrue,
			psp: defaultPSP(),
		},
	}

	for k, v := range errorCases {
		provider, err := NewSimpleProvider(v.psp, "namespace", NewSimpleStrategyFactory())
		if err != nil {
			t.Fatalf("unable to create provider %v", err)
		}
		errs := provider.ValidateContainerSecurityContext(v.pod, &v.pod.Spec.Containers[0], field.NewPath(""))
		if len(errs) != 0 {
			t.Errorf("%s expected validation pass but received errors %v", k, errs)
			continue
		}
	}
}
func TestValidatePodSecurityContextSuccess(t *testing.T) {
	hostNetworkPSP := defaultPSP()
	hostNetworkPSP.Spec.HostNetwork = true
	hostNetworkPod := defaultPod()
	hostNetworkPod.Spec.SecurityContext.HostNetwork = true

	hostPIDPSP := defaultPSP()
	hostPIDPSP.Spec.HostPID = true
	hostPIDPod := defaultPod()
	hostPIDPod.Spec.SecurityContext.HostPID = true

	hostIPCPSP := defaultPSP()
	hostIPCPSP.Spec.HostIPC = true
	hostIPCPod := defaultPod()
	hostIPCPod.Spec.SecurityContext.HostIPC = true

	supGroupPSP := defaultPSP()
	supGroupPSP.Spec.SupplementalGroups = extensions.SupplementalGroupsStrategyOptions{
		Rule: extensions.SupplementalGroupsStrategyMustRunAs,
		Ranges: []extensions.IDRange{
			{Min: 1, Max: 5},
		},
	}
	supGroupPod := defaultPod()
	supGroupPod.Spec.SecurityContext.SupplementalGroups = []int64{3}

	fsGroupPSP := defaultPSP()
	fsGroupPSP.Spec.FSGroup = extensions.FSGroupStrategyOptions{
		Rule: extensions.FSGroupStrategyMustRunAs,
		Ranges: []extensions.IDRange{
			{Min: 1, Max: 5},
		},
	}
	fsGroupPod := defaultPod()
	fsGroup := int64(3)
	fsGroupPod.Spec.SecurityContext.FSGroup = &fsGroup

	seLinuxPod := defaultPod()
	seLinuxPod.Spec.SecurityContext.SELinuxOptions = &api.SELinuxOptions{
		User:  "******",
		Role:  "role",
		Type:  "type",
		Level: "level",
	}
	seLinuxPSP := defaultPSP()
	seLinuxPSP.Spec.SELinux.Rule = extensions.SELinuxStrategyMustRunAs
	seLinuxPSP.Spec.SELinux.SELinuxOptions = &api.SELinuxOptions{
		User:  "******",
		Role:  "role",
		Type:  "type",
		Level: "level",
	}

	errorCases := map[string]struct {
		pod *api.Pod
		psp *extensions.PodSecurityPolicy
	}{
		"pass hostNetwork validating PSP": {
			pod: hostNetworkPod,
			psp: hostNetworkPSP,
		},
		"pass hostPID validating PSP": {
			pod: hostPIDPod,
			psp: hostPIDPSP,
		},
		"pass hostIPC validating PSP": {
			pod: hostIPCPod,
			psp: hostIPCPSP,
		},
		"pass supplemental group validating PSP": {
			pod: supGroupPod,
			psp: supGroupPSP,
		},
		"pass fs group validating PSP": {
			pod: fsGroupPod,
			psp: fsGroupPSP,
		},
		"pass selinux validating PSP": {
			pod: seLinuxPod,
			psp: seLinuxPSP,
		},
	}

	for k, v := range errorCases {
		provider, err := NewSimpleProvider(v.psp, "namespace", NewSimpleStrategyFactory())
		if err != nil {
			t.Fatalf("unable to create provider %v", err)
		}
		errs := provider.ValidatePodSecurityContext(v.pod, field.NewPath(""))
		if len(errs) != 0 {
			t.Errorf("%s expected validation pass but received errors %v", k, errs)
			continue
		}
	}
}
func TestValidateContainerSecurityContextFailures(t *testing.T) {
	// fail user strat
	failUserPSP := defaultPSP()
	var uid int64 = 999
	var badUID int64 = 1
	failUserPSP.Spec.RunAsUser = extensions.RunAsUserStrategyOptions{
		Rule:   extensions.RunAsUserStrategyMustRunAs,
		Ranges: []extensions.IDRange{{Min: uid, Max: uid}},
	}
	failUserPod := defaultPod()
	failUserPod.Spec.Containers[0].SecurityContext.RunAsUser = &badUID

	// fail selinux strat
	failSELinuxPSP := defaultPSP()
	failSELinuxPSP.Spec.SELinux = extensions.SELinuxStrategyOptions{
		Rule: extensions.SELinuxStrategyMustRunAs,
		SELinuxOptions: &api.SELinuxOptions{
			Level: "foo",
		},
	}
	failSELinuxPod := defaultPod()
	failSELinuxPod.Spec.Containers[0].SecurityContext.SELinuxOptions = &api.SELinuxOptions{
		Level: "bar",
	}

	failPrivPod := defaultPod()
	var priv bool = true
	failPrivPod.Spec.Containers[0].SecurityContext.Privileged = &priv

	failCapsPod := defaultPod()
	failCapsPod.Spec.Containers[0].SecurityContext.Capabilities = &api.Capabilities{
		Add: []api.Capability{"foo"},
	}

	failHostDirPod := defaultPod()
	failHostDirPod.Spec.Volumes = []api.Volume{
		{
			Name: "bad volume",
			VolumeSource: api.VolumeSource{
				HostPath: &api.HostPathVolumeSource{},
			},
		},
	}

	failHostPortPod := defaultPod()
	failHostPortPod.Spec.Containers[0].Ports = []api.ContainerPort{{HostPort: 1}}

	readOnlyRootFSPSP := defaultPSP()
	readOnlyRootFSPSP.Spec.ReadOnlyRootFilesystem = true

	readOnlyRootFSPodFalse := defaultPod()
	readOnlyRootFS := false
	readOnlyRootFSPodFalse.Spec.Containers[0].SecurityContext.ReadOnlyRootFilesystem = &readOnlyRootFS

	errorCases := map[string]struct {
		pod           *api.Pod
		psp           *extensions.PodSecurityPolicy
		expectedError string
	}{
		"failUserPSP": {
			pod:           failUserPod,
			psp:           failUserPSP,
			expectedError: "does not match required range",
		},
		"failSELinuxPSP": {
			pod:           failSELinuxPod,
			psp:           failSELinuxPSP,
			expectedError: "does not match required level",
		},
		"failPrivPSP": {
			pod:           failPrivPod,
			psp:           defaultPSP(),
			expectedError: "Privileged containers are not allowed",
		},
		"failCapsPSP": {
			pod:           failCapsPod,
			psp:           defaultPSP(),
			expectedError: "capability may not be added",
		},
		"failHostDirPSP": {
			pod:           failHostDirPod,
			psp:           defaultPSP(),
			expectedError: "hostPath volumes are not allowed to be used",
		},
		"failHostPortPSP": {
			pod:           failHostPortPod,
			psp:           defaultPSP(),
			expectedError: "Host port 1 is not allowed to be used. Allowed ports: []",
		},
		"failReadOnlyRootFS - nil": {
			pod:           defaultPod(),
			psp:           readOnlyRootFSPSP,
			expectedError: "ReadOnlyRootFilesystem may not be nil and must be set to true",
		},
		"failReadOnlyRootFS - false": {
			pod:           readOnlyRootFSPodFalse,
			psp:           readOnlyRootFSPSP,
			expectedError: "ReadOnlyRootFilesystem must be set to true",
		},
	}

	for k, v := range errorCases {
		provider, err := NewSimpleProvider(v.psp, "namespace", NewSimpleStrategyFactory())
		if err != nil {
			t.Fatalf("unable to create provider %v", err)
		}
		errs := provider.ValidateContainerSecurityContext(v.pod, &v.pod.Spec.Containers[0], field.NewPath(""))
		if len(errs) == 0 {
			t.Errorf("%s expected validation failure but did not receive errors", k)
			continue
		}
		if !strings.Contains(errs[0].Error(), v.expectedError) {
			t.Errorf("%s received unexpected error %v", k, errs)
		}
	}
}
func TestValidatePodSecurityContextFailures(t *testing.T) {
	failHostNetworkPod := defaultPod()
	failHostNetworkPod.Spec.SecurityContext.HostNetwork = true

	failHostPIDPod := defaultPod()
	failHostPIDPod.Spec.SecurityContext.HostPID = true

	failHostIPCPod := defaultPod()
	failHostIPCPod.Spec.SecurityContext.HostIPC = true

	failSupplementalGroupPod := defaultPod()
	failSupplementalGroupPod.Spec.SecurityContext.SupplementalGroups = []int64{999}
	failSupplementalGroupPSP := defaultPSP()
	failSupplementalGroupPSP.Spec.SupplementalGroups = extensions.SupplementalGroupsStrategyOptions{
		Rule: extensions.SupplementalGroupsStrategyMustRunAs,
		Ranges: []extensions.IDRange{
			{Min: 1, Max: 1},
		},
	}

	failFSGroupPod := defaultPod()
	fsGroup := int64(999)
	failFSGroupPod.Spec.SecurityContext.FSGroup = &fsGroup
	failFSGroupPSP := defaultPSP()
	failFSGroupPSP.Spec.FSGroup = extensions.FSGroupStrategyOptions{
		Rule: extensions.FSGroupStrategyMustRunAs,
		Ranges: []extensions.IDRange{
			{Min: 1, Max: 1},
		},
	}

	failNilSELinuxPod := defaultPod()
	failSELinuxPSP := defaultPSP()
	failSELinuxPSP.Spec.SELinux.Rule = extensions.SELinuxStrategyMustRunAs
	failSELinuxPSP.Spec.SELinux.SELinuxOptions = &api.SELinuxOptions{
		Level: "foo",
	}

	failInvalidSELinuxPod := defaultPod()
	failInvalidSELinuxPod.Spec.SecurityContext.SELinuxOptions = &api.SELinuxOptions{
		Level: "bar",
	}

	errorCases := map[string]struct {
		pod           *api.Pod
		psp           *extensions.PodSecurityPolicy
		expectedError string
	}{
		"failHostNetwork": {
			pod:           failHostNetworkPod,
			psp:           defaultPSP(),
			expectedError: "Host network is not allowed to be used",
		},
		"failHostPID": {
			pod:           failHostPIDPod,
			psp:           defaultPSP(),
			expectedError: "Host PID is not allowed to be used",
		},
		"failHostIPC": {
			pod:           failHostIPCPod,
			psp:           defaultPSP(),
			expectedError: "Host IPC is not allowed to be used",
		},
		"failSupplementalGroupOutOfRange": {
			pod:           failSupplementalGroupPod,
			psp:           failSupplementalGroupPSP,
			expectedError: "999 is not an allowed group",
		},
		"failSupplementalGroupEmpty": {
			pod:           defaultPod(),
			psp:           failSupplementalGroupPSP,
			expectedError: "unable to validate empty groups against required ranges",
		},
		"failFSGroupOutOfRange": {
			pod:           failFSGroupPod,
			psp:           failFSGroupPSP,
			expectedError: "999 is not an allowed group",
		},
		"failFSGroupEmpty": {
			pod:           defaultPod(),
			psp:           failFSGroupPSP,
			expectedError: "unable to validate empty groups against required ranges",
		},
		"failNilSELinux": {
			pod:           failNilSELinuxPod,
			psp:           failSELinuxPSP,
			expectedError: "unable to validate nil seLinuxOptions",
		},
		"failInvalidSELinux": {
			pod:           failInvalidSELinuxPod,
			psp:           failSELinuxPSP,
			expectedError: "does not match required level. Found bar, wanted foo",
		},
	}

	for k, v := range errorCases {
		provider, err := NewSimpleProvider(v.psp, "namespace", NewSimpleStrategyFactory())
		if err != nil {
			t.Fatalf("unable to create provider %v", err)
		}
		errs := provider.ValidatePodSecurityContext(v.pod, field.NewPath(""))
		if len(errs) == 0 {
			t.Errorf("%s expected validation failure but did not receive errors", k)
			continue
		}
		if !strings.Contains(errs[0].Error(), v.expectedError) {
			t.Errorf("%s received unexpected error %v", k, errs)
		}
	}
}
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	service := obj.(*api.Service)

	if err := rest.BeforeCreate(Strategy, ctx, obj); err != nil {
		return nil, err
	}

	// TODO: this should probably move to strategy.PrepareForCreate()
	releaseServiceIP := false
	defer func() {
		if releaseServiceIP {
			if api.IsServiceIPSet(service) {
				rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP))
			}
		}
	}()

	nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
	defer nodePortOp.Finish()

	if api.IsServiceIPRequested(service) {
		// Allocate next available.
		ip, err := rs.serviceIPs.AllocateNext()
		if err != nil {
			// TODO: what error should be returned here? It's not a
			// field-level validation failure (the field is valid), and it's
			// not really an internal error.
			return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a serviceIP: %v", err))
		}
		service.Spec.ClusterIP = ip.String()
		releaseServiceIP = true
	} else if api.IsServiceIPSet(service) {
		// Try to respect the requested IP.
		if err := rs.serviceIPs.Allocate(net.ParseIP(service.Spec.ClusterIP)); err != nil {
			// TODO: when validation becomes versioned, this gets more complicated.
			el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIP"), service.Spec.ClusterIP, err.Error())}
			return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
		}
		releaseServiceIP = true
	}

	assignNodePorts := shouldAssignNodePorts(service)
	for i := range service.Spec.Ports {
		servicePort := &service.Spec.Ports[i]
		if servicePort.NodePort != 0 {
			err := nodePortOp.Allocate(int(servicePort.NodePort))
			if err != nil {
				// TODO: when validation becomes versioned, this gets more complicated.
				el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), servicePort.NodePort, err.Error())}
				return nil, errors.NewInvalid(api.Kind("Service"), service.Name, el)
			}
		} else if assignNodePorts {
			nodePort, err := nodePortOp.AllocateNext()
			if err != nil {
				// TODO: what error should be returned here? It's not a
				// field-level validation failure (the field is valid), and it's
				// not really an internal error.
				return nil, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
			}
			servicePort.NodePort = int32(nodePort)
		}
	}

	out, err := rs.registry.CreateService(ctx, service)
	if err != nil {
		err = rest.CheckGeneratedNameError(Strategy, err, service)
	}

	if err == nil {
		el := nodePortOp.Commit()
		if el != nil {
			// these should be caught by an eventual reconciliation / restart
			glog.Errorf("error(s) committing service node-ports changes: %v", el)
		}

		releaseServiceIP = false
	}

	return out, err
}
func (rs *REST) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	oldService, err := rs.registry.GetService(ctx, name)
	if err != nil {
		return nil, false, err
	}

	obj, err := objInfo.UpdatedObject(ctx, oldService)
	if err != nil {
		return nil, false, err
	}

	service := obj.(*api.Service)
	if !api.ValidNamespace(ctx, &service.ObjectMeta) {
		return nil, false, errors.NewConflict(api.Resource("services"), service.Namespace, fmt.Errorf("Service.Namespace does not match the provided context"))
	}

	// Copy over non-user fields
	// TODO: make this a merge function
	if errs := validation.ValidateServiceUpdate(service, oldService); len(errs) > 0 {
		return nil, false, errors.NewInvalid(api.Kind("Service"), service.Name, errs)
	}

	nodePortOp := portallocator.StartOperation(rs.serviceNodePorts)
	defer nodePortOp.Finish()

	assignNodePorts := shouldAssignNodePorts(service)

	oldNodePorts := CollectServiceNodePorts(oldService)

	newNodePorts := []int{}
	if assignNodePorts {
		for i := range service.Spec.Ports {
			servicePort := &service.Spec.Ports[i]
			nodePort := int(servicePort.NodePort)
			if nodePort != 0 {
				if !contains(oldNodePorts, nodePort) {
					err := nodePortOp.Allocate(nodePort)
					if err != nil {
						el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), nodePort, err.Error())}
						return nil, false, errors.NewInvalid(api.Kind("Service"), service.Name, el)
					}
				}
			} else {
				nodePort, err = nodePortOp.AllocateNext()
				if err != nil {
					// TODO: what error should be returned here? It's not a
					// field-level validation failure (the field is valid), and it's
					// not really an internal error.
					return nil, false, errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
				}
				servicePort.NodePort = int32(nodePort)
			}
			// Detect duplicate node ports; this should have been caught by validation, so we panic
			if contains(newNodePorts, nodePort) {
				panic("duplicate node port")
			}
			newNodePorts = append(newNodePorts, nodePort)
		}
	} else {
		// Validate should have validated that nodePort == 0
	}

	// The comparison loops are O(N^2), but we don't expect N to be huge
	// (there's a hard-limit at 2^16, because they're ports; and even 4 ports would be a lot)
	for _, oldNodePort := range oldNodePorts {
		if contains(newNodePorts, oldNodePort) {
			continue
		}
		nodePortOp.ReleaseDeferred(oldNodePort)
	}

	// Remove any LoadBalancerStatus now if Type != LoadBalancer;
	// although loadbalancer delete is actually asynchronous, we don't need to expose the user to that complexity.
	if service.Spec.Type != api.ServiceTypeLoadBalancer {
		service.Status.LoadBalancer = api.LoadBalancerStatus{}
	}

	out, err := rs.registry.UpdateService(ctx, service)

	if err == nil {
		el := nodePortOp.Commit()
		if el != nil {
			// problems should be fixed by an eventual reconciliation / restart
			glog.Errorf("error(s) committing NodePorts changes: %v", el)
		}
	}

	return out, false, err
}
// TODO: add other common fields that require global validation.
func validateCommonFields(obj, old runtime.Object) (field.ErrorList, error) {
	allErrs := field.ErrorList{}
	objectMeta, err := api.ObjectMetaFor(obj)
	if err != nil {
		return nil, fmt.Errorf("failed to get new object metadata: %v", err)
	}
	oldObjectMeta, err := api.ObjectMetaFor(old)
	if err != nil {
		return nil, fmt.Errorf("failed to get old object metadata: %v", err)
	}
	allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(objectMeta, oldObjectMeta, field.NewPath("metadata"))...)

	return allErrs, nil
}
func TestCompatibility_v1_PodSecurityContext(t *testing.T) {
	cases := []struct {
		name         string
		input        string
		expectedKeys map[string]string
		absentKeys   []string
	}{
		{
			name: "hostNetwork = true",
			input: `
{
	"kind":"Pod",
	"apiVersion":"v1",
	"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
	"spec": {
		"hostNetwork": true,
		"containers":[{
			"name":"a",
			"image":"my-container-image"
		}]
	}
}
`,
			expectedKeys: map[string]string{
				"spec.hostNetwork": "true",
			},
		},
		{
			name: "hostNetwork = false",
			input: `
{
	"kind":"Pod",
	"apiVersion":"v1",
	"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
	"spec": {
		"hostNetwork": false,
		"containers":[{
			"name":"a",
			"image":"my-container-image"
		}]
	}
}
`,
			absentKeys: []string{
				"spec.hostNetwork",
			},
		},
		{
			name: "hostIPC = true",
			input: `
{
	"kind":"Pod",
	"apiVersion":"v1",
	"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
	"spec": {
		"hostIPC": true,
		"containers":[{
			"name":"a",
			"image":"my-container-image"
		}]
	}
}
`,
			expectedKeys: map[string]string{
				"spec.hostIPC": "true",
			},
		},
		{
			name: "hostIPC = false",
			input: `
{
	"kind":"Pod",
	"apiVersion":"v1",
	"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
	"spec": {
		"hostIPC": false,
		"containers":[{
			"name":"a",
			"image":"my-container-image"
		}]
	}
}
`,
			absentKeys: []string{
				"spec.hostIPC",
			},
		},
		{
			name: "hostPID = true",
			input: `
{
	"kind":"Pod",
	"apiVersion":"v1",
	"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
	"spec": {
		"hostPID": true,
		"containers":[{
			"name":"a",
			"image":"my-container-image"
		}]
	}
}
`,
			expectedKeys: map[string]string{
				"spec.hostPID": "true",
			},
		},
		{
			name: "hostPID = false",
			input: `
{
	"kind":"Pod",
	"apiVersion":"v1",
	"metadata":{"name":"my-pod-name", "namespace":"my-pod-namespace"},
	"spec": {
		"hostPID": false,
		"containers":[{
			"name":"a",
			"image":"my-container-image"
		}]
	}
}
`,
			absentKeys: []string{
				"spec.hostPID",
			},
		},
		{
			name: "resetting defaults for pre-v1.1 mirror pods",
			input: `
{
	"kind":"Pod",
	"apiVersion":"v1",
	"metadata":{
		"name":"my-pod-name",
		"namespace":"my-pod-namespace",
		"annotations": {
			"kubernetes.io/config.mirror": "mirror"
		}
	},
	"spec": {
		"containers":[{
			"name":"a",
			"image":"my-container-image",
			"resources": {
				"limits": {
					"cpu": "100m"
				}
			}
		}]
	}
}
`,
			absentKeys: []string{
				"spec.terminationGracePeriodSeconds",
				"spec.containers[0].resources.requests",
			},
		},
		{
			name: "preserving defaults for v1.1+ mirror pods",
			input: `
{
	"kind":"Pod",
	"apiVersion":"v1",
	"metadata":{
		"name":"my-pod-name",
		"namespace":"my-pod-namespace",
		"annotations": {
			"kubernetes.io/config.mirror": "cbe924f710c7e26f7693d6a341bcfad0"
		}
	},
	"spec": {
		"containers":[{
			"name":"a",
			"image":"my-container-image",
			"resources": {
				"limits": {
					"cpu": "100m"
				}
			}
		}]
	}
}
`,
			expectedKeys: map[string]string{
				"spec.terminationGracePeriodSeconds":     "30",
				"spec.containers[0].resources.requests":  "map[cpu:100m]",
			},
		},
	}

	validator := func(obj runtime.Object) field.ErrorList {
		return validation.ValidatePodSpec(&(obj.(*api.Pod).Spec), field.NewPath("spec"))
	}

	for _, tc := range cases {
		t.Logf("Testing 1.0.0 backward compatibility for %v", tc.name)
		compat.TestCompatibility(t, v1.SchemeGroupVersion, []byte(tc.input), validator, tc.expectedKeys, tc.absentKeys)
	}
}
func ValidateLocalSubjectAccessReview(sar *authorizationapi.LocalSubjectAccessReview) field.ErrorList {
	allErrs := ValidateSubjectAccessReviewSpec(sar.Spec, field.NewPath("spec"))
	return allErrs
}
// ValidatePetSet validates a PetSet.
func ValidatePetSet(petSet *apps.PetSet) field.ErrorList {
	allErrs := apivalidation.ValidateObjectMeta(&petSet.ObjectMeta, true, ValidatePetSetName, field.NewPath("metadata"))
	allErrs = append(allErrs, ValidatePetSetSpec(&petSet.Spec, field.NewPath("spec"))...)
	return allErrs
}
func TestNewInvalid(t *testing.T) {
	testCases := []struct {
		Err     *field.Error
		Details *unversioned.StatusDetails
	}{
		{
			field.Duplicate(field.NewPath("field[0].name"), "bar"),
			&unversioned.StatusDetails{
				Kind: "Kind",
				Name: "name",
				Causes: []unversioned.StatusCause{{
					Type:  unversioned.CauseTypeFieldValueDuplicate,
					Field: "field[0].name",
				}},
			},
		},
		{
			field.Invalid(field.NewPath("field[0].name"), "bar", "detail"),
			&unversioned.StatusDetails{
				Kind: "Kind",
				Name: "name",
				Causes: []unversioned.StatusCause{{
					Type:  unversioned.CauseTypeFieldValueInvalid,
					Field: "field[0].name",
				}},
			},
		},
		{
			field.NotFound(field.NewPath("field[0].name"), "bar"),
			&unversioned.StatusDetails{
				Kind: "Kind",
				Name: "name",
				Causes: []unversioned.StatusCause{{
					Type:  unversioned.CauseTypeFieldValueNotFound,
					Field: "field[0].name",
				}},
			},
		},
		{
			field.NotSupported(field.NewPath("field[0].name"), "bar", nil),
			&unversioned.StatusDetails{
				Kind: "Kind",
				Name: "name",
				Causes: []unversioned.StatusCause{{
					Type:  unversioned.CauseTypeFieldValueNotSupported,
					Field: "field[0].name",
				}},
			},
		},
		{
			field.Required(field.NewPath("field[0].name"), ""),
			&unversioned.StatusDetails{
				Kind: "Kind",
				Name: "name",
				Causes: []unversioned.StatusCause{{
					Type:  unversioned.CauseTypeFieldValueRequired,
					Field: "field[0].name",
				}},
			},
		},
	}
	for i, testCase := range testCases {
		vErr, expected := testCase.Err, testCase.Details
		expected.Causes[0].Message = vErr.ErrorBody()
		err := NewInvalid(api.Kind("Kind"), "name", field.ErrorList{vErr})
		status := err.ErrStatus
		if status.Code != 422 || status.Reason != unversioned.StatusReasonInvalid {
			t.Errorf("%d: unexpected status: %#v", i, status)
		}
		if !reflect.DeepEqual(expected, status.Details) {
			t.Errorf("%d: expected %#v, got %#v", i, expected, status.Details)
		}
	}
}
// ValidatePetSetStatusUpdate tests if required fields in the PetSet are set.
func ValidatePetSetStatusUpdate(petSet, oldPetSet *apps.PetSet) field.ErrorList {
	allErrs := field.ErrorList{}
	allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&petSet.ObjectMeta, &oldPetSet.ObjectMeta, field.NewPath("metadata"))...)
	// TODO: Validate status.
	return allErrs
}
func ValidateHorizontalPodAutoscalerUpdate(newAutoscaler, oldAutoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList {
	allErrs := apivalidation.ValidateObjectMetaUpdate(&newAutoscaler.ObjectMeta, &oldAutoscaler.ObjectMeta, field.NewPath("metadata"))
	allErrs = append(allErrs, validateHorizontalPodAutoscalerSpec(newAutoscaler.Spec, field.NewPath("spec"))...)
	return allErrs
}
func ValidateHorizontalPodAutoscalerStatusUpdate(newAutoscaler, oldAutoscaler *autoscaling.HorizontalPodAutoscaler) field.ErrorList {
	allErrs := apivalidation.ValidateObjectMetaUpdate(&newAutoscaler.ObjectMeta, &oldAutoscaler.ObjectMeta, field.NewPath("metadata"))
	status := newAutoscaler.Status
	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentReplicas), field.NewPath("status", "currentReplicas"))...)
	allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredReplicas), field.NewPath("status", "desiredReplicas"))...)
	return allErrs
}
func TestCheckInvalidErr(t *testing.T) {
	tests := []struct {
		err      error
		expected string
	}{
		{
			errors.NewInvalid(api.Kind("Invalid1"), "invalidation", field.ErrorList{field.Invalid(field.NewPath("field"), "single", "details")}),
			`Error from server: Invalid1 "invalidation" is invalid: field: Invalid value: "single": details`,
		},
		{
			errors.NewInvalid(api.Kind("Invalid2"), "invalidation", field.ErrorList{field.Invalid(field.NewPath("field1"), "multi1", "details"), field.Invalid(field.NewPath("field2"), "multi2", "details")}),
			`Error from server: Invalid2 "invalidation" is invalid: [field1: Invalid value: "multi1": details, field2: Invalid value: "multi2": details]`,
		},
		{
			errors.NewInvalid(api.Kind("Invalid3"), "invalidation", field.ErrorList{}),
			`Error from server: Invalid3 "invalidation" is invalid: <nil>`,
		},
	}

	var errReturned string
	errHandle := func(err string) {
		errReturned = err
	}

	for _, test := range tests {
		checkErr("", test.err, errHandle)

		if errReturned != test.expected {
			t.Fatalf("Got: %s, expected: %s", errReturned, test.expected)
		}
	}
}