Example #1
func (e *exists) Admit(a admission.Attributes) (err error) {
	defaultVersion, kind, err := latest.RESTMapper.VersionAndKindForResource(a.GetResource())
	if err != nil {
		return err
	}
	mapping, err := latest.RESTMapper.RESTMapping(kind, defaultVersion)
	if err != nil {
		return err
	}
	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
		return nil
	}
	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
		Status: api.NamespaceStatus{},
	}
	_, exists, err := e.store.Get(namespace)
	if err != nil {
		return err
	}
	if exists {
		return nil
	}
	obj := a.GetObject()
	name := "Unknown"
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
	}
	return apierrors.NewForbidden(kind, name, fmt.Errorf("Namespace %s does not exist", a.GetNamespace()))
}
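
All of these examples follow the same contract: Admit returns nil to let the request through and an error (usually a Forbidden error) to reject it, and plugins are run as a chain. The following is a minimal, self-contained sketch of that contract; the Attributes, Interface, and chain types here are simplified stand-ins, not the real admission package.

package main

import "fmt"

// Attributes is a stand-in for the admission.Attributes interface used above.
type Attributes struct {
	Resource  string
	Namespace string
}

// Interface is the minimal Admit contract shared by all of the examples:
// return nil to admit the request, return an error to reject it.
type Interface interface {
	Admit(a Attributes) error
}

// chain runs every plugin in order and stops at the first rejection.
type chain []Interface

func (c chain) Admit(a Attributes) error {
	for _, plugin := range c {
		if err := plugin.Admit(a); err != nil {
			return err
		}
	}
	return nil
}

// namespaceExists mimics Example #1: reject namespaced requests whose
// namespace is not in a known set.
type namespaceExists struct {
	known map[string]bool
}

func (e *namespaceExists) Admit(a Attributes) error {
	if a.Namespace == "" {
		return nil // cluster-scoped requests are not checked
	}
	if !e.known[a.Namespace] {
		return fmt.Errorf("namespace %s does not exist", a.Namespace)
	}
	return nil
}

func main() {
	admit := chain{&namespaceExists{known: map[string]bool{"default": true}}}
	fmt.Println(admit.Admit(Attributes{Resource: "pods", Namespace: "default"})) // <nil>
	fmt.Println(admit.Admit(Attributes{Resource: "pods", Namespace: "missing"})) // error
}
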
Example #2
// Admit determines if the pod should be admitted based on the requested security context
// and the available SCCs.
//
// 1.  Find SCCs for the user.
// 2.  Find SCCs for the SA.  If there is an error retrieving SA SCCs it is not fatal.
// 3.  Remove duplicates between the user/SA SCCs.
// 4.  Create the providers, includes setting pre-allocated values if necessary.
// 5.  Try to generate and validate an SCC with providers.  If we find one then admit the pod
//     with the validated SCC.  If we don't find any reject the pod and give all errors from the
//     failed attempts.
func (c *constraint) Admit(a kadmission.Attributes) error {
	if a.GetResource() != string(kapi.ResourcePods) {
		return nil
	}

	pod, ok := a.GetObject().(*kapi.Pod)
	// if we can't convert then we don't handle this object so just return
	if !ok {
		return nil
	}

	// get all constraints that are usable by the user
	glog.V(4).Infof("getting security context constraints for pod %s (generate: %s) in namespace %s with user info %v", pod.Name, pod.GenerateName, a.GetNamespace(), a.GetUserInfo())
	matchedConstraints, err := getMatchingSecurityContextConstraints(c.store, a.GetUserInfo())
	if err != nil {
		return kadmission.NewForbidden(a, err)
	}

	// get all constraints that are usable by the SA
	if len(pod.Spec.ServiceAccountName) > 0 {
		userInfo := serviceaccount.UserInfo(a.GetNamespace(), pod.Spec.ServiceAccountName, "")
		glog.V(4).Infof("getting security context constraints for pod %s (generate: %s) with service account info %v", pod.Name, pod.GenerateName, userInfo)
		saConstraints, err := getMatchingSecurityContextConstraints(c.store, userInfo)
		if err != nil {
			return kadmission.NewForbidden(a, err)
		}
		matchedConstraints = append(matchedConstraints, saConstraints...)
	}

	// remove duplicate constraints and sort
	matchedConstraints = deduplicateSecurityContextConstraints(matchedConstraints)
	sort.Sort(ByRestrictions(matchedConstraints))
	providers, errs := c.createProvidersFromConstraints(a.GetNamespace(), matchedConstraints)
	logProviders(pod, providers, errs)

	if len(providers) == 0 {
		return kadmission.NewForbidden(a, fmt.Errorf("no providers available to validated pod request"))
	}

	// all containers in a single pod must validate under a single provider or we will reject the request
	validationErrs := fielderrors.ValidationErrorList{}
	for _, provider := range providers {
		if errs := assignSecurityContext(provider, pod); len(errs) > 0 {
			validationErrs = append(validationErrs, errs.Prefix(fmt.Sprintf("provider %s: ", provider.GetSCCName()))...)
			continue
		}

		// the entire pod validated, annotate and accept the pod
		glog.V(4).Infof("pod %s (generate: %s) validated against provider %s", pod.Name, pod.GenerateName, provider.GetSCCName())
		if pod.ObjectMeta.Annotations == nil {
			pod.ObjectMeta.Annotations = map[string]string{}
		}
		pod.ObjectMeta.Annotations[allocator.ValidatedSCCAnnotation] = provider.GetSCCName()
		return nil
	}

	// we didn't validate against any security context constraint provider, reject the pod and give the errors for each attempt
	glog.V(4).Infof("unable to validate pod %s (generate: %s) against any security context constraint: %v", pod.Name, pod.GenerateName, validationErrs)
	return kadmission.NewForbidden(a, fmt.Errorf("unable to validate against any security context constraint: %v", validationErrs))
}
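
The loop at the end of Example #2 implements a "first match wins" pattern described in its step 5: try each candidate in priority order, remember why each one failed, and only reject once every candidate has been exhausted. Below is a self-contained sketch of just that pattern; the validator type is a hypothetical stand-in for the SCC providers.

package main

import (
	"fmt"
	"strings"
)

// validator stands in for an SCC provider: it either accepts a pod or
// explains why it cannot.
type validator struct {
	name  string
	check func(privileged bool) error
}

// admit tries each validator in order; the first one that accepts wins.
// If none accept, all collected failures are returned so the caller can see
// every failed attempt, mirroring the error aggregation in Example #2.
func admit(privileged bool, validators []validator) (string, error) {
	var failures []string
	for _, v := range validators {
		if err := v.check(privileged); err != nil {
			failures = append(failures, fmt.Sprintf("provider %s: %v", v.name, err))
			continue
		}
		return v.name, nil // validated; report which provider accepted
	}
	return "", fmt.Errorf("unable to validate against any provider: %s", strings.Join(failures, "; "))
}

func main() {
	// Most restrictive first, matching the sort in Example #2.
	validators := []validator{
		{name: "restricted", check: func(priv bool) error {
			if priv {
				return fmt.Errorf("privileged containers are not allowed")
			}
			return nil
		}},
		{name: "privileged", check: func(priv bool) error { return nil }},
	}
	name, err := admit(true, validators)
	fmt.Println(name, err) // "privileged" accepts the privileged pod
}
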
Example #3
func (resourceDefaults) Admit(a admission.Attributes) (err error) {
	// ignore deletes, only process create and update
	if a.GetOperation() == "DELETE" {
		return nil
	}

	// we only care about pods
	if a.GetResource() != "pods" {
		return nil
	}

	// get the pod, so we can validate each of the containers within have default mem / cpu constraints
	obj := a.GetObject()
	pod := obj.(*api.Pod)
	for index := range pod.Spec.Containers {
		if pod.Spec.Containers[index].Resources.Limits == nil {
			pod.Spec.Containers[index].Resources.Limits = api.ResourceList{}
		}
		if pod.Spec.Containers[index].Resources.Limits.Memory().Value() == 0 {
			pod.Spec.Containers[index].Resources.Limits[api.ResourceMemory] = resource.MustParse(defaultMemory)
		}
		if pod.Spec.Containers[index].Resources.Limits.Cpu().Value() == 0 {
			pod.Spec.Containers[index].Resources.Limits[api.ResourceCPU] = resource.MustParse(defaultCPU)
		}
	}
	return nil
}
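
Example #3 is a pure defaulting plugin: it mutates the incoming object instead of rejecting it, filling in a memory and CPU limit for every container that does not set its own. The same shape reduced to plain maps so it runs stand-alone; the resource names and default values here are illustrative only, not the real ResourceList types.

package main

import "fmt"

// container carries only the field the defaulting logic touches: its limits.
type container struct {
	Name   string
	Limits map[string]string
}

// defaultLimits fills in any missing limit and leaves values the user set
// alone, which is the behavior of Example #3's per-container loop.
func defaultLimits(containers []container, defaults map[string]string) {
	for i := range containers {
		if containers[i].Limits == nil {
			containers[i].Limits = map[string]string{}
		}
		for resource, value := range defaults {
			if _, set := containers[i].Limits[resource]; !set {
				containers[i].Limits[resource] = value
			}
		}
	}
}

func main() {
	containers := []container{
		{Name: "app"}, // gets both defaults
		{Name: "sidecar", Limits: map[string]string{"cpu": "250m"}}, // keeps its cpu limit
	}
	defaultLimits(containers, map[string]string{"memory": "512Mi", "cpu": "100m"})
	fmt.Println(containers)
}
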
Example #4
// Admit admits resources into cluster that do not violate any defined LimitRange in the namespace
func (l *limitRanger) Admit(a admission.Attributes) (err error) {
	obj := a.GetObject()
	resource := a.GetResource()
	name := "Unknown"
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
		if len(name) == 0 {
			name, _ = meta.NewAccessor().GenerateName(obj)
		}
	}

	key := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}
	items, err := l.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Unable to %s %s at this time because there was an error enforcing limit ranges", a.GetOperation(), resource))
	}
	if len(items) == 0 {
		return nil
	}

	// ensure it meets each prescribed min/max
	for i := range items {
		limitRange := items[i].(*api.LimitRange)
		err = l.limitFunc(limitRange, a.GetResource(), a.GetObject())
		if err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	return nil
}
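
Example #4 delegates the actual enforcement to l.limitFunc; Admit itself only looks up every LimitRange in the request's namespace and rejects on the first violation. A minimal sketch of what such a min/max check might look like; the limit type and bounds below are assumptions for illustration, not the real LimitRange schema.

package main

import "fmt"

// limit is a simplified stand-in for one LimitRange item: per-resource bounds.
type limit struct {
	Resource string
	Min, Max int64
}

// checkLimits rejects a request whose usage falls outside any applicable
// bound, mirroring the "ensure it meets each prescribed min/max" loop in
// Example #4.
func checkLimits(usage map[string]int64, limits []limit) error {
	for _, l := range limits {
		value, ok := usage[l.Resource]
		if !ok {
			continue // the request does not use this resource
		}
		if value < l.Min {
			return fmt.Errorf("%s usage %d is below the minimum %d", l.Resource, value, l.Min)
		}
		if value > l.Max {
			return fmt.Errorf("%s usage %d exceeds the maximum %d", l.Resource, value, l.Max)
		}
	}
	return nil
}

func main() {
	limits := []limit{{Resource: "memory", Min: 64, Max: 1024}}
	fmt.Println(checkLimits(map[string]int64{"memory": 512}, limits))  // <nil>
	fmt.Println(checkLimits(map[string]int64{"memory": 4096}, limits)) // error
}
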
Example #5
func (d *denyExecOnPrivileged) Admit(a admission.Attributes) (err error) {
	connectRequest, ok := a.GetObject().(*rest.ConnectRequest)
	if !ok {
		return errors.NewBadRequest("a connect request was received, but could not convert the request object.")
	}
	// Only handle exec requests on pods
	if connectRequest.ResourcePath != "pods/exec" {
		return nil
	}
	pod, err := d.client.Pods(a.GetNamespace()).Get(connectRequest.Name)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if isPrivileged(pod) {
		return admission.NewForbidden(a, fmt.Errorf("Cannot exec into a privileged container"))
	}
	return nil
}
Example #6
// Admit will deny any SecurityContext that defines options that were not previously available in the api.Container
// struct (Capabilities and Privileged)
func (p *plugin) Admit(a admission.Attributes) (err error) {
	if a.GetResource() != string(api.ResourcePods) {
		return nil
	}

	pod, ok := a.GetObject().(*api.Pod)
	if !ok {
		return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted")
	}
	for _, v := range pod.Spec.Containers {
		if v.SecurityContext != nil {
			if v.SecurityContext.SELinuxOptions != nil {
				return apierrors.NewForbidden(a.GetResource(), pod.Name, fmt.Errorf("SecurityContext.SELinuxOptions is forbidden"))
			}
			if v.SecurityContext.RunAsUser != nil {
				return apierrors.NewForbidden(a.GetResource(), pod.Name, fmt.Errorf("SecurityContext.RunAsUser is forbidden"))
			}
		}
	}
	return nil
}
Example #7
// Admit enforces that a pod's node label selector and its project's node label selector match at least one node in the cluster.
func (p *podNodeEnvironment) Admit(a admission.Attributes) (err error) {
	// ignore anything except create or update of pods
	if !(a.GetOperation() == admission.Create || a.GetOperation() == admission.Update) {
		return nil
	}
	resource := a.GetResource()
	if resource != "pods" {
		return nil
	}

	obj := a.GetObject()
	pod, ok := obj.(*kapi.Pod)
	if !ok {
		return nil
	}

	name := pod.Name

	projects, err := projectcache.GetProjectCache()
	if err != nil {
		return err
	}
	namespace, err := projects.GetNamespaceObject(a.GetNamespace())
	if err != nil {
		return apierrors.NewForbidden(resource, name, err)
	}
	projectNodeSelector, err := projects.GetNodeSelectorMap(namespace)
	if err != nil {
		return err
	}

	if labelselector.Conflicts(projectNodeSelector, pod.Spec.NodeSelector) {
		return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its project node label selector"))
	}

	// modify pod node selector = project node selector + current pod node selector
	pod.Spec.NodeSelector = labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector)

	return nil
}
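
The two label-selector helpers used in Example #7 can be pictured as plain map operations: two selectors conflict when they require different values for the same key, and merging them is a key union. The sketch below is only an approximation of labelselector.Conflicts and labelselector.Merge, not their actual implementation.

package main

import "fmt"

// conflicts reports whether the two selectors demand different values for the
// same label key, which is the condition Example #7 rejects.
func conflicts(a, b map[string]string) bool {
	for key, value := range a {
		if other, ok := b[key]; ok && other != value {
			return true
		}
	}
	return false
}

// merge returns the union of both selectors; because conflicts() is checked
// first, any overlapping keys are guaranteed to agree.
func merge(a, b map[string]string) map[string]string {
	out := map[string]string{}
	for key, value := range a {
		out[key] = value
	}
	for key, value := range b {
		out[key] = value
	}
	return out
}

func main() {
	project := map[string]string{"region": "east"}
	pod := map[string]string{"disk": "ssd"}
	if !conflicts(project, pod) {
		fmt.Println(merge(project, pod)) // map[disk:ssd region:east]
	}
}
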
Example #8
// Admit checks that the requester is authorized to use the build strategy of the incoming build or build config.
func (a *buildByStrategy) Admit(attr admission.Attributes) error {
	if resource := attr.GetResource(); resource != buildsResource && resource != buildConfigsResource {
		return nil
	}
	var err error
	switch obj := attr.GetObject().(type) {
	case *buildapi.Build:
		err = a.checkBuildAuthorization(obj, attr)
	case *buildapi.BuildConfig:
		err = a.checkBuildConfigAuthorization(obj, attr)
	}

	return err
}
Example #9
// Admit enforces that a namespace must exist in order to associate content with it.
// Admit enforces that a namespace that is terminating cannot accept new content being associated with it.
func (e *lifecycle) Admit(a admission.Attributes) (err error) {
	if len(a.GetNamespace()) == 0 {
		return nil
	}
	defaultVersion, kind, err := latest.RESTMapper.VersionAndKindForResource(a.GetResource())
	if err != nil {
		glog.V(4).Infof("Ignoring life-cycle enforcement for resource %v; no associated default version and kind could be found.", a.GetResource())
		return nil
	}
	mapping, err := latest.RESTMapper.RESTMapping(kind, defaultVersion)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
		return nil
	}

	// we want to allow someone to delete something in case it was phantom created somehow
	if a.GetOperation() == "DELETE" {
		return nil
	}

	name := "Unknown"
	obj := a.GetObject()
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
	}

	projects, err := cache.GetProjectCache()
	if err != nil {
		return admission.NewForbidden(a, err)
	}

	namespace, err := projects.GetNamespaceObject(a.GetNamespace())
	if err != nil {
		return admission.NewForbidden(a, err)
	}

	if a.GetOperation() != "CREATE" {
		return nil
	}

	if namespace.Status.Phase == kapi.NamespaceTerminating && !e.creatableResources.Has(strings.ToLower(a.GetResource())) {
		return apierrors.NewForbidden(kind, name, fmt.Errorf("Namespace %s is terminating", a.GetNamespace()))
	}

	// in case of concurrency issues, we will retry this logic
	numRetries := 10
	interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond
	for retry := 1; retry <= numRetries; retry++ {

		// associate this namespace with openshift
		_, err = projectutil.Associate(e.client, namespace)
		if err == nil {
			break
		}

		// we have exhausted all reasonable efforts to retry so give up now
		if retry == numRetries {
			return admission.NewForbidden(a, err)
		}

		// get the latest namespace for the next pass in case of resource version updates
		time.Sleep(interval)

		// it's possible the namespace actually was deleted, so just forbid if this occurs
		namespace, err = e.client.Namespaces().Get(a.GetNamespace())
		if err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	return nil
}
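
The retry loop at the end of Example #9 is worth isolating: it picks a single randomized interval between 10ms and 100ms, retries a fixed number of times, and refreshes its input between attempts. Here is a self-contained sketch of that pattern with a generic operation; the retry count and interval bounds are taken from the example, everything else is illustrative.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retry runs op up to attempts times, sleeping a randomized interval between
// tries, and returns the last error if every attempt fails. This mirrors the
// association loop in Example #9.
func retry(attempts int, op func() error) error {
	interval := time.Duration(rand.Int63n(90)+10) * time.Millisecond
	var err error
	for try := 1; try <= attempts; try++ {
		if err = op(); err == nil {
			return nil
		}
		if try == attempts {
			break // exhausted all reasonable efforts, give up with the last error
		}
		time.Sleep(interval)
	}
	return err
}

func main() {
	calls := 0
	err := retry(10, func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("transient conflict on attempt %d", calls)
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
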
Example #10
func (s *serviceAccount) Admit(a admission.Attributes) (err error) {
	if a.GetResource() != string(api.ResourcePods) {
		return nil
	}
	obj := a.GetObject()
	if obj == nil {
		return nil
	}
	pod, ok := obj.(*api.Pod)
	if !ok {
		return nil
	}

	// Don't modify the spec of mirror pods.
	// That makes the kubelet very angry and confused, and it immediately deletes the pod (because the spec doesn't match)
	// That said, don't allow mirror pods to reference ServiceAccounts or SecretVolumeSources either
	if _, isMirrorPod := pod.Annotations[kubelet.ConfigMirrorAnnotationKey]; isMirrorPod {
		if len(pod.Spec.ServiceAccountName) != 0 {
			return admission.NewForbidden(a, fmt.Errorf("A mirror pod may not reference service accounts"))
		}
		for _, volume := range pod.Spec.Volumes {
			if volume.VolumeSource.Secret != nil {
				return admission.NewForbidden(a, fmt.Errorf("A mirror pod may not reference secrets"))
			}
		}
		return nil
	}

	// Set the default service account if needed
	if len(pod.Spec.ServiceAccountName) == 0 {
		pod.Spec.ServiceAccountName = DefaultServiceAccountName
	}

	// Ensure the referenced service account exists
	serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Error looking up service account %s/%s: %v", a.GetNamespace(), pod.Spec.ServiceAccountName, err))
	}
	if serviceAccount == nil {
		// TODO: convert to a ServerTimeout error (or other error that sends a Retry-After header)
		return admission.NewForbidden(a, fmt.Errorf("service account %s/%s was not found, retry after the service account is created", a.GetNamespace(), pod.Spec.ServiceAccountName))
	}

	if s.LimitSecretReferences {
		if err := s.limitSecretReferences(serviceAccount, pod); err != nil {
			return admission.NewForbidden(a, err)
		}
	}

	if s.MountServiceAccountToken {
		if err := s.mountServiceAccountToken(serviceAccount, pod); err != nil {
			return admission.NewForbidden(a, err)
		}
	}

	if len(pod.Spec.ImagePullSecrets) == 0 {
		pod.Spec.ImagePullSecrets = make([]api.LocalObjectReference, len(serviceAccount.ImagePullSecrets))
		copy(pod.Spec.ImagePullSecrets, serviceAccount.ImagePullSecrets)
	}

	return nil
}
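
The last block of Example #10 copies the service account's image pull secrets into the pod only when the pod lists none of its own, and it copies rather than aliases the slice, presumably so later mutations of the pod cannot reach back into the cached service account object. A small sketch of that defaulting-by-copy step; the types here are simplified stand-ins.

package main

import "fmt"

type secretRef struct{ Name string }

// defaultImagePullSecrets fills in the pod's pull secrets from the service
// account only when the pod specifies none, and copies the slice so the pod
// and the service account do not share backing storage.
func defaultImagePullSecrets(podSecrets, saSecrets []secretRef) []secretRef {
	if len(podSecrets) > 0 {
		return podSecrets // the pod's own secrets take precedence
	}
	copied := make([]secretRef, len(saSecrets))
	copy(copied, saSecrets)
	return copied
}

func main() {
	sa := []secretRef{{Name: "registry-pull"}}
	fmt.Println(defaultImagePullSecrets(nil, sa))                         // [{registry-pull}]
	fmt.Println(defaultImagePullSecrets([]secretRef{{Name: "mine"}}, sa)) // [{mine}]
}
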
Example #11
// IncrementUsage updates the supplied ResourceQuotaStatus object based on the incoming operation
// Return true if the usage must be recorded prior to admitting the new resource
// Return an error if the operation should not pass admission control
func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, client client.Interface) (bool, error) {
	dirty := false
	set := map[api.ResourceName]bool{}
	for k := range status.Hard {
		set[k] = true
	}
	obj := a.GetObject()
	// handle max counts for each kind of resource (pods, services, replicationControllers, etc.)
	if a.GetOperation() == admission.Create {
		resourceName := resourceToResourceName[a.GetResource()]
		hard, hardFound := status.Hard[resourceName]
		if hardFound {
			used, usedFound := status.Used[resourceName]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value() >= hard.Value() {
				return false, fmt.Errorf("Limited to %s %s", hard.String(), resourceName)
			} else {
				status.Used[resourceName] = *resource.NewQuantity(used.Value()+int64(1), resource.DecimalSI)
				dirty = true
			}
		}
	}
	// handle memory/cpu constraints, and any diff of usage based on memory/cpu on updates
	if a.GetResource() == "pods" && (set[api.ResourceMemory] || set[api.ResourceCPU]) {
		pod := obj.(*api.Pod)
		deltaCPU := resourcequotacontroller.PodCPU(pod)
		deltaMemory := resourcequotacontroller.PodMemory(pod)
		// if this is an update, we need to find the delta cpu/memory usage from previous state
		if a.GetOperation() == admission.Update {
			oldPod, err := client.Pods(a.GetNamespace()).Get(pod.Name)
			if err != nil {
				return false, err
			}
			oldCPU := resourcequotacontroller.PodCPU(oldPod)
			oldMemory := resourcequotacontroller.PodMemory(oldPod)
			deltaCPU = resource.NewMilliQuantity(deltaCPU.MilliValue()-oldCPU.MilliValue(), resource.DecimalSI)
			deltaMemory = resource.NewQuantity(deltaMemory.Value()-oldMemory.Value(), resource.DecimalSI)
		}

		hardMem, hardMemFound := status.Hard[api.ResourceMemory]
		if hardMemFound {
			if set[api.ResourceMemory] && resourcequotacontroller.IsPodMemoryUnbounded(pod) {
				return false, fmt.Errorf("Limited to %s memory, but pod has no specified memory limit", hardMem.String())
			}
			used, usedFound := status.Used[api.ResourceMemory]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value()+deltaMemory.Value() > hardMem.Value() {
				return false, fmt.Errorf("Limited to %s memory", hardMem.String())
			} else {
				status.Used[api.ResourceMemory] = *resource.NewQuantity(used.Value()+deltaMemory.Value(), resource.DecimalSI)
				dirty = true
			}
		}
		hardCPU, hardCPUFound := status.Hard[api.ResourceCPU]
		if hardCPUFound {
			if set[api.ResourceCPU] && resourcequotacontroller.IsPodCPUUnbounded(pod) {
				return false, fmt.Errorf("Limited to %s CPU, but pod has no specified cpu limit", hardCPU.String())
			}
			used, usedFound := status.Used[api.ResourceCPU]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.MilliValue()+deltaCPU.MilliValue() > hardCPU.MilliValue() {
				return false, fmt.Errorf("Limited to %s CPU", hardCPU.String())
			} else {
				status.Used[api.ResourceCPU] = *resource.NewMilliQuantity(used.MilliValue()+deltaCPU.MilliValue(), resource.DecimalSI)
				dirty = true
			}
		}
	}
	return dirty, nil
}
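
The core arithmetic in Example #11 is delta accounting: on create, the pod's full CPU/memory usage counts against the quota; on update, only the difference from the previous version counts, which can be negative. A stripped-down sketch of that bookkeeping with plain integers; the quota semantics here are an approximation for illustration, not the actual ResourceQuota rules.

package main

import "fmt"

// quota tracks a single resource with a hard cap and a running usage total.
type quota struct {
	Hard, Used int64
}

// charge applies a delta against the quota, rejecting the request if it would
// push usage over the hard limit; this mirrors the used+delta > hard checks
// in Example #11.
func (q *quota) charge(delta int64) error {
	if q.Used+delta > q.Hard {
		return fmt.Errorf("limited to %d, already using %d, requested %d more", q.Hard, q.Used, delta)
	}
	q.Used += delta
	return nil
}

func main() {
	memory := &quota{Hard: 1024, Used: 512}

	// Create: the new pod's whole usage is the delta.
	fmt.Println(memory.charge(256), memory.Used) // <nil> 768

	// Update: only the difference between new and old usage is charged,
	// so shrinking a pod frees quota.
	oldUsage, newUsage := int64(256), int64(128)
	fmt.Println(memory.charge(newUsage-oldUsage), memory.Used) // <nil> 640

	// A request that would exceed the cap is rejected and usage is unchanged.
	fmt.Println(memory.charge(2048), memory.Used)
}
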