Code example #1
File: scc_exec.go Project: rrati/origin
func (d *sccExecRestrictions) Admit(a admission.Attributes) (err error) {
	if a.GetOperation() != admission.Connect {
		return nil
	}
	if a.GetResource() != kapi.Resource("pods") {
		return nil
	}
	if a.GetSubresource() != "attach" && a.GetSubresource() != "exec" {
		return nil
	}

	pod, err := d.client.Pods(a.GetNamespace()).Get(a.GetName())
	if err != nil {
		return admission.NewForbidden(a, err)
	}

	// create a synthetic admission attribute to check SCC admission status for this pod
	// clear the SA name, so that any permissions MUST be based on the user's power, not the SA's power.
	pod.Spec.ServiceAccountName = ""
	createAttributes := admission.NewAttributesRecord(pod, kapi.Kind("Pod"), a.GetNamespace(), a.GetName(), a.GetResource(), a.GetSubresource(), admission.Create, a.GetUserInfo())
	if err := d.constraintAdmission.Admit(createAttributes); err != nil {
		return admission.NewForbidden(a, err)
	}

	return nil
}
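
Note: nearly every snippet on this page gates on the same handful of admission.Attributes accessors. As rough orientation, here is a sketch of the subset these examples use, written against the Kubernetes 1.2-1.4 era API (the unversioned, runtime, and user packages are assumed imported). The exact signatures shifted between releases (for instance, GetResource moved from unversioned.GroupResource to unversioned.GroupVersionResource, which is why newer examples call GetResource().GroupResource()), so treat this as an approximation rather than the canonical definition.

// Approximate subset of the admission.Attributes interface used by the examples below.
type Attributes interface {
	GetName() string                               // object name from the request
	GetNamespace() string                          // request namespace; empty for cluster-scoped resources
	GetResource() unversioned.GroupVersionResource // older releases returned unversioned.GroupResource
	GetSubresource() string                        // e.g. "exec", "status"; empty for the main resource
	GetOperation() Operation                       // Create, Update, Delete, or Connect
	GetObject() runtime.Object                     // incoming object, if any
	GetOldObject() runtime.Object                  // existing object on updates
	GetKind() unversioned.GroupVersionKind         // kind of the incoming object
	GetUserInfo() user.Info                        // authenticated user making the request
}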
Code example #2
File: admission.go Project: rlugojr/kubernetes
// SupportsAttributes ignores all calls that do not deal with pod resources since that is
// all this supports now.  Also ignores any call that has a subresource defined.
func (d *DefaultLimitRangerActions) SupportsAttributes(a admission.Attributes) bool {
	if a.GetSubresource() != "" {
		return false
	}

	return a.GetKind().GroupKind() == api.Kind("Pod")
}
Code example #3
File: admission.go Project: Xmagicer/origin
// SupportsAttributes is a helper that returns true if the resource is supported by the plugin.
// Implements the LimitRangerActions interface.
func (a *imageLimitRangerPlugin) SupportsAttributes(attr kadmission.Attributes) bool {
	if attr.GetSubresource() != "" {
		return false
	}

	return attr.GetKind().GroupKind() == imageapi.Kind("ImageStreamMapping")
}
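
Examples #2 and #3 implement the same LimitRangerActions guard for different kinds. If you wanted to factor the pattern out, a helper along these lines would capture it (supportsKind is a hypothetical name, not from either project):

// Hypothetical helper expressing the shared SupportsAttributes guard:
// skip subresource requests, then match on the attributes' group/kind.
func supportsKind(attr admission.Attributes, gk unversioned.GroupKind) bool {
	if attr.GetSubresource() != "" {
		return false // status, scale, exec, etc. are never limit-ranged here
	}
	return attr.GetKind().GroupKind() == gk
}

Example #2 would then reduce to return supportsKind(a, api.Kind("Pod")), and example #3 to return supportsKind(attr, imageapi.Kind("ImageStreamMapping")).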
Code example #4
File: admission.go Project: rrati/origin
func (a *runOnceDuration) Admit(attributes admission.Attributes) error {
	switch {
	case a.config == nil,
		!a.config.Enabled,
		attributes.GetResource() != kapi.Resource("pods"),
		len(attributes.GetSubresource()) > 0:
		return nil
	}
	pod, ok := attributes.GetObject().(*kapi.Pod)
	if !ok {
		return admission.NewForbidden(attributes, fmt.Errorf("unexpected object: %#v", attributes.GetObject()))
	}

	// Only update pods with a restart policy of Never or OnFailure
	switch pod.Spec.RestartPolicy {
	case kapi.RestartPolicyNever,
		kapi.RestartPolicyOnFailure:
		// continue
	default:
		return nil
	}

	appliedProjectOverride, err := a.applyProjectAnnotationOverride(attributes.GetNamespace(), pod)
	if err != nil {
		return admission.NewForbidden(attributes, err)
	}

	if !appliedProjectOverride && a.config.ActiveDeadlineSecondsOverride != nil {
		pod.Spec.ActiveDeadlineSeconds = a.config.ActiveDeadlineSecondsOverride
	}
	return nil
}
Code example #5
File: gc_admission.go Project: humblec/kubernetes
func (a *gcPermissionsEnforcement) Admit(attributes admission.Attributes) (err error) {
	// if we aren't changing owner references, then the edit is always allowed
	if !isChangingOwnerReference(attributes.GetObject(), attributes.GetOldObject()) {
		return nil
	}

	deleteAttributes := authorizer.AttributesRecord{
		User:            attributes.GetUserInfo(),
		Verb:            "delete",
		Namespace:       attributes.GetNamespace(),
		APIGroup:        attributes.GetResource().Group,
		APIVersion:      attributes.GetResource().Version,
		Resource:        attributes.GetResource().Resource,
		Subresource:     attributes.GetSubresource(),
		Name:            attributes.GetName(),
		ResourceRequest: true,
		Path:            "",
	}
	allowed, reason, err := a.authorizer.Authorize(deleteAttributes)
	if allowed {
		return nil
	}

	return admission.NewForbidden(attributes, fmt.Errorf("cannot set an ownerRef on a resource you can't delete: %v, %v", reason, err))
}
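
The plugin above turns an admission request into a synthetic authorization check. For orientation, the authorizer interface it calls looked roughly like this in the same era (a sketch from memory; Attributes here is the authorizer package's own type, and later releases changed the return to a Decision type):

// Approximate pkg/auth/authorizer interface consumed by gcPermissionsEnforcement.
type Authorizer interface {
	Authorize(a Attributes) (authorized bool, reason string, err error)
}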
Code example #6
File: admission.go Project: ethernetdan/kubernetes
// Admit admits resources into the cluster that do not violate any defined LimitRange in the namespace
func (l *limitRanger) Admit(a admission.Attributes) (err error) {

	// Ignore all calls to subresources
	if a.GetSubresource() != "" {
		return nil
	}

	obj := a.GetObject()
	name := "Unknown"
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
		if len(name) == 0 {
			name, _ = meta.NewAccessor().GenerateName(obj)
		}
	}

	key := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}
	items, err := l.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Unable to %s %v at this time because there was an error enforcing limit ranges", a.GetOperation(), a.GetResource()))
	}

	// if there are no items held in our indexer, check our live-lookup LRU; if that misses, do the live lookup to prime it.
	if len(items) == 0 {
		lruItemObj, ok := l.liveLookupCache.Get(a.GetNamespace())
		if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
			liveList, err := l.client.Core().LimitRanges(a.GetNamespace()).List(api.ListOptions{})
			if err != nil {
				return admission.NewForbidden(a, err)
			}
			newEntry := liveLookupEntry{expiry: time.Now().Add(l.liveTTL)}
			for i := range liveList.Items {
				newEntry.items = append(newEntry.items, &liveList.Items[i])
			}
			l.liveLookupCache.Add(a.GetNamespace(), newEntry)
			lruItemObj = newEntry
		}
		lruEntry := lruItemObj.(liveLookupEntry)

		for i := range lruEntry.items {
			items = append(items, lruEntry.items[i])
		}

	}

	// ensure it meets each prescribed min/max
	for i := range items {
		limitRange := items[i].(*api.LimitRange)
		err = l.limitFunc(limitRange, a.GetResource().Resource, a.GetObject())
		if err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	return nil
}
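
The live-lookup block above pairs an LRU cache with a per-entry TTL so that a cold indexer does not trigger a List against the API server on every request. A stripped-down sketch of that arrangement, assuming the github.com/hashicorp/golang-lru package Kubernetes used for liveLookupCache (the lookup helper and the list closure are illustrative, not project code):

// Sketch: TTL'd live lookup in front of an LRU, keyed by namespace.
type cachedLimitRanges struct {
	expiry time.Time
	items  []*api.LimitRange
}

func lookup(cache *lru.Cache, ns string, ttl time.Duration, list func() ([]*api.LimitRange, error)) ([]*api.LimitRange, error) {
	if obj, ok := cache.Get(ns); ok {
		if entry := obj.(cachedLimitRanges); entry.expiry.After(time.Now()) {
			return entry.items, nil // fresh cache hit
		}
	}
	items, err := list() // miss or stale entry: do the live List to prime the cache
	if err != nil {
		return nil, err
	}
	cache.Add(ns, cachedLimitRanges{expiry: time.Now().Add(ttl), items: items})
	return items, nil
}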
Code example #7
File: scc_exec.go Project: Xmagicer/origin
func (d *sccExecRestrictions) Admit(a admission.Attributes) (err error) {
	if a.GetOperation() != admission.Connect {
		return nil
	}
	if a.GetResource().GroupResource() != kapi.Resource("pods") {
		return nil
	}
	if a.GetSubresource() != "attach" && a.GetSubresource() != "exec" {
		return nil
	}

	pod, err := d.client.Core().Pods(a.GetNamespace()).Get(a.GetName())
	if err != nil {
		return admission.NewForbidden(a, err)
	}

	// TODO, if we want to actually limit who can use which service account, then we'll need to add logic here to make sure that
	// we're allowed to use the SA the pod is using.  Otherwise, user-A creates pod and user-B (who can't use the SA) can exec into it.
	createAttributes := admission.NewAttributesRecord(pod, pod, kapi.Kind("Pod").WithVersion(""), a.GetNamespace(), a.GetName(), a.GetResource(), "", admission.Create, a.GetUserInfo())
	if err := d.constraintAdmission.Admit(createAttributes); err != nil {
		return admission.NewForbidden(a, err)
	}

	return nil
}
Code example #8
File: admission.go Project: humblec/kubernetes
// Admit will deny any pod that defines an AntiAffinity topology key other than unversioned.LabelHostname, i.e. "kubernetes.io/hostname",
// in requiredDuringSchedulingRequiredDuringExecution or requiredDuringSchedulingIgnoredDuringExecution.
func (p *plugin) Admit(attributes admission.Attributes) (err error) {
	// Ignore all calls to subresources or resources other than pods.
	if len(attributes.GetSubresource()) != 0 || attributes.GetResource().GroupResource() != api.Resource("pods") {
		return nil
	}
	pod, ok := attributes.GetObject().(*api.Pod)
	if !ok {
		return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted")
	}
	affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
	if err != nil {
		glog.V(5).Infof("Invalid Affinity detected, but we will leave handling of this to validation phase")
		return nil
	}
	if affinity != nil && affinity.PodAntiAffinity != nil {
		var podAntiAffinityTerms []api.PodAffinityTerm
		if len(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
			podAntiAffinityTerms = affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
		}
		// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
		//if len(affinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
		//        podAntiAffinityTerms = append(podAntiAffinityTerms, affinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
		//}
		for _, v := range podAntiAffinityTerms {
			if v.TopologyKey != unversioned.LabelHostname {
				return apierrors.NewForbidden(attributes.GetResource().GroupResource(), pod.Name, fmt.Errorf("affinity.PodAntiAffinity.RequiredDuringScheduling has TopologyKey %v but only key %v is allowed", v.TopologyKey, unversioned.LabelHostname))
			}
		}
	}
	return nil
}
Code example #9
File: admission.go Project: Xmagicer/origin
// Admit makes admission decisions while enforcing quota
func (q *quotaAdmission) Admit(a admission.Attributes) (err error) {
	// ignore all operations that correspond to sub-resource actions
	if a.GetSubresource() != "" {
		return nil
	}

	return q.evaluator.Evaluate(a)
}
Code example #10
File: admission.go Project: asiainfoLDP/datafactory
// TODO this will need to update when we have pod requests/limits
func (a *clusterResourceOverridePlugin) Admit(attr admission.Attributes) error {
	glog.V(6).Infof("%s admission controller is invoked", api.PluginName)
	if a.config == nil || attr.GetResource() != kapi.Resource("pods") || attr.GetSubresource() != "" {
		return nil // not applicable
	}
	pod, ok := attr.GetObject().(*kapi.Pod)
	if !ok {
		return admission.NewForbidden(attr, fmt.Errorf("unexpected object: %#v", attr.GetObject()))
	}
	glog.V(5).Infof("%s is looking at creating pod %s in project %s", api.PluginName, pod.Name, attr.GetNamespace())

	// allow annotations on project to override
	if ns, err := a.ProjectCache.GetNamespace(attr.GetNamespace()); err != nil {
		glog.Warningf("%s got an error retrieving namespace: %v", api.PluginName, err)
		return admission.NewForbidden(attr, err) // this should not happen though
	} else {
		projectEnabledPlugin, exists := ns.Annotations[clusterResourceOverrideAnnotation]
		if exists && projectEnabledPlugin != "true" {
			glog.V(5).Infof("%s is disabled for project %s", api.PluginName, attr.GetNamespace())
			return nil // disabled for this project, do nothing
		}
	}

	// Reuse LimitRanger logic to apply limit/req defaults from the project. Ignore validation
	// errors; assume that LimitRanger will run after this plugin to validate.
	glog.V(5).Infof("%s: initial pod limits are: %#v", api.PluginName, pod.Spec.Containers[0].Resources)
	if err := a.LimitRanger.Admit(attr); err != nil {
		glog.V(5).Infof("%s: error from LimitRanger: %#v", api.PluginName, err)
	}
	glog.V(5).Infof("%s: pod limits after LimitRanger are: %#v", api.PluginName, pod.Spec.Containers[0].Resources)
	for _, container := range pod.Spec.Containers {
		resources := container.Resources
		memLimit, memFound := resources.Limits[kapi.ResourceMemory]
		if memFound && a.config.memoryRequestToLimitRatio.Cmp(zeroDec) != 0 {
			resources.Requests[kapi.ResourceMemory] = resource.Quantity{
				Amount: multiply(memLimit.Amount, a.config.memoryRequestToLimitRatio),
				Format: resource.BinarySI,
			}
		}
		if memFound && a.config.limitCPUToMemoryRatio.Cmp(zeroDec) != 0 {
			resources.Limits[kapi.ResourceCPU] = resource.Quantity{
				// float math is necessary here as there is no way to create an inf.Dec to represent cpuBaseScaleFactor < 0.001
				Amount: multiply(inf.NewDec(int64(float64(memLimit.Value())*cpuBaseScaleFactor), 3), a.config.limitCPUToMemoryRatio),
				Format: resource.DecimalSI,
			}
		}
		cpuLimit, cpuFound := resources.Limits[kapi.ResourceCPU]
		if cpuFound && a.config.cpuRequestToLimitRatio.Cmp(zeroDec) != 0 {
			resources.Requests[kapi.ResourceCPU] = resource.Quantity{
				Amount: multiply(cpuLimit.Amount, a.config.cpuRequestToLimitRatio),
				Format: resource.DecimalSI,
			}
		}
	}
	glog.V(5).Infof("%s: pod limits after overrides are: %#v", api.PluginName, pod.Spec.Containers[0].Resources)
	return nil
}
Code example #11
// IsBuildPod returns true if a pod is one generated for a Build
func IsBuildPod(a admission.Attributes) bool {
	if a.GetResource() != kapi.Resource("pods") {
		return false
	}
	if len(a.GetSubresource()) != 0 {
		return false
	}
	pod, err := GetPod(a)
	if err != nil {
		return false
	}
	return hasBuildAnnotation(pod) && hasBuildEnvVar(pod)
}
Code example #12
File: admission.go Project: rhamilto/origin
// Admit makes admission decisions while enforcing quota
func (q *quotaAdmission) Admit(a admission.Attributes) (err error) {
	startTime := time.Now()
	defer func() {
		duration := time.Now().Sub(startTime)
		runningTotal = duration + runningTotal
	}()
	// ignore all operations that correspond to sub-resource actions
	if a.GetSubresource() != "" {
		return nil
	}

	return q.evaluator.Evaluate(a)
}
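
A caveat on example #12: admission plugins can be invoked concurrently, and the deferred runningTotal update has no synchronization, so the accumulator is racy. A sketch of a race-free variant using sync/atomic (runningTotalNs and timedAdmit are illustrative names, not the project's):

// Race-free accumulation of admission latency, in nanoseconds.
var runningTotalNs int64

func (q *quotaAdmission) timedAdmit(a admission.Attributes) error {
	start := time.Now()
	defer func() {
		atomic.AddInt64(&runningTotalNs, int64(time.Since(start)))
	}()
	if a.GetSubresource() != "" {
		return nil
	}
	return q.evaluator.Evaluate(a)
}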
Code example #13
File: admission.go Project: koori02/kubernetes
func (ir initialResources) Admit(a admission.Attributes) (err error) {
	// Ignore all calls to subresources or resources other than pods.
	if a.GetSubresource() != "" || a.GetResource() != api.Resource("pods") {
		return nil
	}
	pod, ok := a.GetObject().(*api.Pod)
	if !ok {
		return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted")
	}

	ir.estimateAndFillResourcesIfNotSet(pod)
	return nil
}
Code example #14
File: admission.go Project: humblec/kubernetes
// Admit will deny any pod that defines SELinuxOptions or RunAsUser.
func (p *plugin) Admit(a admission.Attributes) (err error) {
	if a.GetSubresource() != "" || a.GetResource().GroupResource() != api.Resource("pods") {
		return nil
	}

	pod, ok := a.GetObject().(*api.Pod)
	if !ok {
		return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted")
	}

	if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SupplementalGroups != nil {
		return apierrors.NewForbidden(a.GetResource().GroupResource(), pod.Name, fmt.Errorf("SecurityContext.SupplementalGroups is forbidden"))
	}
	if pod.Spec.SecurityContext != nil {
		if pod.Spec.SecurityContext.SELinuxOptions != nil {
			return apierrors.NewForbidden(a.GetResource().GroupResource(), pod.Name, fmt.Errorf("pod.Spec.SecurityContext.SELinuxOptions is forbidden"))
		}
		if pod.Spec.SecurityContext.RunAsUser != nil {
			return apierrors.NewForbidden(a.GetResource().GroupResource(), pod.Name, fmt.Errorf("pod.Spec.SecurityContext.RunAsUser is forbidden"))
		}
	}

	if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.FSGroup != nil {
		return apierrors.NewForbidden(a.GetResource().GroupResource(), pod.Name, fmt.Errorf("SecurityContext.FSGroup is forbidden"))
	}

	for _, v := range pod.Spec.InitContainers {
		if v.SecurityContext != nil {
			if v.SecurityContext.SELinuxOptions != nil {
				return apierrors.NewForbidden(a.GetResource().GroupResource(), pod.Name, fmt.Errorf("SecurityContext.SELinuxOptions is forbidden"))
			}
			if v.SecurityContext.RunAsUser != nil {
				return apierrors.NewForbidden(a.GetResource().GroupResource(), pod.Name, fmt.Errorf("SecurityContext.RunAsUser is forbidden"))
			}
		}
	}

	for _, v := range pod.Spec.Containers {
		if v.SecurityContext != nil {
			if v.SecurityContext.SELinuxOptions != nil {
				return apierrors.NewForbidden(a.GetResource().GroupResource(), pod.Name, fmt.Errorf("SecurityContext.SELinuxOptions is forbidden"))
			}
			if v.SecurityContext.RunAsUser != nil {
				return apierrors.NewForbidden(a.GetResource().GroupResource(), pod.Name, fmt.Errorf("SecurityContext.RunAsUser is forbidden"))
			}
		}
	}
	return nil
}
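
The two container loops above run identical checks. A consolidated form, behavior-preserving in spirit (checkContainers is a hypothetical helper, not part of the plugin):

// Run the same SELinuxOptions/RunAsUser checks over any container list.
func checkContainers(a admission.Attributes, podName string, containers []api.Container) error {
	for _, c := range containers {
		if c.SecurityContext == nil {
			continue
		}
		if c.SecurityContext.SELinuxOptions != nil {
			return apierrors.NewForbidden(a.GetResource().GroupResource(), podName, fmt.Errorf("SecurityContext.SELinuxOptions is forbidden"))
		}
		if c.SecurityContext.RunAsUser != nil {
			return apierrors.NewForbidden(a.GetResource().GroupResource(), podName, fmt.Errorf("SecurityContext.RunAsUser is forbidden"))
		}
	}
	return nil
}

Admit would then call checkContainers once for pod.Spec.InitContainers and once for pod.Spec.Containers.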
Code example #15
File: admission.go Project: ethernetdan/kubernetes
func (a *alwaysPullImages) Admit(attributes admission.Attributes) (err error) {
	// Ignore all calls to subresources or resources other than pods.
	if len(attributes.GetSubresource()) != 0 || attributes.GetResource() != api.Resource("pods") {
		return nil
	}
	pod, ok := attributes.GetObject().(*api.Pod)
	if !ok {
		return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted")
	}

	for i := range pod.Spec.Containers {
		pod.Spec.Containers[i].ImagePullPolicy = api.PullAlways
	}

	return nil
}
Code example #16
File: admission.go Project: ZenoRewn/origin
func (a *buildByStrategy) Admit(attr admission.Attributes) error {
	if resource := attr.GetResource().GroupResource(); resource != buildsResource && resource != buildConfigsResource {
		return nil
	}
	// Explicitly exclude the builds/details subresource because it's only
	// updating commit info and cannot change build type.
	if attr.GetResource().GroupResource() == buildsResource && attr.GetSubresource() == "details" {
		return nil
	}
	switch obj := attr.GetObject().(type) {
	case *buildapi.Build:
		return a.checkBuildAuthorization(obj, attr)
	case *buildapi.BuildConfig:
		return a.checkBuildConfigAuthorization(obj, attr)
	case *buildapi.BuildRequest:
		return a.checkBuildRequestAuthorization(obj, attr)
	default:
		return admission.NewForbidden(attr, fmt.Errorf("unrecognized request object %#v", obj))
	}
}
Code example #17
File: admission.go Project: jpartner/kubernetes
// Admit admits resources into the cluster that do not violate any defined LimitRange in the namespace
func (l *limitRanger) Admit(a admission.Attributes) (err error) {

	// Ignore all calls to subresources
	if a.GetSubresource() != "" {
		return nil
	}

	obj := a.GetObject()
	resource := a.GetResource()
	name := "Unknown"
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
		if len(name) == 0 {
			name, _ = meta.NewAccessor().GenerateName(obj)
		}
	}

	key := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}
	items, err := l.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Unable to %s %s at this time because there was an error enforcing limit ranges", a.GetOperation(), resource))
	}
	if len(items) == 0 {
		return nil
	}

	// ensure it meets each prescribed min/max
	for i := range items {
		limitRange := items[i].(*api.LimitRange)
		err = l.limitFunc(limitRange, a.GetResource(), a.GetObject())
		if err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	return nil
}
Code example #18
File: admission.go Project: tracyrankin/origin
// Admit makes admission decisions while enforcing clusterQuota
func (q *clusterQuotaAdmission) Admit(a admission.Attributes) (err error) {
	// ignore all operations that correspond to sub-resource actions
	if len(a.GetSubresource()) != 0 {
		return nil
	}
	// ignore cluster level resources
	if len(a.GetNamespace()) == 0 {
		return nil
	}

	if !q.waitForSyncedStore(time.After(timeToWaitForCacheSync)) {
		return admission.NewForbidden(a, errors.New("caches not synchronized"))
	}

	q.init.Do(func() {
		clusterQuotaAccessor := newQuotaAccessor(q.clusterQuotaLister, q.namespaceLister, q.clusterQuotaClient, q.clusterQuotaMapper)
		q.evaluator = resourcequota.NewQuotaEvaluator(clusterQuotaAccessor, q.registry, q.lockAquisition, numEvaluatorThreads, utilwait.NeverStop)
	})

	return q.evaluator.Evaluate(a)
}
Code example #19
File: admission.go Project: johnmccawley/origin
// Admit enforces that a pod's and its project's node label selectors match at least one node in the cluster.
func (p *podNodeEnvironment) Admit(a admission.Attributes) (err error) {
	resource := a.GetResource()
	if resource != "pods" {
		return nil
	}
	if a.GetSubresource() != "" {
		// only run the checks below on pods proper and not subresources
		return nil
	}

	obj := a.GetObject()
	pod, ok := obj.(*kapi.Pod)
	if !ok {
		return nil
	}

	name := pod.Name

	projects, err := projectcache.GetProjectCache()
	if err != nil {
		return err
	}
	namespace, err := projects.GetNamespaceObject(a.GetNamespace())
	if err != nil {
		return apierrors.NewForbidden(resource, name, err)
	}
	projectNodeSelector, err := projects.GetNodeSelectorMap(namespace)
	if err != nil {
		return err
	}

	if labelselector.Conflicts(projectNodeSelector, pod.Spec.NodeSelector) {
		return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its project node label selector"))
	}

	// modify pod node selector = project node selector + current pod node selector
	pod.Spec.NodeSelector = labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector)

	return nil
}
Code example #20
File: admission.go Project: ncdc/kubernetes
// Admit sets the default value of a PersistentVolumeClaim's storage class, in case the user did
// not provide a value.
//
// 1.  Find available StorageClasses.
// 2.  Figure out which one is the default.
// 3.  Write it to the PVClaim.
func (c *claimDefaulterPlugin) Admit(a admission.Attributes) error {
	if a.GetResource().GroupResource() != api.Resource("persistentvolumeclaims") {
		return nil
	}

	if len(a.GetSubresource()) != 0 {
		return nil
	}

	pvc, ok := a.GetObject().(*api.PersistentVolumeClaim)
	// if we can't convert then we don't handle this object so just return
	if !ok {
		return nil
	}

	_, found := pvc.Annotations[classAnnotation]
	if found {
		// The user asked for a class.
		return nil
	}

	glog.V(4).Infof("no storage class for claim %s (generate: %s)", pvc.Name, pvc.GenerateName)

	def, err := getDefaultClass(c.store)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if def == nil {
		// No default class selected, do nothing about the PVC.
		return nil
	}

	glog.V(4).Infof("defaulting storage class for claim %s (generate: %s) to %s", pvc.Name, pvc.GenerateName, def.Name)
	if pvc.ObjectMeta.Annotations == nil {
		pvc.ObjectMeta.Annotations = map[string]string{}
	}
	pvc.Annotations[classAnnotation] = def.Name
	return nil
}
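
getDefaultClass is not shown above; conceptually it scans the StorageClass store for the one class marked as default. A sketch under the assumption that the default is flagged by the beta annotation in use at the time (the annotation key and error text are stated from memory, and the helper body is illustrative):

// Sketch: pick the StorageClass marked as default, erroring on ambiguity.
func getDefaultClass(store cache.Store) (*storage.StorageClass, error) {
	var defaults []*storage.StorageClass
	for _, obj := range store.List() {
		class, ok := obj.(*storage.StorageClass)
		if !ok {
			continue
		}
		if class.Annotations["storageclass.beta.kubernetes.io/is-default-class"] == "true" {
			defaults = append(defaults, class)
		}
	}
	if len(defaults) == 0 {
		return nil, nil // no default class; leave the claim untouched
	}
	if len(defaults) > 1 {
		return nil, fmt.Errorf("%d default StorageClasses were found", len(defaults))
	}
	return defaults[0], nil
}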
Code example #21
File: imagepolicy.go Project: abhgupta/origin
// Admit attempts to apply the image policy to the incoming resource.
func (a *imagePolicyPlugin) Admit(attr admission.Attributes) error {
	switch attr.GetOperation() {
	case admission.Create, admission.Update:
		if len(attr.GetSubresource()) > 0 {
			return nil
		}
		// only create and update are tested, and only on core resources
		// TODO: scan all resources
		// TODO: Create a general equivalence map for admission - operation X on subresource Y is equivalent to reduced operation
	default:
		return nil
	}

	gr := attr.GetResource().GroupResource()
	if !a.accepter.Covers(gr) {
		return nil
	}

	m, err := meta.GetImageReferenceMutator(attr.GetObject())
	if err != nil {
		return apierrs.NewForbidden(gr, attr.GetName(), fmt.Errorf("unable to apply image policy against objects of type %T: %v", attr.GetObject(), err))
	}

	// load exclusion rules from the namespace cache
	var excluded sets.String
	if ns := attr.GetNamespace(); len(ns) > 0 {
		if ns, err := a.projectCache.GetNamespace(ns); err == nil {
			if value := ns.Annotations[api.IgnorePolicyRulesAnnotation]; len(value) > 0 {
				excluded = sets.NewString(strings.Split(value, ",")...)
			}
		}
	}

	if err := accept(a.accepter, a.resolver, m, attr, excluded); err != nil {
		return err
	}

	return nil
}
Code example #22
File: admission.go Project: Clarifai/kubernetes
// Admit makes admission decisions while enforcing quota
func (q *quotaAdmission) Admit(a admission.Attributes) (err error) {
	// ignore all operations that correspond to sub-resource actions
	if a.GetSubresource() != "" {
		return nil
	}

	// if we do not know how to evaluate usage for this kind, just ignore it
	evaluators := q.evaluator.registry.Evaluators()
	evaluator, found := evaluators[a.GetKind().GroupKind()]
	if !found {
		return nil
	}

	// for this kind, check if the operation could mutate any quota resources
	// if no resources tracked by quota are impacted, then just return
	op := a.GetOperation()
	operationResources := evaluator.OperationResources(op)
	if len(operationResources) == 0 {
		return nil
	}

	return q.evaluator.evaluate(a)
}
Code example #23
File: admission.go Project: RomainVabre/origin
func (o *podNodeConstraints) Admit(attr admission.Attributes) error {
	switch {
	case o.config == nil,
		attr.GetSubresource() != "":
		return nil
	}
	shouldCheck, err := shouldCheckResource(attr.GetResource().GroupResource(), attr.GetKind().GroupKind())
	if err != nil {
		return err
	}
	if !shouldCheck {
		return nil
	}
	// Only check Create operation on pods
	if attr.GetResource().GroupResource() == kapi.Resource("pods") && attr.GetOperation() != admission.Create {
		return nil
	}
	ps, err := o.getPodSpec(attr)
	if err == nil {
		return o.admitPodSpec(attr, ps)
	}
	return err
}
Code example #24
File: admission.go Project: juanluisvaladas/origin
func (a *imagePolicyWebhook) Admit(attributes admission.Attributes) (err error) {
	// Ignore all calls to subresources or resources other than pods.
	allowedResources := map[unversioned.GroupResource]bool{
		api.Resource("pods"): true,
	}

	if len(attributes.GetSubresource()) != 0 || !allowedResources[attributes.GetResource().GroupResource()] {
		return nil
	}

	pod, ok := attributes.GetObject().(*api.Pod)
	if !ok {
		return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted")
	}

	// Build list of ImageReviewContainerSpec
	var imageReviewContainerSpecs []v1alpha1.ImageReviewContainerSpec
	containers := make([]api.Container, 0, len(pod.Spec.Containers)+len(pod.Spec.InitContainers))
	containers = append(containers, pod.Spec.Containers...)
	containers = append(containers, pod.Spec.InitContainers...)
	for _, c := range containers {
		imageReviewContainerSpecs = append(imageReviewContainerSpecs, v1alpha1.ImageReviewContainerSpec{
			Image: c.Image,
		})
	}
	imageReview := v1alpha1.ImageReview{
		Spec: v1alpha1.ImageReviewSpec{
			Containers:  imageReviewContainerSpecs,
			Annotations: a.filterAnnotations(pod.Annotations),
			Namespace:   attributes.GetNamespace(),
		},
	}
	if err := a.admitPod(attributes, &imageReview); err != nil {
		return admission.NewForbidden(attributes, err)
	}
	return nil
}
Code example #25
File: admission.go Project: Cloven/minikube
// Admit determines if the pod should be admitted based on the requested security context
// and the available PSPs.
//
// 1.  Find available PSPs.
// 2.  Create the providers, includes setting pre-allocated values if necessary.
// 3.  Try to generate and validate a PSP with providers.  If we find one then admit the pod
//     with the validated PSP.  If we don't find any, reject the pod and give all errors from
//     the failed attempts.
func (c *podSecurityPolicyPlugin) Admit(a admission.Attributes) error {
	if a.GetResource().GroupResource() != api.Resource("pods") {
		return nil
	}

	if len(a.GetSubresource()) != 0 {
		return nil
	}

	pod, ok := a.GetObject().(*api.Pod)
	// if we can't convert then we don't handle this object so just return
	if !ok {
		return nil
	}

	// get all constraints that are usable by the user
	glog.V(4).Infof("getting pod security policies for pod %s (generate: %s)", pod.Name, pod.GenerateName)
	var saInfo user.Info
	if len(pod.Spec.ServiceAccountName) > 0 {
		saInfo = serviceaccount.UserInfo(a.GetNamespace(), pod.Spec.ServiceAccountName, "")
	}

	matchedPolicies, err := c.pspMatcher(c.store, a.GetUserInfo(), saInfo)
	if err != nil {
		return admission.NewForbidden(a, err)
	}

	// if we have no policies and want to succeed then return.  Otherwise we'll end up with no
	// providers and fail with "unable to validate against any pod security policy" below.
	if len(matchedPolicies) == 0 && !c.failOnNoPolicies {
		return nil
	}

	providers, errs := c.createProvidersFromPolicies(matchedPolicies, pod.Namespace)
	logProviders(pod, providers, errs)

	if len(providers) == 0 {
		return admission.NewForbidden(a, fmt.Errorf("no providers available to validate pod request"))
	}

	// all containers in a single pod must validate under a single provider or we will reject the request
	validationErrs := field.ErrorList{}
	for _, provider := range providers {
		if errs := assignSecurityContext(provider, pod, field.NewPath(fmt.Sprintf("provider %s: ", provider.GetPSPName()))); len(errs) > 0 {
			validationErrs = append(validationErrs, errs...)
			continue
		}

		// the entire pod validated, annotate and accept the pod
		glog.V(4).Infof("pod %s (generate: %s) validated against provider %s", pod.Name, pod.GenerateName, provider.GetPSPName())
		if pod.ObjectMeta.Annotations == nil {
			pod.ObjectMeta.Annotations = map[string]string{}
		}
		pod.ObjectMeta.Annotations[psputil.ValidatedPSPAnnotation] = provider.GetPSPName()
		return nil
	}

	// we didn't validate against any provider, reject the pod and give the errors for each attempt
	glog.V(4).Infof("unable to validate pod %s (generate: %s) against any pod security policy: %v", pod.Name, pod.GenerateName, validationErrs)
	return admission.NewForbidden(a, fmt.Errorf("unable to validate against any pod security policy: %v", validationErrs))
}
Code example #26
File: admission.go Project: juanluisvaladas/origin
func (a *jenkinsBootstrapper) Admit(attributes admission.Attributes) error {
	if a.jenkinsConfig.AutoProvisionEnabled != nil && !*a.jenkinsConfig.AutoProvisionEnabled {
		return nil
	}
	if len(attributes.GetSubresource()) != 0 {
		return nil
	}
	if attributes.GetResource().GroupResource() != buildapi.Resource("buildconfigs") && attributes.GetResource().GroupResource() != buildapi.Resource("builds") {
		return nil
	}
	if !needsJenkinsTemplate(attributes.GetObject()) {
		return nil
	}

	namespace := attributes.GetNamespace()

	svcName := a.jenkinsConfig.ServiceName
	if len(svcName) == 0 {
		return nil
	}

	// TODO pull this from a cache.
	if _, err := a.serviceClient.Services(namespace).Get(svcName); !kapierrors.IsNotFound(err) {
		// if it isn't a "not found" error, return the error.  Either it's nil and there's nothing to do or something went really wrong
		return err
	}

	glog.V(3).Infof("Adding new jenkins service %q to the project %q", svcName, namespace)
	jenkinsTemplate := jenkinscontroller.NewPipelineTemplate(namespace, a.jenkinsConfig, a.openshiftClient)
	objects, errs := jenkinsTemplate.Process()
	if len(errs) > 0 {
		return kutilerrors.NewAggregate(errs)
	}
	if !jenkinsTemplate.HasJenkinsService(objects) {
		return fmt.Errorf("template %s/%s does not contain required service %q", a.jenkinsConfig.TemplateNamespace, a.jenkinsConfig.TemplateName, a.jenkinsConfig.ServiceName)
	}

	impersonatingConfig := a.privilegedRESTClientConfig
	oldWrapTransport := impersonatingConfig.WrapTransport
	impersonatingConfig.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
		return authenticationclient.NewImpersonatingRoundTripper(attributes.GetUserInfo(), oldWrapTransport(rt))
	}

	var bulkErr error

	bulk := &cmd.Bulk{
		Mapper: &resource.Mapper{
			RESTMapper:  registered.RESTMapper(),
			ObjectTyper: kapi.Scheme,
			ClientMapper: resource.ClientMapperFunc(func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
				if latest.OriginKind(mapping.GroupVersionKind) {
					return client.New(&impersonatingConfig)
				}
				return kclient.New(&impersonatingConfig)
			}),
		},
		Op: cmd.Create,
		After: func(info *resource.Info, err error) bool {
			if kapierrors.IsAlreadyExists(err) {
				return false
			}
			if err != nil {
				bulkErr = err
				return true
			}
			return false
		},
	}
	// we're intercepting the error we care about using After
	bulk.Run(objects, namespace)
	if bulkErr != nil {
		return bulkErr
	}

	glog.V(1).Infof("Jenkins Pipeline service %q created", svcName)

	return nil

}
Code example #27
File: admission.go Project: nak3/kubernetes
// Admit enforces that a pod's and its namespace's node label selectors match at least one node in the cluster.
func (p *podNodeSelector) Admit(a admission.Attributes) error {
	resource := a.GetResource().GroupResource()
	if resource != api.Resource("pods") {
		return nil
	}
	if a.GetSubresource() != "" {
		// only run the checks below on pods proper and not subresources
		return nil
	}

	obj := a.GetObject()
	pod, ok := obj.(*api.Pod)
	if !ok {
		glog.Errorf("expected pod but got %s", a.GetKind().Kind)
		return nil
	}

	if !p.WaitForReady() {
		return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
	}

	name := pod.Name
	nsName := a.GetNamespace()
	var namespace *api.Namespace

	namespaceObj, exists, err := p.namespaceInformer.GetStore().Get(&api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      nsName,
			Namespace: "",
		},
	})
	if err != nil {
		return errors.NewInternalError(err)
	}
	if exists {
		namespace = namespaceObj.(*api.Namespace)
	} else {
		namespace, err = p.defaultGetNamespace(nsName)
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
	}
	namespaceNodeSelector, err := p.getNodeSelectorMap(namespace)
	if err != nil {
		return err
	}

	if labels.Conflicts(namespaceNodeSelector, labels.Set(pod.Spec.NodeSelector)) {
		return errors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its namespace node label selector"))
	}

	whitelist, err := labels.ConvertSelectorToLabelsMap(p.clusterNodeSelectors[namespace.Name])
	if err != nil {
		return err
	}

	// Merge pod node selector = namespace node selector + current pod node selector
	podNodeSelectorLabels := labels.Merge(namespaceNodeSelector, pod.Spec.NodeSelector)

	// whitelist verification
	if !labels.AreLabelsInWhiteList(podNodeSelectorLabels, whitelist) {
		return errors.NewForbidden(resource, name, fmt.Errorf("pod node label selector labels conflict with its namespace whitelist"))
	}

	// Updated pod node selector = namespace node selector + current pod node selector
	pod.Spec.NodeSelector = map[string]string(podNodeSelectorLabels)
	return nil
}
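
Example #27 leans on three label-set helpers. Their behavior, assuming the pkg/labels API of that era (a quick sketch, not authoritative):

// Behavior sketch of the label helpers used above.
func labelHelperExamples() {
	ns := labels.Set{"env": "prod"}
	pod := labels.Set{"disk": "ssd"}

	_ = labels.Conflicts(ns, pod)   // false: no key appears in both sets with different values
	merged := labels.Merge(ns, pod) // Set{"env": "prod", "disk": "ssd"}

	whitelist := labels.Set{"env": "prod", "disk": "ssd", "region": "us-east"}
	_ = labels.AreLabelsInWhiteList(merged, whitelist) // true: every merged label is whitelisted
}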
Code example #28
File: admission.go Project: JauntyWard/kubernetes
func (q *quota) Admit(a admission.Attributes) (err error) {
	if a.GetSubresource() != "" {
		return nil
	}

	if a.GetOperation() == "DELETE" {
		return nil
	}

	key := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}

	// concurrent operations that modify quota-tracked resources can cause a conflict when incrementing usage
	// as a result, we will attempt to increment quota usage per request up to the numRetries limit
	// we fuzz each retry with an interval period to attempt to improve the end-user experience during concurrent operations
	numRetries := 10
	interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond

	items, err := q.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("unable to %s %s at this time because there was an error enforcing quota", a.GetOperation(), a.GetResource()))
	}
	if len(items) == 0 {
		return nil
	}

	for i := range items {

		quota := items[i].(*api.ResourceQuota)

		for retry := 1; retry <= numRetries; retry++ {

			// we cannot modify the value directly in the cache, so we copy
			status := &api.ResourceQuotaStatus{
				Hard: api.ResourceList{},
				Used: api.ResourceList{},
			}
			for k, v := range quota.Status.Hard {
				status.Hard[k] = *v.Copy()
			}
			for k, v := range quota.Status.Used {
				status.Used[k] = *v.Copy()
			}

			dirty, err := IncrementUsage(a, status, q.client)
			if err != nil {
				return admission.NewForbidden(a, err)
			}

			if dirty {
				// construct a usage record
				usage := api.ResourceQuota{
					ObjectMeta: api.ObjectMeta{
						Name:            quota.Name,
						Namespace:       quota.Namespace,
						ResourceVersion: quota.ResourceVersion,
						Labels:          quota.Labels,
						Annotations:     quota.Annotations},
				}
				usage.Status = *status
				_, err = q.client.ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
				if err == nil {
					break
				}

				// we have concurrent requests to update quota, so look to retry if needed
				if retry == numRetries {
					return admission.NewForbidden(a, fmt.Errorf("unable to %s %s at this time because there are too many concurrent requests to increment quota", a.GetOperation(), a.GetResource()))
				}
				time.Sleep(interval)
				// manually get the latest quota
				quota, err = q.client.ResourceQuotas(usage.Namespace).Get(quota.Name)
				if err != nil {
					return admission.NewForbidden(a, err)
				}
			}
		}
	}
	return nil
}
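
Examples #28 and #30 share the same bounded retry loop with a randomized interval to ride out optimistic-concurrency conflicts on UpdateStatus. The general skeleton, with tryUpdate and refresh as hypothetical stand-ins for the status write and the re-read of the latest object:

// Generic shape of the retry-with-jitter loop used by the quota plugins.
func updateWithRetry(tryUpdate func() error, refresh func() error) error {
	const numRetries = 10
	interval := time.Duration(rand.Int63n(90)+10) * time.Millisecond // 10-100ms of jitter

	for retry := 1; retry <= numRetries; retry++ {
		err := tryUpdate()
		if err == nil {
			return nil
		}
		if retry == numRetries {
			return err // concurrent writers won every attempt; give up
		}
		time.Sleep(interval)
		if err := refresh(); err != nil { // reload the latest object before retrying
			return err
		}
	}
	return nil
}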
Code example #29
File: admission.go Project: jwforres/origin
// Admit admits resources into the cluster that do not violate any defined LimitRange in the namespace
func (l *limitRanger) Admit(a admission.Attributes) (err error) {

	// Ignore all calls to subresources
	if a.GetSubresource() != "" {
		return nil
	}

	// ignore all calls that do not deal with pod resources since that is all this supports now.
	if a.GetKind() != api.Kind("Pod") {
		return nil
	}

	obj := a.GetObject()
	name := "Unknown"
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
		if len(name) == 0 {
			name, _ = meta.NewAccessor().GenerateName(obj)
		}
	}

	key := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}
	items, err := l.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Unable to %s %v at this time because there was an error enforcing limit ranges", a.GetOperation(), a.GetResource()))
	}

	// if there are no items held in our indexer, check our live-lookup LRU; if that misses, do the live lookup to prime it.
	if len(items) == 0 {
		lruItemObj, ok := l.liveLookupCache.Get(a.GetNamespace())
		if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
			// TODO: If there are multiple operations at the same time and cache has just expired,
			// this may cause multiple List operations being issued at the same time.
			// If there is already in-flight List() for a given namespace, we should wait until
			// it is finished and cache is updated instead of doing the same, also to avoid
			// throttling - see #22422 for details.
			liveList, err := l.client.Core().LimitRanges(a.GetNamespace()).List(api.ListOptions{})
			if err != nil {
				return admission.NewForbidden(a, err)
			}
			newEntry := liveLookupEntry{expiry: time.Now().Add(l.liveTTL)}
			for i := range liveList.Items {
				newEntry.items = append(newEntry.items, &liveList.Items[i])
			}
			l.liveLookupCache.Add(a.GetNamespace(), newEntry)
			lruItemObj = newEntry
		}
		lruEntry := lruItemObj.(liveLookupEntry)

		for i := range lruEntry.items {
			items = append(items, lruEntry.items[i])
		}

	}

	// ensure it meets each prescribed min/max
	for i := range items {
		limitRange := items[i].(*api.LimitRange)
		err = l.limitFunc(limitRange, a.GetResource().Resource, a.GetObject())
		if err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	return nil
}
Code example #30
File: admission.go Project: mataihang/kubernetes
// Admit makes admission decisions while enforcing quota
func (q *quotaAdmission) Admit(a admission.Attributes) (err error) {
	// ignore all operations that correspond to sub-resource actions
	if a.GetSubresource() != "" {
		return nil
	}

	// if we do not know how to evaluate usage for this kind, just ignore it
	evaluators := q.registry.Evaluators()
	evaluator, found := evaluators[a.GetKind()]
	if !found {
		return nil
	}

	// for this kind, check if the operation could mutate any quota resources
	// if no resources tracked by quota are impacted, then just return
	op := a.GetOperation()
	operationResources := evaluator.OperationResources(op)
	if len(operationResources) == 0 {
		return nil
	}

	// determine if there are any quotas in this namespace
	// if there are no quotas, we don't need to do anything
	namespace, name := a.GetNamespace(), a.GetName()
	items, err := q.indexer.Index("namespace", &api.ResourceQuota{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: ""}})
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Error resolving quota."))
	}
	// if there are no items held in our indexer, check our live-lookup LRU; if that misses, do the live lookup to prime it.
	if len(items) == 0 {
		lruItemObj, ok := q.liveLookupCache.Get(a.GetNamespace())
		if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
			// TODO: If there are multiple operations at the same time and cache has just expired,
			// this may cause multiple List operations being issued at the same time.
			// If there is already in-flight List() for a given namespace, we should wait until
			// it is finished and cache is updated instead of doing the same, also to avoid
			// throttling - see #22422 for details.
			liveList, err := q.client.Core().ResourceQuotas(namespace).List(api.ListOptions{})
			if err != nil {
				return admission.NewForbidden(a, err)
			}
			newEntry := liveLookupEntry{expiry: time.Now().Add(q.liveTTL)}
			for i := range liveList.Items {
				newEntry.items = append(newEntry.items, &liveList.Items[i])
			}
			q.liveLookupCache.Add(a.GetNamespace(), newEntry)
			lruItemObj = newEntry
		}
		lruEntry := lruItemObj.(liveLookupEntry)
		for i := range lruEntry.items {
			items = append(items, lruEntry.items[i])
		}
	}
	// if there are still no items, we can return
	if len(items) == 0 {
		return nil
	}

	// find the set of quotas that are pertinent to this request
	// reject if we match the quota, but usage is not calculated yet
	// reject if the input object does not satisfy quota constraints
	// if there are no pertinent quotas, we can just return
	inputObject := a.GetObject()
	resourceQuotas := []*api.ResourceQuota{}
	for i := range items {
		resourceQuota := items[i].(*api.ResourceQuota)
		match := evaluator.Matches(resourceQuota, inputObject)
		if !match {
			continue
		}
		hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
		evaluatorResources := evaluator.MatchesResources()
		requiredResources := quota.Intersection(hardResources, evaluatorResources)
		err := evaluator.Constraints(requiredResources, inputObject)
		if err != nil {
			return admission.NewForbidden(a, fmt.Errorf("Failed quota: %s: %v", resourceQuota.Name, err))
		}
		if !hasUsageStats(resourceQuota) {
			return admission.NewForbidden(a, fmt.Errorf("Status unknown for quota: %s", resourceQuota.Name))
		}
		resourceQuotas = append(resourceQuotas, resourceQuota)
	}
	if len(resourceQuotas) == 0 {
		return nil
	}

	// there is at least one quota that definitely matches our object
	// as a result, we need to measure the usage of this object for quota
	// on updates, we need to subtract the previous measured usage
	// if usage shows no change, just return since it has no impact on quota
	deltaUsage := evaluator.Usage(inputObject)
	if admission.Update == op {
		prevItem, err := evaluator.Get(namespace, name)
		if err != nil {
			return admission.NewForbidden(a, fmt.Errorf("Unable to get previous: %v", err))
		}
		prevUsage := evaluator.Usage(prevItem)
		deltaUsage = quota.Subtract(deltaUsage, prevUsage)
	}
	if quota.IsZero(deltaUsage) {
		return nil
	}

	// TODO: Move to a bucketing work queue
	// If we guaranteed that we processed requests in the order they were received by the server, we would reduce quota conflicts.
	// Until we have the bucketing work queue, we jitter requests and retry on conflict.
	numRetries := 10
	interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond

	// seed the retry loop with the initial set of quotas to process (should reduce each iteration)
	resourceQuotasToProcess := resourceQuotas
	for retry := 1; retry <= numRetries; retry++ {
		// the list of quotas we will try again if there is a version conflict
		tryAgain := []*api.ResourceQuota{}

		// check that we pass all remaining quotas so we do not prematurely charge
		// for each quota, mask the usage to the set of resources tracked by the quota
		// if request + used > hard, return an error describing the failure
		updatedUsage := map[string]api.ResourceList{}
		for _, resourceQuota := range resourceQuotasToProcess {
			hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
			requestedUsage := quota.Mask(deltaUsage, hardResources)
			newUsage := quota.Add(resourceQuota.Status.Used, requestedUsage)
			if allowed, exceeded := quota.LessThanOrEqual(newUsage, resourceQuota.Status.Hard); !allowed {
				failedRequestedUsage := quota.Mask(requestedUsage, exceeded)
				failedUsed := quota.Mask(resourceQuota.Status.Used, exceeded)
				failedHard := quota.Mask(resourceQuota.Status.Hard, exceeded)
				return admission.NewForbidden(a,
					fmt.Errorf("Exceeded quota: %s, requested: %s, used: %s, limited: %s",
						resourceQuota.Name,
						prettyPrint(failedRequestedUsage),
						prettyPrint(failedUsed),
						prettyPrint(failedHard)))
			}
			updatedUsage[resourceQuota.Name] = newUsage
		}

		// update the status for each quota with its new usage
		// if we get a conflict, get updated quota, and enqueue
		for i, resourceQuota := range resourceQuotasToProcess {
			newUsage := updatedUsage[resourceQuota.Name]
			quotaToUpdate := &api.ResourceQuota{
				ObjectMeta: api.ObjectMeta{
					Name:            resourceQuota.Name,
					Namespace:       resourceQuota.Namespace,
					ResourceVersion: resourceQuota.ResourceVersion,
				},
				Status: api.ResourceQuotaStatus{
					Hard: quota.Add(api.ResourceList{}, resourceQuota.Status.Hard),
					Used: newUsage,
				},
			}
			_, err = q.client.Core().ResourceQuotas(quotaToUpdate.Namespace).UpdateStatus(quotaToUpdate)
			if err != nil {
				if !errors.IsConflict(err) {
					return admission.NewForbidden(a, fmt.Errorf("Unable to update quota status: %s %v", resourceQuota.Name, err))
				}
				// if we get a conflict, we get the latest copy of the quota documents that were not yet modified and retry them all with the latest state.
				for fetchIndex := i; fetchIndex < len(resourceQuotasToProcess); fetchIndex++ {
					latestQuota, err := q.client.Core().ResourceQuotas(namespace).Get(resourceQuotasToProcess[fetchIndex].Name)
					if err != nil {
						return admission.NewForbidden(a, fmt.Errorf("Unable to get quota: %s %v", resourceQuotasToProcess[fetchIndex].Name, err))
					}
					tryAgain = append(tryAgain, latestQuota)
				}
				break
			}
		}

		// all quotas were updated, so we can return
		if len(tryAgain) == 0 {
			return nil
		}

		// we have concurrent requests to update quota, so look to retry if needed
		// next iteration, we need to process the items that have to try again
		// pause the specified interval to encourage jitter
		if retry == numRetries {
			names := []string{}
			for _, quota := range tryAgain {
				names = append(names, quota.Name)
			}
			return admission.NewForbidden(a, fmt.Errorf("Unable to update status for quota: %s, ", strings.Join(names, ",")))
		}
		resourceQuotasToProcess = tryAgain
		time.Sleep(interval)
	}
	return nil
}
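
Taken together, the examples share one shape: a type implementing admission.Interface, an early guard on resource and subresource, then the plugin-specific check or mutation. Below is a minimal end-to-end skeleton in that style, written against the ~1.3/1.4-era plugin API; the import paths, the RegisterPlugin factory signature, and everything named exampleDeny are assumptions for illustration, not code from any of the projects above.

package exampledeny

import (
	"fmt"
	"io"

	"k8s.io/kubernetes/pkg/admission"
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

type exampleDeny struct{}

// Handles tells the admission chain which operations this plugin cares about.
func (exampleDeny) Handles(op admission.Operation) bool {
	return op == admission.Create || op == admission.Update
}

// Admit applies the guard pattern shared by nearly every example on this page,
// then rejects pods carrying an opt-out annotation (a stand-in for real logic).
func (exampleDeny) Admit(a admission.Attributes) error {
	if a.GetSubresource() != "" || a.GetResource().GroupResource() != api.Resource("pods") {
		return nil // not a plain pod request; stay out of the way
	}
	pod, ok := a.GetObject().(*api.Pod)
	if !ok {
		return admission.NewForbidden(a, fmt.Errorf("unexpected object: %#v", a.GetObject()))
	}
	if pod.Annotations["example.io/deny"] == "true" {
		return admission.NewForbidden(a, fmt.Errorf("pod %s is annotated example.io/deny=true", pod.Name))
	}
	return nil
}

func init() {
	// Factory registration, assuming the clientset-based signature of that era.
	admission.RegisterPlugin("ExampleDeny", func(client clientset.Interface, config io.Reader) (admission.Interface, error) {
		return exampleDeny{}, nil
	})
}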