Code Example #1
// syncResourceQuota runs a complete sync of resource quota status across all known kinds
func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota v1.ResourceQuota) (err error) {
	// quota is dirty if any part of spec hard limits differs from the status hard limits
	dirty := !api.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard)

	resourceQuota := api.ResourceQuota{}
	if err := v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(&v1ResourceQuota, &resourceQuota, nil); err != nil {
		return err
	}

	// dirty tracks whether the usage status differs from the previous sync;
	// if so, we send a new usage with the latest status.
	// On our first sync the quota is dirty by default, since we need to track usage.
	dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil)

	used := api.ResourceList{}
	if resourceQuota.Status.Used != nil {
		used = quota.Add(api.ResourceList{}, resourceQuota.Status.Used)
	}
	hardLimits := quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard)

	newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry)
	if err != nil {
		return err
	}
	for key, value := range newUsage {
		used[key] = value
	}

	// ensure the set of used values matches those that have hard constraints
	hardResources := quota.ResourceNames(hardLimits)
	used = quota.Mask(used, hardResources)

	// Create a usage object based on the quota's resource version so updates are handled correctly.
	// By default, we preserve the past usage observation and set hard to the current spec.
	usage := api.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Name:            resourceQuota.Name,
			Namespace:       resourceQuota.Namespace,
			ResourceVersion: resourceQuota.ResourceVersion,
			Labels:          resourceQuota.Labels,
			Annotations:     resourceQuota.Annotations},
		Status: api.ResourceQuotaStatus{
			Hard: hardLimits,
			Used: used,
		},
	}

	dirty = dirty || !quota.Equals(usage.Status.Used, resourceQuota.Status.Used)

	// there was a change observed by this controller that requires we update quota
	if dirty {
		v1Usage := &v1.ResourceQuota{}
		if err := v1.Convert_api_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil {
			return err
		}
		_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage)
		return err
	}
	return nil
}
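
quota.Mask appears throughout these examples (here at the "ensure the set of used values" step, and again in examples #8, #9, and #12). The sketch below shows its assumed contract, with plain int64 quantities standing in for resource.Quantity; it is an illustration of the semantics, not the library source.

package main

import "fmt"

// mask sketches the assumed contract of quota.Mask: return a copy of list
// restricted to the resource names that appear in names.
func mask(list map[string]int64, names []string) map[string]int64 {
	keep := map[string]bool{}
	for _, name := range names {
		keep[name] = true
	}
	out := map[string]int64{}
	for name, value := range list {
		if keep[name] {
			out[name] = value
		}
	}
	return out
}

func main() {
	used := map[string]int64{"pods": 3, "cpu": 2}
	hard := []string{"pods"} // only pods has a hard constraint
	fmt.Println(mask(used, hard)) // map[pods:3]
}
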
Code Example #2
File: pods.go Project: Q-Lee/kubernetes
// PodUsageFunc knows how to measure usage associated with pods
func PodUsageFunc(object runtime.Object) api.ResourceList {
	pod, ok := object.(*api.Pod)
	if !ok {
		return api.ResourceList{}
	}

	// by convention, we do not quota pods that have reached an end-of-life state
	if !QuotaPod(pod) {
		return api.ResourceList{}
	}

	// TODO: fix this when we have pod level cgroups
	// when we have pod level cgroups, we can just read pod level requests/limits
	requests := api.ResourceList{}
	limits := api.ResourceList{}

	for i := range pod.Spec.Containers {
		requests = quota.Add(requests, pod.Spec.Containers[i].Resources.Requests)
		limits = quota.Add(limits, pod.Spec.Containers[i].Resources.Limits)
	}
	// Init containers run sequentially before app containers start, so the largest
	// init container value for each resource is compared against the sum over app
	// containers to determine the effective usage for both requests and limits.
	for i := range pod.Spec.InitContainers {
		requests = quota.Max(requests, pod.Spec.InitContainers[i].Resources.Requests)
		limits = quota.Max(limits, pod.Spec.InitContainers[i].Resources.Limits)
	}

	return podUsageHelper(requests, limits)
}
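
The two loops above implement the effective-usage rule for pods: requests and limits are the component-wise sum over app containers, then raised to the component-wise maximum of any single init container. Below is a minimal sketch of the assumed quota.Add and quota.Max semantics, with int64 standing in for resource.Quantity; it illustrates the contract rather than reproducing the library.

package main

import "fmt"

// add sketches quota.Add: component-wise sum of two resource lists.
func add(a, b map[string]int64) map[string]int64 {
	out := map[string]int64{}
	for name, value := range a {
		out[name] = value
	}
	for name, value := range b {
		out[name] += value
	}
	return out
}

// max sketches quota.Max: component-wise maximum of two resource lists.
func max(a, b map[string]int64) map[string]int64 {
	out := map[string]int64{}
	for name, value := range a {
		out[name] = value
	}
	for name, value := range b {
		if current, ok := out[name]; !ok || value > current {
			out[name] = value
		}
	}
	return out
}

func main() {
	appSum := add(map[string]int64{"cpu": 1}, map[string]int64{"cpu": 2}) // two app containers
	effective := max(appSum, map[string]int64{"cpu": 4})                  // one large init container
	fmt.Println(effective)                                               // map[cpu:4]
}
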
Code Example #3
File: evaluator.go Project: XbinZh/kubernetes
// UsageStats calculates latest observed usage stats for all objects
func (g *GenericEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
	// default each tracked resource to zero
	result := quota.UsageStats{Used: api.ResourceList{}}
	for _, resourceName := range g.MatchedResourceNames {
		result.Used[resourceName] = resource.MustParse("0")
	}
	list, err := g.ListFuncByNamespace(options.Namespace, api.ListOptions{})
	if err != nil {
		return result, fmt.Errorf("%s: Failed to list %v: %v", g.Name, g.GroupKind(), err)
	}
	_, err = meta.Accessor(list)
	if err != nil {
		return result, fmt.Errorf("%s: Unable to understand list result %#v", g.Name, list)
	}
	items, err := meta.ExtractList(list)
	if err != nil {
		return result, fmt.Errorf("%s: Unable to understand list result %#v (%v)", g.Name, list, err)
	}
	for _, item := range items {
		// need to verify that the item matches the set of scopes
		matchesScopes := true
		for _, scope := range options.Scopes {
			if !g.MatchesScope(scope, item) {
				matchesScopes = false
			}
		}
		// only count usage if there was a match
		if matchesScopes {
			result.Used = quota.Add(result.Used, g.Usage(item))
		}
	}
	return result, nil
}
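
Scope matching in this evaluator is conjunctive: an item counts toward usage only if it matches every scope on the quota. A one-look sketch of the predicate (the original loop keeps iterating after a miss, but the result is the same); the scope names in main are illustrative:

package main

import "fmt"

// matchesAllScopes sketches the conjunctive scope check in UsageStats above:
// an item counts toward usage only if every scope matches.
func matchesAllScopes(scopes []string, matches func(string) bool) bool {
	for _, scope := range scopes {
		if !matches(scope) {
			return false
		}
	}
	return true
}

func main() {
	terminating := func(scope string) bool { return scope == "Terminating" }
	fmt.Println(matchesAllScopes([]string{"Terminating"}, terminating))               // true
	fmt.Println(matchesAllScopes([]string{"Terminating", "BestEffort"}, terminating)) // false
}
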
Code Example #4
File: evaluator.go Project: alex-mohr/kubernetes
// UsageStats calculates latest observed usage stats for all objects
func (g *GenericEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
	// default each tracked resource to zero
	result := quota.UsageStats{Used: api.ResourceList{}}
	for _, resourceName := range g.MatchedResourceNames {
		result.Used[resourceName] = resource.MustParse("0")
	}
	items, err := g.ListFuncByNamespace(options.Namespace, v1.ListOptions{
		LabelSelector: labels.Everything().String(),
	})
	if err != nil {
		return result, fmt.Errorf("%s: Failed to list %v: %v", g.Name, g.GroupKind(), err)
	}
	for _, item := range items {
		// need to verify that the item matches the set of scopes
		matchesScopes := true
		for _, scope := range options.Scopes {
			if !g.MatchesScope(scope, item) {
				matchesScopes = false
			}
		}
		// only count usage if there was a match
		if matchesScopes {
			result.Used = quota.Add(result.Used, g.Usage(item))
		}
	}
	return result, nil
}
Code Example #5
File: pods.go Project: RomainVabre/origin
// PodUsageFunc knows how to measure usage associated with pods
func PodUsageFunc(object runtime.Object) api.ResourceList {
	pod, ok := object.(*api.Pod)
	if !ok {
		return api.ResourceList{}
	}

	// by convention, we do not quota pods that have reached an end-of-life state
	if !QuotaPod(pod) {
		return api.ResourceList{}
	}

	// TODO: fix this when we have pod level cgroups
	// when we have pod level cgroups, we can just read pod level requests/limits
	requests := api.ResourceList{}
	limits := api.ResourceList{}
	for i := range pod.Spec.Containers {
		requests = quota.Add(requests, pod.Spec.Containers[i].Resources.Requests)
		limits = quota.Add(limits, pod.Spec.Containers[i].Resources.Limits)
	}

	return podUsageHelper(requests, limits)
}
Code Example #6
File: accessor.go Project: juanluisvaladas/origin
// UpdateQuotaStatus expects the incoming newQuota to have been incremented from the original. The difference
// between the original and the new usage is the amount to add to the per-namespace total, while the aggregate
// total's used value is simply replaced with the new used value.
func (e *clusterQuotaAccessor) UpdateQuotaStatus(newQuota *kapi.ResourceQuota) error {
	clusterQuota, err := e.clusterQuotaLister.Get(newQuota.Name)
	if err != nil {
		return err
	}
	clusterQuota = e.checkCache(clusterQuota)

	// make a copy
	obj, err := kapi.Scheme.Copy(clusterQuota)
	if err != nil {
		return err
	}
	// re-assign objectmeta
	clusterQuota = obj.(*quotaapi.ClusterResourceQuota)
	clusterQuota.ObjectMeta = newQuota.ObjectMeta
	clusterQuota.Namespace = ""

	// determine change in usage
	usageDiff := utilquota.Subtract(newQuota.Status.Used, clusterQuota.Status.Total.Used)

	// update aggregate usage
	clusterQuota.Status.Total.Used = newQuota.Status.Used

	// update per namespace totals
	oldNamespaceTotals, _ := clusterQuota.Status.Namespaces.Get(newQuota.Namespace)
	namespaceTotalCopy, err := kapi.Scheme.DeepCopy(oldNamespaceTotals)
	if err != nil {
		return err
	}
	newNamespaceTotals := namespaceTotalCopy.(kapi.ResourceQuotaStatus)
	newNamespaceTotals.Used = utilquota.Add(oldNamespaceTotals.Used, usageDiff)
	clusterQuota.Status.Namespaces.Insert(newQuota.Namespace, newNamespaceTotals)

	updatedQuota, err := e.clusterQuotaClient.ClusterResourceQuotas().UpdateStatus(clusterQuota)
	if err != nil {
		return err
	}

	e.updatedClusterQuotas.Add(clusterQuota.Name, updatedQuota)
	return nil
}
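
The status arithmetic above is a delta propagation: the difference between the namespace's newly reported usage and the previous aggregate is computed once, the aggregate is replaced with the new used value, and the per-namespace total is bumped by the diff. A worked sketch with hypothetical single-resource quantities (the values are illustrative, not from the source):

package main

import "fmt"

func main() {
	// Hypothetical "cpu" quantities mirroring UpdateQuotaStatus above.
	oldAggregateUsed := int64(3) // clusterQuota.Status.Total.Used
	newQuotaUsed := int64(5)     // newQuota.Status.Used reported by the namespace
	oldNamespaceUsed := int64(3) // oldNamespaceTotals.Used

	usageDiff := newQuotaUsed - oldAggregateUsed     // utilquota.Subtract
	newAggregateUsed := newQuotaUsed                 // total used is the new used value itself
	newNamespaceUsed := oldNamespaceUsed + usageDiff // utilquota.Add

	fmt.Println(usageDiff, newAggregateUsed, newNamespaceUsed) // 2 5 5
}
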
Code Example #7
File: evaluator.go Project: kubernetes/kubernetes
// CalculateUsageStats is a utility function that knows how to calculate aggregate usage.
func CalculateUsageStats(options quota.UsageStatsOptions,
	listFunc ListFuncByNamespace,
	scopeFunc MatchesScopeFunc,
	usageFunc UsageFunc) (quota.UsageStats, error) {
	// default each tracked resource to zero
	result := quota.UsageStats{Used: api.ResourceList{}}
	for _, resourceName := range options.Resources {
		result.Used[resourceName] = resource.Quantity{Format: resource.DecimalSI}
	}
	items, err := listFunc(options.Namespace, v1.ListOptions{
		LabelSelector: labels.Everything().String(),
	})
	if err != nil {
		return result, fmt.Errorf("failed to list content: %v", err)
	}
	for _, item := range items {
		// need to verify that the item matches the set of scopes
		matchesScopes := true
		for _, scope := range options.Scopes {
			innerMatch, err := scopeFunc(scope, item)
			if err != nil {
				// propagate the error rather than silently succeeding with partial usage
				return result, err
			}
			if !innerMatch {
				matchesScopes = false
			}
		}
		// only count usage if there was a match
		if matchesScopes {
			usage, err := usageFunc(item)
			if err != nil {
				return result, err
			}
			result.Used = quota.Add(result.Used, usage)
		}
	}
	return result, nil
}
Code Example #8
// admitBlobWrite checks that the blob write does not exceed the image quota, if one is set. It returns an
// ErrAccessDenied error if the quota is exceeded.
func admitBlobWrite(ctx context.Context, repo *repository) error {
	rqs, err := repo.quotaClient.ResourceQuotas(repo.namespace).List(kapi.ListOptions{})
	if err != nil {
		if kerrors.IsForbidden(err) {
			context.GetLogger(ctx).Warnf("Cannot list resourcequotas because of outdated cluster roles: %v", err)
			return nil
		}
		context.GetLogger(ctx).Errorf("Failed to list resourcequotas: %v", err)
		return err
	}

	usage := kapi.ResourceList{
		// we are about to tag a single image to an image stream
		imageapi.ResourceImages: *resource.NewQuantity(1, resource.DecimalSI),
	}
	resources := quota.ResourceNames(usage)

	for _, rq := range rqs.Items {
		newUsage := quota.Add(usage, rq.Status.Used)
		newUsage = quota.Mask(newUsage, resources)
		requested := quota.Mask(rq.Spec.Hard, resources)

		allowed, exceeded := quota.LessThanOrEqual(newUsage, requested)
		if !allowed {
			details := make([]string, len(exceeded))
			by := quota.Subtract(newUsage, requested)
			for i, r := range exceeded {
				details[i] = fmt.Sprintf("%s limited to %s by %s", r, requested[r], by[r])
			}
			context.GetLogger(ctx).Error("Refusing to write blob exceeding quota: " + strings.Join(details, ", "))
			return distribution.ErrAccessDenied
		}
	}

	return nil
}
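
The admission arithmetic above charges one image, adds it to the quota's current usage, masks both sides down to the constrained resources, and rejects if any component exceeds its hard limit. A minimal sketch of the assumed quota.LessThanOrEqual contract follows; the "images" resource name and quantities are illustrative:

package main

import "fmt"

// lessThanOrEqual sketches the assumed quota.LessThanOrEqual contract:
// true iff every resource in a fits under the matching resource in b;
// the second return value names the resources that exceeded their limit.
func lessThanOrEqual(a, b map[string]int64) (bool, []string) {
	exceeded := []string{}
	for name, used := range a {
		if hard, ok := b[name]; ok && used > hard {
			exceeded = append(exceeded, name)
		}
	}
	return len(exceeded) == 0, exceeded
}

func main() {
	newUsage := map[string]int64{"images": 11} // current usage plus the one image being written
	hard := map[string]int64{"images": 10}
	allowed, exceeded := lessThanOrEqual(newUsage, hard)
	fmt.Println(allowed, exceeded) // false [images]
}
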
Code Example #9
File: controller.go Project: openshift/kubernetes
// checkRequest verifies that the request does not exceed any quota constraint. It returns a copy of the quotas,
// not yet persisted, that captures what the usage would be if the request succeeded. It returns an error if there
// is insufficient quota to satisfy the request.
func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.Attributes) ([]api.ResourceQuota, error) {
	namespace := a.GetNamespace()
	evaluators := e.registry.Evaluators()
	evaluator, found := evaluators[a.GetKind().GroupKind()]
	if !found {
		return quotas, nil
	}

	op := a.GetOperation()
	operationResources := evaluator.OperationResources(op)
	if len(operationResources) == 0 {
		return quotas, nil
	}

	// find the set of quotas that are pertinent to this request
	// reject if we match the quota, but usage is not calculated yet
	// reject if the input object does not satisfy quota constraints
	// if there are no pertinent quotas, we can just return
	inputObject := a.GetObject()
	interestingQuotaIndexes := []int{}
	for i := range quotas {
		resourceQuota := quotas[i]
		match := evaluator.Matches(&resourceQuota, inputObject)
		if !match {
			continue
		}

		hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
		evaluatorResources := evaluator.MatchesResources()
		requiredResources := quota.Intersection(hardResources, evaluatorResources)
		err := evaluator.Constraints(requiredResources, inputObject)
		if err != nil {
			return nil, admission.NewForbidden(a, fmt.Errorf("Failed quota: %s: %v", resourceQuota.Name, err))
		}
		if !hasUsageStats(&resourceQuota) {
			return nil, admission.NewForbidden(a, fmt.Errorf("Status unknown for quota: %s", resourceQuota.Name))
		}

		interestingQuotaIndexes = append(interestingQuotaIndexes, i)
	}
	if len(interestingQuotaIndexes) == 0 {
		return quotas, nil
	}

	// Usage of some resources cannot be counted in isolation, for example when
	// a resource represents a number of unique references to an external
	// resource. In such a case an evaluator needs to process other objects in
	// the same namespace, so the namespace must be known.
	if accessor, err := meta.Accessor(inputObject); namespace != "" && err == nil {
		if accessor.GetNamespace() == "" {
			accessor.SetNamespace(namespace)
		}
	}

	// there is at least one quota that definitely matches our object
	// as a result, we need to measure the usage of this object for quota
	// on updates, we need to subtract the previous measured usage
	// if usage shows no change, just return since it has no impact on quota
	deltaUsage := evaluator.Usage(inputObject)
	if admission.Update == op {
		prevItem := a.GetOldObject()
		if prevItem == nil {
			return nil, admission.NewForbidden(a, fmt.Errorf("Unable to get previous usage since prior version of object was not found"))
		}

		// if we can definitively determine that this is not a case of "create on update",
		// then charge based on the delta.  Otherwise, bill the maximum
		metadata, err := meta.Accessor(prevItem)
		if err == nil && len(metadata.GetResourceVersion()) > 0 {
			prevUsage := evaluator.Usage(prevItem)
			deltaUsage = quota.Subtract(deltaUsage, prevUsage)
		}
	}
	if quota.IsZero(deltaUsage) {
		return quotas, nil
	}

	for _, index := range interestingQuotaIndexes {
		resourceQuota := quotas[index]

		hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
		requestedUsage := quota.Mask(deltaUsage, hardResources)
		newUsage := quota.Add(resourceQuota.Status.Used, requestedUsage)
		maskedNewUsage := quota.Mask(newUsage, quota.ResourceNames(requestedUsage))

		if allowed, exceeded := quota.LessThanOrEqual(maskedNewUsage, resourceQuota.Status.Hard); !allowed {
			failedRequestedUsage := quota.Mask(requestedUsage, exceeded)
			failedUsed := quota.Mask(resourceQuota.Status.Used, exceeded)
			failedHard := quota.Mask(resourceQuota.Status.Hard, exceeded)
			return nil, admission.NewForbidden(a,
				fmt.Errorf("Exceeded quota: %s, requested: %s, used: %s, limited: %s",
					resourceQuota.Name,
					prettyPrint(failedRequestedUsage),
					prettyPrint(failedUsed),
					prettyPrint(failedHard)))
		}

		// update to the new usage number
		quotas[index].Status.Used = newUsage
	}

	return quotas, nil
}
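
On updates, checkRequest bills only the difference between the new and old object's usage, so an unchanged object costs nothing and the request short-circuits; if the old object has no resource version (a possible create-on-update), the full usage is billed. A hedged sketch of that branch, with a single scalar standing in for the usage list:

package main

import "fmt"

// deltaCharge sketches the update-billing rule from checkRequest above,
// with one scalar standing in for the per-resource usage list.
func deltaCharge(newUsage, oldUsage int64, oldHasResourceVersion bool) int64 {
	if oldHasResourceVersion {
		// definitely not create-on-update: charge only the delta (quota.Subtract)
		return newUsage - oldUsage
	}
	// possibly create-on-update: bill the maximum
	return newUsage
}

func main() {
	fmt.Println(deltaCharge(3, 3, true))  // 0: zero delta, no quota impact
	fmt.Println(deltaCharge(3, 0, false)) // 3: full charge
}
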
Code Example #10
// syncResourceQuota runs a complete sync of resource quota status across all known kinds
func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota api.ResourceQuota) (err error) {
	// quota is dirty if any part of spec hard limits differs from the status hard limits
	dirty := !api.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)

	// dirty tracks whether the usage status differs from the previous sync;
	// if so, we send a new usage with the latest status.
	// On our first sync the quota is dirty by default, since we need to track usage.
	dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil)

	// Create a usage object based on the quota's resource version so updates are handled correctly.
	// By default, we preserve the past usage observation and set hard to the current spec.
	previousUsed := api.ResourceList{}
	if resourceQuota.Status.Used != nil {
		previousUsed = quota.Add(api.ResourceList{}, resourceQuota.Status.Used)
	}
	usage := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Name:            resourceQuota.Name,
			Namespace:       resourceQuota.Namespace,
			ResourceVersion: resourceQuota.ResourceVersion,
			Labels:          resourceQuota.Labels,
			Annotations:     resourceQuota.Annotations},
		Status: api.ResourceQuotaStatus{
			Hard: quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard),
			Used: previousUsed,
		},
	}

	// find the intersection between the hard resources on the quota
	// and the resources this controller can track to know what we can
	// look to measure updated usage stats for
	hardResources := quota.ResourceNames(usage.Status.Hard)
	potentialResources := []api.ResourceName{}
	evaluators := rq.registry.Evaluators()
	for _, evaluator := range evaluators {
		potentialResources = append(potentialResources, evaluator.MatchesResources()...)
	}
	matchedResources := quota.Intersection(hardResources, potentialResources)

	// sum the observed usage from each evaluator
	newUsage := api.ResourceList{}
	usageStatsOptions := quota.UsageStatsOptions{Namespace: resourceQuota.Namespace, Scopes: resourceQuota.Spec.Scopes}
	for _, evaluator := range evaluators {
		stats, err := evaluator.UsageStats(usageStatsOptions)
		if err != nil {
			return err
		}
		newUsage = quota.Add(newUsage, stats.Used)
	}

	// mask the observed usage to only the set of resources tracked by this quota
	// merge our observed usage with the quota usage status
	// if the new usage is different than the last usage, we will need to do an update
	newUsage = quota.Mask(newUsage, matchedResources)
	for key, value := range newUsage {
		usage.Status.Used[key] = value
	}

	dirty = dirty || !quota.Equals(usage.Status.Used, resourceQuota.Status.Used)

	// there was a change observed by this controller that requires we update quota
	if dirty {
		_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
		return err
	}
	return nil
}
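
The tracked-resource computation above is a set intersection: only resource names that both appear in the quota's hard limits and are measurable by some evaluator get updated usage. A minimal sketch of the assumed quota.Intersection contract (the real helper may also deduplicate and sort; this shows only the core idea):

package main

import "fmt"

// intersection sketches the assumed quota.Intersection contract:
// the resource names present in both input lists.
func intersection(a, b []string) []string {
	inB := map[string]bool{}
	for _, name := range b {
		inB[name] = true
	}
	out := []string{}
	for _, name := range a {
		if inB[name] {
			out = append(out, name)
		}
	}
	return out
}

func main() {
	hard := []string{"pods", "secrets"}        // constrained by the quota
	measurable := []string{"pods", "services"} // trackable by the evaluators
	fmt.Println(intersection(hard, measurable)) // [pods]
}
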
Code Example #11
// syncQuotaForNamespaces reconciles a cluster quota against the given namespace work items
func (c *ClusterQuotaReconcilationController) syncQuotaForNamespaces(originalQuota *quotaapi.ClusterResourceQuota, workItems []workItem) (error, []workItem /* to retry */) {
	obj, err := kapi.Scheme.Copy(originalQuota)
	if err != nil {
		return err, workItems
	}
	quota := obj.(*quotaapi.ClusterResourceQuota)

	// get the list of namespaces that match this cluster quota
	matchingNamespaceNamesList, quotaSelector := c.clusterQuotaMapper.GetNamespacesFor(quota.Name)
	if !kapi.Semantic.DeepEqual(quotaSelector, quota.Spec.Selector) {
		return fmt.Errorf("mapping not up to date, have=%v need=%v", quotaSelector, quota.Spec.Selector), workItems
	}
	matchingNamespaceNames := sets.NewString(matchingNamespaceNamesList...)

	reconcilationErrors := []error{}
	retryItems := []workItem{}
	for _, item := range workItems {
		namespaceName := item.namespaceName
		namespaceTotals, namespaceLoaded := quota.Status.Namespaces.Get(namespaceName)
		if !matchingNamespaceNames.Has(namespaceName) {
			if namespaceLoaded {
				// remove this item from all totals
				quota.Status.Total.Used = utilquota.Subtract(quota.Status.Total.Used, namespaceTotals.Used)
				quota.Status.Namespaces.Remove(namespaceName)
			}
			continue
		}

		// if there's no work for us to do, do nothing
		if !item.forceRecalculation && namespaceLoaded && kapi.Semantic.DeepEqual(namespaceTotals.Hard, quota.Spec.Quota.Hard) {
			continue
		}

		actualUsage, err := quotaUsageCalculationFunc(namespaceName, quota.Spec.Quota.Scopes, quota.Spec.Quota.Hard, c.registry)
		if err != nil {
			// tally up errors, but calculate everything you can
			reconcilationErrors = append(reconcilationErrors, err)
			retryItems = append(retryItems, item)
			continue
		}
		recalculatedStatus := kapi.ResourceQuotaStatus{
			Used: actualUsage,
			Hard: quota.Spec.Quota.Hard,
		}

		// subtract old usage, add new usage
		quota.Status.Total.Used = utilquota.Subtract(quota.Status.Total.Used, namespaceTotals.Used)
		quota.Status.Total.Used = utilquota.Add(quota.Status.Total.Used, recalculatedStatus.Used)
		quota.Status.Namespaces.Insert(namespaceName, recalculatedStatus)
	}

	quota.Status.Total.Hard = quota.Spec.Quota.Hard

	// if there's no change, no update, return early.  NewAggregate returns nil on empty input
	if kapi.Semantic.DeepEqual(quota, originalQuota) {
		return kutilerrors.NewAggregate(reconcilationErrors), retryItems
	}

	if _, err := c.clusterQuotaClient.ClusterResourceQuotas().UpdateStatus(quota); err != nil {
		return kutilerrors.NewAggregate(append(reconcilationErrors, err)), workItems
	}

	return kutilerrors.NewAggregate(reconcilationErrors), retryItems
}
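
The reconciliation step above replaces a namespace's contribution in two moves: subtract the namespace's previous totals from the aggregate, then add the freshly recalculated usage back before recording the new per-namespace status. A worked sketch with hypothetical quantities:

package main

import "fmt"

func main() {
	// Hypothetical "pods" quantities mirroring syncQuotaForNamespaces above.
	aggregateUsed := int64(7)    // quota.Status.Total.Used across all namespaces
	oldNamespaceUsed := int64(2) // namespaceTotals.Used from the last sync
	recalculatedUsed := int64(4) // actualUsage from quotaUsageCalculationFunc

	aggregateUsed -= oldNamespaceUsed // utilquota.Subtract: drop the stale contribution
	aggregateUsed += recalculatedUsed // utilquota.Add: add the fresh one

	fmt.Println(aggregateUsed) // 9
}
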
Code Example #12
File: admission.go Project: mataihang/kubernetes
// Admit makes admission decisions while enforcing quota
func (q *quotaAdmission) Admit(a admission.Attributes) (err error) {
	// ignore all operations that correspond to sub-resource actions
	if a.GetSubresource() != "" {
		return nil
	}

	// if we do not know how to evaluate usage for this kind, just ignore
	evaluators := q.registry.Evaluators()
	evaluator, found := evaluators[a.GetKind()]
	if !found {
		return nil
	}

	// for this kind, check if the operation could mutate any quota resources
	// if no resources tracked by quota are impacted, then just return
	op := a.GetOperation()
	operationResources := evaluator.OperationResources(op)
	if len(operationResources) == 0 {
		return nil
	}

	// determine if there are any quotas in this namespace
	// if there are no quotas, we don't need to do anything
	namespace, name := a.GetNamespace(), a.GetName()
	items, err := q.indexer.Index("namespace", &api.ResourceQuota{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: ""}})
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Error resolving quota."))
	}
	// if there are no items held in our indexer, check our live-lookup LRU; if that misses, do the live lookup to prime it.
	if len(items) == 0 {
		lruItemObj, ok := q.liveLookupCache.Get(a.GetNamespace())
		if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
			// TODO: If there are multiple operations at the same time and cache has just expired,
			// this may cause multiple List operations being issued at the same time.
			// If there is already in-flight List() for a given namespace, we should wait until
			// it is finished and cache is updated instead of doing the same, also to avoid
			// throttling - see #22422 for details.
			liveList, err := q.client.Core().ResourceQuotas(namespace).List(api.ListOptions{})
			if err != nil {
				return admission.NewForbidden(a, err)
			}
			newEntry := liveLookupEntry{expiry: time.Now().Add(q.liveTTL)}
			for i := range liveList.Items {
				newEntry.items = append(newEntry.items, &liveList.Items[i])
			}
			q.liveLookupCache.Add(a.GetNamespace(), newEntry)
			lruItemObj = newEntry
		}
		lruEntry := lruItemObj.(liveLookupEntry)
		for i := range lruEntry.items {
			items = append(items, lruEntry.items[i])
		}
	}
	// if there are still no items, we can return
	if len(items) == 0 {
		return nil
	}

	// find the set of quotas that are pertinent to this request
	// reject if we match the quota, but usage is not calculated yet
	// reject if the input object does not satisfy quota constraints
	// if there are no pertinent quotas, we can just return
	inputObject := a.GetObject()
	resourceQuotas := []*api.ResourceQuota{}
	for i := range items {
		resourceQuota := items[i].(*api.ResourceQuota)
		match := evaluator.Matches(resourceQuota, inputObject)
		if !match {
			continue
		}
		hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
		evaluatorResources := evaluator.MatchesResources()
		requiredResources := quota.Intersection(hardResources, evaluatorResources)
		err := evaluator.Constraints(requiredResources, inputObject)
		if err != nil {
			return admission.NewForbidden(a, fmt.Errorf("Failed quota: %s: %v", resourceQuota.Name, err))
		}
		if !hasUsageStats(resourceQuota) {
			return admission.NewForbidden(a, fmt.Errorf("Status unknown for quota: %s", resourceQuota.Name))
		}
		resourceQuotas = append(resourceQuotas, resourceQuota)
	}
	if len(resourceQuotas) == 0 {
		return nil
	}

	// there is at least one quota that definitely matches our object
	// as a result, we need to measure the usage of this object for quota
	// on updates, we need to subtract the previous measured usage
	// if usage shows no change, just return since it has no impact on quota
	deltaUsage := evaluator.Usage(inputObject)
	if admission.Update == op {
		prevItem, err := evaluator.Get(namespace, name)
		if err != nil {
			return admission.NewForbidden(a, fmt.Errorf("Unable to get previous: %v", err))
		}
		prevUsage := evaluator.Usage(prevItem)
		deltaUsage = quota.Subtract(deltaUsage, prevUsage)
	}
	if quota.IsZero(deltaUsage) {
		return nil
	}

	// TODO: Move to a bucketing work queue
	// If we guaranteed that we processed the request in order it was received to server, we would reduce quota conflicts.
	// Until we have the bucketing work queue, we jitter requests and retry on conflict.
	numRetries := 10
	interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond

	// seed the retry loop with the initial set of quotas to process (should reduce each iteration)
	resourceQuotasToProcess := resourceQuotas
	for retry := 1; retry <= numRetries; retry++ {
		// the list of quotas we will try again if there is a version conflict
		tryAgain := []*api.ResourceQuota{}

		// check that we pass all remaining quotas so we do not prematurely charge
		// for each quota, mask the usage to the set of resources tracked by the quota
		// if request + used > hard, return an error describing the failure
		updatedUsage := map[string]api.ResourceList{}
		for _, resourceQuota := range resourceQuotasToProcess {
			hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
			requestedUsage := quota.Mask(deltaUsage, hardResources)
			newUsage := quota.Add(resourceQuota.Status.Used, requestedUsage)
			if allowed, exceeded := quota.LessThanOrEqual(newUsage, resourceQuota.Status.Hard); !allowed {
				failedRequestedUsage := quota.Mask(requestedUsage, exceeded)
				failedUsed := quota.Mask(resourceQuota.Status.Used, exceeded)
				failedHard := quota.Mask(resourceQuota.Status.Hard, exceeded)
				return admission.NewForbidden(a,
					fmt.Errorf("Exceeded quota: %s, requested: %s, used: %s, limited: %s",
						resourceQuota.Name,
						prettyPrint(failedRequestedUsage),
						prettyPrint(failedUsed),
						prettyPrint(failedHard)))
			}
			updatedUsage[resourceQuota.Name] = newUsage
		}

		// update the status for each quota with its new usage
		// if we get a conflict, get updated quota, and enqueue
		for i, resourceQuota := range resourceQuotasToProcess {
			newUsage := updatedUsage[resourceQuota.Name]
			quotaToUpdate := &api.ResourceQuota{
				ObjectMeta: api.ObjectMeta{
					Name:            resourceQuota.Name,
					Namespace:       resourceQuota.Namespace,
					ResourceVersion: resourceQuota.ResourceVersion,
				},
				Status: api.ResourceQuotaStatus{
					Hard: quota.Add(api.ResourceList{}, resourceQuota.Status.Hard),
					Used: newUsage,
				},
			}
			_, err = q.client.Core().ResourceQuotas(quotaToUpdate.Namespace).UpdateStatus(quotaToUpdate)
			if err != nil {
				if !errors.IsConflict(err) {
					return admission.NewForbidden(a, fmt.Errorf("Unable to update quota status: %s %v", resourceQuota.Name, err))
				}
				// if we get a conflict, we get the latest copy of the quota documents that were not yet modified so we retry all with latest state.
				for fetchIndex := i; fetchIndex < len(resourceQuotasToProcess); fetchIndex++ {
					latestQuota, err := q.client.Core().ResourceQuotas(namespace).Get(resourceQuotasToProcess[fetchIndex].Name)
					if err != nil {
						return admission.NewForbidden(a, fmt.Errorf("Unable to get quota: %s %v", resourceQuotasToProcess[fetchIndex].Name, err))
					}
					tryAgain = append(tryAgain, latestQuota)
				}
				break
			}
		}

		// all quotas were updated, so we can return
		if len(tryAgain) == 0 {
			return nil
		}

		// we have concurrent requests to update quota, so look to retry if needed
		// next iteration, we need to process the items that have to try again
		// pause the specified interval to encourage jitter
		if retry == numRetries {
			names := []string{}
			for _, quota := range tryAgain {
				names = append(names, quota.Name)
			}
			return admission.NewForbidden(a, fmt.Errorf("Unable to update status for quota: %s, ", strings.Join(names, ",")))
		}
		resourceQuotasToProcess = tryAgain
		time.Sleep(interval)
	}
	return nil
}
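
The retry loop above is a hand-rolled optimistic-concurrency pattern: write the status at the stored resource version, and on a version conflict re-fetch the latest quota documents and retry after a jittered pause so that competing writers spread out. A generic sketch of the same pattern follows; the names and the conflict sentinel are illustrative, not from the source.

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

var errConflict = errors.New("conflict")

// retryOnConflict sketches the jittered retry loop used by Admit above:
// run update; on a version conflict, sleep a randomized interval and retry.
func retryOnConflict(retries int, update func() error) error {
	interval := time.Duration(rand.Int63n(90)+10) * time.Millisecond
	for attempt := 1; attempt <= retries; attempt++ {
		err := update()
		if err == nil || !errors.Is(err, errConflict) {
			return err // success, or a non-conflict failure we will not retry
		}
		if attempt == retries {
			return fmt.Errorf("unable to update after %d attempts: %w", retries, err)
		}
		time.Sleep(interval)
	}
	return nil
}

func main() {
	calls := 0
	err := retryOnConflict(10, func() error {
		calls++
		if calls < 3 {
			return errConflict // first two writers win the race
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
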