Example No. 1
func (d *sccExecRestrictions) Admit(a admission.Attributes) (err error) {
	if a.GetOperation() != admission.Connect {
		return nil
	}
	if a.GetResource() != kapi.Resource("pods") {
		return nil
	}
	if a.GetSubresource() != "attach" && a.GetSubresource() != "exec" {
		return nil
	}

	pod, err := d.client.Pods(a.GetNamespace()).Get(a.GetName())
	if err != nil {
		return admission.NewForbidden(a, err)
	}

	// create a synthetic admission attribute to check SCC admission status for this pod
	// clear the SA name, so that any permissions MUST be based on the user's power, not the SA's power.
	pod.Spec.ServiceAccountName = ""
	createAttributes := admission.NewAttributesRecord(pod, kapi.Kind("Pod"), a.GetNamespace(), a.GetName(), a.GetResource(), a.GetSubresource(), admission.Create, a.GetUserInfo())
	if err := d.constraintAdmission.Admit(createAttributes); err != nil {
		return admission.NewForbidden(a, err)
	}

	return nil
}
Example No. 2
func (d *sccExecRestrictions) Admit(a admission.Attributes) (err error) {
	if a.GetOperation() != admission.Connect {
		return nil
	}
	if a.GetResource().GroupResource() != kapi.Resource("pods") {
		return nil
	}
	if a.GetSubresource() != "attach" && a.GetSubresource() != "exec" {
		return nil
	}

	pod, err := d.client.Core().Pods(a.GetNamespace()).Get(a.GetName())
	if err != nil {
		return admission.NewForbidden(a, err)
	}

	// TODO, if we want to actually limit who can use which service account, then we'll need to add logic here to make sure that
	// we're allowed to use the SA the pod is using.  Otherwise, user-A creates a pod and user-B (who can't use the SA) can exec into it.
	createAttributes := admission.NewAttributesRecord(pod, pod, kapi.Kind("Pod").WithVersion(""), a.GetNamespace(), a.GetName(), a.GetResource(), "", admission.Create, a.GetUserInfo())
	if err := d.constraintAdmission.Admit(createAttributes); err != nil {
		return admission.NewForbidden(a, err)
	}

	return nil
}
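The core trick in Examples Nos. 1 and 2 is replaying create-time policy against a synthetic CREATE attributes record. Below is a minimal, self-contained sketch of that pattern; the Attributes struct and constraintChecker are simplified stand-ins for the real admission package types, not the actual Kubernetes API.

package main

import (
	"errors"
	"fmt"
)

type Operation string

const (
	Connect Operation = "CONNECT"
	Create  Operation = "CREATE"
)

// Attributes is a simplified stand-in for admission.Attributes.
type Attributes struct {
	Operation   Operation
	Resource    string
	Subresource string
	User        string
}

// constraintChecker mimics a create-time admission plugin's Admit.
type constraintChecker func(Attributes) error

// admitExec gates exec/attach by re-running the create-time check
// against a synthetic CREATE record for the target pod.
func admitExec(a Attributes, check constraintChecker) error {
	if a.Operation != Connect || a.Resource != "pods" {
		return nil
	}
	if a.Subresource != "attach" && a.Subresource != "exec" {
		return nil
	}
	synthetic := Attributes{Operation: Create, Resource: "pods", User: a.User}
	if err := check(synthetic); err != nil {
		return fmt.Errorf("forbidden: %v", err)
	}
	return nil
}

func main() {
	deny := func(Attributes) error { return errors.New("SCC denies pod creation") }
	err := admitExec(Attributes{Operation: Connect, Resource: "pods", Subresource: "exec", User: "alice"}, deny)
	fmt.Println(err) // forbidden: SCC denies pod creation
}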
Example No. 3
func (e *quotaEvaluator) Evaluate(a admission.Attributes) error {
	e.init.Do(func() {
		go e.run()
	})

	// if we do not know how to evaluate usage for this kind, just ignore
	evaluators := e.registry.Evaluators()
	evaluator, found := evaluators[a.GetKind().GroupKind()]
	if !found {
		return nil
	}
	// for this kind, check if the operation could mutate any quota resources
	// if no resources tracked by quota are impacted, then just return
	op := a.GetOperation()
	operationResources := evaluator.OperationResources(op)
	if len(operationResources) == 0 {
		return nil
	}

	waiter := newAdmissionWaiter(a)

	e.addWork(waiter)

	// wait for completion or timeout
	select {
	case <-waiter.finished:
	case <-time.After(10 * time.Second):
		return fmt.Errorf("timeout")
	}

	return waiter.result
}
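Example No. 3 hands the real work to a background goroutine and bounds the wait with a select on a timer. A runnable distillation of that handoff, with a simplified waiter type standing in for the example's admissionWaiter:

package main

import (
	"errors"
	"fmt"
	"time"
)

type waiter struct {
	finished chan struct{}
	result   error
}

// evaluate queues the waiter and blocks until the worker closes
// finished, or until the deadline elapses.
func evaluate(work chan<- *waiter) error {
	w := &waiter{finished: make(chan struct{})}
	work <- w
	select {
	case <-w.finished:
		return w.result
	case <-time.After(100 * time.Millisecond):
		return errors.New("timeout")
	}
}

func main() {
	work := make(chan *waiter)
	go func() {
		for w := range work {
			w.result = nil // pretend the quota check passed
			close(w.finished)
		}
	}()
	fmt.Println(evaluate(work)) // <nil>
}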
Example No. 4
// Admit determines if the service should be admitted based on the configured network CIDR.
func (r *externalIPRanger) Admit(a kadmission.Attributes) error {
	if a.GetResource().GroupResource() != kapi.Resource("services") {
		return nil
	}

	svc, ok := a.GetObject().(*kapi.Service)
	// if we can't convert then we don't handle this object so just return
	if !ok {
		return nil
	}

	// Determine if an ingress ip address should be allowed as an
	// external ip by checking the loadbalancer status of the previous
	// object state. Only updates need to be validated against the
	// ingress ip since the loadbalancer status cannot be set on
	// create.
	ingressIP := ""
	retrieveIngressIP := a.GetOperation() == kadmission.Update &&
		r.allowIngressIP && svc.Spec.Type == kapi.ServiceTypeLoadBalancer
	if retrieveIngressIP {
		old, ok := a.GetOldObject().(*kapi.Service)
		ipPresent := ok && old != nil && len(old.Status.LoadBalancer.Ingress) > 0
		if ipPresent {
			ingressIP = old.Status.LoadBalancer.Ingress[0].IP
		}
	}

	var errs field.ErrorList
	switch {
	// administrator disabled externalIPs
	case len(svc.Spec.ExternalIPs) > 0 && len(r.admit) == 0:
		onlyIngressIP := len(svc.Spec.ExternalIPs) == 1 && svc.Spec.ExternalIPs[0] == ingressIP
		if !onlyIngressIP {
			errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs"), "externalIPs have been disabled"))
		}
	// administrator has limited the range
	case len(svc.Spec.ExternalIPs) > 0 && len(r.admit) > 0:
		for i, s := range svc.Spec.ExternalIPs {
			ip := net.ParseIP(s)
			if ip == nil {
				errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs").Index(i), "externalIPs must be a valid address"))
				continue
			}
			notIngressIP := s != ingressIP
			if (NetworkSlice(r.reject).Contains(ip) || !NetworkSlice(r.admit).Contains(ip)) && notIngressIP {
				errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs").Index(i), "externalIP is not allowed"))
				continue
			}
		}
	}
	if len(errs) > 0 {
		return apierrs.NewInvalid(a.GetKind().GroupKind(), a.GetName(), errs)
	}
	return nil
}
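The allow/reject decision in Example No. 4 reduces to CIDR containment checks. Here is a self-contained sketch using only the standard net package; the contains helper is a plausible reconstruction of the NetworkSlice.Contains used above, whose implementation is not shown in the example.

package main

import (
	"fmt"
	"net"
)

// contains reports whether any network in the slice contains ip.
func contains(networks []*net.IPNet, ip net.IP) bool {
	for _, n := range networks {
		if n.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	_, admit, _ := net.ParseCIDR("10.0.0.0/8")
	_, reject, _ := net.ParseCIDR("10.1.0.0/16")
	for _, s := range []string{"10.2.3.4", "10.1.2.3", "192.168.1.1", "not-an-ip"} {
		ip := net.ParseIP(s)
		switch {
		case ip == nil:
			fmt.Println(s, "=> invalid address")
		case contains([]*net.IPNet{reject}, ip) || !contains([]*net.IPNet{admit}, ip):
			fmt.Println(s, "=> not allowed")
		default:
			fmt.Println(s, "=> allowed")
		}
	}
}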
Example No. 5
func (l *lifecycle) Admit(a admission.Attributes) (err error) {

	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete && a.GetKind() == "Namespace" && l.immortalNamespaces.Has(a.GetName()) {
		return errors.NewForbidden(a.GetKind(), a.GetName(), fmt.Errorf("this namespace may not be deleted"))
	}

	gvk, err := api.RESTMapper.KindFor(a.GetResource())
	if err != nil {
		return errors.NewInternalError(err)
	}
	mapping, err := api.RESTMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
	if err != nil {
		return errors.NewInternalError(err)
	}
	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
		return nil
	}
	namespaceObj, exists, err := l.store.Get(&api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
	})
	if err != nil {
		return errors.NewInternalError(err)
	}

	// refuse to operate on non-existent namespaces
	if !exists {
		// in case of latency in our caches, make a call directly to storage to verify whether it truly exists
		namespaceObj, err = l.client.Namespaces().Get(a.GetNamespace())
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
	}

	// ensure that we're not trying to create objects in terminating namespaces
	if a.GetOperation() == admission.Create {
		namespace := namespaceObj.(*api.Namespace)
		if namespace.Status.Phase != api.NamespaceTerminating {
			return nil
		}

		// TODO: This should probably not be a 403
		return admission.NewForbidden(a, fmt.Errorf("Unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
	}

	return nil
}
Example No. 6
func (l *lifecycle) Admit(a admission.Attributes) (err error) {
	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete && a.GetKind() == api.Kind("Namespace") && l.immortalNamespaces.Has(a.GetName()) {
		return errors.NewForbidden(a.GetResource(), a.GetName(), fmt.Errorf("this namespace may not be deleted"))
	}

	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind() == api.Kind("Namespace") {
		return nil
	}

	namespaceObj, exists, err := l.store.Get(&api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
	})
	if err != nil {
		return errors.NewInternalError(err)
	}

	// refuse to operate on non-existent namespaces
	if !exists {
		// in case of latency in our caches, make a call directly to storage to verify whether it truly exists
		namespaceObj, err = l.client.Legacy().Namespaces().Get(a.GetNamespace())
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
	}

	// ensure that we're not trying to create objects in terminating namespaces
	if a.GetOperation() == admission.Create {
		namespace := namespaceObj.(*api.Namespace)
		if namespace.Status.Phase != api.NamespaceTerminating {
			return nil
		}

		// TODO: This should probably not be a 403
		return admission.NewForbidden(a, fmt.Errorf("Unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
	}

	return nil
}
Example No. 7
// Admit enforces that the pod's and its project's node label selectors match at least one node in the cluster.
func (p *podNodeEnvironment) Admit(a admission.Attributes) (err error) {
	// ignore anything except create or update of pods
	if !(a.GetOperation() == admission.Create || a.GetOperation() == admission.Update) {
		return nil
	}
	resource := a.GetResource()
	if resource != "pods" {
		return nil
	}

	obj := a.GetObject()
	pod, ok := obj.(*kapi.Pod)
	if !ok {
		return nil
	}

	name := pod.Name

	projects, err := projectcache.GetProjectCache()
	if err != nil {
		return err
	}
	namespace, err := projects.GetNamespaceObject(a.GetNamespace())
	if err != nil {
		return apierrors.NewForbidden(resource, name, err)
	}
	projectNodeSelector, err := projects.GetNodeSelectorMap(namespace)
	if err != nil {
		return err
	}

	if labelselector.Conflicts(projectNodeSelector, pod.Spec.NodeSelector) {
		return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its project node label selector"))
	}

	// modify pod node selector = project node selector + current pod node selector
	pod.Spec.NodeSelector = labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector)

	return nil
}
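The merge and conflict semantics that labelselector.Conflicts and labelselector.Merge are assumed to provide in Example No. 7 can be illustrated with plain maps. A minimal sketch; these helpers are reconstructions for illustration, not the actual library code.

package main

import "fmt"

// conflicts reports whether the two selectors assign different values
// to the same key.
func conflicts(a, b map[string]string) bool {
	for k, v := range a {
		if bv, ok := b[k]; ok && bv != v {
			return true
		}
	}
	return false
}

// merge overlays pod entries on top of project entries.
func merge(project, pod map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range project {
		out[k] = v
	}
	for k, v := range pod {
		out[k] = v
	}
	return out
}

func main() {
	project := map[string]string{"region": "east"}
	pod := map[string]string{"disk": "ssd"}
	fmt.Println(conflicts(project, pod))                                  // false
	fmt.Println(merge(project, pod))                                      // map[disk:ssd region:east]
	fmt.Println(conflicts(project, map[string]string{"region": "west"})) // true
}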
Example No. 8
func (l *lifecycle) Admit(a admission.Attributes) (err error) {

	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete {
		if a.GetKind() == "Namespace" && l.immortalNamespaces.Has(a.GetName()) {
			return errors.NewForbidden(a.GetKind(), a.GetName(), fmt.Errorf("namespace can never be deleted"))
		}
		return nil
	}

	defaultVersion, kind, err := api.RESTMapper.VersionAndKindForResource(a.GetResource())
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	mapping, err := api.RESTMapper.RESTMapping(kind, defaultVersion)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
		return nil
	}
	namespaceObj, exists, err := l.store.Get(&api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
	})
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if !exists {
		return nil
	}
	namespace := namespaceObj.(*api.Namespace)
	if namespace.Status.Phase != api.NamespaceTerminating {
		return nil
	}

	return admission.NewForbidden(a, fmt.Errorf("Unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
}
Example No. 9
// Admit attempts to apply the image policy to the incoming resource.
func (a *imagePolicyPlugin) Admit(attr admission.Attributes) error {
	switch attr.GetOperation() {
	case admission.Create, admission.Update:
		if len(attr.GetSubresource()) > 0 {
			return nil
		}
		// only create and update are tested, and only on core resources
		// TODO: scan all resources
		// TODO: Create a general equivalence map for admission - operation X on subresource Y is equivalent to reduced operation
	default:
		return nil
	}

	gr := attr.GetResource().GroupResource()
	if !a.accepter.Covers(gr) {
		return nil
	}

	m, err := meta.GetImageReferenceMutator(attr.GetObject())
	if err != nil {
		return apierrs.NewForbidden(gr, attr.GetName(), fmt.Errorf("unable to apply image policy against objects of type %T: %v", attr.GetObject(), err))
	}

	// load exclusion rules from the namespace cache
	var excluded sets.String
	if ns := attr.GetNamespace(); len(ns) > 0 {
		if ns, err := a.projectCache.GetNamespace(ns); err == nil {
			if value := ns.Annotations[api.IgnorePolicyRulesAnnotation]; len(value) > 0 {
				excluded = sets.NewString(strings.Split(value, ",")...)
			}
		}
	}

	if err := accept(a.accepter, a.resolver, m, attr, excluded); err != nil {
		return err
	}

	return nil
}
Example No. 10
// Admit makes admission decisions while enforcing quota
func (q *quotaAdmission) Admit(a admission.Attributes) (err error) {
	// ignore all operations that correspond to sub-resource actions
	if a.GetSubresource() != "" {
		return nil
	}

	// if we do not know how to evaluate usage for this kind, just ignore
	evaluators := q.evaluator.registry.Evaluators()
	evaluator, found := evaluators[a.GetKind().GroupKind()]
	if !found {
		return nil
	}

	// for this kind, check if the operation could mutate any quota resources
	// if no resources tracked by quota are impacted, then just return
	op := a.GetOperation()
	operationResources := evaluator.OperationResources(op)
	if len(operationResources) == 0 {
		return nil
	}

	return q.evaluator.evaluate(a)
}
Example No. 11
func (o *podNodeConstraints) Admit(attr admission.Attributes) error {
	switch {
	case o.config == nil,
		attr.GetSubresource() != "":
		return nil
	}
	shouldCheck, err := shouldCheckResource(attr.GetResource().GroupResource(), attr.GetKind().GroupKind())
	if err != nil {
		return err
	}
	if !shouldCheck {
		return nil
	}
	// Only check Create operation on pods
	if attr.GetResource().GroupResource() == kapi.Resource("pods") && attr.GetOperation() != admission.Create {
		return nil
	}
	ps, err := o.getPodSpec(attr)
	if err == nil {
		return o.admitPodSpec(attr, ps)
	}
	return err
}
Example No. 12
// Admit admits resources into cluster that do not violate any defined LimitRange in the namespace
func (l *limitRanger) Admit(a admission.Attributes) (err error) {

	// Ignore all calls to subresources
	if a.GetSubresource() != "" {
		return nil
	}

	obj := a.GetObject()
	resource := a.GetResource()
	name := "Unknown"
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
		if len(name) == 0 {
			name, _ = meta.NewAccessor().GenerateName(obj)
		}
	}

	key := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}
	items, err := l.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Unable to %s %s at this time because there was an error enforcing limit ranges", a.GetOperation(), resource))
	}
	if len(items) == 0 {
		return nil
	}

	// ensure it meets each prescribed min/max
	for i := range items {
		limitRange := items[i].(*api.LimitRange)
		err = l.limitFunc(limitRange, a.GetResource(), a.GetObject())
		if err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	return nil
}
Example No. 13
func (q *quota) Admit(a admission.Attributes) (err error) {
	if a.GetSubresource() != "" {
		return nil
	}

	if a.GetOperation() == "DELETE" {
		return nil
	}

	key := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}

	// concurrent operations that modify quota tracked resources can cause a conflict when incrementing usage
	// as a result, we will attempt to increment quota usage per request up to numRetries limit
	// we fuzz each retry with an interval period to attempt to improve end-user experience during concurrent operations
	numRetries := 10
	interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond

	items, err := q.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("unable to %s %s at this time because there was an error enforcing quota", a.GetOperation(), a.GetResource()))
	}
	if len(items) == 0 {
		return nil
	}

	for i := range items {

		quota := items[i].(*api.ResourceQuota)

		for retry := 1; retry <= numRetries; retry++ {

			// we cannot modify the value directly in the cache, so we copy
			status := &api.ResourceQuotaStatus{
				Hard: api.ResourceList{},
				Used: api.ResourceList{},
			}
			for k, v := range quota.Status.Hard {
				status.Hard[k] = *v.Copy()
			}
			for k, v := range quota.Status.Used {
				status.Used[k] = *v.Copy()
			}

			dirty, err := IncrementUsage(a, status, q.client)
			if err != nil {
				return admission.NewForbidden(a, err)
			}

			if dirty {
				// construct a usage record
				usage := api.ResourceQuota{
					ObjectMeta: api.ObjectMeta{
						Name:            quota.Name,
						Namespace:       quota.Namespace,
						ResourceVersion: quota.ResourceVersion,
						Labels:          quota.Labels,
						Annotations:     quota.Annotations},
				}
				usage.Status = *status
				_, err = q.client.ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
				if err == nil {
					break
				}

				// we have concurrent requests to update quota, so look to retry if needed
				if retry == numRetries {
					return admission.NewForbidden(a, fmt.Errorf("unable to %s %s at this time because there are too many concurrent requests to increment quota", a.GetOperation(), a.GetResource()))
				}
				time.Sleep(interval)
				// manually get the latest quota
				quota, err = q.client.ResourceQuotas(usage.Namespace).Get(quota.Name)
				if err != nil {
					return admission.NewForbidden(a, err)
				}
			}
		}
	}
	return nil
}
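The retry loop in Example No. 13 is a general recipe for optimistic-concurrency conflicts on UpdateStatus: retry up to a limit, sleeping a jittered interval between attempts. A compact, runnable distillation, with a hypothetical errConflict sentinel standing in for an errors.IsConflict check against the API server:

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

var errConflict = errors.New("conflict")

// withRetry retries update on version conflicts, sleeping a jittered
// 10-100ms interval between attempts, like the quota loop above.
func withRetry(numRetries int, update func() error) error {
	interval := time.Duration(rand.Int63n(90)+10) * time.Millisecond
	for retry := 1; retry <= numRetries; retry++ {
		err := update()
		if err == nil {
			return nil
		}
		if !errors.Is(err, errConflict) || retry == numRetries {
			return err
		}
		time.Sleep(interval)
	}
	return nil
}

func main() {
	attempts := 0
	err := withRetry(10, func() error {
		attempts++
		if attempts < 3 {
			return errConflict // simulate two stale ResourceVersions
		}
		return nil
	})
	fmt.Println(err, "after", attempts, "attempts")
}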
Example No. 14
// IncrementUsage updates the supplied ResourceQuotaStatus object based on the incoming operation.
// It returns true if the usage must be recorded prior to admitting the new resource,
// and an error if the operation should not pass admission control.
func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, client client.Interface) (bool, error) {
	var errs []error
	dirty := true
	set := map[api.ResourceName]bool{}
	for k := range status.Hard {
		set[k] = true
	}
	obj := a.GetObject()
	// handle max counts for each kind of resource (pods, services, replicationControllers, etc.)
	if a.GetOperation() == admission.Create {
		resourceName := resourceToResourceName[a.GetResource()]
		hard, hardFound := status.Hard[resourceName]
		if hardFound {
			used, usedFound := status.Used[resourceName]
			if !usedFound {
				return false, fmt.Errorf("quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value() >= hard.Value() {
				errs = append(errs, fmt.Errorf("limited to %s %s", hard.String(), resourceName))
				dirty = false
			} else {
				status.Used[resourceName] = *resource.NewQuantity(used.Value()+int64(1), resource.DecimalSI)
			}
		}
	}

	if a.GetResource() == "pods" {
		for _, resourceName := range []api.ResourceName{api.ResourceMemory, api.ResourceCPU} {

			// ignore tracking the resource if it's not in the quota document
			if !set[resourceName] {
				continue
			}

			hard, hardFound := status.Hard[resourceName]
			if !hardFound {
				continue
			}

			// if we do not yet know how much of the current resource is used, we cannot accept any request
			used, usedFound := status.Used[resourceName]
			if !usedFound {
				return false, fmt.Errorf("unable to admit pod until quota usage stats are calculated.")
			}

			// the amount of resource being requested, or an error if it does not make a request that is tracked
			pod := obj.(*api.Pod)
			delta, err := resourcequotacontroller.PodRequests(pod, resourceName)

			if err != nil {
				return false, fmt.Errorf("must make a non-zero request for %s since it is tracked by quota.", resourceName)
			}

			// if this operation is an update, we need to find the delta usage from the previous state
			if a.GetOperation() == admission.Update {
				oldPod, err := client.Pods(a.GetNamespace()).Get(pod.Name)
				if err != nil {
					return false, err
				}

				// if the previous version of the resource made a resource request, we need to subtract the old request
				// from the current to get the actual resource request delta.  if the previous version of the pod
				// made no request on the resource, then we get an err value.  we ignore the err value, and delta
				// will just be equal to the total resource request on the pod since there is nothing to subtract.
				oldRequest, err := resourcequotacontroller.PodRequests(oldPod, resourceName)
				if err == nil {
					err = delta.Sub(*oldRequest)
					if err != nil {
						return false, err
					}
				}
			}

			newUsage := used.Copy()
			newUsage.Add(*delta)

			// make the most precise comparison possible
			newUsageValue := newUsage.Value()
			hardUsageValue := hard.Value()
			if newUsageValue <= resource.MaxMilliValue && hardUsageValue <= resource.MaxMilliValue {
				newUsageValue = newUsage.MilliValue()
				hardUsageValue = hard.MilliValue()
			}

			if newUsageValue > hardUsageValue {
				errs = append(errs, fmt.Errorf("unable to admit pod without exceeding quota for resource %s:  limited to %s but require %s to succeed.", resourceName, hard.String(), newUsage.String()))
				dirty = false
			} else {
				status.Used[resourceName] = *newUsage
			}

		}
	}

	return dirty, utilerrors.NewAggregate(errs)
}
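The update path in Example No. 14 charges only the delta between the new and old requests against quota. A toy sketch of that rule, with plain int64 milli-units standing in for resource.Quantity arithmetic:

package main

import "fmt"

// admitDelta applies the update-delta rule: on update, subtract the old
// request so only the change is charged against quota.
func admitDelta(used, hard, newRequest, oldRequest int64, isUpdate bool) (int64, error) {
	delta := newRequest
	if isUpdate {
		delta -= oldRequest
	}
	if used+delta > hard {
		return used, fmt.Errorf("limited to %d but require %d to succeed", hard, used+delta)
	}
	return used + delta, nil
}

func main() {
	// a pod grows from 200m to 500m CPU against a 1000m hard cap with 700m used
	used, err := admitDelta(700, 1000, 500, 200, true)
	fmt.Println(used, err) // 1000 <nil>
	// a fresh create of 500m would exceed the cap
	_, err = admitDelta(700, 1000, 500, 0, false)
	fmt.Println(err)
}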
Example No. 15
// Admit enforces that a namespace must exist in order to associate content with it,
// and that a terminating namespace cannot accept new content being associated with it.
func (e *lifecycle) Admit(a admission.Attributes) (err error) {
	if len(a.GetNamespace()) == 0 {
		return nil
	}
	// always allow a SAR request through, the SAR will return information about
	// the ability to take action on the object, no need to verify it here.
	if isSubjectAccessReview(a) {
		return nil
	}
	mapping, err := latest.RESTMapper.RESTMapping(a.GetKind())
	if err != nil {
		glog.V(4).Infof("Ignoring life-cycle enforcement for resource %v; no associated default version and kind could be found.", a.GetResource())
		return nil
	}
	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
		return nil
	}

	// we want to allow someone to delete something in case it was phantom created somehow
	if a.GetOperation() == "DELETE" {
		return nil
	}

	name := "Unknown"
	obj := a.GetObject()
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
	}

	if !e.cache.Running() {
		return admission.NewForbidden(a, err)
	}

	namespace, err := e.cache.GetNamespace(a.GetNamespace())
	if err != nil {
		return admission.NewForbidden(a, err)
	}

	if a.GetOperation() != "CREATE" {
		return nil
	}

	if namespace.Status.Phase == kapi.NamespaceTerminating && !e.creatableResources.Has(strings.ToLower(a.GetResource().Resource)) {
		return apierrors.NewForbidden(a.GetKind().Kind, name, fmt.Errorf("Namespace %s is terminating", a.GetNamespace()))
	}

	// in case of concurrency issues, we will retry this logic
	numRetries := 10
	interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond
	for retry := 1; retry <= numRetries; retry++ {

		// associate this namespace with openshift
		_, err = projectutil.Associate(e.client, namespace)
		if err == nil {
			break
		}

		// we have exhausted all reasonable efforts to retry so give up now
		if retry == numRetries {
			return admission.NewForbidden(a, err)
		}

		// get the latest namespace for the next pass in case of resource version updates
		time.Sleep(interval)

		// it's possible the namespace actually was deleted, so just forbid if this occurs
		namespace, err = e.client.Namespaces().Get(a.GetNamespace())
		if err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	return nil
}
Example No. 16
// checkRequest verifies that the request does not exceed any quota constraint. It returns a copy of the quotas, not yet persisted,
// that captures what the usage would be if the request succeeded.  It returns an error if there is insufficient quota to satisfy the request.
func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.Attributes) ([]api.ResourceQuota, error) {
	namespace := a.GetNamespace()
	evaluators := e.registry.Evaluators()
	evaluator, found := evaluators[a.GetKind().GroupKind()]
	if !found {
		return quotas, nil
	}

	op := a.GetOperation()
	operationResources := evaluator.OperationResources(op)
	if len(operationResources) == 0 {
		return quotas, nil
	}

	// find the set of quotas that are pertinent to this request
	// reject if we match the quota, but usage is not calculated yet
	// reject if the input object does not satisfy quota constraints
	// if there are no pertinent quotas, we can just return
	inputObject := a.GetObject()
	interestingQuotaIndexes := []int{}
	for i := range quotas {
		resourceQuota := quotas[i]
		match := evaluator.Matches(&resourceQuota, inputObject)
		if !match {
			continue
		}

		hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
		evaluatorResources := evaluator.MatchesResources()
		requiredResources := quota.Intersection(hardResources, evaluatorResources)
		err := evaluator.Constraints(requiredResources, inputObject)
		if err != nil {
			return nil, admission.NewForbidden(a, fmt.Errorf("Failed quota: %s: %v", resourceQuota.Name, err))
		}
		if !hasUsageStats(&resourceQuota) {
			return nil, admission.NewForbidden(a, fmt.Errorf("Status unknown for quota: %s", resourceQuota.Name))
		}

		interestingQuotaIndexes = append(interestingQuotaIndexes, i)
	}
	if len(interestingQuotaIndexes) == 0 {
		return quotas, nil
	}

	// Usage of some resources cannot be counted in isolation, for example when
	// the resource represents a number of unique references to an external
	// resource. In such a case an evaluator needs to process other objects in
	// the same namespace, so the namespace must be known.
	if accessor, err := meta.Accessor(inputObject); namespace != "" && err == nil {
		if accessor.GetNamespace() == "" {
			accessor.SetNamespace(namespace)
		}
	}

	// there is at least one quota that definitely matches our object
	// as a result, we need to measure the usage of this object for quota
	// on updates, we need to subtract the previous measured usage
	// if usage shows no change, just return since it has no impact on quota
	deltaUsage := evaluator.Usage(inputObject)
	if admission.Update == op {
		prevItem := a.GetOldObject()
		if prevItem == nil {
			return nil, admission.NewForbidden(a, fmt.Errorf("Unable to get previous usage since prior version of object was not found"))
		}

		// if we can definitively determine that this is not a case of "create on update",
		// then charge based on the delta.  Otherwise, bill the maximum
		metadata, err := meta.Accessor(prevItem)
		if err == nil && len(metadata.GetResourceVersion()) > 0 {
			prevUsage := evaluator.Usage(prevItem)
			deltaUsage = quota.Subtract(deltaUsage, prevUsage)
		}
	}
	if quota.IsZero(deltaUsage) {
		return quotas, nil
	}

	for _, index := range interestingQuotaIndexes {
		resourceQuota := quotas[index]

		hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
		requestedUsage := quota.Mask(deltaUsage, hardResources)
		newUsage := quota.Add(resourceQuota.Status.Used, requestedUsage)
		maskedNewUsage := quota.Mask(newUsage, quota.ResourceNames(requestedUsage))

		if allowed, exceeded := quota.LessThanOrEqual(maskedNewUsage, resourceQuota.Status.Hard); !allowed {
			failedRequestedUsage := quota.Mask(requestedUsage, exceeded)
			failedUsed := quota.Mask(resourceQuota.Status.Used, exceeded)
			failedHard := quota.Mask(resourceQuota.Status.Hard, exceeded)
			return nil, admission.NewForbidden(a,
				fmt.Errorf("Exceeded quota: %s, requested: %s, used: %s, limited: %s",
					resourceQuota.Name,
					prettyPrint(failedRequestedUsage),
					prettyPrint(failedUsed),
					prettyPrint(failedHard)))
		}

		// update to the new usage number
		quotas[index].Status.Used = newUsage
	}

	return quotas, nil
}
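checkRequest leans on quota map helpers: Mask, Add, and LessThanOrEqual. A self-contained approximation with a simple map type; these are illustrative stand-ins for the real quota package, not its actual implementation.

package main

import "fmt"

type resourceList map[string]int64

// mask keeps only the named keys from list.
func mask(list resourceList, keys []string) resourceList {
	out := resourceList{}
	for _, k := range keys {
		if v, ok := list[k]; ok {
			out[k] = v
		}
	}
	return out
}

// add sums two lists key-wise.
func add(a, b resourceList) resourceList {
	out := resourceList{}
	for k, v := range a {
		out[k] = v
	}
	for k, v := range b {
		out[k] += v
	}
	return out
}

// exceeded returns the keys where usage is over the hard limit.
func exceeded(usage, hard resourceList) []string {
	var over []string
	for k, v := range usage {
		if limit, ok := hard[k]; ok && v > limit {
			over = append(over, k)
		}
	}
	return over
}

func main() {
	hard := resourceList{"pods": 10, "cpu": 4000}
	used := resourceList{"pods": 9, "cpu": 3500}
	requested := mask(resourceList{"pods": 1, "cpu": 600, "gpu": 1}, []string{"pods", "cpu"})
	newUsage := add(used, requested)
	fmt.Println(exceeded(newUsage, hard)) // [cpu]
}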
Example No. 17
// IncrementUsage updates the supplied ResourceQuotaStatus object based on the incoming operation.
// It returns true if the usage must be recorded prior to admitting the new resource,
// and an error if the operation should not pass admission control.
func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, client client.Interface) (bool, error) {
	dirty := false
	set := map[api.ResourceName]bool{}
	for k := range status.Hard {
		set[k] = true
	}
	obj := a.GetObject()
	// handle max counts for each kind of resource (pods, services, replicationControllers, etc.)
	if a.GetOperation() == admission.Create {
		resourceName := resourceToResourceName[a.GetResource()]
		hard, hardFound := status.Hard[resourceName]
		if hardFound {
			used, usedFound := status.Used[resourceName]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value() >= hard.Value() {
				return false, fmt.Errorf("Limited to %s %s", hard.String(), resourceName)
			} else {
				status.Used[resourceName] = *resource.NewQuantity(used.Value()+int64(1), resource.DecimalSI)
				dirty = true
			}
		}
	}
	// handle memory/cpu constraints, and any diff of usage based on memory/cpu on updates
	if a.GetResource() == "pods" && (set[api.ResourceMemory] || set[api.ResourceCPU]) {
		pod := obj.(*api.Pod)
		deltaCPU := resourcequota.PodCPU(pod)
		deltaMemory := resourcequota.PodMemory(pod)
		// if this is an update, we need to find the delta cpu/memory usage from previous state
		if a.GetOperation() == admission.Update {
			oldPod, err := client.Pods(a.GetNamespace()).Get(pod.Name)
			if err != nil {
				return false, err
			}
			oldCPU := resourcequota.PodCPU(oldPod)
			oldMemory := resourcequota.PodMemory(oldPod)
			deltaCPU = resource.NewMilliQuantity(deltaCPU.MilliValue()-oldCPU.MilliValue(), resource.DecimalSI)
			deltaMemory = resource.NewQuantity(deltaMemory.Value()-oldMemory.Value(), resource.DecimalSI)
		}

		hardMem, hardMemFound := status.Hard[api.ResourceMemory]
		if hardMemFound {
			if set[api.ResourceMemory] && resourcequota.IsPodMemoryUnbounded(pod) {
				return false, fmt.Errorf("Limited to %s memory, but pod has no specified memory limit", hardMem.String())
			}
			used, usedFound := status.Used[api.ResourceMemory]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value()+deltaMemory.Value() > hardMem.Value() {
				return false, fmt.Errorf("Limited to %s memory", hardMem.String())
			} else {
				status.Used[api.ResourceMemory] = *resource.NewQuantity(used.Value()+deltaMemory.Value(), resource.DecimalSI)
				dirty = true
			}
		}
		hardCPU, hardCPUFound := status.Hard[api.ResourceCPU]
		if hardCPUFound {
			if set[api.ResourceCPU] && resourcequota.IsPodCPUUnbounded(pod) {
				return false, fmt.Errorf("Limited to %s CPU, but pod has no specified cpu limit", hardCPU.String())
			}
			used, usedFound := status.Used[api.ResourceCPU]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.MilliValue()+deltaCPU.MilliValue() > hardCPU.MilliValue() {
				return false, fmt.Errorf("Limited to %s CPU", hardCPU.String())
			} else {
				status.Used[api.ResourceCPU] = *resource.NewMilliQuantity(used.MilliValue()+deltaCPU.MilliValue(), resource.DecimalSI)
				dirty = true
			}
		}
	}
	return dirty, nil
}
Example No. 18
func (l *lifecycle) Admit(a admission.Attributes) error {
	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == api.Kind("Namespace") && l.immortalNamespaces.Has(a.GetName()) {
		return errors.NewForbidden(a.GetResource().GroupResource(), a.GetName(), fmt.Errorf("this namespace may not be deleted"))
	}

	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind().GroupKind() == api.Kind("Namespace") {
		// if a namespace is deleted, we want to prevent all further creates into it
		// while it is undergoing termination.  to reduce instances where the cache
		// is slow to update, we add the namespace into a force live lookup list to ensure
		// we are not looking at stale state.
		if a.GetOperation() == admission.Delete {
			l.forceLiveLookupCache.Add(a.GetName(), true, forceLiveLookupTTL)
		}
		return nil
	}

	// always allow access review checks.  Returning status about the namespace would be leaking information
	if isAccessReview(a) {
		return nil
	}

	// we need to wait for our caches to warm
	if !l.WaitForReady() {
		return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
	}

	var (
		namespaceObj interface{}
		exists       bool
		err          error
	)

	key := makeNamespaceKey(a.GetNamespace())
	namespaceObj, exists, err = l.namespaceInformer.GetStore().Get(key)
	if err != nil {
		return errors.NewInternalError(err)
	}

	if !exists && a.GetOperation() == admission.Create {
		// give the cache time to observe the namespace before rejecting a create.
		// this helps when creating a namespace and immediately creating objects within it.
		time.Sleep(missingNamespaceWait)
		namespaceObj, exists, err = l.namespaceInformer.GetStore().Get(key)
		if err != nil {
			return errors.NewInternalError(err)
		}
		if exists {
			glog.V(4).Infof("found %s in cache after waiting", a.GetNamespace())
		}
	}

	// forceLiveLookup if true will skip looking at local cache state and instead always make a live call to server.
	forceLiveLookup := false
	if _, ok := l.forceLiveLookupCache.Get(a.GetNamespace()); ok {
		// we think the namespace was marked for deletion, but our current local cache says otherwise; force a live lookup.
		forceLiveLookup = exists && namespaceObj.(*api.Namespace).Status.Phase == api.NamespaceActive
	}

	// refuse to operate on non-existent namespaces
	if !exists || forceLiveLookup {
		// as a last resort, make a call directly to storage
		namespaceObj, err = l.client.Core().Namespaces().Get(a.GetNamespace())
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
		glog.V(4).Infof("found %s via storage lookup", a.GetNamespace())
	}

	// ensure that we're not trying to create objects in terminating namespaces
	if a.GetOperation() == admission.Create {
		namespace := namespaceObj.(*api.Namespace)
		if namespace.Status.Phase != api.NamespaceTerminating {
			return nil
		}

		// TODO: This should probably not be a 403
		return admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
	}

	return nil
}
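The missingNamespaceWait re-check in Example No. 18 is a small but useful idea: on a cache miss during Create, pause briefly so the watch-driven cache can catch up before falling back to a live read. A generic sketch of that pattern, assuming hypothetical cache and live lookup functions:

package main

import (
	"errors"
	"fmt"
	"time"
)

// getWithGrace consults a cache, and on a miss waits briefly and
// retries once before falling back to the live client, mirroring
// the missingNamespaceWait re-check above.
func getWithGrace(cache, live func(string) (string, error), name string, wait time.Duration) (string, error) {
	if v, err := cache(name); err == nil {
		return v, nil
	}
	time.Sleep(wait) // give the watch-driven cache time to observe the object
	if v, err := cache(name); err == nil {
		return v, nil
	}
	return live(name)
}

func main() {
	miss := func(string) (string, error) { return "", errors.New("not found") }
	hit := func(n string) (string, error) { return n + " (live)", nil }
	v, err := getWithGrace(miss, hit, "demo", 10*time.Millisecond)
	fmt.Println(v, err) // demo (live) <nil>
}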
Example No. 19
// Admit makes admission decisions while enforcing quota
func (q *quotaAdmission) Admit(a admission.Attributes) (err error) {
	// ignore all operations that correspond to sub-resource actions
	if a.GetSubresource() != "" {
		return nil
	}

	// if we do not know how to evaluate usage for this kind, just ignore
	evaluators := q.registry.Evaluators()
	evaluator, found := evaluators[a.GetKind()]
	if !found {
		return nil
	}

	// for this kind, check if the operation could mutate any quota resources
	// if no resources tracked by quota are impacted, then just return
	op := a.GetOperation()
	operationResources := evaluator.OperationResources(op)
	if len(operationResources) == 0 {
		return nil
	}

	// determine if there are any quotas in this namespace
	// if there are no quotas, we don't need to do anything
	namespace, name := a.GetNamespace(), a.GetName()
	items, err := q.indexer.Index("namespace", &api.ResourceQuota{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: ""}})
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Error resolving quota."))
	}
	// if there are no items held in our indexer, check our live-lookup LRU; if that misses, do the live lookup to prime it.
	if len(items) == 0 {
		lruItemObj, ok := q.liveLookupCache.Get(a.GetNamespace())
		if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
			// TODO: If there are multiple operations at the same time and cache has just expired,
			// this may cause multiple List operations being issued at the same time.
			// If there is already in-flight List() for a given namespace, we should wait until
			// it is finished and cache is updated instead of doing the same, also to avoid
			// throttling - see #22422 for details.
			liveList, err := q.client.Core().ResourceQuotas(namespace).List(api.ListOptions{})
			if err != nil {
				return admission.NewForbidden(a, err)
			}
			newEntry := liveLookupEntry{expiry: time.Now().Add(q.liveTTL)}
			for i := range liveList.Items {
				newEntry.items = append(newEntry.items, &liveList.Items[i])
			}
			q.liveLookupCache.Add(a.GetNamespace(), newEntry)
			lruItemObj = newEntry
		}
		lruEntry := lruItemObj.(liveLookupEntry)
		for i := range lruEntry.items {
			items = append(items, lruEntry.items[i])
		}
	}
	// if there are still no items, we can return
	if len(items) == 0 {
		return nil
	}

	// find the set of quotas that are pertinent to this request
	// reject if we match the quota, but usage is not calculated yet
	// reject if the input object does not satisfy quota constraints
	// if there are no pertinent quotas, we can just return
	inputObject := a.GetObject()
	resourceQuotas := []*api.ResourceQuota{}
	for i := range items {
		resourceQuota := items[i].(*api.ResourceQuota)
		match := evaluator.Matches(resourceQuota, inputObject)
		if !match {
			continue
		}
		hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
		evaluatorResources := evaluator.MatchesResources()
		requiredResources := quota.Intersection(hardResources, evaluatorResources)
		err := evaluator.Constraints(requiredResources, inputObject)
		if err != nil {
			return admission.NewForbidden(a, fmt.Errorf("Failed quota: %s: %v", resourceQuota.Name, err))
		}
		if !hasUsageStats(resourceQuota) {
			return admission.NewForbidden(a, fmt.Errorf("Status unknown for quota: %s", resourceQuota.Name))
		}
		resourceQuotas = append(resourceQuotas, resourceQuota)
	}
	if len(resourceQuotas) == 0 {
		return nil
	}

	// there is at least one quota that definitely matches our object
	// as a result, we need to measure the usage of this object for quota
	// on updates, we need to subtract the previous measured usage
	// if usage shows no change, just return since it has no impact on quota
	deltaUsage := evaluator.Usage(inputObject)
	if admission.Update == op {
		prevItem, err := evaluator.Get(namespace, name)
		if err != nil {
			return admission.NewForbidden(a, fmt.Errorf("Unable to get previous: %v", err))
		}
		prevUsage := evaluator.Usage(prevItem)
		deltaUsage = quota.Subtract(deltaUsage, prevUsage)
	}
	if quota.IsZero(deltaUsage) {
		return nil
	}

	// TODO: Move to a bucketing work queue
	// If we guaranteed that we processed requests in the order they were received by the server, we would reduce quota conflicts.
	// Until we have the bucketing work queue, we jitter requests and retry on conflict.
	numRetries := 10
	interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond

	// seed the retry loop with the initial set of quotas to process (should reduce each iteration)
	resourceQuotasToProcess := resourceQuotas
	for retry := 1; retry <= numRetries; retry++ {
		// the list of quotas we will try again if there is a version conflict
		tryAgain := []*api.ResourceQuota{}

		// check that we pass all remaining quotas so we do not prematurely charge
		// for each quota, mask the usage to the set of resources tracked by the quota
		// if request + used > hard, return an error describing the failure
		updatedUsage := map[string]api.ResourceList{}
		for _, resourceQuota := range resourceQuotasToProcess {
			hardResources := quota.ResourceNames(resourceQuota.Status.Hard)
			requestedUsage := quota.Mask(deltaUsage, hardResources)
			newUsage := quota.Add(resourceQuota.Status.Used, requestedUsage)
			if allowed, exceeded := quota.LessThanOrEqual(newUsage, resourceQuota.Status.Hard); !allowed {
				failedRequestedUsage := quota.Mask(requestedUsage, exceeded)
				failedUsed := quota.Mask(resourceQuota.Status.Used, exceeded)
				failedHard := quota.Mask(resourceQuota.Status.Hard, exceeded)
				return admission.NewForbidden(a,
					fmt.Errorf("Exceeded quota: %s, requested: %s, used: %s, limited: %s",
						resourceQuota.Name,
						prettyPrint(failedRequestedUsage),
						prettyPrint(failedUsed),
						prettyPrint(failedHard)))
			}
			updatedUsage[resourceQuota.Name] = newUsage
		}

		// update the status for each quota with its new usage
		// if we get a conflict, get updated quota, and enqueue
		for i, resourceQuota := range resourceQuotasToProcess {
			newUsage := updatedUsage[resourceQuota.Name]
			quotaToUpdate := &api.ResourceQuota{
				ObjectMeta: api.ObjectMeta{
					Name:            resourceQuota.Name,
					Namespace:       resourceQuota.Namespace,
					ResourceVersion: resourceQuota.ResourceVersion,
				},
				Status: api.ResourceQuotaStatus{
					Hard: quota.Add(api.ResourceList{}, resourceQuota.Status.Hard),
					Used: newUsage,
				},
			}
			_, err = q.client.Core().ResourceQuotas(quotaToUpdate.Namespace).UpdateStatus(quotaToUpdate)
			if err != nil {
				if !errors.IsConflict(err) {
					return admission.NewForbidden(a, fmt.Errorf("Unable to update quota status: %s %v", resourceQuota.Name, err))
				}
				// if we get a conflict, fetch the latest copy of the quota documents that were not yet modified, and retry them all with the latest state.
				for fetchIndex := i; fetchIndex < len(resourceQuotasToProcess); fetchIndex++ {
					latestQuota, err := q.client.Core().ResourceQuotas(namespace).Get(resourceQuotasToProcess[fetchIndex].Name)
					if err != nil {
						return admission.NewForbidden(a, fmt.Errorf("Unable to get quota: %s %v", resourceQuotasToProcess[fetchIndex].Name, err))
					}
					tryAgain = append(tryAgain, latestQuota)
				}
				break
			}
		}

		// all quotas were updated, so we can return
		if len(tryAgain) == 0 {
			return nil
		}

		// we have concurrent requests to update quota, so look to retry if needed
		// next iteration, we need to process the items that have to try again
		// pause the specified interval to encourage jitter
		if retry == numRetries {
			names := []string{}
			for _, quota := range tryAgain {
				names = append(names, quota.Name)
			}
			return admission.NewForbidden(a, fmt.Errorf("Unable to update status for quota: %s, ", strings.Join(names, ",")))
		}
		resourceQuotasToProcess = tryAgain
		time.Sleep(interval)
	}
	return nil
}
Example No. 20
func (l *lifecycle) Admit(a admission.Attributes) error {
	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == api.Kind("Namespace") && l.immortalNamespaces.Has(a.GetName()) {
		return errors.NewForbidden(a.GetResource().GroupResource(), a.GetName(), fmt.Errorf("this namespace may not be deleted"))
	}

	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind().GroupKind() == api.Kind("Namespace") {
		// if a namespace is deleted, we want to prevent all further creates into it
		// while it is undergoing termination.  to reduce instances where the cache
		// is slow to update, we add the namespace into a force live lookup list to ensure
		// we are not looking at stale state.
		if a.GetOperation() == admission.Delete {
			newEntry := forceLiveLookupEntry{
				expiry: time.Now().Add(forceLiveLookupTTL),
			}
			l.forceLiveLookupCache.Add(a.GetName(), newEntry)
		}
		return nil
	}

	// we need to wait for our caches to warm
	if !l.WaitForReady() {
		return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
	}

	var (
		namespaceObj interface{}
		exists       bool
		err          error
	)

	key := makeNamespaceKey(a.GetNamespace())
	namespaceObj, exists, err = l.namespaceInformer.GetStore().Get(key)
	if err != nil {
		return errors.NewInternalError(err)
	}

	// forceLiveLookup if true will skip looking at local cache state and instead always make a live call to server.
	forceLiveLookup := false
	lruItemObj, ok := l.forceLiveLookupCache.Get(a.GetNamespace())
	if ok && lruItemObj.(forceLiveLookupEntry).expiry.Before(time.Now()) {
		// we think the namespace was marked for deletion, but our current local cache says otherwise; force a live lookup.
		forceLiveLookup = exists && namespaceObj.(*api.Namespace).Status.Phase == api.NamespaceActive
	}

	// refuse to operate on non-existent namespaces
	if !exists || forceLiveLookup {
		// in case of latency in our caches, make a call directly to storage to verify whether it truly exists
		namespaceObj, err = l.client.Core().Namespaces().Get(a.GetNamespace())
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
	}

	// ensure that we're not trying to create objects in terminating namespaces
	if a.GetOperation() == admission.Create {
		namespace := namespaceObj.(*api.Namespace)
		if namespace.Status.Phase != api.NamespaceTerminating {
			return nil
		}

		// TODO: This should probably not be a 403
		return admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
	}

	return nil
}
Example No. 21
// Admit admits resources into cluster that do not violate any defined LimitRange in the namespace
func (l *limitRanger) Admit(a admission.Attributes) (err error) {
	if !l.actions.SupportsAttributes(a) {
		return nil
	}

	obj := a.GetObject()
	name := "Unknown"
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
		if len(name) == 0 {
			name, _ = meta.NewAccessor().GenerateName(obj)
		}
	}

	key := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}
	items, err := l.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Unable to %s %v at this time because there was an error enforcing limit ranges", a.GetOperation(), a.GetResource()))
	}

	// if there are no items held in our indexer, check our live-lookup LRU; if that misses, do the live lookup to prime it.
	if len(items) == 0 {
		lruItemObj, ok := l.liveLookupCache.Get(a.GetNamespace())
		if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
			// TODO: If there are multiple operations at the same time and cache has just expired,
			// this may cause multiple List operations being issued at the same time.
			// If there is already in-flight List() for a given namespace, we should wait until
			// it is finished and cache is updated instead of doing the same, also to avoid
			// throttling - see #22422 for details.
			liveList, err := l.client.Core().LimitRanges(a.GetNamespace()).List(api.ListOptions{})
			if err != nil {
				return admission.NewForbidden(a, err)
			}
			newEntry := liveLookupEntry{expiry: time.Now().Add(l.liveTTL)}
			for i := range liveList.Items {
				newEntry.items = append(newEntry.items, &liveList.Items[i])
			}
			l.liveLookupCache.Add(a.GetNamespace(), newEntry)
			lruItemObj = newEntry
		}
		lruEntry := lruItemObj.(liveLookupEntry)

		for i := range lruEntry.items {
			items = append(items, lruEntry.items[i])
		}

	}

	// ensure it meets each prescribed min/max
	for i := range items {
		limitRange := items[i].(*api.LimitRange)

		if !l.actions.SupportsLimit(limitRange) {
			continue
		}

		err = l.actions.Limit(limitRange, a.GetResource().Resource, a.GetObject())
		if err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	return nil
}
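Examples Nos. 19, 21, and 23 all use the same live-lookup LRU with a TTL to paper over an empty indexer. A minimal sketch of that cache shape, using a plain map here rather than a real LRU:

package main

import (
	"fmt"
	"time"
)

type entry struct {
	expiry time.Time
	items  []string
}

type ttlCache struct {
	data map[string]entry
	ttl  time.Duration
}

// get returns cached items for a namespace, re-priming from lookup
// when the entry is missing or past its expiry, like the
// liveLookupCache fallback above.
func (c *ttlCache) get(ns string, lookup func(string) []string) []string {
	e, ok := c.data[ns]
	if !ok || e.expiry.Before(time.Now()) {
		e = entry{expiry: time.Now().Add(c.ttl), items: lookup(ns)}
		c.data[ns] = e
	}
	return e.items
}

func main() {
	calls := 0
	lookup := func(ns string) []string { calls++; return []string{ns + "/limits"} }
	c := &ttlCache{data: map[string]entry{}, ttl: 50 * time.Millisecond}
	c.get("demo", lookup)
	c.get("demo", lookup) // served from cache
	fmt.Println("live lookups:", calls) // 1
	time.Sleep(60 * time.Millisecond)
	c.get("demo", lookup) // expired, re-primed
	fmt.Println("live lookups:", calls) // 2
}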
Example No. 23
func (l *lifecycle) Admit(a admission.Attributes) (err error) {
	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == api.Kind("Namespace") && l.immortalNamespaces.Has(a.GetName()) {
		return errors.NewForbidden(a.GetResource().GroupResource(), a.GetName(), fmt.Errorf("this namespace may not be deleted"))
	}

	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// it's a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind().GroupKind() == api.Kind("Namespace") {
		// if a namespace is deleted, we want to prevent all further creates into it
		// while it is undergoing termination.  to reduce instances where the cache
		// is slow to update, we forcefully remove the namespace from our local cache.
		// this will cause a live lookup of the namespace to get its latest state even
		// before the watch notification is received.
		if a.GetOperation() == admission.Delete {
			l.store.Delete(&api.Namespace{
				ObjectMeta: api.ObjectMeta{
					Name: a.GetName(),
				},
			})
		}
		return nil
	}

	// always allow access review checks.  Returning status about the namespace would be leaking information
	if isAccessReview(a) {
		return nil
	}

	namespaceObj, exists, err := l.store.Get(&api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
	})
	if err != nil {
		return errors.NewInternalError(err)
	}

	// refuse to operate on non-existent namespaces
	if !exists {
		// in case of latency in our caches, make a call directly to storage to verify whether it truly exists
		namespaceObj, err = l.client.Core().Namespaces().Get(a.GetNamespace())
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
	}

	// ensure that we're not trying to create objects in terminating namespaces
	if a.GetOperation() == admission.Create {
		namespace := namespaceObj.(*api.Namespace)
		if namespace.Status.Phase != api.NamespaceTerminating {
			return nil
		}

		// TODO: This should probably not be a 403
		return admission.NewForbidden(a, fmt.Errorf("Unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
	}

	return nil
}
Ejemplo n.º 23
0
// Admit admits resources into cluster that do not violate any defined LimitRange in the namespace
func (l *limitRanger) Admit(a admission.Attributes) (err error) {

	// Ignore all calls to subresources
	if a.GetSubresource() != "" {
		return nil
	}

	obj := a.GetObject()
	name := "Unknown"
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
		if len(name) == 0 {
			name, _ = meta.NewAccessor().GenerateName(obj)
		}
	}

	key := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}
	items, err := l.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("Unable to %s %v at this time because there was an error enforcing limit ranges", a.GetOperation(), a.GetResource()))
	}

	// if there are no items held in our indexer, check our live-lookup LRU; if that misses, do the live lookup to prime it.
	if len(items) == 0 {
		lruItemObj, ok := l.liveLookupCache.Get(a.GetNamespace())
		if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
			liveList, err := l.client.Core().LimitRanges(a.GetNamespace()).List(api.ListOptions{})
			if err != nil {
				return admission.NewForbidden(a, err)
			}
			newEntry := liveLookupEntry{expiry: time.Now().Add(l.liveTTL)}
			for i := range liveList.Items {
				newEntry.items = append(newEntry.items, &liveList.Items[i])
			}
			l.liveLookupCache.Add(a.GetNamespace(), newEntry)
			lruItemObj = newEntry
		}
		lruEntry := lruItemObj.(liveLookupEntry)

		for i := range lruEntry.items {
			items = append(items, lruEntry.items[i])
		}

	}

	// ensure it meets each prescribed min/max
	for i := range items {
		limitRange := items[i].(*api.LimitRange)
		err = l.limitFunc(limitRange, a.GetResource().Resource, a.GetObject())
		if err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	return nil
}