// Admit makes sure the namespace for a namespaced resource exists, provisioning it on first use.
func (p *provision) Admit(a admission.Attributes) (err error) {
	defaultVersion, kind, err := latest.RESTMapper.VersionAndKindForResource(a.GetResource())
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	mapping, err := latest.RESTMapper.RESTMapping(kind, defaultVersion)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	// only namespaced resources need a namespace provisioned
	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
		return nil
	}
	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
		Status: api.NamespaceStatus{},
	}
	_, exists, err := p.store.Get(namespace)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if exists {
		return nil
	}
	// tolerate a racing creator; only a genuinely failed create is forbidden
	_, err = p.client.Namespaces().Create(namespace)
	if err != nil && !errors.IsAlreadyExists(err) {
		return admission.NewForbidden(a, err)
	}
	return nil
}
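// Every plugin in this file implements the same single-method Admit interface,
// so the API server can run them as an ordered chain in which the first
// rejection wins. The sketch below is a minimal, self-contained illustration
// of that chain-of-responsibility pattern; the attributes, plugin, chain, and
// denyDeletes names are stand-ins for illustration, not this project's types.
package main

import (
	"errors"
	"fmt"
)

// attributes is a stand-in for the request metadata an admission
// plugin inspects (namespace, resource, operation, object, ...).
type attributes struct {
	Namespace string
	Resource  string
	Operation string
}

// plugin mirrors the Admit(a) error shape used by the controllers above.
type plugin interface {
	Admit(a attributes) error
}

// chain runs plugins in order; the first non-nil error rejects the request.
type chain []plugin

func (c chain) Admit(a attributes) error {
	for _, p := range c {
		if err := p.Admit(a); err != nil {
			return err
		}
	}
	return nil
}

// denyDeletes is a toy plugin that forbids all DELETE operations.
type denyDeletes struct{}

func (denyDeletes) Admit(a attributes) error {
	if a.Operation == "DELETE" {
		return errors.New("deletes are forbidden")
	}
	return nil
}

func main() {
	c := chain{denyDeletes{}}
	fmt.Println(c.Admit(attributes{Resource: "pods", Operation: "CREATE"})) // <nil>
	fmt.Println(c.Admit(attributes{Resource: "pods", Operation: "DELETE"})) // deletes are forbidden
}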
// Admit admits resources into cluster that do not violate any defined LimitRange in the namespace
func (l *limitRanger) Admit(a admission.Attributes) (err error) {
	// Ignore all calls to subresources
	if a.GetSubresource() != "" {
		return nil
	}
	obj := a.GetObject()
	resource := a.GetResource()
	name := "Unknown"
	if obj != nil {
		name, _ = meta.NewAccessor().Name(obj)
		if len(name) == 0 {
			name, _ = meta.NewAccessor().GenerateName(obj)
		}
	}
	key := &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}
	items, err := l.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("unable to %s %s at this time because there was an error enforcing limit ranges", a.GetOperation(), resource))
	}
	if len(items) == 0 {
		return nil
	}
	// ensure it meets each prescribed min/max
	for i := range items {
		limitRange := items[i].(*api.LimitRange)
		err = l.limitFunc(limitRange, a.GetResource(), a.GetObject())
		if err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	return nil
}
// Admit prevents deletion of immortal namespaces and blocks the creation of new content in namespaces that are terminating.
func (l *lifecycle) Admit(a admission.Attributes) (err error) {
	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete {
		if a.GetKind() == "Namespace" && l.immortalNamespaces.Has(a.GetName()) {
			return errors.NewForbidden(a.GetKind(), a.GetName(), fmt.Errorf("namespace can never be deleted"))
		}
		return nil
	}
	defaultVersion, kind, err := latest.RESTMapper.VersionAndKindForResource(a.GetResource())
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	mapping, err := latest.RESTMapper.RESTMapping(kind, defaultVersion)
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	// cluster-scoped resources are not subject to namespace lifecycle
	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
		return nil
	}
	namespaceObj, exists, err := l.store.Get(&api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
	})
	if err != nil {
		return admission.NewForbidden(a, err)
	}
	if !exists {
		return nil
	}
	namespace := namespaceObj.(*api.Namespace)
	if namespace.Status.Phase != api.NamespaceTerminating {
		return nil
	}
	return admission.NewForbidden(a, fmt.Errorf("unable to create new content in namespace %s because it is being terminated", a.GetNamespace()))
}
// Admit will deny any SecurityContext that defines options that were not previously available in the api.Container
// struct (Capabilities and Privileged)
func (p *plugin) Admit(a admission.Attributes) (err error) {
	if a.GetResource() != string(api.ResourcePods) {
		return nil
	}
	pod, ok := a.GetObject().(*api.Pod)
	if !ok {
		return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted")
	}
	for _, v := range pod.Spec.Containers {
		if v.SecurityContext != nil {
			if v.SecurityContext.SELinuxOptions != nil {
				return apierrors.NewForbidden(a.GetResource(), pod.Name, fmt.Errorf("SecurityContext.SELinuxOptions is forbidden"))
			}
			if v.SecurityContext.RunAsUser != nil {
				return apierrors.NewForbidden(a.GetResource(), pod.Name, fmt.Errorf("SecurityContext.RunAsUser is forbidden"))
			}
		}
	}
	return nil
}
// Admit rejects any operation that would exceed a ResourceQuota in the namespace, persisting
// the incremented usage with a bounded, fuzzed retry loop to absorb write conflicts.
func (q *quota) Admit(a admission.Attributes) (err error) {
	if a.GetSubresource() != "" {
		return nil
	}
	if a.GetOperation() == "DELETE" {
		return nil
	}
	key := &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Namespace: a.GetNamespace(),
			Name:      "",
		},
	}
	// concurrent operations that modify quota tracked resources can cause a conflict when incrementing usage
	// as a result, we will attempt to increment quota usage per request up to numRetries limit
	// we fuzz each retry with an interval period to attempt to improve end-user experience during concurrent operations
	numRetries := 10
	interval := time.Duration(rand.Int63n(90)+int64(10)) * time.Millisecond
	items, err := q.indexer.Index("namespace", key)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("unable to %s %s at this time because there was an error enforcing quota", a.GetOperation(), a.GetResource()))
	}
	if len(items) == 0 {
		return nil
	}
	for i := range items {
		quota := items[i].(*api.ResourceQuota)
		for retry := 1; retry <= numRetries; retry++ {
			// we cannot modify the value directly in the cache, so we copy
			status := &api.ResourceQuotaStatus{
				Hard: api.ResourceList{},
				Used: api.ResourceList{},
			}
			for k, v := range quota.Status.Hard {
				status.Hard[k] = *v.Copy()
			}
			for k, v := range quota.Status.Used {
				status.Used[k] = *v.Copy()
			}
			dirty, err := IncrementUsage(a, status, q.client)
			if err != nil {
				return admission.NewForbidden(a, err)
			}
			if !dirty {
				// this request does not change tracked usage for this quota, so there is nothing to record
				break
			}
			// construct a usage record
			usage := api.ResourceQuota{
				ObjectMeta: api.ObjectMeta{
					Name:            quota.Name,
					Namespace:       quota.Namespace,
					ResourceVersion: quota.ResourceVersion,
					Labels:          quota.Labels,
					Annotations:     quota.Annotations,
				},
			}
			usage.Status = *status
			_, err = q.client.ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
			if err == nil {
				break
			}
			// we have concurrent requests to update quota, so look to retry if needed
			if retry == numRetries {
				return admission.NewForbidden(a, fmt.Errorf("unable to %s %s at this time because there are too many concurrent requests to increment quota", a.GetOperation(), a.GetResource()))
			}
			time.Sleep(interval)
			// manually get the latest quota
			quota, err = q.client.ResourceQuotas(usage.Namespace).Get(quota.Name)
			if err != nil {
				return admission.NewForbidden(a, err)
			}
		}
	}
	return nil
}
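// The retry loop above is an optimistic-concurrency pattern: copy the cached
// status, apply the increment, write it back keyed on the resource version,
// and retry after a fuzzed delay when a concurrent writer wins the race. The
// following is a minimal, self-contained sketch of that pattern under stand-in
// types; server, versionedStatus, and admitOne are illustrative names, not
// this project's API.
package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

// versionedStatus is a stand-in for quota status guarded by a resource version.
type versionedStatus struct {
	Version int64
	Used    int64
	Hard    int64
}

// server simulates the apiserver's compare-and-swap on UpdateStatus:
// the write succeeds only if the caller read the latest version.
type server struct {
	mu sync.Mutex
	s  versionedStatus
}

func (srv *server) get() versionedStatus {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	return srv.s
}

func (srv *server) updateStatus(s versionedStatus) error {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	if s.Version != srv.s.Version {
		return fmt.Errorf("conflict: expected version %d, have %d", s.Version, srv.s.Version)
	}
	s.Version++
	srv.s = s
	return nil
}

// admitOne increments usage with up to numRetries attempts, sleeping a
// fuzzed interval between attempts, mirroring the quota plugin's loop.
func admitOne(srv *server) error {
	const numRetries = 10
	interval := time.Duration(rand.Int63n(90)+10) * time.Millisecond
	for retry := 1; retry <= numRetries; retry++ {
		q := srv.get() // work on a copy; never mutate the cached object
		if q.Used >= q.Hard {
			return fmt.Errorf("limited to %d", q.Hard)
		}
		q.Used++
		if err := srv.updateStatus(q); err == nil {
			return nil
		}
		if retry == numRetries {
			return fmt.Errorf("too many concurrent requests")
		}
		time.Sleep(interval)
	}
	return nil
}

func main() {
	srv := &server{s: versionedStatus{Hard: 100}}
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _ = admitOne(srv) }()
	}
	wg.Wait()
	fmt.Println("used:", srv.get().Used) // used: 20
}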
// IncrementUsage updates the supplied ResourceQuotaStatus object based on the incoming operation
// Return true if the usage must be recorded prior to admitting the new resource
// Return an error if the operation should not pass admission control
func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, client client.Interface) (bool, error) {
	dirty := false
	set := map[api.ResourceName]bool{}
	for k := range status.Hard {
		set[k] = true
	}
	obj := a.GetObject()
	// handle max counts for each kind of resource (pods, services, replicationControllers, etc.)
	if a.GetOperation() == admission.Create {
		// TODO v1beta1 had camel case, v1beta3 went to all lower, we can remove this line when we deprecate v1beta1
		resourceNormalized := strings.ToLower(a.GetResource())
		resourceName := resourceToResourceName[resourceNormalized]
		hard, hardFound := status.Hard[resourceName]
		if hardFound {
			used, usedFound := status.Used[resourceName]
			if !usedFound {
				return false, fmt.Errorf("quota usage stats are not yet known, unable to admit resource until an accurate count is completed")
			}
			if used.Value() >= hard.Value() {
				return false, fmt.Errorf("limited to %s %s", hard.String(), resourceName)
			}
			status.Used[resourceName] = *resource.NewQuantity(used.Value()+1, resource.DecimalSI)
			dirty = true
		}
	}
	// handle memory/cpu constraints, and any diff of usage based on memory/cpu on updates
	if a.GetResource() == "pods" && (set[api.ResourceMemory] || set[api.ResourceCPU]) {
		pod := obj.(*api.Pod)
		deltaCPU := resourcequota.PodCPU(pod)
		deltaMemory := resourcequota.PodMemory(pod)
		// if this is an update, we need to find the delta cpu/memory usage from previous state
		if a.GetOperation() == admission.Update {
			oldPod, err := client.Pods(a.GetNamespace()).Get(pod.Name)
			if err != nil {
				return false, err
			}
			oldCPU := resourcequota.PodCPU(oldPod)
			oldMemory := resourcequota.PodMemory(oldPod)
			deltaCPU = resource.NewMilliQuantity(deltaCPU.MilliValue()-oldCPU.MilliValue(), resource.DecimalSI)
			deltaMemory = resource.NewQuantity(deltaMemory.Value()-oldMemory.Value(), resource.DecimalSI)
		}
		hardMem, hardMemFound := status.Hard[api.ResourceMemory]
		if hardMemFound {
			if set[api.ResourceMemory] && resourcequota.IsPodMemoryUnbounded(pod) {
				return false, fmt.Errorf("limited to %s memory, but pod has no specified memory limit", hardMem.String())
			}
			used, usedFound := status.Used[api.ResourceMemory]
			if !usedFound {
				return false, fmt.Errorf("quota usage stats are not yet known, unable to admit resource until an accurate count is completed")
			}
			if used.Value()+deltaMemory.Value() > hardMem.Value() {
				return false, fmt.Errorf("limited to %s memory", hardMem.String())
			}
			status.Used[api.ResourceMemory] = *resource.NewQuantity(used.Value()+deltaMemory.Value(), resource.DecimalSI)
			dirty = true
		}
		hardCPU, hardCPUFound := status.Hard[api.ResourceCPU]
		if hardCPUFound {
			if set[api.ResourceCPU] && resourcequota.IsPodCPUUnbounded(pod) {
				return false, fmt.Errorf("limited to %s CPU, but pod has no specified cpu limit", hardCPU.String())
			}
			used, usedFound := status.Used[api.ResourceCPU]
			if !usedFound {
				return false, fmt.Errorf("quota usage stats are not yet known, unable to admit resource until an accurate count is completed")
			}
			if used.MilliValue()+deltaCPU.MilliValue() > hardCPU.MilliValue() {
				return false, fmt.Errorf("limited to %s CPU", hardCPU.String())
			}
			status.Used[api.ResourceCPU] = *resource.NewMilliQuantity(used.MilliValue()+deltaCPU.MilliValue(), resource.DecimalSI)
			dirty = true
		}
	}
	return dirty, nil
}
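// On updates, IncrementUsage charges only the difference between the new and
// old pod, so a pod that shrinks its request produces a negative delta and
// frees quota. Below is a small worked sketch of that delta arithmetic using
// plain int64 milli-CPU values rather than the resource.Quantity type; the
// chargeCPU name is a stand-in for illustration.
package main

import "fmt"

// chargeCPU applies the update-delta rule from IncrementUsage using plain
// milli-CPU values: usage is charged (or refunded) by newPodMilli - oldPodMilli.
func chargeCPU(used, hard, oldPodMilli, newPodMilli int64) (int64, error) {
	delta := newPodMilli - oldPodMilli
	if used+delta > hard {
		return used, fmt.Errorf("limited to %dm CPU", hard)
	}
	return used + delta, nil
}

func main() {
	// hard cap 1000m with 900m in use; pod grows from 100m to 300m -> over quota
	if _, err := chargeCPU(900, 1000, 100, 300); err != nil {
		fmt.Println(err) // limited to 1000m CPU
	}
	// same pod shrinks from 300m to 100m -> usage drops to 700m
	used, _ := chargeCPU(900, 1000, 300, 100)
	fmt.Println(used) // 700
}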
// Admit defaults the pod's service account, verifies the referenced service account exists, and
// optionally limits secret references and mounts API tokens. Mirror pods are left unmodified but
// may not reference service accounts or secrets.
func (s *serviceAccount) Admit(a admission.Attributes) (err error) {
	if a.GetResource() != string(api.ResourcePods) {
		return nil
	}
	obj := a.GetObject()
	if obj == nil {
		return nil
	}
	pod, ok := obj.(*api.Pod)
	if !ok {
		return nil
	}
	// Don't modify the spec of mirror pods.
	// That makes the qinglet very angry and confused, and it immediately deletes the pod (because the spec doesn't match)
	// That said, don't allow mirror pods to reference ServiceAccounts or SecretVolumeSources either
	if _, isMirrorPod := pod.Annotations[qinglet.ConfigMirrorAnnotationKey]; isMirrorPod {
		if len(pod.Spec.ServiceAccountName) != 0 {
			return admission.NewForbidden(a, fmt.Errorf("a mirror pod may not reference service accounts"))
		}
		for _, volume := range pod.Spec.Volumes {
			if volume.VolumeSource.Secret != nil {
				return admission.NewForbidden(a, fmt.Errorf("a mirror pod may not reference secrets"))
			}
		}
		return nil
	}
	// Set the default service account if needed
	if len(pod.Spec.ServiceAccountName) == 0 {
		pod.Spec.ServiceAccountName = DefaultServiceAccountName
	}
	// Ensure the referenced service account exists
	serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName)
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("error looking up service account %s/%s: %v", a.GetNamespace(), pod.Spec.ServiceAccountName, err))
	}
	if serviceAccount == nil {
		// err is nil here, so don't format it into the message
		return admission.NewForbidden(a, fmt.Errorf("missing service account %s/%s", a.GetNamespace(), pod.Spec.ServiceAccountName))
	}
	if s.LimitSecretReferences {
		if err := s.limitSecretReferences(serviceAccount, pod); err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	if s.MountServiceAccountToken {
		if err := s.mountServiceAccountToken(serviceAccount, pod); err != nil {
			return admission.NewForbidden(a, err)
		}
	}
	if len(pod.Spec.ImagePullSecrets) == 0 {
		pod.Spec.ImagePullSecrets = make([]api.LocalObjectReference, len(serviceAccount.ImagePullSecrets))
		copy(pod.Spec.ImagePullSecrets, serviceAccount.ImagePullSecrets)
	}
	return nil
}
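// The defaulting at the end copies the image pull secrets into a fresh slice
// rather than aliasing the service account's slice, presumably so later
// mutation of the pod cannot reach back into the cached service account
// object. A minimal sketch of that defaulting step, with stand-in types
// (localObjectReference, podSpec, applyServiceAccountDefaults are illustrative
// names, not this project's API):
package main

import "fmt"

type localObjectReference struct{ Name string }

type podSpec struct {
	ServiceAccountName string
	ImagePullSecrets   []localObjectReference
}

// applyServiceAccountDefaults mirrors the plugin's defaulting: fill in the
// account name if empty and copy (not alias) the account's pull secrets.
func applyServiceAccountDefaults(spec *podSpec, defaultName string, saPullSecrets []localObjectReference) {
	if spec.ServiceAccountName == "" {
		spec.ServiceAccountName = defaultName
	}
	if len(spec.ImagePullSecrets) == 0 {
		spec.ImagePullSecrets = make([]localObjectReference, len(saPullSecrets))
		copy(spec.ImagePullSecrets, saPullSecrets)
	}
}

func main() {
	spec := &podSpec{}
	applyServiceAccountDefaults(spec, "default", []localObjectReference{{Name: "registry-creds"}})
	fmt.Println(spec.ServiceAccountName, spec.ImagePullSecrets) // default [{registry-creds}]
}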