// Admit will deny any pod that defines SELinuxOptions or RunAsUser. func (p *plugin) Admit(a admission.Attributes) (err error) { if a.GetResource() != string(api.ResourcePods) { return nil } pod, ok := a.GetObject().(*api.Pod) if !ok { return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted") } if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SupplementalGroups != nil { return apierrors.NewForbidden(a.GetResource(), pod.Name, fmt.Errorf("SecurityContext.SupplementalGroups is forbidden")) } if pod.Spec.SecurityContext != nil { if pod.Spec.SecurityContext.SELinuxOptions != nil { return apierrors.NewForbidden(a.GetResource(), pod.Name, fmt.Errorf("pod.Spec.SecurityContext.SELinuxOptions is forbidden")) } if pod.Spec.SecurityContext.RunAsUser != nil { return apierrors.NewForbidden(a.GetResource(), pod.Name, fmt.Errorf("pod.Spec.SecurityContext.RunAsUser is forbidden")) } } for _, v := range pod.Spec.Containers { if v.SecurityContext != nil { if v.SecurityContext.SELinuxOptions != nil { return apierrors.NewForbidden(a.GetResource(), pod.Name, fmt.Errorf("SecurityContext.SELinuxOptions is forbidden")) } if v.SecurityContext.RunAsUser != nil { return apierrors.NewForbidden(a.GetResource(), pod.Name, fmt.Errorf("SecurityContext.RunAsUser is forbidden")) } } } return nil }
// checkAndDecrement checks if the provided PodDisruptionBudget allows any disruption. func (r *EvictionREST) checkAndDecrement(namespace string, podName string, pdb policy.PodDisruptionBudget) (ok bool, err error) { if pdb.Status.ObservedGeneration < pdb.Generation { return false, nil } if pdb.Status.PodDisruptionsAllowed < 0 { return false, errors.NewForbidden(policy.Resource("poddisruptionbudget"), pdb.Name, fmt.Errorf("pdb disruptions allowed is negative")) } if len(pdb.Status.DisruptedPods) > MaxDisruptedPodSize { return false, errors.NewForbidden(policy.Resource("poddisruptionbudget"), pdb.Name, fmt.Errorf("DisrputedPods map too big - too many evictions not confirmed by PDB controller")) } if pdb.Status.PodDisruptionsAllowed == 0 { return false, nil } pdb.Status.PodDisruptionsAllowed-- if pdb.Status.DisruptedPods == nil { pdb.Status.DisruptedPods = make(map[string]metav1.Time) } // Eviction handler needs to inform the PDB controller that it is about to delete a pod // so it should not consider it as available in calculations when updating PodDisruptions allowed. // If the pod is not deleted within a reasonable time limit PDB controller will assume that it won't // be deleted at all and remove it from DisruptedPod map. pdb.Status.DisruptedPods[podName] = metav1.Time{Time: time.Now()} if _, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(namespace).UpdateStatus(&pdb); err != nil { return false, err } return true, nil }
// ConfirmNoEscalation verifies that the user in ctx already holds rights that
// cover every rule of the given role; if not, it returns a forbidden error
// listing the missing permissions. Rule-resolution failures are non-fatal
// because rules are purely additive.
func ConfirmNoEscalation(ctx kapi.Context, resource unversioned.GroupResource, name string, ruleResolver, cachedRuleResolver AuthorizationRuleResolver, role authorizationinterfaces.Role) error {
	var ruleResolutionErrors []error

	user, ok := kapi.UserFrom(ctx)
	if !ok {
		return kapierrors.NewForbidden(resource, name, fmt.Errorf("no user provided in context"))
	}
	namespace, _ := kapi.NamespaceFrom(ctx)

	// if a cached resolver is provided, attempt to verify coverage against the cache, then fall back to the normal
	// path otherwise
	if cachedRuleResolver != nil {
		if ownerRules, err := cachedRuleResolver.RulesFor(user, namespace); err == nil {
			if ownerRightsCover, _ := Covers(ownerRules, role.Rules()); ownerRightsCover {
				return nil
			}
		}
	}

	ownerRules, err := ruleResolver.RulesFor(user, namespace)
	if err != nil {
		// do not fail in this case. Rules are purely additive, so we can continue with a coverage check based on the rules we have
		glog.V(1).Infof("non-fatal error getting rules for %v: %v", user, err)
		ruleResolutionErrors = append(ruleResolutionErrors, err)
	}

	ownerRightsCover, missingRights := Covers(ownerRules, role.Rules())
	if ownerRightsCover {
		return nil
	}

	// determine what resources the user is missing
	if compactedMissingRights, err := CompactRules(missingRights); err == nil {
		missingRights = compactedMissingRights
	}
	// Build a sorted, human-readable list of the missing rules for the error text.
	missingRightsStrings := make([]string, 0, len(missingRights))
	for _, missingRight := range missingRights {
		missingRightsStrings = append(missingRightsStrings, missingRight.CompactString())
	}
	sort.Strings(missingRightsStrings)

	var internalErr error
	if len(ruleResolutionErrors) > 0 {
		internalErr = fmt.Errorf("user %q cannot grant extra privileges:\n%v\nrule resolution errors: %v)", user.GetName(), strings.Join(missingRightsStrings, "\n"), ruleResolutionErrors)
	} else {
		internalErr = fmt.Errorf("user %q cannot grant extra privileges:\n%v", user.GetName(), strings.Join(missingRightsStrings, "\n"))
	}
	return kapierrors.NewForbidden(resource, name, internalErr)
}
// TestStatusAdmitsRouteOnForbidden verifies that a forbidden response to the
// route status update is tolerated and the previously recorded admission state
// is preserved.
func TestStatusAdmitsRouteOnForbidden(t *testing.T) {
	now := nowFn()
	// Pin the clock used by the admitter so recorded timestamps are deterministic.
	nowFn = func() unversioned.Time { return now }
	touched := unversioned.Time{Time: now.Add(-time.Minute)}
	p := &fakePlugin{}
	// The fake client serves this forbidden status for the update call.
	// NOTE(review): accessing .ErrStatus directly assumes NewForbidden returns
	// *StatusError (older client API); newer APIs need a type assertion — confirm.
	c := testclient.NewSimpleFake(&(errors.NewForbidden(kapi.Resource("Route"), "route1", nil).ErrStatus))
	admitter := NewStatusAdmitter(p, c, "test")
	err := admitter.HandleRoute(watch.Added, &routeapi.Route{
		ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")},
		Spec:       routeapi.RouteSpec{Host: "route1.test.local"},
		Status: routeapi.RouteStatus{
			Ingress: []routeapi.RouteIngress{
				{
					Host:       "route2.test.local",
					RouterName: "test",
					Conditions: []routeapi.RouteIngressCondition{
						{
							Type:               routeapi.RouteAdmitted,
							Status:             kapi.ConditionTrue,
							LastTransitionTime: &touched,
						},
					},
				},
			},
		},
	})
	checkResult(t, err, c, admitter, "route1.test.local", now, &touched.Time, 0, 0)
}
// Admit will deny any pod that defines AntiAffinity topology key other than unversioned.LabelHostname i.e. "kubernetes.io/hostname"
// in requiredDuringSchedulingRequiredDuringExecution and requiredDuringSchedulingIgnoredDuringExecution.
func (p *plugin) Admit(attributes admission.Attributes) (err error) {
	// Only pods are subject to this policy.
	if attributes.GetResource().GroupResource() != api.Resource("pods") {
		return nil
	}
	pod, ok := attributes.GetObject().(*api.Pod)
	if !ok {
		return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted")
	}
	affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
	if err != nil {
		// this is validated later
		return nil
	}
	// NOTE(review): affinity is dereferenced below without a nil check; this is
	// only safe if GetAffinityFromPodAnnotations never returns nil alongside a
	// nil error — confirm against its implementation.
	if affinity.PodAntiAffinity != nil {
		var podAntiAffinityTerms []api.PodAffinityTerm
		if len(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
			podAntiAffinityTerms = affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
		}
		// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
		//if len(affinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
		//	podAntiAffinityTerms = append(podAntiAffinityTerms, affinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
		//}
		for _, v := range podAntiAffinityTerms {
			// Only the node-hostname topology key is permitted for anti-affinity.
			if v.TopologyKey != unversioned.LabelHostname {
				return apierrors.NewForbidden(attributes.GetResource().GroupResource(), pod.Name, fmt.Errorf("affinity.PodAntiAffinity.RequiredDuringScheduling has TopologyKey %v but only key %v is allowed", v.TopologyKey, unversioned.LabelHostname))
			}
		}
	}
	return nil
}
// TestStatusAdmitsRouteOnForbidden verifies that a forbidden error injected via
// an update reactor on the status subresource is tolerated and the previously
// recorded admission state is preserved.
func TestStatusAdmitsRouteOnForbidden(t *testing.T) {
	now := nowFn()
	// Pin the clock used by the admitter so recorded timestamps are deterministic.
	nowFn = func() unversioned.Time { return now }
	touched := unversioned.Time{Time: now.Add(-time.Minute)}
	p := &fakePlugin{}
	c := testclient.NewSimpleFake(&routeapi.Route{ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")}})
	// Fail only updates of the status subresource with a forbidden error.
	c.PrependReactor("update", "routes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
		if action.GetSubresource() != "status" {
			return false, nil, nil
		}
		return true, nil, errors.NewForbidden(kapi.Resource("Route"), "route1", nil)
	})
	admitter := NewStatusAdmitter(p, c, "test")
	err := admitter.HandleRoute(watch.Added, &routeapi.Route{
		ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")},
		Spec:       routeapi.RouteSpec{Host: "route1.test.local"},
		Status: routeapi.RouteStatus{
			Ingress: []routeapi.RouteIngress{
				{
					Host:       "route2.test.local",
					RouterName: "test",
					Conditions: []routeapi.RouteIngressCondition{
						{
							Type:               routeapi.RouteAdmitted,
							Status:             kapi.ConditionTrue,
							LastTransitionTime: &touched,
						},
					},
				},
			},
		},
	})
	checkResult(t, err, c, admitter, "route1.test.local", now, &touched.Time, 0, 0)
}
func (s *Storage) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) { if u, ok := api.UserFrom(ctx); ok { if s.superUser != "" && u.GetName() == s.superUser { return s.StandardStorage.Create(ctx, obj) } // system:masters is special because the API server uses it for privileged loopback connections // therefore we know that a member of system:masters can always do anything for _, group := range u.GetGroups() { if group == user.SystemPrivilegedGroup { return s.StandardStorage.Create(ctx, obj) } } } clusterRoleBinding := obj.(*rbac.ClusterRoleBinding) rules, err := s.ruleResolver.GetRoleReferenceRules(ctx, clusterRoleBinding.RoleRef, clusterRoleBinding.Namespace) if err != nil { return nil, err } if err := validation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { return nil, errors.NewForbidden(groupResource, clusterRoleBinding.Name, err) } return s.StandardStorage.Create(ctx, obj) }
// Admit enforces namespace lifecycle rules: immortal namespaces cannot be
// deleted, operations on namespaced resources require the namespace to exist,
// and no new objects may be created in a terminating namespace.
func (l *lifecycle) Admit(a admission.Attributes) (err error) {
	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete && a.GetKind().GroupKind() == api.Kind("Namespace") && l.immortalNamespaces.Has(a.GetName()) {
		return errors.NewForbidden(a.GetResource().GroupResource(), a.GetName(), fmt.Errorf("this namespace may not be deleted"))
	}

	// if we're here, then we've already passed authentication, so we're allowed to do what we're trying to do
	// if we're here, then the API server has found a route, which means that if we have a non-empty namespace
	// its a namespaced resource.
	if len(a.GetNamespace()) == 0 || a.GetKind().GroupKind() == api.Kind("Namespace") {
		// if a namespace is deleted, we want to prevent all further creates into it
		// while it is undergoing termination. to reduce incidences where the cache
		// is slow to update, we forcefully remove the namespace from our local cache.
		// this will cause a live lookup of the namespace to get its latest state even
		// before the watch notification is received.
		if a.GetOperation() == admission.Delete {
			l.store.Delete(&api.Namespace{
				ObjectMeta: api.ObjectMeta{
					Name: a.GetName(),
				},
			})
		}
		return nil
	}

	// Look up the namespace in the local cache first.
	namespaceObj, exists, err := l.store.Get(&api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
	})
	if err != nil {
		return errors.NewInternalError(err)
	}

	// refuse to operate on non-existent namespaces
	if !exists {
		// in case of latency in our caches, make a call direct to storage to verify that it truly exists or not
		namespaceObj, err = l.client.Core().Namespaces().Get(a.GetNamespace())
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
	}

	// ensure that we're not trying to create objects in terminating namespaces
	if a.GetOperation() == admission.Create {
		namespace := namespaceObj.(*api.Namespace)
		if namespace.Status.Phase != api.NamespaceTerminating {
			return nil
		}

		// TODO: This should probably not be a 403
		return admission.NewForbidden(a, fmt.Errorf("Unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
	}

	return nil
}
// forbidden renders a simple forbidden error func forbidden(reason string, attributes authorizer.AuthorizationAttributes, w http.ResponseWriter, req *http.Request) { kind := "" name := "" // the attributes can be empty for two basic reasons: // 1. malformed API request // 2. not an API request at all // In these cases, just assume default that will work better than nothing if attributes != nil { kind = attributes.GetResource() if len(attributes.GetAPIGroup()) > 0 { kind = attributes.GetAPIGroup() + "." + kind } name = attributes.GetResourceName() } // Reason is an opaque string that describes why access is allowed or forbidden (forbidden by the time we reach here). // We don't have direct access to kind or name (not that those apply either in the general case) // We create a NewForbidden to stay close the API, but then we override the message to get a serialization // that makes sense when a human reads it. forbiddenError, _ := kapierrors.NewForbidden(unversioned.GroupResource{Group: attributes.GetAPIGroup(), Resource: attributes.GetResource()}, name, errors.New("") /*discarded*/).(*kapierrors.StatusError) forbiddenError.ErrStatus.Message = reason formatted := &bytes.Buffer{} output, err := runtime.Encode(kapi.Codecs.LegacyCodec(kapi.SchemeGroupVersion), &forbiddenError.ErrStatus) if err != nil { fmt.Fprintf(formatted, "%s", forbiddenError.Error()) } else { json.Indent(formatted, output, "", " ") } w.Header().Set("Content-Type", restful.MIME_JSON) w.WriteHeader(http.StatusForbidden) w.Write(formatted.Bytes()) }
// Admit will deny any pod that defines AntiAffinity topology key other than metav1.LabelHostname i.e. "kubernetes.io/hostname"
// in requiredDuringSchedulingRequiredDuringExecution and requiredDuringSchedulingIgnoredDuringExecution.
func (p *plugin) Admit(attributes admission.Attributes) (err error) {
	// Ignore all calls to subresources or resources other than pods.
	if len(attributes.GetSubresource()) != 0 || attributes.GetResource().GroupResource() != api.Resource("pods") {
		return nil
	}
	pod, ok := attributes.GetObject().(*api.Pod)
	if !ok {
		return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted")
	}
	affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
	if err != nil {
		glog.V(5).Infof("Invalid Affinity detected, but we will leave handling of this to validation phase")
		return nil
	}
	if affinity != nil && affinity.PodAntiAffinity != nil {
		var podAntiAffinityTerms []api.PodAffinityTerm
		if len(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
			podAntiAffinityTerms = affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
		}
		// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
		//if len(affinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
		//	podAntiAffinityTerms = append(podAntiAffinityTerms, affinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
		//}
		for _, v := range podAntiAffinityTerms {
			// Only the node-hostname topology key is permitted for anti-affinity.
			if v.TopologyKey != metav1.LabelHostname {
				return apierrors.NewForbidden(attributes.GetResource().GroupResource(), pod.Name, fmt.Errorf("affinity.PodAntiAffinity.RequiredDuringScheduling has TopologyKey %v but only key %v is allowed", v.TopologyKey, metav1.LabelHostname))
			}
		}
	}
	return nil
}
// isAllowed checks to see if the current user has rights to issue a LocalSubjectAccessReview on the namespace they're attempting to access func (r *REST) isAllowed(ctx kapi.Context, rar *authorizationapi.ResourceAccessReview) error { localRARAttributes := authorizer.DefaultAuthorizationAttributes{ Verb: "create", Resource: "localresourceaccessreviews", } allowed, reason, err := r.authorizer.Authorize(kapi.WithNamespace(ctx, rar.Action.Namespace), localRARAttributes) if err != nil { return kapierrors.NewForbidden(authorizationapi.Resource(localRARAttributes.GetResource()), localRARAttributes.GetResourceName(), err) } if !allowed { forbiddenError := kapierrors.NewForbidden(authorizationapi.Resource(localRARAttributes.GetResource()), localRARAttributes.GetResourceName(), errors.New("") /*discarded*/) forbiddenError.ErrStatus.Message = reason return forbiddenError } return nil }
// List retrieves a list of Projects that match label. func (s *REST) List(ctx kapi.Context, label labels.Selector, field fields.Selector) (runtime.Object, error) { user, ok := kapi.UserFrom(ctx) if !ok { return nil, kerrors.NewForbidden("Project", "", fmt.Errorf("unable to list projects without a user on the context")) } namespaceList, err := s.lister.List(user) if err != nil { return nil, err } return convertNamespaceList(namespaceList), nil }
// Validate validates a new image stream. func (s Strategy) Validate(ctx kapi.Context, obj runtime.Object) fielderrors.ValidationErrorList { stream := obj.(*api.ImageStream) user, ok := kapi.UserFrom(ctx) if !ok { return fielderrors.ValidationErrorList{kerrors.NewForbidden("imageStream", stream.Name, fmt.Errorf("unable to update an ImageStream without a user on the context"))} } errs := s.tagVerifier.Verify(nil, stream, user) errs = append(errs, s.tagsChanged(nil, stream)...) errs = append(errs, validation.ValidateImageStream(stream)...) return errs }
// NewForbidden is a utility function to return a well-formatted admission control error response func NewForbidden(a Attributes, internalError error) error { // do not double wrap an error of same type if apierrors.IsForbidden(internalError) { return internalError } name, kind, err := extractKindName(a) if err != nil { return apierrors.NewInternalError(utilerrors.NewAggregate([]error{internalError, err})) } return apierrors.NewForbidden(kind.Kind, name, internalError) }
func (v *limitVerifier) VerifyLimits(namespace string, is *imageapi.ImageStream) error { limits, err := v.limiter.LimitsForNamespace(namespace) if err != nil || len(limits) == 0 { return err } usage := GetImageStreamUsage(is) if err := verifyImageStreamUsage(usage, limits); err != nil { return kapierrors.NewForbidden(imageapi.Resource("ImageStream"), is.Name, err) } return nil }
// Admit enforces namespace lifecycle rules: immortal namespaces cannot be
// deleted, namespaced resources require an existing namespace, and no new
// objects may be created in a terminating namespace.
func (l *lifecycle) Admit(a admission.Attributes) (err error) {
	// prevent deletion of immortal namespaces
	if a.GetOperation() == admission.Delete && a.GetKind() == "Namespace" && l.immortalNamespaces.Has(a.GetName()) {
		return errors.NewForbidden(a.GetKind(), a.GetName(), fmt.Errorf("this namespace may not be deleted"))
	}

	// Resolve the resource to its kind and REST mapping to decide whether it
	// is namespace-scoped; cluster-scoped resources are not checked further.
	gvk, err := api.RESTMapper.KindFor(a.GetResource())
	if err != nil {
		return errors.NewInternalError(err)
	}
	mapping, err := api.RESTMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
	if err != nil {
		return errors.NewInternalError(err)
	}
	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
		return nil
	}

	// Look up the namespace in the local cache first.
	namespaceObj, exists, err := l.store.Get(&api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:      a.GetNamespace(),
			Namespace: "",
		},
	})
	if err != nil {
		return errors.NewInternalError(err)
	}

	// refuse to operate on non-existent namespaces
	if !exists {
		// in case of latency in our caches, make a call direct to storage to verify that it truly exists or not
		namespaceObj, err = l.client.Namespaces().Get(a.GetNamespace())
		if err != nil {
			if errors.IsNotFound(err) {
				return err
			}
			return errors.NewInternalError(err)
		}
	}

	// ensure that we're not trying to create objects in terminating namespaces
	if a.GetOperation() == admission.Create {
		namespace := namespaceObj.(*api.Namespace)
		if namespace.Status.Phase != api.NamespaceTerminating {
			return nil
		}

		// TODO: This should probably not be a 403
		return admission.NewForbidden(a, fmt.Errorf("Unable to create new content in namespace %s because it is being terminated.", a.GetNamespace()))
	}

	return nil
}
func (s *Storage) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) { if rbacregistry.EscalationAllowed(ctx, s.superUser) { return s.StandardStorage.Create(ctx, obj) } clusterRole := obj.(*rbac.ClusterRole) rules := clusterRole.Rules if err := validation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { return nil, errors.NewForbidden(groupResource, clusterRole.Name, err) } return s.StandardStorage.Create(ctx, obj) }
// Admit enforces that pod and its project node label selectors matches at least a node in the cluster. func (p *podNodeEnvironment) Admit(a admission.Attributes) (err error) { // ignore anything except create or update of pods if !(a.GetOperation() == admission.Create || a.GetOperation() == admission.Update) { return nil } resource := a.GetResource() if resource != "pods" { return nil } obj := a.GetObject() pod, ok := obj.(*kapi.Pod) if !ok { return nil } name := pod.Name projects, err := projectcache.GetProjectCache() if err != nil { return err } namespace, err := projects.GetNamespaceObject(a.GetNamespace()) if err != nil { return apierrors.NewForbidden(resource, name, err) } projectNodeSelector, err := projects.GetNodeSelectorMap(namespace) if err != nil { return err } if labelselector.Conflicts(projectNodeSelector, pod.Spec.NodeSelector) { return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its project node label selector")) } // modify pod node selector = project node selector + current pod node selector pod.Spec.NodeSelector = labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector) return nil }
// Admit enforces that pod and its project node label selectors matches at least a node in the cluster. func (p *podNodeEnvironment) Admit(a admission.Attributes) (err error) { resource := a.GetResource() if resource != "pods" { return nil } if a.GetSubresource() != "" { // only run the checks below on pods proper and not subresources return nil } obj := a.GetObject() pod, ok := obj.(*kapi.Pod) if !ok { return nil } name := pod.Name projects, err := projectcache.GetProjectCache() if err != nil { return err } namespace, err := projects.GetNamespaceObject(a.GetNamespace()) if err != nil { return apierrors.NewForbidden(resource, name, err) } projectNodeSelector, err := projects.GetNodeSelectorMap(namespace) if err != nil { return err } if labelselector.Conflicts(projectNodeSelector, pod.Spec.NodeSelector) { return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its project node label selector")) } // modify pod node selector = project node selector + current pod node selector pod.Spec.NodeSelector = labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector) return nil }
func TestErrors(t *testing.T) { oc, _, _ := NewErrorClients(errors.NewNotFound(deployapi.Resource("DeploymentConfigList"), "")) _, err := oc.DeploymentConfigs("test").List(kapi.ListOptions{}) if !errors.IsNotFound(err) { t.Fatalf("unexpected error: %v", err) } oc, _, _ = NewErrorClients(errors.NewForbidden(deployapi.Resource("DeploymentConfigList"), "", nil)) _, err = oc.DeploymentConfigs("test").List(kapi.ListOptions{}) if !errors.IsForbidden(err) { t.Fatalf("unexpected error: %v", err) } }
func (s *Storage) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) { if user, ok := api.UserFrom(ctx); ok { if s.superUser != "" && user.GetName() == s.superUser { return s.StandardStorage.Create(ctx, obj) } } clusterRole := obj.(*rbac.ClusterRole) rules := clusterRole.Rules if err := validation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { return nil, errors.NewForbidden(groupResource, clusterRole.Name, err) } return s.StandardStorage.Create(ctx, obj) }
// NewForbidden is a utility function to return a well-formatted admission control error response
func NewForbidden(a Attributes, internalError error) error {
	// do not double wrap an error of same type
	if apierrors.IsForbidden(internalError) {
		return internalError
	}

	// Default the name when the object carries none (e.g. before generation).
	name := "Unknown"
	kind := a.GetKind()
	obj := a.GetObject()
	if obj != nil {
		objectMeta, err := api.ObjectMetaFor(obj)
		if err != nil {
			// Fall back to the default name if we cannot read object metadata.
			return apierrors.NewForbidden(kind, name, internalError)
		}

		// this is necessary because name object name generation has not occurred yet
		if len(objectMeta.Name) > 0 {
			name = objectMeta.Name
		} else if len(objectMeta.GenerateName) > 0 {
			name = objectMeta.GenerateName
		}
	}
	return apierrors.NewForbidden(kind, name, internalError)
}
func (s *Storage) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) { if rbacregistry.EscalationAllowed(ctx) { return s.StandardStorage.Create(ctx, obj) } clusterRoleBinding := obj.(*rbac.ClusterRoleBinding) rules, err := s.ruleResolver.GetRoleReferenceRules(clusterRoleBinding.RoleRef, clusterRoleBinding.Namespace) if err != nil { return nil, err } if err := validation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { return nil, errors.NewForbidden(groupResource, clusterRoleBinding.Name, err) } return s.StandardStorage.Create(ctx, obj) }
func TestRunTag_AddRestricted(t *testing.T) { client := testclient.NewSimpleFake() client.PrependReactor("create", "imagestreamtags", func(action ktc.Action) (handled bool, ret runtime.Object, err error) { return true, action.(ktc.CreateAction).GetObject(), nil }) client.PrependReactor("update", "imagestreamtags", func(action ktc.Action) (handled bool, ret runtime.Object, err error) { return true, nil, kapierrors.NewForbidden(imageapi.Resource("imagestreamtags"), "rails:tip", fmt.Errorf("dne")) }) test := struct { opts *TagOptions expectedActions []testAction expectedErr error }{ opts: &TagOptions{ out: os.Stdout, osClient: client, ref: imageapi.DockerImageReference{ Namespace: "openshift", Name: "ruby", Tag: "2.0", }, sourceKind: "ImageStreamTag", destNamespace: []string{"yourproject"}, destNameAndTag: []string{"rails:tip"}, }, expectedActions: []testAction{ {verb: "update", resource: "imagestreamtags"}, {verb: "create", resource: "imagestreamtags"}, }, expectedErr: nil, } if err := test.opts.RunTag(); err != test.expectedErr { t.Fatalf("error mismatch: expected %v, got %v", test.expectedErr, err) } got := client.Actions() if len(test.expectedActions) != len(got) { t.Fatalf("action length mismatch: expectedc %d, got %d", len(test.expectedActions), len(got)) } for i, action := range test.expectedActions { if !got[i].Matches(action.verb, action.resource) { t.Errorf("action mismatch: expected %s %s, got %s %s", action.verb, action.resource, got[i].GetVerb(), got[i].GetResource()) } } }
func TestRunTag_DeleteOld(t *testing.T) { streams := testData() client := testclient.NewSimpleFake(streams[1]) client.PrependReactor("delete", "imagestreamtags", func(action ktc.Action) (handled bool, ret runtime.Object, err error) { return true, nil, kapierrors.NewForbidden(imageapi.Resource("imagestreamtags"), "rails:tip", fmt.Errorf("dne")) }) client.PrependReactor("get", "imagestreams", func(action ktc.Action) (handled bool, ret runtime.Object, err error) { return true, testData()[1], nil }) client.PrependReactor("update", "imagestreams", func(action ktc.Action) (handled bool, ret runtime.Object, err error) { return true, nil, nil }) test := struct { opts *TagOptions expectedActions []testAction expectedErr error }{ opts: &TagOptions{ out: os.Stdout, osClient: client, deleteTag: true, destNamespace: []string{"yourproject"}, destNameAndTag: []string{"rails:tip"}, }, expectedActions: []testAction{ {verb: "delete", resource: "imagestreamtags"}, {verb: "get", resource: "imagestreams"}, {verb: "update", resource: "imagestreams"}, }, expectedErr: nil, } if err := test.opts.RunTag(); err != test.expectedErr { t.Fatalf("error mismatch: expected %v, got %v", test.expectedErr, err) } got := client.Actions() if len(test.expectedActions) != len(got) { t.Fatalf("action length mismatch: expectedc %d, got %d", len(test.expectedActions), len(got)) } for i, action := range test.expectedActions { if !got[i].Matches(action.verb, action.resource) { t.Errorf("action mismatch: expected %s %s, got %s %s", action.verb, action.resource, got[i].GetVerb(), got[i].GetResource()) } } }
// TestStatusAdmitsRouteOnForbidden verifies that when the status update is
// forbidden, HandleRoute still succeeds, the update action is recorded, the
// route's ingress is reset to the spec host, the existing admitted condition is
// untouched, and the prior modification time is remembered.
func TestStatusAdmitsRouteOnForbidden(t *testing.T) {
	now := nowFn()
	// Pin the clock used by the admitter so recorded timestamps are deterministic.
	nowFn = func() unversioned.Time { return now }
	touched := unversioned.Time{Time: now.Add(-time.Minute)}
	p := &fakePlugin{}
	// The fake client serves this forbidden status for the update call.
	c := testclient.NewSimpleFake(&(errors.NewForbidden(kapi.Resource("Route"), "route1", nil).(*errors.StatusError).ErrStatus))
	admitter := NewStatusAdmitter(p, c, "test")
	err := admitter.HandleRoute(watch.Added, &routeapi.Route{
		ObjectMeta: kapi.ObjectMeta{Name: "route1", Namespace: "default", UID: types.UID("uid1")},
		Spec:       routeapi.RouteSpec{Host: "route1.test.local"},
		Status: routeapi.RouteStatus{
			Ingress: []routeapi.RouteIngress{
				{
					Host:       "route2.test.local",
					RouterName: "test",
					Conditions: []routeapi.RouteIngressCondition{
						{
							Type:               routeapi.RouteAdmitted,
							Status:             kapi.ConditionTrue,
							LastTransitionTime: &touched,
						},
					},
				},
			},
		},
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Exactly one status-subresource update must have been attempted.
	if len(c.Actions()) != 1 {
		t.Fatalf("unexpected actions: %#v", c.Actions())
	}
	action := c.Actions()[0]
	if action.GetVerb() != "update" || action.GetResource() != "routes" || action.GetSubresource() != "status" {
		t.Fatalf("unexpected action: %#v", action)
	}
	// The ingress host should have been reset to the spec host.
	obj := c.Actions()[0].(ktestclient.UpdateAction).GetObject().(*routeapi.Route)
	if len(obj.Status.Ingress) != 1 || obj.Status.Ingress[0].Host != "route1.test.local" {
		t.Fatalf("expected route reset: %#v", obj)
	}
	// The pre-existing admitted condition must be preserved untouched.
	condition := obj.Status.Ingress[0].Conditions[0]
	if condition.LastTransitionTime == nil || *condition.LastTransitionTime != touched || condition.Status != kapi.ConditionTrue || condition.Reason != "" {
		t.Fatalf("unexpected condition: %#v", condition)
	}
	// The admitter should remember the prior modification time for the route UID.
	if v, ok := admitter.expected.Peek(types.UID("uid1")); !ok || !reflect.DeepEqual(v, touched.Time) {
		t.Fatalf("did not record last modification time: %#v %#v", admitter.expected, v)
	}
}
// List retrieves a list of Projects that match label. func (s *REST) List(ctx kapi.Context, options *kapi.ListOptions) (runtime.Object, error) { user, ok := kapi.UserFrom(ctx) if !ok { return nil, kerrors.NewForbidden(projectapi.Resource("project"), "", fmt.Errorf("unable to list projects without a user on the context")) } namespaceList, err := s.lister.List(user) if err != nil { return nil, err } m := nsregistry.MatchNamespace(oapi.ListOptionsToSelectors(options)) list, err := filterList(namespaceList, m, nil) if err != nil { return nil, err } return convertNamespaceList(list.(*kapi.NamespaceList)), nil }
// ConfirmNoEscalation verifies that the user in ctx already holds rights
// (combining namespace-local and cluster-global rules) that cover every rule
// of the given role; if not, it returns a forbidden error listing the missing
// permissions. Rule-resolution failures are non-fatal because rules are purely
// additive.
func ConfirmNoEscalation(ctx kapi.Context, resource unversioned.GroupResource, name string, ruleResolver AuthorizationRuleResolver, role authorizationinterfaces.Role) error {
	ruleResolutionErrors := []error{}

	ownerLocalRules, err := ruleResolver.GetEffectivePolicyRules(ctx)
	if err != nil {
		// do not fail in this case. Rules are purely additive, so we can continue with a coverage check based on the rules we have
		user, _ := kapi.UserFrom(ctx)
		glog.V(1).Infof("non-fatal error getting local rules for %v: %v", user, err)
		ruleResolutionErrors = append(ruleResolutionErrors, err)
	}

	// Resolve cluster-scoped rules by clearing the namespace from the context.
	masterContext := kapi.WithNamespace(ctx, "")
	ownerGlobalRules, err := ruleResolver.GetEffectivePolicyRules(masterContext)
	if err != nil {
		// do not fail in this case. Rules are purely additive, so we can continue with a coverage check based on the rules we have
		user, _ := kapi.UserFrom(ctx)
		glog.V(1).Infof("non-fatal error getting global rules for %v: %v", user, err)
		ruleResolutionErrors = append(ruleResolutionErrors, err)
	}

	ownerRules := make([]authorizationapi.PolicyRule, 0, len(ownerGlobalRules)+len(ownerLocalRules))
	ownerRules = append(ownerRules, ownerLocalRules...)
	ownerRules = append(ownerRules, ownerGlobalRules...)

	ownerRightsCover, missingRights := Covers(ownerRules, role.Rules())
	if !ownerRightsCover {
		if compactedMissingRights, err := CompactRules(missingRights); err == nil {
			missingRights = compactedMissingRights
		}

		// Build a sorted, human-readable list of the missing rules for the error text.
		missingRightsStrings := make([]string, 0, len(missingRights))
		for _, missingRight := range missingRights {
			missingRightsStrings = append(missingRightsStrings, missingRight.CompactString())
		}
		sort.Strings(missingRightsStrings)

		user, _ := kapi.UserFrom(ctx)
		var internalErr error
		if len(ruleResolutionErrors) > 0 {
			internalErr = fmt.Errorf("user %q cannot grant extra privileges:\n%v\nrule resolution errors: %v)", user.GetName(), strings.Join(missingRightsStrings, "\n"), ruleResolutionErrors)
		} else {
			internalErr = fmt.Errorf("user %q cannot grant extra privileges:\n%v", user.GetName(), strings.Join(missingRightsStrings, "\n"))
		}
		return kapierrors.NewForbidden(resource, name, internalErr)
	}

	return nil
}
func (s *Storage) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) { if user, ok := api.UserFrom(ctx); ok { if s.superUser != "" && user.GetName() == s.superUser { return s.StandardStorage.Create(ctx, obj) } } roleBinding := obj.(*rbac.RoleBinding) rules, err := s.ruleResolver.GetRoleReferenceRules(ctx, roleBinding.RoleRef, roleBinding.Namespace) if err != nil { return nil, err } if err := validation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { return nil, errors.NewForbidden(groupResource, roleBinding.Name, err) } return s.StandardStorage.Create(ctx, obj) }
func (s *Storage) Update(ctx api.Context, name string, obj rest.UpdatedObjectInfo) (runtime.Object, bool, error) { if rbacregistry.EscalationAllowed(ctx, s.superUser) { return s.StandardStorage.Update(ctx, name, obj) } nonEscalatingInfo := wrapUpdatedObjectInfo(obj, func(ctx api.Context, obj runtime.Object, oldObj runtime.Object) (runtime.Object, error) { clusterRole := obj.(*rbac.ClusterRole) rules := clusterRole.Rules if err := validation.ConfirmNoEscalation(ctx, s.ruleResolver, rules); err != nil { return nil, errors.NewForbidden(groupResource, clusterRole.Name, err) } return obj, nil }) return s.StandardStorage.Update(ctx, name, nonEscalatingInfo) }