// createSchedulerServiceIfNeeded will create the specified service if it
// doesn't already exist.
func (m *SchedulerServer) createSchedulerServiceIfNeeded(serviceName string, servicePort int) error {
	ctx := api.NewDefaultContext()
	if _, err := m.client.Services(api.NamespaceValue(ctx)).Get(serviceName); err == nil {
		// The service already exists.
		return nil
	}
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: api.NamespaceDefault,
			Labels:    map[string]string{"provider": "k8sm", "component": "scheduler"},
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}},
			// maintained by this code, not by the pod selector
			Selector:        nil,
			SessionAffinity: api.ServiceAffinityNone,
		},
	}
	if m.ServiceAddress != nil {
		svc.Spec.ClusterIP = m.ServiceAddress.String()
	}
	_, err := m.client.Services(api.NamespaceValue(ctx)).Create(svc)
	if err != nil && errors.IsAlreadyExists(err) {
		err = nil
	}
	return err
}
// Create transforms a LocalSAR into a ClusterSAR that is requesting a namespace. That collapses the code paths.
// LocalSubjectAccessReview exists to allow clean expression of policy.
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	localSAR, ok := obj.(*authorizationapi.LocalSubjectAccessReview)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a localSubjectAccessReview: %#v", obj))
	}
	if errs := authorizationvalidation.ValidateLocalSubjectAccessReview(localSAR); len(errs) > 0 {
		return nil, kapierrors.NewInvalid(authorizationapi.Kind(localSAR.Kind), "", errs)
	}

	if namespace := kapi.NamespaceValue(ctx); len(namespace) == 0 {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace))
	} else if (len(localSAR.Action.Namespace) > 0) && (namespace != localSAR.Action.Namespace) {
		return nil, field.Invalid(field.NewPath("namespace"), localSAR.Action.Namespace, fmt.Sprintf("namespace must be: %v", namespace))
	}

	// transform this into a SubjectAccessReview
	clusterSAR := &authorizationapi.SubjectAccessReview{
		Action: localSAR.Action,
		User:   localSAR.User,
		Groups: localSAR.Groups,
		Scopes: localSAR.Scopes,
	}
	clusterSAR.Action.Namespace = kapi.NamespaceValue(ctx)

	return r.clusterSARRegistry.CreateSubjectAccessReview(kapi.WithNamespace(ctx, ""), clusterSAR)
}
func (t *Tester) testListFound(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()

	foo1 := copyOrDie(obj)
	foo1Meta := t.getObjectMetaOrFail(foo1)
	foo1Meta.Name = "foo1"
	foo1Meta.Namespace = api.NamespaceValue(ctx)
	foo2 := copyOrDie(obj)
	foo2Meta := t.getObjectMetaOrFail(foo2)
	foo2Meta.Name = "foo2"
	foo2Meta.Namespace = api.NamespaceValue(ctx)

	existing := assignFn([]runtime.Object{foo1, foo2})

	listObj, err := t.storage.(rest.Lister).List(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(existing) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(existing, items) {
		t.Errorf("expected: %#v, got: %#v", existing, items)
	}
}
// setEndpoints sets the endpoints for the given service.
// in a multi-master scenario only the master will be publishing an endpoint.
// see SchedulerServer.bootstrap.
func (m *SchedulerServer) setEndpoints(serviceName string, ip net.IP, port int) error {
	// The setting we want to find.
	want := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{{IP: ip.String()}},
		Ports:     []api.EndpointPort{{Port: port, Protocol: api.ProtocolTCP}},
	}}

	ctx := api.NewDefaultContext()
	e, err := m.client.Endpoints(api.NamespaceValue(ctx)).Get(serviceName)
	createOrUpdate := m.client.Endpoints(api.NamespaceValue(ctx)).Update
	if err != nil {
		if errors.IsNotFound(err) {
			createOrUpdate = m.client.Endpoints(api.NamespaceValue(ctx)).Create
		}
		e = &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:      serviceName,
				Namespace: api.NamespaceDefault,
			},
		}
	}
	if !reflect.DeepEqual(e.Subsets, want) {
		e.Subsets = want
		glog.Infof("setting endpoints for master service %q to %#v", serviceName, e)
		_, err = createOrUpdate(e)
		return err
	}
	// We didn't make any changes, no need to actually call update.
	return nil
}
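// The two SchedulerServer helpers above are meant to be driven from the server's
// bootstrap path (see SchedulerServer.bootstrap). Below is a minimal sketch of that
// wiring; the helper name and the "k8sm-scheduler" service name are assumptions for
// illustration only, not taken from the actual bootstrap code.
func (m *SchedulerServer) publishSchedulerService(addr net.IP, port int) error {
	const serviceName = "k8sm-scheduler" // placeholder assumption
	// make sure the headless service object exists before publishing endpoints
	if err := m.createSchedulerServiceIfNeeded(serviceName, port); err != nil {
		return err
	}
	// point the service's endpoints at this master instance
	return m.setEndpoints(serviceName, addr, port)
}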
func (t *Tester) testListMatchLabels(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()
	testLabels := map[string]string{"key": "value"}

	foo1 := copyOrDie(obj)
	foo1Meta := t.getObjectMetaOrFail(foo1)
	foo1Meta.Name = "foo1"
	foo1Meta.Namespace = api.NamespaceValue(ctx)
	foo2 := copyOrDie(obj)
	foo2Meta := t.getObjectMetaOrFail(foo2)
	foo2Meta.Name = "foo2"
	foo2Meta.Namespace = api.NamespaceValue(ctx)
	foo2Meta.Labels = testLabels

	existing := assignFn([]runtime.Object{foo1, foo2})
	filtered := []runtime.Object{existing[1]}

	selector := labels.SelectorFromSet(labels.Set(testLabels))
	listObj, err := t.storage.(rest.Lister).List(ctx, selector, fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(filtered) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(filtered, items) {
		t.Errorf("expected: %#v, got: %#v", filtered, items)
	}
}
// ensurePolicyBindingToMaster returns a PolicyBinding object that has a PolicyRef pointing to the Policy in the passed namespace.
func (m *VirtualStorage) ensurePolicyBindingToMaster(ctx kapi.Context, policyNamespace, policyBindingName string) (*authorizationapi.PolicyBinding, error) {
	policyBinding, err := m.BindingRegistry.GetPolicyBinding(ctx, policyBindingName)
	if err != nil {
		if !kapierrors.IsNotFound(err) {
			return nil, err
		}

		// if we have no policyBinding, go ahead and make one. creating one here collapses code paths below. We only take this hit once
		policyBinding = policybindingregistry.NewEmptyPolicyBinding(kapi.NamespaceValue(ctx), policyNamespace, policyBindingName)
		if err := m.BindingRegistry.CreatePolicyBinding(ctx, policyBinding); err != nil {
			return nil, err
		}

		policyBinding, err = m.BindingRegistry.GetPolicyBinding(ctx, policyBindingName)
		if err != nil {
			return nil, err
		}
	}

	if policyBinding.RoleBindings == nil {
		policyBinding.RoleBindings = make(map[string]*authorizationapi.RoleBinding)
	}

	return policyBinding, nil
}
// Create registers a given new ResourceAccessReview instance to r.registry.
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	resourceAccessReview, ok := obj.(*authorizationapi.ResourceAccessReview)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a resourceAccessReview: %#v", obj))
	}
	if errs := authorizationvalidation.ValidateResourceAccessReview(resourceAccessReview); len(errs) > 0 {
		return nil, kapierrors.NewInvalid(authorizationapi.Kind(resourceAccessReview.Kind), "", errs)
	}

	// if a namespace is present on the request, then the namespace on the RAR is overwritten.
	// This is to support backwards compatibility. To have gotten here in this state, it means that
	// the authorizer decided that a user could run an RAR against this namespace
	if namespace := kapi.NamespaceValue(ctx); len(namespace) > 0 {
		resourceAccessReview.Action.Namespace = namespace
	} else if err := r.isAllowed(ctx, resourceAccessReview); err != nil {
		// this check is mutually exclusive to the condition above. localSAR and localRAR both clear the namespace before delegating their calls
		// We only need to check if the RAR is allowed **again** if the authorizer didn't already approve the request for a legacy call.
		return nil, err
	}

	requestContext := kapi.WithNamespace(ctx, resourceAccessReview.Action.Namespace)
	attributes := authorizer.ToDefaultAuthorizationAttributes(resourceAccessReview.Action)
	users, groups, err := r.authorizer.GetAllowedSubjects(requestContext, attributes)

	response := &authorizationapi.ResourceAccessReviewResponse{
		Namespace: resourceAccessReview.Action.Namespace,
		Users:     users,
		Groups:    groups,
	}
	if err != nil {
		response.EvaluationError = err.Error()
	}

	return response, nil
}
// ListClusterPolicyBindings obtains a list of clusterPolicyBindings that match a selector.
func (r *ClusterPolicyBindingRegistry) ListClusterPolicyBindings(ctx kapi.Context, label labels.Selector, field fields.Selector) (*authorizationapi.ClusterPolicyBindingList, error) {
	if r.Err != nil {
		return nil, r.Err
	}

	namespace := kapi.NamespaceValue(ctx)
	list := make([]authorizationapi.ClusterPolicyBinding, 0)

	if namespace == kapi.NamespaceAll {
		for _, curr := range r.ClusterPolicyBindings {
			for _, binding := range curr {
				list = append(list, binding)
			}
		}
	} else {
		if namespacedBindings, ok := r.ClusterPolicyBindings[namespace]; ok {
			for _, curr := range namespacedBindings {
				list = append(list, curr)
			}
		}
	}

	return &authorizationapi.ClusterPolicyBindingList{
		Items: list,
	}, nil
}
// ListPolicies obtains a list of policies that match a selector.
func (r *PolicyRegistry) ListPolicies(ctx kapi.Context, label labels.Selector, field fields.Selector) (*authorizationapi.PolicyList, error) {
	if r.Err != nil {
		return nil, r.Err
	}

	namespace := kapi.NamespaceValue(ctx)
	list := make([]authorizationapi.Policy, 0)

	if namespace == kapi.NamespaceAll {
		for _, curr := range r.Policies {
			for _, policy := range curr {
				list = append(list, policy)
			}
		}
	} else {
		if namespacedPolicies, ok := r.Policies[namespace]; ok {
			for _, curr := range namespacedPolicies {
				list = append(list, curr)
			}
		}
	}

	return &authorizationapi.PolicyList{
		Items: list,
	}, nil
}
func GetEffectivePolicyRules(ctx kapi.Context, ruleResolver rulevalidation.AuthorizationRuleResolver, clusterPolicyGetter client.ClusterPolicyLister) ([]authorizationapi.PolicyRule, []error) {
	namespace := kapi.NamespaceValue(ctx)
	if len(namespace) == 0 {
		return nil, []error{kapierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace))}
	}
	user, exists := kapi.UserFrom(ctx)
	if !exists {
		return nil, []error{kapierrors.NewBadRequest(fmt.Sprintf("user missing from context"))}
	}

	var errors []error
	var rules []authorizationapi.PolicyRule
	namespaceRules, err := ruleResolver.RulesFor(user, namespace)
	if err != nil {
		errors = append(errors, err)
	}
	for _, rule := range namespaceRules {
		rules = append(rules, rulevalidation.BreakdownRule(rule)...)
	}

	if scopes := user.GetExtra()[authorizationapi.ScopesKey]; len(scopes) > 0 {
		rules, err = filterRulesByScopes(rules, scopes, namespace, clusterPolicyGetter)
		if err != nil {
			return nil, []error{kapierrors.NewInternalError(err)}
		}
	}

	if compactedRules, err := rulevalidation.CompactRules(rules); err == nil {
		rules = compactedRules
	}
	sort.Sort(authorizationapi.SortableRuleSlice(rules))

	return rules, errors
}
func (t *Tester) testCreateEquals(obj runtime.Object, getFn GetFunc) {
	ctx := t.TestContext()

	foo := copyOrDie(obj)
	fooMeta := t.getObjectMetaOrFail(foo)
	fooMeta.Name = "foo2"
	fooMeta.Namespace = api.NamespaceValue(ctx)
	fooMeta.GenerateName = ""

	created, err := t.storage.(rest.Creater).Create(ctx, foo)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	got, err := getFn(ctx, foo)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	// Set resource version which might be unset in created object.
	createdMeta := t.getObjectMetaOrFail(created)
	gotMeta := t.getObjectMetaOrFail(got)
	createdMeta.ResourceVersion = gotMeta.ResourceVersion

	if e, a := created, got; !api.Semantic.DeepEqual(e, a) {
		t.Errorf("unexpected obj: %#v, expected %#v", e, a)
	}
}
// TestValidNamespace validates that namespace rules are enforced on a resource prior to create or update
func TestValidNamespace(t *testing.T) {
	ctx := api.NewDefaultContext()
	namespace, _ := api.NamespaceFrom(ctx)
	resource := api.ReplicationController{}
	if !api.ValidNamespace(ctx, &resource.ObjectMeta) {
		t.Errorf("expected success")
	}
	if namespace != resource.Namespace {
		t.Errorf("expected resource to have the default namespace assigned during validation")
	}
	resource = api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: "other"}}
	if api.ValidNamespace(ctx, &resource.ObjectMeta) {
		t.Errorf("Expected error that resource and context errors do not match because resource has different namespace")
	}
	ctx = api.NewContext()
	if api.ValidNamespace(ctx, &resource.ObjectMeta) {
		t.Errorf("Expected error that resource and context errors do not match since context has no namespace")
	}

	ctx = api.NewContext()
	ns := api.NamespaceValue(ctx)
	if ns != "" {
		t.Errorf("Expected the empty string")
	}
}
// Create registers a given new ResourceAccessReview instance to r.registry.
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	resourceAccessReview, ok := obj.(*authorizationapi.ResourceAccessReview)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a resourceAccessReview: %#v", obj))
	}
	if err := kutilerrors.NewAggregate(authorizationvalidation.ValidateResourceAccessReview(resourceAccessReview)); err != nil {
		return nil, err
	}

	// if a namespace is present on the request, then the namespace on the RAR is overwritten.
	// This is to support backwards compatibility. To have gotten here in this state, it means that
	// the authorizer decided that a user could run an RAR against this namespace
	if namespace := kapi.NamespaceValue(ctx); len(namespace) > 0 {
		resourceAccessReview.Action.Namespace = namespace
	}

	if err := r.isAllowed(ctx, resourceAccessReview); err != nil {
		return nil, err
	}

	requestContext := kapi.WithNamespace(ctx, resourceAccessReview.Action.Namespace)
	attributes := authorizer.ToDefaultAuthorizationAttributes(resourceAccessReview.Action)
	users, groups, err := r.authorizer.GetAllowedSubjects(requestContext, attributes)
	if err != nil {
		return nil, err
	}

	response := &authorizationapi.ResourceAccessReviewResponse{
		Namespace: resourceAccessReview.Action.Namespace,
		Users:     users,
		Groups:    groups,
	}

	return response, nil
}
// ListClusterPolicies obtains a list of ClusterPolicies that match a selector.
func (r *ClusterPolicyRegistry) ListClusterPolicies(ctx kapi.Context, options *kapi.ListOptions) (*authorizationapi.ClusterPolicyList, error) {
	if r.Err != nil {
		return nil, r.Err
	}

	namespace := kapi.NamespaceValue(ctx)
	list := make([]authorizationapi.ClusterPolicy, 0)

	if namespace == kapi.NamespaceAll {
		for _, curr := range r.clusterPolicies {
			for _, policy := range curr {
				list = append(list, policy)
			}
		}
	} else {
		if namespacedClusterPolicies, ok := r.clusterPolicies[namespace]; ok {
			for _, curr := range namespacedClusterPolicies {
				list = append(list, curr)
			}
		}
	}

	return &authorizationapi.ClusterPolicyList{
		Items: list,
	}, nil
}
// Create registers a given new ResourceAccessReview instance to r.registry.
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	resourceAccessReview, ok := obj.(*authorizationapi.ResourceAccessReview)
	if !ok {
		return nil, errors.NewBadRequest(fmt.Sprintf("not a resourceAccessReview: %#v", obj))
	}
	if err := kutilerrors.NewAggregate(authorizationvalidation.ValidateResourceAccessReview(resourceAccessReview)); err != nil {
		return nil, err
	}

	namespace := kapi.NamespaceValue(ctx)
	attributes := &authorizer.DefaultAuthorizationAttributes{
		Verb:     resourceAccessReview.Verb,
		Resource: resourceAccessReview.Resource,
	}

	users, groups, err := r.authorizer.GetAllowedSubjects(ctx, attributes)
	if err != nil {
		return nil, err
	}

	response := &authorizationapi.ResourceAccessReviewResponse{
		Namespace: namespace,
		Users:     users,
		Groups:    groups,
	}

	return response, nil
}
// Create handles a new SubjectRulesReview by computing the effective policy rules for the specified subject.
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	rulesReview, ok := obj.(*authorizationapi.SubjectRulesReview)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a SubjectRulesReview: %#v", obj))
	}
	namespace := kapi.NamespaceValue(ctx)
	if len(namespace) == 0 {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace))
	}

	userToCheck := &user.DefaultInfo{
		Name:   rulesReview.Spec.User,
		Groups: rulesReview.Spec.Groups,
		Extra:  map[string][]string{},
	}
	if len(rulesReview.Spec.Scopes) > 0 {
		userToCheck.Extra[authorizationapi.ScopesKey] = rulesReview.Spec.Scopes
	}

	rules, errors := GetEffectivePolicyRules(kapi.WithUser(ctx, userToCheck), r.ruleResolver, r.clusterPolicyGetter)

	ret := &authorizationapi.SubjectRulesReview{
		Status: authorizationapi.SubjectRulesReviewStatus{
			Rules: rules,
		},
	}
	if len(errors) != 0 {
		ret.Status.EvaluationError = kutilerrors.NewAggregate(errors).Error()
	}

	return ret, nil
}
// EnsurePolicy returns the policy object for the specified namespace. If one does not exist, it is created for you. Permission to
// create, update, or delete roles in a namespace implies the ability to create a Policy object itself.
func (m *VirtualStorage) EnsurePolicy(ctx kapi.Context) (*authorizationapi.Policy, error) {
	policy, err := m.PolicyStorage.GetPolicy(ctx, authorizationapi.PolicyName)
	if err != nil {
		if !kapierrors.IsNotFound(err) {
			return nil, err
		}

		// if we have no policy, go ahead and make one. creating one here collapses code paths below. We only take this hit once
		policy = NewEmptyPolicy(kapi.NamespaceValue(ctx))
		if err := m.PolicyStorage.CreatePolicy(ctx, policy); err != nil {
			return nil, err
		}

		policy, err = m.PolicyStorage.GetPolicy(ctx, authorizationapi.PolicyName)
		if err != nil {
			return nil, err
		}
	}

	if policy.Roles == nil {
		policy.Roles = make(map[string]*authorizationapi.Role)
	}

	return policy, nil
}
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	localSubjectAccessReview, ok := obj.(*authorizationapi.LocalSubjectAccessReview)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a LocalSubjectAccessReview: %#v", obj))
	}
	if errs := authorizationvalidation.ValidateLocalSubjectAccessReview(localSubjectAccessReview); len(errs) > 0 {
		return nil, kapierrors.NewInvalid(authorizationapi.Kind(localSubjectAccessReview.Kind), "", errs)
	}
	namespace := kapi.NamespaceValue(ctx)
	if len(namespace) == 0 {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace))
	}
	if namespace != localSubjectAccessReview.Namespace {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("spec.resourceAttributes.namespace must match namespace: %v", namespace))
	}

	authorizationAttributes := authorizationutil.AuthorizationAttributesFrom(localSubjectAccessReview.Spec)
	allowed, reason, evaluationErr := r.authorizer.Authorize(authorizationAttributes)

	localSubjectAccessReview.Status = authorizationapi.SubjectAccessReviewStatus{
		Allowed: allowed,
		Reason:  reason,
	}
	if evaluationErr != nil {
		localSubjectAccessReview.Status.EvaluationError = evaluationErr.Error()
	}

	return localSubjectAccessReview, nil
}
// Bind just does a POST binding RPC.
func (b *binder) Bind(binding *api.Binding) error {
	glog.V(2).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name)
	ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
	return b.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
	// TODO: use Pods interface for binding once clusters are upgraded
	// return b.Pods(binding.Namespace).Bind(binding)
}
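// Illustrative usage of the binder above, as a hedged sketch only: the pod and node
// names are made up, and the assumption is that a *binder value b has already been
// constructed elsewhere with a working REST client.
func bindExample(b *binder) error {
	binding := &api.Binding{
		ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault, Name: "example-pod"},
		Target:     api.ObjectReference{Kind: "Node", Name: "example-node"},
	}
	// POSTs to .../namespaces/default/bindings via Bind above.
	return b.Bind(binding)
}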
// ListPolicyBindings obtains a list of policyBindings that match a selector.
func (r *PolicyBindingRegistry) ListPolicyBindings(ctx kapi.Context, options *unversioned.ListOptions) (*authorizationapi.PolicyBindingList, error) {
	if r.Err != nil {
		return nil, r.Err
	}

	namespace := kapi.NamespaceValue(ctx)
	list := make([]authorizationapi.PolicyBinding, 0)

	if namespace == kapi.NamespaceAll {
		for _, curr := range r.PolicyBindings {
			for _, binding := range curr {
				list = append(list, binding)
			}
		}
	} else {
		if namespacedBindings, ok := r.PolicyBindings[namespace]; ok {
			for _, curr := range namespacedBindings {
				list = append(list, curr)
			}
		}
	}

	return &authorizationapi.PolicyBindingList{
		Items: list,
	}, nil
}
// reconciler action factory, performs explicit task reconciliation for non-terminal
// tasks identified by annotations in the Kubernetes pod registry.
func (k *KubernetesScheduler) makePodRegistryReconciler() ReconcilerAction {
	return ReconcilerAction(func(drv bindings.SchedulerDriver, cancel <-chan struct{}) <-chan error {
		ctx := api.NewDefaultContext()
		podList, err := k.client.Pods(api.NamespaceValue(ctx)).List(labels.Everything(), fields.Everything())
		if err != nil {
			return proc.ErrorChanf("failed to reconcile pod registry: %v", err)
		}
		taskToSlave := make(map[string]string)
		for _, pod := range podList.Items {
			if len(pod.Annotations) == 0 {
				continue
			}
			taskId, found := pod.Annotations[meta.TaskIdKey]
			if !found {
				continue
			}
			slaveId, found := pod.Annotations[meta.SlaveIdKey]
			if !found {
				continue
			}
			taskToSlave[taskId] = slaveId
		}
		return proc.ErrorChan(k.explicitlyReconcileTasks(drv, taskToSlave, cancel))
	})
}
func (ks *KubernetesScheduler) recoverTasks() error {
	ctx := api.NewDefaultContext()
	podList, err := ks.client.Pods(api.NamespaceValue(ctx)).List(labels.Everything(), fields.Everything())
	if err != nil {
		log.V(1).Infof("failed to recover pod registry, madness may ensue: %v", err)
		return err
	}
	recoverSlave := func(t *podtask.T) {
		slaveId := t.Spec.SlaveID
		ks.slaves.checkAndAdd(slaveId, t.Offer.Host())
	}
	for _, pod := range podList.Items {
		if t, ok, err := podtask.RecoverFrom(pod); err != nil {
			log.Errorf("failed to recover task from pod, will attempt to delete '%v/%v': %v", pod.Namespace, pod.Name, err)
			err := ks.client.Pods(pod.Namespace).Delete(pod.Name, nil)
			//TODO(jdef) check for temporary or not-found errors
			if err != nil {
				log.Errorf("failed to delete pod '%v/%v': %v", pod.Namespace, pod.Name, err)
			}
		} else if ok {
			ks.taskRegistry.Register(t, nil)
			recoverSlave(t)
			log.Infof("recovered task %v from pod %v/%v", t.ID, pod.Namespace, pod.Name)
		}
	}
	return nil
}
func (t *Tester) testListMatchLabels(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()
	testLabels := map[string]string{"key": "value"}

	foo3 := copyOrDie(obj)
	t.setObjectMeta(foo3, "foo3")
	foo4 := copyOrDie(obj)
	foo4Meta := t.getObjectMetaOrFail(foo4)
	foo4Meta.Name = "foo4"
	foo4Meta.Namespace = api.NamespaceValue(ctx)
	foo4Meta.Labels = testLabels

	objs := []runtime.Object{foo3, foo4}
	assignFn(objs)
	filtered := []runtime.Object{objs[1]}

	selector := labels.SelectorFromSet(labels.Set(testLabels))
	options := &api.ListOptions{LabelSelector: selector}
	listObj, err := t.storage.(rest.Lister).List(ctx, options)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(filtered) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(filtered, items) {
		t.Errorf("expected: %#v, got: %#v", filtered, items)
	}
}
func (a *DefaultRuleResolver) GetRoleBindings(ctx kapi.Context) ([]authorizationinterfaces.RoleBinding, error) {
	namespace := kapi.NamespaceValue(ctx)

	if len(namespace) == 0 {
		policyBindingList, err := a.clusterBindingLister.List(kapi.ListOptions{})
		if err != nil {
			return nil, err
		}

		ret := make([]authorizationinterfaces.RoleBinding, 0, len(policyBindingList.Items))
		for _, policyBinding := range policyBindingList.Items {
			for _, value := range policyBinding.RoleBindings {
				ret = append(ret, authorizationinterfaces.NewClusterRoleBindingAdapter(value))
			}
		}
		return ret, nil
	}

	if a.bindingLister == nil {
		return nil, nil
	}

	policyBindingList, err := a.bindingLister.PolicyBindings(namespace).List(kapi.ListOptions{})
	if err != nil {
		return nil, err
	}

	ret := make([]authorizationinterfaces.RoleBinding, 0, len(policyBindingList.Items))
	for _, policyBinding := range policyBindingList.Items {
		for _, value := range policyBinding.RoleBindings {
			ret = append(ret, authorizationinterfaces.NewLocalRoleBindingAdapter(value))
		}
	}
	return ret, nil
}
// this pod may be out of sync with respect to the API server registry:
//      this pod   |  apiserver registry
//    -------------|----------------------
//      host=.*    |  404          ; pod was deleted
//      host=.*    |  5xx          ; failed to sync, try again later?
//      host=""    |  host=""      ; perhaps no updates to process?
//      host=""    |  host="..."   ; pod has been scheduled and assigned, is there a task assigned? (check TaskIdKey in binding?)
//      host="..." |  host=""      ; pod is no longer scheduled, does it need to be re-queued?
//      host="..." |  host="..."   ; perhaps no updates to process?
//
// TODO(jdef) this needs an integration test
func (s *schedulingPlugin) reconcileTask(t *podtask.T) {
	log.V(1).Infof("reconcile pod %v, assigned to slave %q", t.Pod.Name, t.Spec.AssignedSlave)
	ctx := api.WithNamespace(api.NewDefaultContext(), t.Pod.Namespace)
	pod, err := s.client.Pods(api.NamespaceValue(ctx)).Get(t.Pod.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			// attempt to delete
			if err = s.deleter.deleteOne(&Pod{Pod: &t.Pod}); err != nil && err != noSuchPodErr && err != noSuchTaskErr {
				log.Errorf("failed to delete pod: %v: %v", t.Pod.Name, err)
			}
		} else {
			//TODO(jdef) other errors should probably trigger a retry (w/ backoff).
			//For now, drop the pod on the floor
			log.Warningf("aborting reconciliation for pod %v: %v", t.Pod.Name, err)
		}
		return
	}

	log.Infof("pod %v scheduled on %q according to apiserver", pod.Name, pod.Spec.NodeName)
	if t.Spec.AssignedSlave != pod.Spec.NodeName {
		if pod.Spec.NodeName == "" {
			// pod is unscheduled.
			// it's possible that we dropped the pod in the scheduler error handler
			// because of task misalignment with the pod (task.Has(podtask.Launched) == true)

			podKey, err := podtask.MakePodKey(ctx, pod.Name)
			if err != nil {
				log.Error(err)
				return
			}

			s.api.Lock()
			defer s.api.Unlock()

			if _, state := s.api.tasks().ForPod(podKey); state != podtask.StateUnknown {
				//TODO(jdef) reconcile the task
				log.Errorf("task already registered for pod %v", pod.Name)
				return
			}

			now := time.Now()
			log.V(3).Infof("reoffering pod %v", podKey)
			s.qr.reoffer(&Pod{
				Pod:      pod,
				deadline: &now,
			})
		} else {
			// pod is scheduled.
			// not sure how this happened behind our backs. attempt to reconstruct
			// at least a partial podtask.T record.
			//TODO(jdef) reconcile the task
			log.Errorf("pod already scheduled: %v", pod.Name)
		}
	} else {
		//TODO(jdef) for now, ignore the fact that the rest of the spec may be different
		//and assume that our knowledge of the pod aligns with that of the apiserver
		log.Error("pod reconciliation does not support updates; not yet implemented")
	}
}
func (t *Tester) setObjectMeta(obj runtime.Object, name string) {
	meta := t.getObjectMetaOrFail(obj)
	meta.Name = name
	if t.clusterScope {
		meta.Namespace = api.NamespaceNone
	} else {
		meta.Namespace = api.NamespaceValue(t.TestContext())
	}
	meta.GenerateName = ""
}
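// A short sketch of how the helper above is typically used by the Tester methods in
// this file: set deterministic metadata on a copy of the fixture, then exercise the
// storage under test. The function name and the "foo1" name are illustrative only.
func (t *Tester) exampleCreateWithMeta(obj runtime.Object) {
	ctx := t.TestContext()
	foo := copyOrDie(obj)
	t.setObjectMeta(foo, "foo1")
	if _, err := t.storage.(rest.Creater).Create(ctx, foo); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}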
// Create registers a given new SubjectAccessReview instance to r.registry.
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	subjectAccessReview, ok := obj.(*authorizationapi.SubjectAccessReview)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a subjectAccessReview: %#v", obj))
	}
	if err := kutilerrors.NewAggregate(authorizationvalidation.ValidateSubjectAccessReview(subjectAccessReview)); err != nil {
		return nil, err
	}

	// if a namespace is present on the request, then the namespace on the SAR is overwritten.
	// This is to support backwards compatibility. To have gotten here in this state, it means that
	// the authorizer decided that a user could run an SAR against this namespace
	if namespace := kapi.NamespaceValue(ctx); len(namespace) > 0 {
		subjectAccessReview.Action.Namespace = namespace
	} else if err := r.isAllowed(ctx, subjectAccessReview); err != nil {
		// this check is mutually exclusive to the condition above. localSAR and localRAR both clear the namespace before delegating their calls
		// We only need to check if the SAR is allowed **again** if the authorizer didn't already approve the request for a legacy call.
		return nil, err
	}

	var userToCheck user.Info
	if (len(subjectAccessReview.User) == 0) && (len(subjectAccessReview.Groups) == 0) {
		// if no user or group was specified, use the info from the context
		ctxUser, exists := kapi.UserFrom(ctx)
		if !exists {
			return nil, kapierrors.NewBadRequest("user missing from context")
		}
		userToCheck = ctxUser
	} else {
		userToCheck = &user.DefaultInfo{
			Name:   subjectAccessReview.User,
			Groups: subjectAccessReview.Groups.List(),
		}
	}

	requestContext := kapi.WithNamespace(kapi.WithUser(ctx, userToCheck), subjectAccessReview.Action.Namespace)
	attributes := authorizer.ToDefaultAuthorizationAttributes(subjectAccessReview.Action)
	allowed, reason, err := r.authorizer.Authorize(requestContext, attributes)
	if err != nil {
		return nil, err
	}

	response := &authorizationapi.SubjectAccessReviewResponse{
		Namespace: subjectAccessReview.Action.Namespace,
		Allowed:   allowed,
		Reason:    reason,
	}

	return response, nil
}
func (a *testAuthorizer) Authorize(ctx kapi.Context, attributes authorizer.AuthorizationAttributes) (allowed bool, reason string, err error) {
	// allow the initial check for "can I run this RAR at all"
	if attributes.GetResource() == "localresourceaccessreviews" {
		if len(a.deniedNamespaces) != 0 && a.deniedNamespaces.Has(kapi.NamespaceValue(ctx)) {
			return false, "denied initial check", nil
		}
		return true, "", nil
	}

	return false, "", errors.New("unsupported")
}
// Create transforms a LocalRAR into a ClusterRAR that is requesting a namespace. That collapses the code paths.
// LocalResourceAccessReview exists to allow clean expression of policy.
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	localRAR, ok := obj.(*authorizationapi.LocalResourceAccessReview)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a localResourceAccessReview: %#v", obj))
	}
	if err := kutilerrors.NewAggregate(authorizationvalidation.ValidateLocalResourceAccessReview(localRAR)); err != nil {
		return nil, err
	}
	if namespace := kapi.NamespaceValue(ctx); len(namespace) == 0 {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("namespace is required on this type: %v", namespace))
	} else if (len(localRAR.Action.Namespace) > 0) && (namespace != localRAR.Action.Namespace) {
		return nil, fielderrors.NewFieldInvalid("namespace", localRAR.Action.Namespace, fmt.Sprintf("namespace must be: %v", namespace))
	}

	// transform this into a ResourceAccessReview
	clusterRAR := &authorizationapi.ResourceAccessReview{
		Action: localRAR.Action,
	}
	clusterRAR.Action.Namespace = kapi.NamespaceValue(ctx)

	return r.clusterRARRegistry.CreateResourceAccessReview(kapi.WithNamespace(ctx, ""), clusterRAR)
}
func (t *Tester) testGetNotFound(obj runtime.Object) {
	ctx := t.TestContext()
	objMeta := t.getObjectMetaOrFail(obj)
	objMeta.Name = "foo2"
	objMeta.Namespace = api.NamespaceValue(ctx)
	_, err := t.storage.(rest.Creater).Create(ctx, obj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	_, err = t.storage.(rest.Getter).Get(ctx, "foo3")
	if !errors.IsNotFound(err) {
		t.Errorf("unexpected error returned: %#v", err)
	}
}