// createSchedulerServiceIfNeeded will create the specified service if it
// doesn't already exist.
func (m *SchedulerServer) createSchedulerServiceIfNeeded(serviceName string, servicePort int) error {
	ctx := api.NewDefaultContext()
	// Fast path: the service is already present, nothing to do.
	if _, err := m.client.Services(api.NamespaceValue(ctx)).Get(serviceName); err == nil {
		// The service already exists.
		return nil
	}
	svc := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: serviceName,
			// NOTE(review): the object namespace is hard-coded to the default
			// namespace while the Get/Create calls above/below use the context's
			// namespace — confirm these always agree.
			Namespace: api.NamespaceDefault,
			Labels:    map[string]string{"provider": "k8sm", "component": "scheduler"},
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}},
			// maintained by this code, not by the pod selector
			Selector:        nil,
			SessionAffinity: api.ServiceAffinityNone,
		},
	}
	// Pin the cluster IP when one was configured for the scheduler.
	if m.ServiceAddress != nil {
		svc.Spec.ClusterIP = m.ServiceAddress.String()
	}
	_, err := m.client.Services(api.NamespaceValue(ctx)).Create(svc)
	// Losing a creation race to another master is not an error.
	if err != nil && errors.IsAlreadyExists(err) {
		err = nil
	}
	return err
}
// setEndpoints sets the endpoints for the given service.
// in a multi-master scenario only the master will be publishing an endpoint.
// see SchedulerServer.bootstrap.
func (m *SchedulerServer) setEndpoints(serviceName string, ip net.IP, port int) error {
	// The setting we want to find.
	want := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{{IP: ip.String()}},
		Ports:     []api.EndpointPort{{Port: port, Protocol: api.ProtocolTCP}},
	}}
	ctx := api.NewDefaultContext()
	e, err := m.client.Endpoints(api.NamespaceValue(ctx)).Get(serviceName)
	createOrUpdate := m.client.Endpoints(api.NamespaceValue(ctx)).Update
	if err != nil {
		if errors.IsNotFound(err) {
			createOrUpdate = m.client.Endpoints(api.NamespaceValue(ctx)).Create
		}
		// NOTE(review): for a non-NotFound Get error we still build a fresh
		// Endpoints object and attempt an Update on it — confirm this fallback
		// is intentional rather than returning the Get error.
		e = &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
				Name:      serviceName,
				Namespace: api.NamespaceDefault,
			},
		}
	}
	// Only write when the stored subsets differ from what we want.
	if !reflect.DeepEqual(e.Subsets, want) {
		e.Subsets = want
		glog.Infof("setting endpoints for master service %q to %#v", serviceName, e)
		_, err = createOrUpdate(e)
		return err
	}
	// We didn't make any changes, no need to actually call update.
	return nil
}
// reconciler action factory, performs explicit task reconciliation for non-terminal // tasks identified by annotations in the Kubernetes pod registry. func (k *KubernetesScheduler) makePodRegistryReconciler() ReconcilerAction { return ReconcilerAction(func(drv bindings.SchedulerDriver, cancel <-chan struct{}) <-chan error { ctx := api.NewDefaultContext() podList, err := k.client.Pods(api.NamespaceValue(ctx)).List(labels.Everything(), fields.Everything()) if err != nil { return proc.ErrorChanf("failed to reconcile pod registry: %v", err) } taskToSlave := make(map[string]string) for _, pod := range podList.Items { if len(pod.Annotations) == 0 { continue } taskId, found := pod.Annotations[meta.TaskIdKey] if !found { continue } slaveId, found := pod.Annotations[meta.SlaveIdKey] if !found { continue } taskToSlave[taskId] = slaveId } return proc.ErrorChan(k.explicitlyReconcileTasks(drv, taskToSlave, cancel)) }) }
// ensurePolicyBindingToMaster returns a PolicyBinding object that has a PolicyRef pointing to the Policy in the passed namespace.
func (m *VirtualStorage) ensurePolicyBindingToMaster(ctx kapi.Context, policyNamespace, policyBindingName string) (*authorizationapi.PolicyBinding, error) {
	policyBinding, err := m.BindingRegistry.GetPolicyBinding(ctx, policyBindingName)
	if err != nil {
		// Only a NotFound is recoverable; any other error is surfaced as-is.
		if !kapierrors.IsNotFound(err) {
			return nil, err
		}
		// if we have no policyBinding, go ahead and make one. creating one here collapses code paths below. We only take this hit once
		policyBinding = policybindingregistry.NewEmptyPolicyBinding(kapi.NamespaceValue(ctx), policyNamespace, policyBindingName)
		if err := m.BindingRegistry.CreatePolicyBinding(ctx, policyBinding); err != nil {
			return nil, err
		}
		// Re-read so we hold the stored object rather than our local stub.
		policyBinding, err = m.BindingRegistry.GetPolicyBinding(ctx, policyBindingName)
		if err != nil {
			return nil, err
		}
	}
	// Normalize: callers expect a non-nil RoleBindings map.
	if policyBinding.RoleBindings == nil {
		policyBinding.RoleBindings = make(map[string]*authorizationapi.RoleBinding)
	}
	return policyBinding, nil
}
// ListClusterPolicies obtains a list of ClusterPolicy that match a selector.
// NOTE(review): the label and field selectors are currently ignored by this
// implementation — confirm whether filtering is expected here.
func (r *ClusterPolicyRegistry) ListClusterPolicies(ctx kapi.Context, label labels.Selector, field fields.Selector) (*authorizationapi.ClusterPolicyList, error) {
	if r.Err != nil {
		return nil, r.Err
	}
	namespace := kapi.NamespaceValue(ctx)
	list := make([]authorizationapi.ClusterPolicy, 0)
	if namespace == kapi.NamespaceAll {
		// No namespace on the context: aggregate policies from every bucket.
		for _, curr := range r.ClusterPolicies {
			for _, policy := range curr {
				list = append(list, policy)
			}
		}
	} else {
		if namespacedClusterPolicies, ok := r.ClusterPolicies[namespace]; ok {
			for _, curr := range namespacedClusterPolicies {
				list = append(list, curr)
			}
		}
	}
	return &authorizationapi.ClusterPolicyList{
		Items: list,
	}, nil
}
// ListPolicyBindings obtains a list of policyBinding that match a selector. func (r *PolicyBindingRegistry) ListPolicyBindings(ctx kapi.Context, label labels.Selector, field fields.Selector) (*authorizationapi.PolicyBindingList, error) { if r.Err != nil { return nil, r.Err } namespace := kapi.NamespaceValue(ctx) list := make([]authorizationapi.PolicyBinding, 0) if namespace == kapi.NamespaceAll { for _, curr := range r.PolicyBindings { for _, binding := range curr { list = append(list, binding) } } } else { if namespacedBindings, ok := r.PolicyBindings[namespace]; ok { for _, curr := range namespacedBindings { list = append(list, curr) } } } return &authorizationapi.PolicyBindingList{ Items: list, }, nil }
// EnsurePolicy returns the policy object for the specified namespace. If one does not exist, it is created for you. Permission to
// create, update, or delete roles in a namespace implies the ability to create a Policy object itself.
func (m *VirtualStorage) EnsurePolicy(ctx kapi.Context) (*authorizationapi.Policy, error) {
	policy, err := m.PolicyStorage.GetPolicy(ctx, authorizationapi.PolicyName)
	if err != nil {
		// Only a NotFound is recoverable; any other error is surfaced as-is.
		if !kapierrors.IsNotFound(err) {
			return nil, err
		}
		// if we have no policy, go ahead and make one. creating one here collapses code paths below. We only take this hit once
		policy = NewEmptyPolicy(kapi.NamespaceValue(ctx))
		if err := m.PolicyStorage.CreatePolicy(ctx, policy); err != nil {
			return nil, err
		}
		// Re-read so we return the stored object rather than the local stub.
		policy, err = m.PolicyStorage.GetPolicy(ctx, authorizationapi.PolicyName)
		if err != nil {
			return nil, err
		}
	}
	// Normalize: callers expect a non-nil Roles map.
	if policy.Roles == nil {
		policy.Roles = make(map[string]*authorizationapi.Role)
	}
	return policy, nil
}
// Bind just does a POST binding RPC. func (b *binder) Bind(binding *api.Binding) error { glog.V(2).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name) ctx := api.WithNamespace(api.NewContext(), binding.Namespace) return b.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error() // TODO: use Pods interface for binding once clusters are upgraded // return b.Pods(binding.Namespace).Bind(binding) }
// TestValidNamespace validates that namespace rules are enforced on a resource prior to create or update func TestValidNamespace(t *testing.T) { ctx := api.NewDefaultContext() namespace, _ := api.NamespaceFrom(ctx) resource := api.ReplicationController{} if !api.ValidNamespace(ctx, &resource.ObjectMeta) { t.Errorf("expected success") } if namespace != resource.Namespace { t.Errorf("expected resource to have the default namespace assigned during validation") } resource = api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: "other"}} if api.ValidNamespace(ctx, &resource.ObjectMeta) { t.Errorf("Expected error that resource and context errors do not match because resource has different namespace") } ctx = api.NewContext() if api.ValidNamespace(ctx, &resource.ObjectMeta) { t.Errorf("Expected error that resource and context errors do not match since context has no namespace") } ctx = api.NewContext() ns := api.NamespaceValue(ctx) if ns != "" { t.Errorf("Expected the empty string") } }
func (a *DefaultRuleResolver) GetRoleBindings(ctx kapi.Context) ([]authorizationinterfaces.RoleBinding, error) { namespace := kapi.NamespaceValue(ctx) if len(namespace) == 0 { policyBindingList, err := a.clusterBindingLister.ListClusterPolicyBindings(ctx, labels.Everything(), fields.Everything()) if err != nil { return nil, err } ret := make([]authorizationinterfaces.RoleBinding, 0, len(policyBindingList.Items)) for _, policyBinding := range policyBindingList.Items { for _, value := range policyBinding.RoleBindings { ret = append(ret, authorizationinterfaces.NewClusterRoleBindingAdapter(value)) } } return ret, nil } policyBindingList, err := a.bindingLister.ListPolicyBindings(ctx, labels.Everything(), fields.Everything()) if err != nil { return nil, err } ret := make([]authorizationinterfaces.RoleBinding, 0, len(policyBindingList.Items)) for _, policyBinding := range policyBindingList.Items { for _, value := range policyBinding.RoleBindings { ret = append(ret, authorizationinterfaces.NewLocalRoleBindingAdapter(value)) } } return ret, nil }
// recoverTasks rebuilds scheduler task state from the pods currently stored in
// the apiserver: pods carrying a recoverable task record are re-registered, and
// pods whose records cannot be recovered are deleted.
func (ks *KubernetesScheduler) recoverTasks() error {
	ctx := api.NewDefaultContext()
	podList, err := ks.client.Pods(api.NamespaceValue(ctx)).List(labels.Everything(), fields.Everything())
	if err != nil {
		log.V(1).Infof("failed to recover pod registry, madness may ensue: %v", err)
		return err
	}
	// Re-associate a recovered task's slave ID with its offer's host.
	recoverSlave := func(t *podtask.T) {
		slaveId := t.Spec.SlaveID
		ks.slaves.checkAndAdd(slaveId, t.Offer.Host())
	}
	for _, pod := range podList.Items {
		if t, ok, err := podtask.RecoverFrom(pod); err != nil {
			// Unrecoverable record: remove the pod rather than leave it orphaned.
			log.Errorf("failed to recover task from pod, will attempt to delete '%v/%v': %v", pod.Namespace, pod.Name, err)
			err := ks.client.Pods(pod.Namespace).Delete(pod.Name, nil)
			//TODO(jdef) check for temporary or not-found errors
			if err != nil {
				log.Errorf("failed to delete pod '%v/%v': %v", pod.Namespace, pod.Name, err)
			}
		} else if ok {
			ks.taskRegistry.Register(t, nil)
			recoverSlave(t)
			log.Infof("recovered task %v from pod %v/%v", t.ID, pod.Namespace, pod.Name)
		}
		// Pods with ok==false and no error carry no task record and are skipped.
	}
	return nil
}
// Create registers a given new ResourceAccessReview instance to r.registry. func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) { resourceAccessReview, ok := obj.(*authorizationapi.ResourceAccessReview) if !ok { return nil, errors.NewBadRequest(fmt.Sprintf("not a resourceAccessReview: %#v", obj)) } if err := kutilerrors.NewAggregate(authorizationvalidation.ValidateResourceAccessReview(resourceAccessReview)); err != nil { return nil, err } namespace := kapi.NamespaceValue(ctx) attributes := &authorizer.DefaultAuthorizationAttributes{ Verb: resourceAccessReview.Verb, Resource: resourceAccessReview.Resource, } users, groups, err := r.authorizer.GetAllowedSubjects(ctx, attributes) if err != nil { return nil, err } response := &authorizationapi.ResourceAccessReviewResponse{ Namespace: namespace, Users: users, Groups: groups, } return response, nil }
// this pod may be out of sync with respect to the API server registry: // this pod | apiserver registry // -------------|---------------------- // host=.* | 404 ; pod was deleted // host=.* | 5xx ; failed to sync, try again later? // host="" | host="" ; perhaps no updates to process? // host="" | host="..." ; pod has been scheduled and assigned, is there a task assigned? (check TaskIdKey in binding?) // host="..." | host="" ; pod is no longer scheduled, does it need to be re-queued? // host="..." | host="..." ; perhaps no updates to process? // // TODO(jdef) this needs an integration test func (s *schedulingPlugin) reconcilePod(oldPod api.Pod) { log.V(1).Infof("reconcile pod %v", oldPod.Name) ctx := api.WithNamespace(api.NewDefaultContext(), oldPod.Namespace) pod, err := s.client.Pods(api.NamespaceValue(ctx)).Get(oldPod.Name) if err != nil { if errors.IsNotFound(err) { // attempt to delete if err = s.deleter.deleteOne(&Pod{Pod: &oldPod}); err != nil && err != noSuchPodErr && err != noSuchTaskErr { log.Errorf("failed to delete pod: %v: %v", oldPod.Name, err) } } else { //TODO(jdef) other errors should probably trigger a retry (w/ backoff). //For now, drop the pod on the floor log.Warning("aborting reconciliation for pod %v: %v", oldPod.Name, err) } return } if oldPod.Spec.NodeName != pod.Spec.NodeName { if pod.Spec.NodeName == "" { // pod is unscheduled. // it's possible that we dropped the pod in the scheduler error handler // because of task misalignment with the pod (task.Has(podtask.Launched) == true) podKey, err := podtask.MakePodKey(ctx, pod.Name) if err != nil { log.Error(err) return } s.api.Lock() defer s.api.Unlock() if _, state := s.api.tasks().ForPod(podKey); state != podtask.StateUnknown { //TODO(jdef) reconcile the task log.Errorf("task already registered for pod %v", pod.Name) return } now := time.Now() log.V(3).Infof("reoffering pod %v", podKey) s.qr.reoffer(&Pod{ Pod: pod, deadline: &now, }) } else { // pod is scheduled. 
// not sure how this happened behind our backs. attempt to reconstruct // at least a partial podtask.T record. //TODO(jdef) reconcile the task log.Errorf("pod already scheduled: %v", pod.Name) } } else { //TODO(jdef) for now, ignore the fact that the rest of the spec may be different //and assume that our knowledge of the pod aligns with that of the apiserver log.Error("pod reconciliation does not support updates; not yet implemented") } }
// authorizeWithNamespaceRules returns isAllowed, reason, and error. If an error is returned, isAllowed and reason are still valid. This seems strange // but errors are not always fatal to the authorization process. It is entirely possible to get an error and be able to continue determine authorization // status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to authorize the request. func (a *openshiftAuthorizer) authorizeWithNamespaceRules(ctx kapi.Context, passedAttributes AuthorizationAttributes) (bool, string, error) { attributes := coerceToDefaultAuthorizationAttributes(passedAttributes) allRules, ruleRetrievalError := a.ruleResolver.GetEffectivePolicyRules(ctx) for _, rule := range allRules { matches, err := attributes.RuleMatches(rule) if err != nil { return false, "", err } if matches { if len(kapi.NamespaceValue(ctx)) == 0 { return true, fmt.Sprintf("allowed by cluster rule: %#v", rule), nil } return true, fmt.Sprintf("allowed by rule in %v: %#v", kapi.NamespaceValue(ctx), rule), nil } } return false, "", ruleRetrievalError }
// Implement ResourceWatcher. func (storage *SimpleRESTStorage) Watch(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { storage.checkContext(ctx) storage.requestedLabelSelector = label storage.requestedFieldSelector = field storage.requestedResourceVersion = resourceVersion storage.requestedResourceNamespace = api.NamespaceValue(ctx) if err := storage.errors["watch"]; err != nil { return nil, err } storage.fakeWatch = watch.NewFake() return storage.fakeWatch, nil }
// Implement Redirector. func (storage *SimpleRESTStorage) ResourceLocation(ctx api.Context, id string) (string, error) { storage.checkContext(ctx) // validate that the namespace context on the request matches the expected input storage.requestedResourceNamespace = api.NamespaceValue(ctx) if storage.expectedResourceNamespace != storage.requestedResourceNamespace { return "", fmt.Errorf("Expected request namespace %s, but got namespace %s", storage.expectedResourceNamespace, storage.requestedResourceNamespace) } storage.requestedResourceLocationID = id if err := storage.errors["resourceLocation"]; err != nil { return "", err } return storage.resourceLocation, nil }
// Get retrieves an image by ID that has previously been tagged into an image stream.
// `id` is of the form <repo name>@<image id>.
func (r *REST) Get(ctx kapi.Context, id string) (runtime.Object, error) {
	name, imageID, err := ParseNameAndID(id)
	if err != nil {
		return nil, err
	}
	repo, err := r.imageStreamRegistry.GetImageStream(ctx, name)
	if err != nil {
		return nil, err
	}
	// Without status tags there is nothing the image could have been tagged into.
	if repo.Status.Tags == nil {
		return nil, errors.NewNotFound("imageStreamImage", imageID)
	}
	// imageID may be a prefix; resolve it against the images known to the stream.
	set := api.ResolveImageID(repo, imageID)
	switch len(set) {
	case 1:
		imageName := set.List()[0]
		image, err := r.imageRegistry.GetImage(ctx, imageName)
		if err != nil {
			return nil, err
		}
		imageWithMetadata, err := api.ImageWithMetadata(*image)
		if err != nil {
			return nil, err
		}
		// Shorten the name for the returned object: take the hex portion of the
		// digest when it parses, then truncate to 7 characters.
		if d, err := digest.ParseDigest(imageName); err == nil {
			imageName = d.Hex()
		}
		if len(imageName) > 7 {
			imageName = imageName[:7]
		}
		isi := api.ImageStreamImage{
			ObjectMeta: kapi.ObjectMeta{
				Namespace: kapi.NamespaceValue(ctx),
				Name:      fmt.Sprintf("%s@%s", name, imageName),
			},
			Image: *imageWithMetadata,
		}
		return &isi, nil
	case 0:
		// The prefix matched no image in the stream.
		return nil, errors.NewNotFound("imageStreamImage", imageID)
	default:
		// Ambiguous prefix: refuse rather than guess.
		return nil, errors.NewConflict("imageStreamImage", imageID, fmt.Errorf("multiple images match the prefix %q: %s", imageID, strings.Join(set.List(), ", ")))
	}
}
// Implement Redirector. func (storage *SimpleRESTStorage) ResourceLocation(ctx api.Context, id string) (*url.URL, http.RoundTripper, error) { storage.checkContext(ctx) // validate that the namespace context on the request matches the expected input storage.requestedResourceNamespace = api.NamespaceValue(ctx) if storage.expectedResourceNamespace != storage.requestedResourceNamespace { return nil, nil, fmt.Errorf("Expected request namespace %s, but got namespace %s", storage.expectedResourceNamespace, storage.requestedResourceNamespace) } storage.requestedResourceLocationID = id if err := storage.errors["resourceLocation"]; err != nil { return nil, nil, err } // Make a copy so the internal URL never gets mutated locationCopy := *storage.resourceLocation return &locationCopy, nil, nil }
// Get retrieves an image that has been tagged by stream and tag. `id` is of the format
// <stream name>:<tag>.
func (r *REST) Get(ctx kapi.Context, id string) (runtime.Object, error) {
	name, tag, err := nameAndTag(id)
	if err != nil {
		return nil, err
	}
	stream, err := r.imageStreamRegistry.GetImageStream(ctx, name)
	if err != nil {
		return nil, err
	}
	// The most recent tag event tells us which image the tag currently points at.
	event := api.LatestTaggedImage(stream, tag)
	if event == nil || len(event.Image) == 0 {
		return nil, errors.NewNotFound("imageStreamTag", id)
	}
	image, err := r.imageRegistry.GetImage(ctx, event.Image)
	if err != nil {
		return nil, err
	}
	// if the stream has Spec.Tags[tag].Annotations[k] = v, copy it to the image's annotations
	if stream.Spec.Tags != nil {
		if tagRef, ok := stream.Spec.Tags[tag]; ok {
			if image.Annotations == nil {
				image.Annotations = make(map[string]string)
			}
			for k, v := range tagRef.Annotations {
				image.Annotations[k] = v
			}
		}
	}
	imageWithMetadata, err := api.ImageWithMetadata(*image)
	if err != nil {
		return nil, err
	}
	ist := api.ImageStreamTag{
		ObjectMeta: kapi.ObjectMeta{
			Namespace: kapi.NamespaceValue(ctx),
			Name:      id,
		},
		Image: *imageWithMetadata,
	}
	return &ist, nil
}
// Create evaluates a new SubjectAccessReview: it authorizes the named user and
// groups (or, when none are given, the user from the request context) against
// the review's verb and resource. (The previous comment said
// ResourceAccessReview; this REST operates on SubjectAccessReview.)
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	subjectAccessReview, ok := obj.(*authorizationapi.SubjectAccessReview)
	if !ok {
		return nil, kerrors.NewBadRequest(fmt.Sprintf("not a subjectAccessReview: %#v", obj))
	}
	if err := kutilerrors.NewAggregate(authorizationvalidation.ValidateSubjectAccessReview(subjectAccessReview)); err != nil {
		return nil, err
	}
	var userToCheck user.Info
	if (len(subjectAccessReview.User) == 0) && (len(subjectAccessReview.Groups) == 0) {
		// if no user or group was specified, use the info from the context
		ctxUser, exists := kapi.UserFrom(ctx)
		if !exists {
			return nil, kerrors.NewBadRequest("user missing from context")
		}
		userToCheck = ctxUser
	} else {
		userToCheck = &user.DefaultInfo{
			Name:   subjectAccessReview.User,
			Groups: subjectAccessReview.Groups.List(),
		}
	}
	namespace := kapi.NamespaceValue(ctx)
	// Authorize as the subject under review, not as the caller.
	requestContext := kapi.WithUser(ctx, userToCheck)
	attributes := &authorizer.DefaultAuthorizationAttributes{
		Verb:     subjectAccessReview.Verb,
		Resource: subjectAccessReview.Resource,
	}
	allowed, reason, err := r.authorizer.Authorize(requestContext, attributes)
	if err != nil {
		return nil, err
	}
	response := &authorizationapi.SubjectAccessReviewResponse{
		Namespace: namespace,
		Allowed:   allowed,
		Reason:    reason,
	}
	return response, nil
}
// CreateClusterPolicyBinding creates a new policyBinding. func (r *ClusterPolicyBindingRegistry) CreateClusterPolicyBinding(ctx kapi.Context, policyBinding *authorizationapi.ClusterPolicyBinding) error { if r.Err != nil { return r.Err } namespace := kapi.NamespaceValue(ctx) if len(namespace) != 0 { return errors.New("invalid request. Namespace parameter disallowed.") } if existing, _ := r.GetClusterPolicyBinding(ctx, policyBinding.Name); existing != nil { return kapierrors.NewAlreadyExists("ClusterPolicyBinding", policyBinding.Name) } addClusterPolicyBinding(r.ClusterPolicyBindings, *policyBinding) return nil }
// UpdateClusterPolicy updates a policy. func (r *ClusterPolicyRegistry) UpdateClusterPolicy(ctx kapi.Context, policy *authorizationapi.ClusterPolicy) error { if r.Err != nil { return r.Err } namespace := kapi.NamespaceValue(ctx) if len(namespace) != 0 { return errors.New("invalid request. Namespace parameter disallowed.") } if existing, _ := r.GetClusterPolicy(ctx, policy.Name); existing == nil { return kapierrors.NewNotFound("ClusterPolicy", policy.Name) } addClusterPolicy(r.ClusterPolicies, *policy) return nil }
// CreatePolicyBinding creates a new policyBinding.
func (r *PolicyBindingRegistry) CreatePolicyBinding(ctx kapi.Context, policyBinding *authorizationapi.PolicyBinding) error {
	if r.Err != nil {
		return r.Err
	}
	// Namespace-scoped: a namespace-less context is a caller error.
	namespace := kapi.NamespaceValue(ctx)
	if len(namespace) == 0 {
		return errors.New("invalid request. Namespace parameter required.")
	}
	if existing, _ := r.GetPolicyBinding(ctx, policyBinding.Name); existing != nil {
		// NOTE(review): sibling methods use kapierrors (e.g. NewNotFound in
		// UpdatePolicyBinding), so callers checking kapierrors.IsAlreadyExists
		// will not recognize this plain error — confirm whether this should be
		// kapierrors.NewAlreadyExists.
		return fmt.Errorf("PolicyBinding %v::%v already exists", namespace, policyBinding.Name)
	}
	addPolicyBinding(r.PolicyBindings, *policyBinding)
	return nil
}
// DeleteClusterPolicy deletes a policy. func (r *ClusterPolicyRegistry) DeleteClusterPolicy(ctx kapi.Context, id string) error { if r.Err != nil { return r.Err } namespace := kapi.NamespaceValue(ctx) if len(namespace) != 0 { return errors.New("invalid request. Namespace parameter disallowed.") } namespacedClusterPolicies, ok := r.ClusterPolicies[namespace] if ok { delete(namespacedClusterPolicies, id) } return nil }
// DeletePolicyBinding deletes a policyBinding. func (r *PolicyBindingRegistry) DeletePolicyBinding(ctx kapi.Context, id string) error { if r.Err != nil { return r.Err } namespace := kapi.NamespaceValue(ctx) if len(namespace) == 0 { return errors.New("invalid request. Namespace parameter required.") } namespacedBindings, ok := r.PolicyBindings[namespace] if ok { delete(namespacedBindings, id) } return nil }
// UpdatePolicyBinding updates a policyBinding. func (r *PolicyBindingRegistry) UpdatePolicyBinding(ctx kapi.Context, policyBinding *authorizationapi.PolicyBinding) error { if r.Err != nil { return r.Err } namespace := kapi.NamespaceValue(ctx) if len(namespace) == 0 { return errors.New("invalid request. Namespace parameter required.") } if existing, _ := r.GetPolicyBinding(ctx, policyBinding.Name); existing == nil { return kapierrors.NewNotFound("PolicyBinding", policyBinding.Name) } addPolicyBinding(r.PolicyBindings, *policyBinding) return nil }
// UpdatePolicy updates a policy.
func (r *PolicyRegistry) UpdatePolicy(ctx kapi.Context, policy *authorizationapi.Policy) error {
	if r.Err != nil {
		return r.Err
	}
	// Namespace-scoped: a namespace-less context is a caller error.
	namespace := kapi.NamespaceValue(ctx)
	if len(namespace) == 0 {
		return errors.New("invalid request. Namespace parameter required.")
	}
	if existing, _ := r.GetPolicy(ctx, policy.Name); existing == nil {
		// NOTE(review): this plain error is not recognized by
		// kapierrors.IsNotFound — confirm whether callers rely on that check
		// and whether this should be kapierrors.NewNotFound instead.
		return fmt.Errorf("Policy %v::%v not found", namespace, policy.Name)
	}
	addPolicy(r.Policies, *policy)
	return nil
}
func TestRESTCreate(t *testing.T) { table := []struct { ctx api.Context secret *api.Secret valid bool }{ { ctx: api.NewDefaultContext(), secret: testSecret("foo"), valid: true, }, { ctx: api.NewContext(), secret: testSecret("bar"), valid: false, }, { ctx: api.WithNamespace(api.NewContext(), "nondefault"), secret: testSecret("bazzzz"), valid: false, }, } for _, item := range table { _, storage := NewTestREST() c, err := storage.Create(item.ctx, item.secret) if !item.valid { if err == nil { ctxNS := api.NamespaceValue(item.ctx) t.Errorf("%v: Unexpected non-error: (%v, %v)", item.secret.Name, ctxNS, item.secret.Namespace) } continue } if err != nil { t.Errorf("%v: Unexpected error: %v", item.secret.Name, err) continue } if !api.HasObjectMetaSystemFieldValues(&item.secret.ObjectMeta) { t.Errorf("storage did not populate object meta field values") } if e, a := item.secret, c; !reflect.DeepEqual(e, a) { t.Errorf("diff: %s", util.ObjectDiff(e, a)) } // Ensure we implement the interface _ = rest.Watcher(storage) } }
// GetPolicyBinding retrieves a specific policyBinding. func (r *PolicyBindingRegistry) GetPolicyBinding(ctx kapi.Context, id string) (*authorizationapi.PolicyBinding, error) { if r.Err != nil { return nil, r.Err } namespace := kapi.NamespaceValue(ctx) if len(namespace) == 0 { return nil, errors.New("invalid request. Namespace parameter required.") } if namespacedBindings, ok := r.PolicyBindings[namespace]; ok { if binding, ok := namespacedBindings[id]; ok { return &binding, nil } } return nil, kapierrors.NewNotFound("PolicyBinding", id) }
// GetPolicy retrieves a specific policy.
func (r *PolicyRegistry) GetPolicy(ctx kapi.Context, id string) (*authorizationapi.Policy, error) {
	if r.Err != nil {
		return nil, r.Err
	}
	// Namespace-scoped: a namespace-less context is a caller error.
	namespace := kapi.NamespaceValue(ctx)
	if len(namespace) == 0 {
		return nil, errors.New("invalid request. Namespace parameter required.")
	}
	if namespacedPolicies, ok := r.Policies[namespace]; ok {
		if policy, ok := namespacedPolicies[id]; ok {
			return &policy, nil
		}
	}
	// NOTE(review): this plain error is not recognized by kapierrors.IsNotFound;
	// EnsurePolicy-style callers that check IsNotFound before creating a policy
	// would fail against this registry — confirm whether this should be
	// kapierrors.NewNotFound("Policy", id).
	return nil, fmt.Errorf("Policy %v::%v not found", namespace, id)
}