// ServerGroups returns the supported groups, with information like supported
// versions and the preferred version.
func (d *DiscoveryClient) ServerGroups() (apiGroupList *unversioned.APIGroupList, err error) {
	// Get the groupVersions exposed at /api
	v := &unversioned.APIVersions{}
	err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v)
	apiGroup := unversioned.APIGroup{}
	if err == nil {
		apiGroup = apiVersionsToAPIGroup(v)
	}
	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
		return nil, err
	}

	// Get the groupVersions exposed at /apis
	apiGroupList = &unversioned.APIGroupList{}
	err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList)
	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
		return nil, err
	}
	// to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api
	if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
		apiGroupList = &unversioned.APIGroupList{}
	}

	// append the group retrieved from /api to the list
	apiGroupList.Groups = append(apiGroupList.Groups, apiGroup)
	return apiGroupList, nil
}
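// Usage sketch (added for illustration, not from the source): walk the group
// list returned by ServerGroups above. The printServerGroups helper is
// hypothetical; only the unversioned.APIGroupList fields it reads are real.
func printServerGroups(d *DiscoveryClient) error {
	groups, err := d.ServerGroups()
	if err != nil {
		return err
	}
	for _, group := range groups.Groups {
		// PreferredVersion.GroupVersion is the server's preferred group/version string
		fmt.Printf("group %q (preferred version %q)\n", group.Name, group.PreferredVersion.GroupVersion)
	}
	return nil
}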
func doServiceAccountAPIRequests(t *testing.T, c *clientset.Clientset, ns string, authenticated bool, canRead bool, canWrite bool) {
	testSecret := &api.Secret{
		ObjectMeta: api.ObjectMeta{Name: "testSecret"},
		Data:       map[string][]byte{"test": []byte("data")},
	}

	readOps := []testOperation{
		func() error { _, err := c.Core().Secrets(ns).List(api.ListOptions{}); return err },
		func() error { _, err := c.Core().Pods(ns).List(api.ListOptions{}); return err },
	}
	writeOps := []testOperation{
		func() error { _, err := c.Core().Secrets(ns).Create(testSecret); return err },
		func() error { return c.Core().Secrets(ns).Delete(testSecret.Name, nil) },
	}

	for _, op := range readOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)
		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canRead && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canRead && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}

	for _, op := range writeOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)
		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canWrite && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canWrite && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
}
func TestAuthorizationRestrictedAccessForProjectAdmins(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	haroldClient, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, "hammer-project", "harold")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	markClient, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, "mallet-project", "mark")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = haroldClient.DeploymentConfigs("hammer-project").List(kapi.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, err = markClient.DeploymentConfigs("hammer-project").List(kapi.ListOptions{})
	if (err == nil) || !kapierror.IsForbidden(err) {
		t.Fatalf("unexpected error: %v", err)
	}

	// projects are a special case where a get of a project actually sets a namespace. Make sure that
	// the namespace is properly special cased and set for authorization rules
	_, err = haroldClient.Projects().Get("hammer-project")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, err = markClient.Projects().Get("hammer-project")
	if (err == nil) || !kapierror.IsForbidden(err) {
		t.Fatalf("unexpected error: %v", err)
	}

	// wait for the project authorization cache to catch the change. It is on a one second period
	waitForProject(t, haroldClient, "hammer-project", 1*time.Second, 10)
	waitForProject(t, markClient, "mallet-project", 1*time.Second, 10)
}
func (c *CacheAuthorizer) Authorize(ctx kapi.Context, a authorizer.Action) (allowed bool, reason string, err error) {
	key, err := cacheKey(ctx, a)
	if err != nil {
		glog.V(5).Infof("could not build cache key for %#v: %v", a, err)
		return c.authorizer.Authorize(ctx, a)
	}

	if value, hit := c.authorizeCache.Get(key); hit {
		switch record := value.(type) {
		case *authorizeCacheRecord:
			if record.created.Add(c.ttl).After(c.now()) {
				return record.allowed, record.reason, record.err
			} else {
				glog.V(5).Infof("cache record expired for %s", key)
				c.authorizeCache.Remove(key)
			}
		default:
			utilruntime.HandleError(fmt.Errorf("invalid cache record type for key %s: %#v", key, record))
		}
	}

	allowed, reason, err = c.authorizer.Authorize(ctx, a)

	// Don't cache results if there was an error unrelated to authorization
	// TODO: figure out a better way to determine this
	if err == nil || kerrs.IsForbidden(err) {
		c.authorizeCache.Add(key, &authorizeCacheRecord{created: c.now(), allowed: allowed, reason: reason, err: err})
	}

	return allowed, reason, err
}
func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, sets.String, error) {
	g := osgraph.New()

	loaders := []GraphLoader{
		&serviceLoader{namespace: namespace, lister: d.K},
		&serviceAccountLoader{namespace: namespace, lister: d.K},
		&secretLoader{namespace: namespace, lister: d.K},
		&rcLoader{namespace: namespace, lister: d.K},
		&podLoader{namespace: namespace, lister: d.K},
		// TODO check swagger for feature enablement and selectively add bcLoader and buildLoader
		// then remove errors.TolerateNotFoundError method.
		&bcLoader{namespace: namespace, lister: d.C},
		&buildLoader{namespace: namespace, lister: d.C},
		&isLoader{namespace: namespace, lister: d.C},
		&dcLoader{namespace: namespace, lister: d.C},
		&routeLoader{namespace: namespace, lister: d.C},
	}
	loadingFuncs := []func() error{}
	for _, loader := range loaders {
		loadingFuncs = append(loadingFuncs, loader.Load)
	}

	forbiddenResources := sets.String{}
	if errs := parallel.Run(loadingFuncs...); len(errs) > 0 {
		actualErrors := []error{}
		for _, err := range errs {
			if kapierrors.IsForbidden(err) {
				forbiddenErr := err.(*kapierrors.StatusError)
				if (forbiddenErr.Status().Details != nil) && (len(forbiddenErr.Status().Details.Kind) > 0) {
					forbiddenResources.Insert(forbiddenErr.Status().Details.Kind)
				}
				continue
			}
			actualErrors = append(actualErrors, err)
		}

		if len(actualErrors) > 0 {
			return g, forbiddenResources, utilerrors.NewAggregate(actualErrors)
		}
	}

	for _, loader := range loaders {
		loader.AddToGraph(g)
	}

	kubeedges.AddAllExposedPodTemplateSpecEdges(g)
	kubeedges.AddAllExposedPodEdges(g)
	kubeedges.AddAllManagedByRCPodEdges(g)
	kubeedges.AddAllRequestedServiceAccountEdges(g)
	kubeedges.AddAllMountableSecretEdges(g)
	kubeedges.AddAllMountedSecretEdges(g)
	buildedges.AddAllInputOutputEdges(g)
	buildedges.AddAllBuildEdges(g)
	deployedges.AddAllTriggerEdges(g)
	deployedges.AddAllDeploymentEdges(g)
	imageedges.AddAllImageStreamRefEdges(g)
	routeedges.AddAllRouteEdges(g)

	return g, forbiddenResources, nil
}
// SetupProject creates a new project and assigns a random user to it. All
// resources will then be created within this project, and the Kubernetes E2E
// suite will destroy the project after the test case finishes.
func (c *CLI) SetupProject(name string, kubeClient *kclient.Client, _ map[string]string) (*kapi.Namespace, error) {
	newNamespace := kapi.SimpleNameGenerator.GenerateName(fmt.Sprintf("extended-test-%s-", name))
	c.SetNamespace(newNamespace).ChangeUser(fmt.Sprintf("%s-user", c.Namespace()))
	e2e.Logf("The user is now %q", c.Username())

	e2e.Logf("Creating project %q", c.Namespace())
	_, err := c.REST().ProjectRequests().Create(&projectapi.ProjectRequest{
		ObjectMeta: kapi.ObjectMeta{Name: c.Namespace()},
	})
	if err != nil {
		e2e.Logf("Failed to create a project and namespace %q: %v", c.Namespace(), err)
		return nil, err
	}
	if err := wait.ExponentialBackoff(kclient.DefaultBackoff, func() (bool, error) {
		if _, err := c.KubeREST().Pods(c.Namespace()).List(kapi.ListOptions{}); err != nil {
			if apierrs.IsForbidden(err) {
				e2e.Logf("Waiting for user to have access to the namespace")
				return false, nil
			}
		}
		return true, nil
	}); err != nil {
		return nil, err
	}
	return &kapi.Namespace{ObjectMeta: kapi.ObjectMeta{Name: c.Namespace()}}, err
}
func verifyImageStreamAccess(ctx context.Context, namespace, imageRepo, verb string, client client.LocalSubjectAccessReviewsNamespacer) error {
	sar := authorizationapi.LocalSubjectAccessReview{
		Action: authorizationapi.Action{
			Verb:         verb,
			Group:        imageapi.GroupName,
			Resource:     "imagestreams/layers",
			ResourceName: imageRepo,
		},
	}
	response, err := client.LocalSubjectAccessReviews(namespace).Create(&sar)
	if err != nil {
		context.GetLogger(ctx).Errorf("OpenShift client error: %s", err)
		if kerrors.IsUnauthorized(err) || kerrors.IsForbidden(err) {
			return ErrOpenShiftAccessDenied
		}
		return err
	}
	if !response.Allowed {
		context.GetLogger(ctx).Errorf("OpenShift access denied: %s", response.Reason)
		return ErrOpenShiftAccessDenied
	}
	return nil
}
func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReviewResponse, err error) {
	result = &authorizationapi.SubjectAccessReviewResponse{}

	// if this is a cluster SAR, then no special handling
	if len(sar.Action.Namespace) == 0 {
		err = overrideAuth(c.token, c.r.Post().Resource("subjectAccessReviews")).Body(sar).Do().Into(result)
		return
	}

	err = c.r.Post().Resource("subjectAccessReviews").Body(sar).Do().Into(result)

	// if the namespace values don't match, then we definitely hit an old server. If we got a
	// forbidden, then we might have hit an old server and should try the old endpoint.
	if (sar.Action.Namespace != result.Namespace) || kapierrors.IsForbidden(err) {
		deprecatedResponse := &authorizationapi.SubjectAccessReviewResponse{}
		deprecatedAttemptErr := overrideAuth(c.token, c.r.Post().Namespace(sar.Action.Namespace).Resource("subjectAccessReviews")).Body(sar).Do().Into(deprecatedResponse)

		// if we definitely hit an old server, then return the error and result you get from the
		// older server.
		if sar.Action.Namespace != result.Namespace {
			return deprecatedResponse, deprecatedAttemptErr
		}

		// if we're not certain it was an old server, success overwrites the previous error, but
		// failure doesn't overwrite the previous error
		if deprecatedAttemptErr == nil {
			err = nil
			result = deprecatedResponse
		}
	}

	return
}
func verifyImageStreamAccess(namespace, imageRepo, verb string, client *client.Client) error {
	sar := authorizationapi.LocalSubjectAccessReview{
		Action: authorizationapi.AuthorizationAttributes{
			Verb:         verb,
			Resource:     "imagestreams/layers",
			ResourceName: imageRepo,
		},
	}
	response, err := client.LocalSubjectAccessReviews(namespace).Create(&sar)
	if err != nil {
		log.Errorf("OpenShift client error: %s", err)
		if kerrors.IsUnauthorized(err) || kerrors.IsForbidden(err) {
			return ErrOpenShiftAccessDenied
		}
		return err
	}
	if !response.Allowed {
		log.Errorf("OpenShift access denied: %s", response.Reason)
		return ErrOpenShiftAccessDenied
	}
	return nil
}
// ensureComponentAuthorizationRules initializes the cluster policies
func (c *MasterConfig) ensureComponentAuthorizationRules() {
	clusterPolicyRegistry := clusterpolicyregistry.NewRegistry(clusterpolicystorage.NewStorage(c.EtcdHelper))
	ctx := kapi.WithNamespace(kapi.NewContext(), "")

	if _, err := clusterPolicyRegistry.GetClusterPolicy(ctx, authorizationapi.PolicyName); kapierror.IsNotFound(err) {
		glog.Infof("No cluster policy found. Creating bootstrap policy based on: %v", c.Options.PolicyConfig.BootstrapPolicyFile)

		if err := admin.OverwriteBootstrapPolicy(c.EtcdHelper, c.Options.PolicyConfig.BootstrapPolicyFile, admin.CreateBootstrapPolicyFileFullCommand, true, ioutil.Discard); err != nil {
			glog.Errorf("Error creating bootstrap policy: %v", err)
		}
	} else {
		glog.V(2).Infof("Ignoring bootstrap policy file because cluster policy found")
	}

	// Wait until the policy cache has caught up before continuing
	review := &authorizationapi.SubjectAccessReview{Action: authorizationapi.AuthorizationAttributes{Verb: "get", Resource: "clusterpolicies"}}
	err := wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) {
		result, err := c.PolicyClient().SubjectAccessReviews().Create(review)
		if err == nil && result.Allowed {
			return true, nil
		}
		if kapierror.IsForbidden(err) || (err == nil && !result.Allowed) {
			glog.V(2).Infof("waiting for policy cache to initialize")
			return false, nil
		}
		return false, err
	})
	if err != nil {
		glog.Errorf("error waiting for policy cache to initialize: %v", err)
	}
}
func whoAmI(clientConfig *restclient.Config) (*api.User, error) {
	client, err := client.New(clientConfig)
	if err != nil {
		return nil, err
	}

	me, err := client.Users().Get("~")

	// if we're talking to kube (or likely talking to kube), fall back to a best
	// guess from the client config instead of failing
	if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) {
		switch {
		case len(clientConfig.BearerToken) > 0:
			// the user has already been willing to provide the token on the CLI, so they probably
			// don't mind using it again if they switch to and from this user
			return &api.User{ObjectMeta: kapi.ObjectMeta{Name: clientConfig.BearerToken}}, nil

		case len(clientConfig.Username) > 0:
			return &api.User{ObjectMeta: kapi.ObjectMeta{Name: clientConfig.Username}}, nil
		}
	}

	if err != nil {
		return nil, err
	}

	return me, nil
}
func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (*authorizationapi.SubjectAccessReviewResponse, error) {
	result := &authorizationapi.SubjectAccessReviewResponse{}

	req, err := overrideAuth(c.token, c.r.Post().Namespace(c.ns).Resource("localSubjectAccessReviews"))
	if err != nil {
		return &authorizationapi.SubjectAccessReviewResponse{}, err
	}
	err = req.Body(sar).Do().Into(result)

	// if we get one of these failures, we may be talking to an older openshift. In that case,
	// we need to try hitting ns/namespace-name/subjectaccessreview
	if kapierrors.IsForbidden(err) || kapierrors.IsNotFound(err) {
		deprecatedSAR := &authorizationapi.SubjectAccessReview{
			Action: sar.Action,
			User:   sar.User,
			Groups: sar.Groups,
		}
		deprecatedResponse := &authorizationapi.SubjectAccessReviewResponse{}

		// use distinct names here so a success below overwrites the outer err
		// rather than a shadowed copy of it
		deprecatedReq, overrideErr := overrideAuth(c.token, c.r.Post().Namespace(c.ns).Resource("subjectAccessReviews"))
		if overrideErr != nil {
			return &authorizationapi.SubjectAccessReviewResponse{}, overrideErr
		}
		deprecatedAttemptErr := deprecatedReq.Body(deprecatedSAR).Do().Into(deprecatedResponse)
		if deprecatedAttemptErr == nil {
			err = nil
			result = deprecatedResponse
		}
	}

	return result, err
}
// ServerResourcesForGroupVersion returns the supported resources for a group and version.
func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *unversioned.APIResourceList, err error) {
	// we don't expose this version
	if groupVersion == "v1beta3" {
		return &unversioned.APIResourceList{}, nil
	}

	parentList, err := d.DiscoveryClient.ServerResourcesForGroupVersion(groupVersion)
	if err != nil {
		return parentList, err
	}
	if groupVersion != "v1" {
		return parentList, nil
	}

	// when we request v1, we must combine the parent list with the list from /oapi
	url := url.URL{}
	url.Path = "/oapi/" + groupVersion
	originResources := &unversioned.APIResourceList{}
	err = d.Get().AbsPath(url.String()).Do().Into(originResources)
	if err != nil {
		// ignore 403 or 404 errors to be compatible with a v1.0 server
		if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
			return parentList, nil
		}
		return nil, err
	}

	parentList.APIResources = append(parentList.APIResources, originResources.APIResources...)
	return parentList, nil
}
func (o RolloutLatestOptions) RunRolloutLatest() error {
	info := o.infos[0]
	config, ok := info.Object.(*deployapi.DeploymentConfig)
	if !ok {
		return fmt.Errorf("%s is not a deployment config", info.Name)
	}

	// TODO: Consider allowing one-off deployments for paused configs
	// See https://github.com/openshift/origin/issues/9903
	if config.Spec.Paused {
		return fmt.Errorf("cannot deploy a paused deployment config")
	}

	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kc.ReplicationControllers(config.Namespace).Get(deploymentName)
	switch {
	case err == nil:
		// Reject attempts to start a concurrent deployment.
		if !deployutil.IsTerminatedDeployment(deployment) {
			status := deployutil.DeploymentStatusFor(deployment)
			return fmt.Errorf("#%d is already in progress (%s).", config.Status.LatestVersion, status)
		}
	case !kerrors.IsNotFound(err):
		return err
	}

	dc := config
	if !o.DryRun {
		request := &deployapi.DeploymentRequest{
			Name:   config.Name,
			Latest: !o.again,
			Force:  true,
		}

		dc, err = o.oc.DeploymentConfigs(config.Namespace).Instantiate(request)

		// Pre-1.4 servers don't support the instantiate endpoint. Fall back to
		// incrementing latestVersion on them.
		if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) {
			config.Status.LatestVersion++
			dc, err = o.oc.DeploymentConfigs(config.Namespace).Update(config)
		}
		if err != nil {
			return err
		}

		info.Refresh(dc, true)
	}

	if o.output == "revision" {
		fmt.Fprintf(o.out, "%d", dc.Status.LatestVersion)
		return nil
	}

	kcmdutil.PrintSuccess(o.mapper, o.output == "name", o.out, info.Mapping.Resource, info.Name, o.DryRun, "rolled out")
	return nil
}
// ensureComponentAuthorizationRules initializes the cluster policies
func (c *MasterConfig) ensureComponentAuthorizationRules() {
	clusterPolicyRegistry := clusterpolicyregistry.NewRegistry(clusterpolicystorage.NewStorage(c.EtcdHelper))
	ctx := kapi.WithNamespace(kapi.NewContext(), "")

	if _, err := clusterPolicyRegistry.GetClusterPolicy(ctx, authorizationapi.PolicyName); kapierror.IsNotFound(err) {
		glog.Infof("No cluster policy found. Creating bootstrap policy based on: %v", c.Options.PolicyConfig.BootstrapPolicyFile)

		if err := admin.OverwriteBootstrapPolicy(c.EtcdHelper, c.Options.PolicyConfig.BootstrapPolicyFile, admin.CreateBootstrapPolicyFileFullCommand, true, ioutil.Discard); err != nil {
			glog.Errorf("Error creating bootstrap policy: %v", err)
		}
	} else {
		glog.V(2).Infof("Ignoring bootstrap policy file because cluster policy found")
	}

	// Wait until the policy cache has caught up before continuing
	review := &authorizationapi.SubjectAccessReview{Action: authorizationapi.AuthorizationAttributes{Verb: "get", Group: authorizationapi.GroupName, Resource: "clusterpolicies"}}
	err := wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) {
		result, err := c.PolicyClient().SubjectAccessReviews().Create(review)
		if err == nil && result.Allowed {
			return true, nil
		}
		if kapierror.IsForbidden(err) || (err == nil && !result.Allowed) {
			glog.V(2).Infof("waiting for policy cache to initialize")
			return false, nil
		}
		return false, err
	})
	if err != nil {
		glog.Errorf("error waiting for policy cache to initialize: %v", err)
	}

	// Reconcile roles that must exist for the cluster to function.
	// Be very judicious about what is placed in this list, since it will be enforced on every server start.
	reconcileRoles := &policy.ReconcileClusterRolesOptions{
		RolesToReconcile: []string{bootstrappolicy.DiscoveryRoleName},
		Confirmed:        true,
		Union:            true,
		Out:              ioutil.Discard,
		RoleClient:       c.PrivilegedLoopbackOpenShiftClient.ClusterRoles(),
	}
	if err := reconcileRoles.RunReconcileClusterRoles(nil, nil); err != nil {
		glog.Errorf("Could not auto reconcile roles: %v\n", err)
	}

	// Reconcile rolebindings that must exist for the cluster to function.
	// Be very judicious about what is placed in this list, since it will be enforced on every server start.
	reconcileRoleBindings := &policy.ReconcileClusterRoleBindingsOptions{
		RolesToReconcile:  []string{bootstrappolicy.DiscoveryRoleName},
		Confirmed:         true,
		Union:             true,
		Out:               ioutil.Discard,
		RoleBindingClient: c.PrivilegedLoopbackOpenShiftClient.ClusterRoleBindings(),
	}
	if err := reconcileRoleBindings.RunReconcileClusterRoleBindings(nil, nil); err != nil {
		glog.Errorf("Could not auto reconcile role bindings: %v\n", err)
	}
}
func TestClusterResourceOverridePluginWithLimits(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	config := &overrideapi.ClusterResourceOverrideConfig{
		LimitCPUToMemoryPercent:     100,
		CPURequestToLimitPercent:    50,
		MemoryRequestToLimitPercent: 50,
	}
	kubeClientset := setupClusterResourceOverrideTest(t, config)
	podHandler := kubeClientset.Core().Pods(testutil.Namespace())
	limitHandler := kubeClientset.Core().LimitRanges(testutil.Namespace())

	// test with a limits object with defaults;
	// I wanted to test with a limits object without defaults to see limits forbid an empty resource spec,
	// but found that if defaults aren't set in the limit object, something still fills them in.
	// note: defaults are only used when quantities are *missing*, not when they are 0
	limitItem := kapi.LimitRangeItem{
		Type:                 kapi.LimitTypeContainer,
		Max:                  testResourceList("2Gi", "2"),
		Min:                  testResourceList("128Mi", "200m"),
		Default:              testResourceList("512Mi", "500m"), // note: auto-filled from max if we set that
		DefaultRequest:       testResourceList("128Mi", "200m"), // filled from max if set, or min if that is set
		MaxLimitRequestRatio: kapi.ResourceList{},
	}
	limit := &kapi.LimitRange{
		ObjectMeta: kapi.ObjectMeta{Name: "limit"},
		Spec:       kapi.LimitRangeSpec{Limits: []kapi.LimitRangeItem{limitItem}},
	}
	_, err := limitHandler.Create(limit)
	if err != nil {
		t.Fatal(err)
	}
	podCreated, err := podHandler.Create(testClusterResourceOverridePod("limit-with-default", "", "1"))
	if err != nil {
		t.Fatal(err)
	}
	if memory := podCreated.Spec.Containers[0].Resources.Limits.Memory(); memory.Cmp(resource.MustParse("512Mi")) != 0 {
		t.Errorf("limit-with-default: Memory limit did not match default 512Mi: %v", memory)
	}
	if memory := podCreated.Spec.Containers[0].Resources.Requests.Memory(); memory.Cmp(resource.MustParse("256Mi")) != 0 {
		t.Errorf("limit-with-default: Memory req did not match expected 256Mi: %v", memory)
	}
	if cpu := podCreated.Spec.Containers[0].Resources.Limits.Cpu(); cpu.Cmp(resource.MustParse("500m")) != 0 {
		t.Errorf("limit-with-default: CPU limit did not match expected 500 mcore: %v", cpu)
	}
	if cpu := podCreated.Spec.Containers[0].Resources.Requests.Cpu(); cpu.Cmp(resource.MustParse("250m")) != 0 {
		t.Errorf("limit-with-default: CPU req did not match expected 250 mcore: %v", cpu)
	}

	// set it up so that the overrides create resources that fail validation
	_, err = podHandler.Create(testClusterResourceOverridePod("limit-with-default-fail", "128Mi", "1"))
	if err == nil {
		t.Errorf("limit-with-default-fail: expected to be forbidden")
	} else if !apierrors.IsForbidden(err) {
		t.Errorf("limit-with-default-fail: unexpected error: %v", err)
	}
}
// IsErrorLimitExceeded returns true if the given error is a limit error.
func IsErrorLimitExceeded(err error) bool {
	if isForbidden := apierrs.IsForbidden(err); isForbidden || apierrs.IsInvalid(err) {
		lowered := strings.ToLower(err.Error())
		// a limit error can also be reported with the Invalid reason, so match
		// on the message rather than the Forbidden reason alone
		if strings.Contains(lowered, errLimitsMessageString) {
			return true
		}
	}
	return false
}
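// Caller sketch (hypothetical, not from the source): distinguish a limit/quota
// rejection from an ordinary RBAC denial when reporting a failed create. It
// relies only on IsErrorLimitExceeded defined above.
func explainCreateError(err error) string {
	switch {
	case err == nil:
		return ""
	case IsErrorLimitExceeded(err):
		// rejected by resource limits; retrying with smaller requests may help
		return "rejected by resource limits: " + err.Error()
	default:
		return err.Error()
	}
}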
// errForbiddenWithRetry returns true if this is a status error and has requested a retry
func errForbiddenWithRetry(err error) bool {
	if err == nil || !kapierrors.IsForbidden(err) {
		return false
	}
	status, ok := err.(kapierrors.APIStatus)
	if !ok {
		return false
	}
	return status.Status().Details != nil && status.Status().Details.RetryAfterSeconds > 0
}
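// Caller sketch (hypothetical, not from the source): honor the server's
// RetryAfterSeconds hint that errForbiddenWithRetry above checks for. op is
// assumed to be an idempotent API call; the time package import is assumed.
func retryOnForbiddenHint(op func() error) error {
	err := op()
	if !errForbiddenWithRetry(err) {
		return err
	}
	// errForbiddenWithRetry guarantees the assertion succeeds and Details is non-nil
	status := err.(kapierrors.APIStatus)
	time.Sleep(time.Duration(status.Status().Details.RetryAfterSeconds) * time.Second)
	return op()
}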
func TestPolicyBasedRestrictionOfBuildCreateAndCloneByStrategy(t *testing.T) {
	defer testutil.DumpEtcdOnFailure(t)
	clusterAdminClient, projectAdminClient, projectEditorClient := setupBuildStrategyTest(t, false)

	clients := map[string]*client.Client{"admin": projectAdminClient, "editor": projectEditorClient}
	builds := map[string]*buildapi.Build{}

	// create builds to set up the test
	for _, strategy := range buildStrategyTypes() {
		for clientType, client := range clients {
			var err error
			if builds[string(strategy)+clientType], err = createBuild(t, client.Builds(testutil.Namespace()), strategy); err != nil {
				t.Errorf("unexpected error for strategy %s and client %s: %v", strategy, clientType, err)
			}
		}
	}

	// by default admins and editors can clone builds
	for _, strategy := range buildStrategyTypes() {
		for clientType, client := range clients {
			if _, err := cloneBuild(t, client.Builds(testutil.Namespace()), builds[string(strategy)+clientType]); err != nil {
				t.Errorf("unexpected clone error for strategy %s and client %s: %v", strategy, clientType, err)
			}
		}
	}

	removeBuildStrategyRoleResources(t, clusterAdminClient, projectAdminClient, projectEditorClient)

	// make sure builds are rejected
	for _, strategy := range buildStrategyTypes() {
		for clientType, client := range clients {
			if _, err := createBuild(t, client.Builds(testutil.Namespace()), strategy); !kapierror.IsForbidden(err) {
				t.Errorf("expected forbidden for strategy %s and client %s: got %v", strategy, clientType, err)
			}
		}
	}

	// make sure build updates are rejected
	for _, strategy := range buildStrategyTypes() {
		for clientType, client := range clients {
			if _, err := updateBuild(t, client.Builds(testutil.Namespace()), builds[string(strategy)+clientType]); !kapierror.IsForbidden(err) {
				t.Errorf("expected forbidden for strategy %s and client %s: got %v", strategy, clientType, err)
			}
		}
	}

	// make sure clone is rejected
	for _, strategy := range buildStrategyTypes() {
		for clientType, client := range clients {
			if _, err := cloneBuild(t, client.Builds(testutil.Namespace()), builds[string(strategy)+clientType]); !kapierror.IsForbidden(err) {
				t.Errorf("expected forbidden for strategy %s and client %s: got %v", strategy, clientType, err)
			}
		}
	}
}
// Search searches for a template and returns matches with the object representation
func (r TemplateSearcher) Search(precise bool, terms ...string) (ComponentMatches, []error) {
	matches := ComponentMatches{}
	var errs []error

	checkedNamespaces := sets.NewString()
	for _, namespace := range r.Namespaces {
		if checkedNamespaces.Has(namespace) {
			continue
		}
		checkedNamespaces.Insert(namespace)

		templates, err := r.Client.Templates(namespace).List(kapi.ListOptions{})
		if err != nil {
			if errors.IsNotFound(err) || errors.IsForbidden(err) {
				continue
			}
			errs = append(errs, err)
			continue
		}

		exact := false
		for i := range templates.Items {
			template := &templates.Items[i]
			for _, term := range terms {
				if term == "__template_fail" {
					errs = append(errs, fmt.Errorf("unable to find the specified template: %s", term))
					continue
				}
				glog.V(4).Infof("checking for term %s in namespace %s", term, namespace)
				if score, scored := templateScorer(*template, term); scored {
					if score == 0.0 {
						exact = true
					}
					glog.V(4).Infof("Adding template %q in project %q with score %f", template.Name, template.Namespace, score)
					matches = append(matches, &ComponentMatch{
						Value:       term,
						Argument:    fmt.Sprintf("--template=%q", template.Name),
						Name:        template.Name,
						Description: fmt.Sprintf("Template %q in project %q", template.Name, template.Namespace),
						Score:       score,
						Template:    template,
					})
				}
			}
		}

		// If we found one or more exact matches in this namespace, do not continue looking at
		// other namespaces
		if exact && precise {
			break
		}
	}

	return matches, errs
}
func verifyOpenShiftUser(client *client.Client) error {
	if _, err := client.Users().Get("~"); err != nil {
		log.Errorf("Get user failed with error: %s", err)
		if kerrors.IsUnauthorized(err) || kerrors.IsForbidden(err) {
			return ErrOpenShiftAccessDenied
		}
		return err
	}
	return nil
}
func verifyOpenShiftUser(ctx context.Context, client client.UsersInterface) error {
	if _, err := client.Users().Get("~"); err != nil {
		context.GetLogger(ctx).Errorf("Get user failed with error: %s", err)
		if kerrors.IsUnauthorized(err) || kerrors.IsForbidden(err) {
			return ErrOpenShiftAccessDenied
		}
		return err
	}
	return nil
}
// NewForbidden is a utility function to return a well-formatted admission control error response
func NewForbidden(a Attributes, internalError error) error {
	// do not double-wrap an error of the same type
	if apierrors.IsForbidden(internalError) {
		return internalError
	}

	name, kind, err := extractKindName(a)
	if err != nil {
		return apierrors.NewInternalError(utilerrors.NewAggregate([]error{internalError, err}))
	}
	return apierrors.NewForbidden(kind.Kind, name, internalError)
}
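// Admission sketch (hypothetical, not from the source): a plugin check that
// wraps its policy violation with NewForbidden so clients receive a typed
// Forbidden status. Because NewForbidden refuses to double-wrap, a forbidden
// error produced by a nested check passes through unchanged.
func admitWithPolicy(a Attributes, violation error) error {
	if violation == nil {
		return nil
	}
	return NewForbidden(a, violation)
}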
func TestErrors(t *testing.T) {
	oc, _, _ := NewErrorClients(errors.NewNotFound(deployapi.Resource("DeploymentConfigList"), ""))
	_, err := oc.DeploymentConfigs("test").List(kapi.ListOptions{})
	if !errors.IsNotFound(err) {
		t.Fatalf("unexpected error: %v", err)
	}

	oc, _, _ = NewErrorClients(errors.NewForbidden(deployapi.Resource("DeploymentConfigList"), "", nil))
	_, err = oc.DeploymentConfigs("test").List(kapi.ListOptions{})
	if !errors.IsForbidden(err) {
		t.Fatalf("unexpected error: %v", err)
	}
}
// Stat makes a local check for the blob, then falls through to the other servers referenced by
// the image stream and looks for those that have the layer.
func (r *pullthroughBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	// check the local store for the blob
	desc, err := r.BlobStore.Stat(ctx, dgst)
	switch {
	case err == distribution.ErrBlobUnknown:
		// continue on to the code below and look up the blob in a remote store since it is not in
		// the local store
	case err != nil:
		context.GetLogger(r.repo.ctx).Errorf("Failed to find blob %q: %#v", dgst.String(), err)
		fallthrough
	default:
		return desc, err
	}

	// look up the potential remote repositories that this blob could be part of (at this time,
	// we don't know which image in the image stream surfaced the content).
	is, err := r.repo.getImageStream()
	if err != nil {
		if errors.IsNotFound(err) || errors.IsForbidden(err) {
			return distribution.Descriptor{}, distribution.ErrBlobUnknown
		}
		context.GetLogger(r.repo.ctx).Errorf("Error retrieving image stream for blob: %s", err)
		return distribution.Descriptor{}, err
	}

	var localRegistry string
	if local, err := imageapi.ParseDockerImageReference(is.Status.DockerImageRepository); err == nil {
		// TODO: normalize further?
		localRegistry = local.Registry
	}

	retriever := r.repo.importContext()
	cached := r.repo.cachedLayers.RepositoriesForDigest(dgst)

	// look at the first level of tagged repositories first
	search := identifyCandidateRepositories(is, localRegistry, true)
	if desc, err := r.findCandidateRepository(ctx, search, cached, dgst, retriever); err == nil {
		return desc, nil
	}

	// look at all other repositories tagged by the server
	secondary := identifyCandidateRepositories(is, localRegistry, false)
	for k := range search {
		delete(secondary, k)
	}
	if desc, err := r.findCandidateRepository(ctx, secondary, cached, dgst, retriever); err == nil {
		return desc, nil
	}

	return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
func confirmProjectAccess(currentProject string, oClient *client.Client, kClient kclientset.Interface) error {
	_, projectErr := oClient.Projects().Get(currentProject)
	if !kapierrors.IsNotFound(projectErr) && !kapierrors.IsForbidden(projectErr) {
		return projectErr
	}

	// at this point we know the error is a not found or forbidden, but we'll test namespaces
	// just in case we're running on kube
	if _, err := kClient.Core().Namespaces().Get(currentProject); err == nil {
		return nil
	}

	// otherwise return the openshift error default
	return projectErr
}
// we had a bug where this failed on ` uenxpected error: metadata.name: Invalid value: "system:admin": may not contain ":"`
// make sure we never have that bug again and that project limits for system admins work
func TestProjectRequestLimitAsSystemAdmin(t *testing.T) {
	_, oclient, _ := setupProjectRequestLimitTest(t, projectRequestLimitSingleDefaultConfig())

	if _, err := oclient.ProjectRequests().Create(&projectapi.ProjectRequest{
		ObjectMeta: kapi.ObjectMeta{Name: "foo"},
	}); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if _, err := oclient.ProjectRequests().Create(&projectapi.ProjectRequest{
		ObjectMeta: kapi.ObjectMeta{Name: "bar"},
	}); !apierrors.IsForbidden(err) {
		t.Errorf("expected forbidden error, got: %v", err)
	}
}
func TestBootstrapPolicyOverwritePolicyCommand(t *testing.T) {
	testutil.RequireEtcd(t)
	masterConfig, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	client, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if err := client.ClusterPolicies().Delete(authorizationapi.PolicyName); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// after the policy is deleted, we must wait for it to be cleared from the policy cache
	err = wait.Poll(10*time.Millisecond, 10*time.Second, func() (bool, error) {
		_, err := client.ClusterPolicies().List(kapi.ListOptions{})
		if err == nil {
			return false, nil
		}
		if !kapierror.IsForbidden(err) {
			t.Errorf("unexpected error: %v", err)
		}
		return true, nil
	})
	if err != nil {
		t.Errorf("timeout: %v", err)
	}

	etcdClient, err := etcd.MakeNewEtcdClient(masterConfig.EtcdClientInfo)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	storageVersion := unversioned.GroupVersion{Group: "", Version: masterConfig.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := origin.NewEtcdStorage(etcdClient, storageVersion, masterConfig.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if err := admin.OverwriteBootstrapPolicy(etcdHelper, masterConfig.PolicyConfig.BootstrapPolicyFile, admin.CreateBootstrapPolicyFileFullCommand, true, ioutil.Discard); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if _, err := client.ClusterPolicies().List(kapi.ListOptions{}); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
// testPodNodeConstraintsObjectCreationWithPodTemplate attempts to create different object types that contain pod templates
// using the passed in nodeName and nodeSelector. It will use the expectError flag to determine if an error should be returned or not.
func testPodNodeConstraintsObjectCreationWithPodTemplate(t *testing.T, name string, kclientset kclientset.Interface, client client.Interface, nodeName string, nodeSelector map[string]string, expectError bool) {
	checkForbiddenErr := func(objType string, err error) {
		if err == nil && expectError {
			t.Errorf("%s (%s): expected forbidden error but did not receive one", name, objType)
			return
		}
		if err != nil && !expectError {
			t.Errorf("%s (%s): got error but did not expect one: %v", name, objType, err)
			return
		}
		if err != nil && expectError && !kapierrors.IsForbidden(err) {
			t.Errorf("%s (%s): did not get an expected forbidden error: %v", name, objType, err)
			return
		}
	}

	// Pod
	pod := testPodNodeConstraintsPod(nodeName, nodeSelector)
	_, err := kclientset.Core().Pods(testutil.Namespace()).Create(pod)
	checkForbiddenErr("pod", err)

	// ReplicationController
	rc := testPodNodeConstraintsReplicationController(nodeName, nodeSelector)
	_, err = kclientset.Core().ReplicationControllers(testutil.Namespace()).Create(rc)
	checkForbiddenErr("rc", err)

	// TODO: Enable when the deployments endpoint is supported in Origin
	// Deployment
	// d := testPodNodeConstraintsDeployment(nodeName, nodeSelector)
	// _, err = kclientset.Extensions().Deployments(testutil.Namespace()).Create(d)
	// checkForbiddenErr("deployment", err)

	// ReplicaSet
	rs := testPodNodeConstraintsReplicaSet(nodeName, nodeSelector)
	_, err = kclientset.Extensions().ReplicaSets(testutil.Namespace()).Create(rs)
	checkForbiddenErr("replicaset", err)

	// Job
	job := testPodNodeConstraintsJob(nodeName, nodeSelector)
	_, err = kclientset.Batch().Jobs(testutil.Namespace()).Create(job)
	checkForbiddenErr("job", err)

	// DeploymentConfig
	dc := testPodNodeConstraintsDeploymentConfig(nodeName, nodeSelector)
	_, err = client.DeploymentConfigs(testutil.Namespace()).Create(dc)
	checkForbiddenErr("dc", err)
}
// deploy launches a new deployment unless there's already a deployment
// process in progress for config.
func (o DeployOptions) deploy(config *deployapi.DeploymentConfig) error {
	if config.Spec.Paused {
		return fmt.Errorf("cannot deploy a paused deployment config")
	}

	// TODO: This implies that deploymentconfig.status.latestVersion is always synced. Currently,
	// that's the case because clients (oc, trigger controllers) are updating the status directly.
	// Clients should be acting either on spec or on annotations and status updates should be a
	// responsibility of the main controller. We need to start by unplugging this assumption from
	// our client tools.
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)
	if err == nil && !deployutil.IsTerminatedDeployment(deployment) {
		// Reject attempts to start a concurrent deployment.
		return fmt.Errorf("#%d is already in progress (%s).\nOptionally, you can cancel this deployment using the --cancel option.", config.Status.LatestVersion, deployutil.DeploymentStatusFor(deployment))
	}
	if err != nil && !kerrors.IsNotFound(err) {
		return err
	}

	request := &deployapi.DeploymentRequest{
		Name:   config.Name,
		Latest: false,
		Force:  true,
	}

	dc, err := o.osClient.DeploymentConfigs(config.Namespace).Instantiate(request)
	// Pre-1.4 servers don't support the instantiate endpoint. Fall back to
	// incrementing latestVersion on them.
	if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) {
		config.Status.LatestVersion++
		dc, err = o.osClient.DeploymentConfigs(config.Namespace).Update(config)
	}
	if err != nil {
		if kerrors.IsBadRequest(err) {
			err = fmt.Errorf("%v - try 'oc rollout latest dc/%s'", err, config.Name)
		}
		return err
	}
	fmt.Fprintf(o.out, "Started deployment #%d\n", dc.Status.LatestVersion)
	if o.follow {
		return o.getLogs(dc)
	}
	fmt.Fprintf(o.out, "Use '%s logs -f dc/%s' to track its progress.\n", o.baseCommandName, dc.Name)
	return nil
}