func setupClusterResourceOverrideTest(t *testing.T, pluginConfig *overrideapi.ClusterResourceOverrideConfig) kclient.Interface {
	masterConfig, err := testserver.DefaultMasterOptions()
	checkErr(t, err)

	// fill in possibly-empty config values
	if masterConfig.KubernetesMasterConfig == nil {
		masterConfig.KubernetesMasterConfig = &api.KubernetesMasterConfig{}
	}
	kubeMaster := masterConfig.KubernetesMasterConfig
	if kubeMaster.AdmissionConfig.PluginConfig == nil {
		kubeMaster.AdmissionConfig.PluginConfig = map[string]api.AdmissionPluginConfig{}
	}
	// set our config as desired
	kubeMaster.AdmissionConfig.PluginConfig[overrideapi.PluginName] = api.AdmissionPluginConfig{Configuration: pluginConfig}

	// start up a server and return useful clients to that server
	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig)
	checkErr(t, err)
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	checkErr(t, err)
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	checkErr(t, err)

	// need to create a project and return client for project admin
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	checkErr(t, err)
	_, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, testutil.Namespace(), "peon")
	checkErr(t, err)
	checkErr(t, testserver.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}))

	return clusterAdminKubeClient
}
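// checkErr is used by the setup helpers above but is not defined in this
// excerpt. A minimal sketch, assuming it simply aborts the test on a non-nil
// error; the exact message format is an assumption for illustration.
func checkErr(t *testing.T, err error) {
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}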
func setupRunOnceDurationTest(t *testing.T, pluginConfig *pluginapi.RunOnceDurationConfig, nsAnnotations map[string]string) kclient.Interface {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{
		"RunOnceDuration": {
			Configuration: pluginConfig,
		},
	}
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	ns.Annotations = nsAnnotations
	_, err = kubeClient.Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(kubeClient, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	return kubeClient
}
func setupUserPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig, user string) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	cfg := map[string]configapi.AdmissionPluginConfig{
		"PodNodeConstraints": {
			Configuration: pluginConfig,
		},
	}
	masterConfig.AdmissionConfig.PluginConfig = cfg
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = cfg
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	userClient, userkubeClientset, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, user)
	if err != nil {
		t.Fatalf("error getting user/kube client: %v", err)
	}
	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting kube client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(kubeClientset, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	addUser := &policy.RoleModificationOptions{
		RoleNamespace:       ns.Name,
		RoleName:            bootstrappolicy.AdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(clusterAdminClient),
		Users:               []string{user},
	}
	if err := addUser.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	return userClient, userkubeClientset
}
func setupBuildControllerTest(counts controllerCount, t *testing.T) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	master, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatal(err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatal(err)
	}
	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatal(err)
	}
	_, err = clusterAdminKubeClientset.Core().Namespaces().Create(&kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
	})
	if err != nil {
		t.Fatal(err)
	}
	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClientset, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	openshiftConfig, err := origin.BuildMasterConfig(*master)
	if err != nil {
		t.Fatal(err)
	}

	// Get the build controller clients, since those rely on service account tokens.
	// We don't want to proceed with the rest of the test until those are available.
	openshiftConfig.BuildControllerClients()

	for i := 0; i < counts.BuildControllers; i++ {
		openshiftConfig.RunBuildController(openshiftConfig.Informers)
	}
	for i := 0; i < counts.BuildPodControllers; i++ {
		openshiftConfig.RunBuildPodController()
	}
	for i := 0; i < counts.ImageChangeControllers; i++ {
		openshiftConfig.RunBuildImageChangeTriggerController()
	}
	for i := 0; i < counts.ConfigChangeControllers; i++ {
		openshiftConfig.RunBuildConfigChangeController()
	}
	return clusterAdminClient, clusterAdminKubeClientset
}
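// controllerCount is referenced by setupBuildControllerTest but not defined in
// this excerpt. A minimal sketch reconstructed from its usage above, assuming
// it only records how many instances of each build-related controller a test
// should start.
type controllerCount struct {
	BuildControllers        int
	BuildPodControllers     int
	ImageChangeControllers  int
	ConfigChangeControllers int
}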
func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]configapi.AdmissionPluginConfig) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	master, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("%v", err)
	}
	master.AdmissionConfig.PluginConfig = pluginConfig
	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(master)
	if err != nil {
		t.Fatalf("%v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("%v", err)
	}
	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("%v", err)
	}
	_, err = clusterAdminKubeClientset.Namespaces().Create(&kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
	})
	if err != nil {
		t.Fatalf("%v", err)
	}
	err = testserver.WaitForServiceAccounts(
		clusterAdminKubeClientset,
		testutil.Namespace(),
		[]string{
			bootstrappolicy.BuilderServiceAccountName,
			bootstrappolicy.DefaultServiceAccountName,
		})
	if err != nil {
		t.Fatalf("%v", err)
	}
	return clusterAdminClient, clusterAdminKubeClientset
}
func setupClusterAdminPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig) (*client.Client, *kclient.Client) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	cfg := map[string]configapi.AdmissionPluginConfig{
		"PodNodeConstraints": {
			Configuration: pluginConfig,
		},
	}
	masterConfig.AdmissionConfig.PluginConfig = cfg
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = cfg
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	openShiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClient.Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(kubeClient, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	return openShiftClient, kubeClient
}
func TestWebhookGitHubPushWithImage(t *testing.T) {
	testutil.RequireEtcd(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	err = testutil.CreateNamespace(clusterAdminKubeConfig, testutil.Namespace())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	checkErr(t, err)
	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	// create imagerepo
	imageStream := &imageapi.ImageStream{
		ObjectMeta: kapi.ObjectMeta{Name: "image-stream"},
		Spec: imageapi.ImageStreamSpec{
			DockerImageRepository: "registry:3000/integration/imageStream",
			Tags: map[string]imageapi.TagReference{
				"validTag": {
					From: &kapi.ObjectReference{
						Kind: "DockerImage",
						Name: "registry:3000/integration/imageStream:success",
					},
				},
			},
		},
	}
	if _, err := clusterAdminClient.ImageStreams(testutil.Namespace()).Create(imageStream); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	ism := &imageapi.ImageStreamMapping{
		ObjectMeta: kapi.ObjectMeta{Name: "image-stream"},
		Tag:        "validTag",
		Image: imageapi.Image{
			ObjectMeta: kapi.ObjectMeta{
				Name: "myimage",
			},
			DockerImageReference: "registry:3000/integration/imageStream:success",
		},
	}
	if err := clusterAdminClient.ImageStreamMappings(testutil.Namespace()).Create(ism); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// create buildconfig
	buildConfig := mockBuildConfigImageParms("originalImage", "imageStream", "validTag")
	if _, err := clusterAdminClient.BuildConfigs(testutil.Namespace()).Create(buildConfig); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	watch, err := clusterAdminClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
	if err != nil {
		t.Fatalf("Couldn't subscribe to builds: %v", err)
	}
	defer watch.Stop()

	for _, s := range []string{
		"/oapi/v1/namespaces/" + testutil.Namespace() + "/buildconfigs/pushbuild/webhooks/secret101/github",
		"/oapi/v1/namespaces/" + testutil.Namespace() + "/buildconfigs/pushbuild/webhooks/secret100/github",
		"/oapi/v1/namespaces/" + testutil.Namespace() + "/buildconfigs/pushbuild/webhooks/secret102/github",
	} {
		// trigger build event sending push notification
		postFile(clusterAdminClient.RESTClient.Client, "push", "pushevent.json", clusterAdminClientConfig.Host+s, http.StatusOK, t)

		event := <-watch.ResultChan()
		actual := event.Object.(*buildapi.Build)

		// FIXME: build creation is fast, and in some situations we miss the
		// BuildPhaseNew here. Note that this is not a bug; in the future we should
		// move this to a goroutine that captures all events.
		if actual.Status.Phase != buildapi.BuildPhaseNew && actual.Status.Phase != buildapi.BuildPhasePending {
			t.Errorf("Expected %s or %s, got %s", buildapi.BuildPhaseNew, buildapi.BuildPhasePending, actual.Status.Phase)
		}
		if actual.Spec.Strategy.DockerStrategy.From.Name != "originalImage" {
			t.Errorf("Expected %s, got %s", "originalImage", actual.Spec.Strategy.DockerStrategy.From.Name)
		}
	}
}
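// postFile and mockBuildConfigImageParms are helpers defined elsewhere in the
// test package and are not shown in this excerpt. The following is a minimal
// sketch of postFile, assuming it POSTs the named JSON fixture as a GitHub
// push event to the webhook URL and verifies the response status; the fixture
// directory and header names are assumptions for illustration (uses "bytes",
// "io/ioutil", "net/http", and "testing").
func postFile(client *http.Client, event, filename, url string, expectedStatus int, t *testing.T) {
	data, err := ioutil.ReadFile("testdata/" + filename)
	if err != nil {
		t.Fatalf("Failed to open %s: %v", filename, err)
	}
	req, err := http.NewRequest("POST", url, bytes.NewReader(data))
	if err != nil {
		t.Fatalf("Error creating POST request: %v", err)
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("X-Github-Event", event)
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("Failed posting webhook: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != expectedStatus {
		t.Errorf("Expected status %d, got %d posting to %s", expectedStatus, resp.StatusCode, url)
	}
}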
func TestPodUpdateSCCEnforcement(t *testing.T) {
	testutil.RequireEtcd(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	projectName := "hammer-project"
	if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, projectName, "harold"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, haroldKubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "harold")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClient, projectName, []string{"default"}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// cluster-admin can create privileged pods, but harold cannot. This means that harold should not be able
	// to update the privileged pods either, even if he lies about their privileged nature.
	privilegedPod := &kapi.Pod{
		ObjectMeta: kapi.ObjectMeta{Name: "unsafe"},
		Spec: kapi.PodSpec{
			Containers: []kapi.Container{
				{Name: "first", Image: "something-innocuous"},
			},
			SecurityContext: &kapi.PodSecurityContext{
				HostPID: true,
			},
		},
	}

	if _, err := haroldKubeClient.Pods(projectName).Create(privilegedPod); !kapierror.IsForbidden(err) {
		t.Fatalf("missing forbidden: %v", err)
	}

	actualPod, err := clusterAdminKubeClient.Pods(projectName).Create(privilegedPod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	actualPod.Spec.Containers[0].Image = "something-nefarious"
	if _, err := haroldKubeClient.Pods(projectName).Update(actualPod); !kapierror.IsForbidden(err) {
		t.Fatalf("missing forbidden: %v", err)
	}

	// try to lie about the privileged nature
	actualPod.Spec.SecurityContext.HostPID = false
	if _, err := haroldKubeClient.Pods(projectName).Update(actualPod); err == nil {
		t.Fatalf("missing error: %v", err)
	}
}
func TestSAAsOAuthClient(t *testing.T) {
	testutil.RequireEtcd(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	authorizationCodes := make(chan string, 1)
	authorizationErrors := make(chan string, 1)
	oauthServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		t.Logf("fake pod server got %v", req.URL)
		if code := req.URL.Query().Get("code"); len(code) > 0 {
			authorizationCodes <- code
		}
		if err := req.URL.Query().Get("error"); len(err) > 0 {
			authorizationErrors <- err
		}
	}))
	defer oauthServer.Close()

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	projectName := "hammer-project"
	if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, projectName, "harold"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClient, projectName, []string{"default"}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// get the SA ready with redirect URIs and secret annotations
	var defaultSA *kapi.ServiceAccount

	// retry this a couple times; we seem to be flaking on update conflicts and missing secrets altogether
	err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		defaultSA, err = clusterAdminKubeClient.ServiceAccounts(projectName).Get("default")
		if err != nil {
			return err
		}
		if defaultSA.Annotations == nil {
			defaultSA.Annotations = map[string]string{}
		}
		defaultSA.Annotations[saoauth.OAuthRedirectURISecretAnnotationPrefix+"one"] = oauthServer.URL
		defaultSA.Annotations[saoauth.OAuthWantChallengesAnnotationPrefix] = "true"
		defaultSA, err = clusterAdminKubeClient.ServiceAccounts(projectName).Update(defaultSA)
		return err
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var oauthSecret *kapi.Secret
	// retry this a couple times; we seem to be flaking on update conflicts and missing secrets altogether
	err = wait.PollImmediate(30*time.Millisecond, 10*time.Second, func() (done bool, err error) {
		allSecrets, err := clusterAdminKubeClient.Secrets(projectName).List(kapi.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range allSecrets.Items {
			secret := allSecrets.Items[i]
			if serviceaccount.IsServiceAccountToken(&secret, defaultSA) {
				oauthSecret = &secret
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	oauthClientConfig := &osincli.ClientConfig{
		ClientId:                 serviceaccount.MakeUsername(defaultSA.Namespace, defaultSA.Name),
		ClientSecret:             string(oauthSecret.Data[kapi.ServiceAccountTokenKey]),
		AuthorizeUrl:             clusterAdminClientConfig.Host + "/oauth/authorize",
		TokenUrl:                 clusterAdminClientConfig.Host + "/oauth/token",
		RedirectUrl:              oauthServer.URL,
		Scope:                    scope.Join([]string{"user:info", "role:edit:" + projectName}),
		SendClientSecretInParams: true,
	}
	runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, authorizationCodes, authorizationErrors, true, true)
	clusterAdminClient.OAuthClientAuthorizations().Delete("harold:" + oauthClientConfig.ClientId)

	oauthClientConfig = &osincli.ClientConfig{
		ClientId:                 serviceaccount.MakeUsername(defaultSA.Namespace, defaultSA.Name),
		ClientSecret:             string(oauthSecret.Data[kapi.ServiceAccountTokenKey]),
		AuthorizeUrl:             clusterAdminClientConfig.Host + "/oauth/authorize",
		TokenUrl:                 clusterAdminClientConfig.Host + "/oauth/token",
		RedirectUrl:              oauthServer.URL,
		Scope:                    scope.Join([]string{"user:info", "role:edit:other-ns"}),
		SendClientSecretInParams: true,
	}
	runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, authorizationCodes, authorizationErrors, false, false)
	clusterAdminClient.OAuthClientAuthorizations().Delete("harold:" + oauthClientConfig.ClientId)

	oauthClientConfig = &osincli.ClientConfig{
		ClientId:                 serviceaccount.MakeUsername(defaultSA.Namespace, defaultSA.Name),
		ClientSecret:             string(oauthSecret.Data[kapi.ServiceAccountTokenKey]),
		AuthorizeUrl:             clusterAdminClientConfig.Host + "/oauth/authorize",
		TokenUrl:                 clusterAdminClientConfig.Host + "/oauth/token",
		RedirectUrl:              oauthServer.URL,
		Scope:                    scope.Join([]string{"user:info"}),
		SendClientSecretInParams: true,
	}
	runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, authorizationCodes, authorizationErrors, true, false)
	clusterAdminClient.OAuthClientAuthorizations().Delete("harold:" + oauthClientConfig.ClientId)
}
func TestSAAsOAuthClient(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	authorizationCodes := make(chan string, 1)
	authorizationErrors := make(chan string, 1)
	oauthServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		t.Logf("fake pod server got %v", req.URL)
		if code := req.URL.Query().Get("code"); len(code) > 0 {
			authorizationCodes <- code
		}
		if err := req.URL.Query().Get("error"); len(err) > 0 {
			authorizationErrors <- err
		}
	}))
	defer oauthServer.Close()
	redirectURL := oauthServer.URL + "/oauthcallback"

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	projectName := "hammer-project"
	if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, projectName, "harold"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClient, projectName, []string{"default"}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	promptingClient, err := clusterAdminClient.OAuthClients().Create(&oauthapi.OAuthClient{
		ObjectMeta:            kapi.ObjectMeta{Name: "prompting-client"},
		Secret:                "prompting-client-secret",
		RedirectURIs:          []string{redirectURL},
		GrantMethod:           oauthapi.GrantHandlerPrompt,
		RespondWithChallenges: true,
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// get the SA ready with redirect URIs and secret annotations
	var defaultSA *kapi.ServiceAccount

	// retry this a couple times; we seem to be flaking on update conflicts and missing secrets altogether
	err = kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
		defaultSA, err = clusterAdminKubeClient.ServiceAccounts(projectName).Get("default")
		if err != nil {
			return err
		}
		if defaultSA.Annotations == nil {
			defaultSA.Annotations = map[string]string{}
		}
		defaultSA.Annotations[saoauth.OAuthRedirectURISecretAnnotationPrefix+"one"] = redirectURL
		defaultSA.Annotations[saoauth.OAuthWantChallengesAnnotationPrefix] = "true"
		defaultSA, err = clusterAdminKubeClient.ServiceAccounts(projectName).Update(defaultSA)
		return err
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var oauthSecret *kapi.Secret
	// retry this a couple times; we seem to be flaking on update conflicts and missing secrets altogether
	err = wait.PollImmediate(30*time.Millisecond, 10*time.Second, func() (done bool, err error) {
		allSecrets, err := clusterAdminKubeClient.Secrets(projectName).List(kapi.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range allSecrets.Items {
			secret := allSecrets.Items[i]
			if serviceaccount.IsServiceAccountToken(&secret, defaultSA) {
				oauthSecret = &secret
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Test with a normal OAuth client
	{
		oauthClientConfig := &osincli.ClientConfig{
			ClientId:                 promptingClient.Name,
			ClientSecret:             promptingClient.Secret,
			AuthorizeUrl:             clusterAdminClientConfig.Host + "/oauth/authorize",
			TokenUrl:                 clusterAdminClientConfig.Host + "/oauth/token",
			RedirectUrl:              redirectURL,
			SendClientSecretInParams: true,
		}

		t.Log("Testing unrestricted scope")
		oauthClientConfig.Scope = ""
		// approval steps are needed for unscoped access
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, true, true, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauth/approve",
			"form",
			"POST /oauth/approve",
			"redirect to /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:user:full",
		})
		// verify the persisted client authorization looks like we expect
		if clientAuth, err := clusterAdminClient.OAuthClientAuthorizations().Get("harold:" + oauthClientConfig.ClientId); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		} else if !reflect.DeepEqual(clientAuth.Scopes, []string{"user:full"}) {
			t.Fatalf("Unexpected scopes: %v", clientAuth.Scopes)
		} else {
			// update the authorization to not contain any approved scopes
			clientAuth.Scopes = nil
			if _, err := clusterAdminClient.OAuthClientAuthorizations().Update(clientAuth); err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
		}
		// approval steps are needed again for unscoped access
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, true, true, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauth/approve",
			"form",
			"POST /oauth/approve",
			"redirect to /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:user:full",
		})
		// with the authorization stored, approval steps are skipped
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, true, true, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:user:full",
		})

		// Approval step is needed again
		t.Log("Testing restricted scope")
		oauthClientConfig.Scope = "user:info user:check-access"
		// filter to disapprove of granting the user:check-access scope
		deniedScope := false
		inputFilter := func(inputType, name, value string) bool {
			if inputType == "checkbox" && name == "scope" && value == "user:check-access" {
				deniedScope = true
				return false
			}
			return true
		}
		// our token only gets the approved scope
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, inputFilter, authorizationCodes, authorizationErrors, true, false, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauth/approve",
			"form",
			"POST /oauth/approve",
			"redirect to /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:user:info",
		})
		if !deniedScope {
			t.Errorf("Expected form filter to deny the user:check-access scope")
		}
		// second time, we approve all, and our token gets all requested scopes
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, true, false, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauth/approve",
			"form",
			"POST /oauth/approve",
			"redirect to /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:" + oauthClientConfig.Scope,
		})
		// third time, the approval step is not needed, and the token gets all requested scopes
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, true, false, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:" + oauthClientConfig.Scope,
		})

		// Now request an unscoped token again; no approval should be needed
		t.Log("Testing unrestricted scope")
		oauthClientConfig.Scope = ""
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, true, true, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:user:full",
		})

		clusterAdminClient.OAuthClientAuthorizations().Delete("harold:" + oauthClientConfig.ClientId)
	}

	{
		oauthClientConfig := &osincli.ClientConfig{
			ClientId:                 serviceaccount.MakeUsername(defaultSA.Namespace, defaultSA.Name),
			ClientSecret:             string(oauthSecret.Data[kapi.ServiceAccountTokenKey]),
			AuthorizeUrl:             clusterAdminClientConfig.Host + "/oauth/authorize",
			TokenUrl:                 clusterAdminClientConfig.Host + "/oauth/token",
			RedirectUrl:              redirectURL,
			Scope:                    scope.Join([]string{"user:info", "role:edit:" + projectName}),
			SendClientSecretInParams: true,
		}
		t.Log("Testing allowed scopes")
		// First time, the approval steps are needed
		// Second time, the approval steps are skipped
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, true, true, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauth/approve",
			"form",
			"POST /oauth/approve",
			"redirect to /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:" + oauthClientConfig.Scope,
		})
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, true, true, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:" + oauthClientConfig.Scope,
		})
		clusterAdminClient.OAuthClientAuthorizations().Delete("harold:" + oauthClientConfig.ClientId)
	}

	{
		oauthClientConfig := &osincli.ClientConfig{
			ClientId:                 serviceaccount.MakeUsername(defaultSA.Namespace, defaultSA.Name),
			ClientSecret:             string(oauthSecret.Data[kapi.ServiceAccountTokenKey]),
			AuthorizeUrl:             clusterAdminClientConfig.Host + "/oauth/authorize",
			TokenUrl:                 clusterAdminClientConfig.Host + "/oauth/token",
			RedirectUrl:              redirectURL,
			Scope:                    scope.Join([]string{"user:info", "role:edit:other-ns"}),
			SendClientSecretInParams: true,
		}
		t.Log("Testing disallowed scopes")
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, false, false, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauthcallback",
			"error:access_denied",
		})
		clusterAdminClient.OAuthClientAuthorizations().Delete("harold:" + oauthClientConfig.ClientId)
	}

	{
		t.Log("Testing invalid scopes")
		oauthClientConfig := &osincli.ClientConfig{
			ClientId:                 serviceaccount.MakeUsername(defaultSA.Namespace, defaultSA.Name),
			ClientSecret:             string(oauthSecret.Data[kapi.ServiceAccountTokenKey]),
			AuthorizeUrl:             clusterAdminClientConfig.Host + "/oauth/authorize",
			TokenUrl:                 clusterAdminClientConfig.Host + "/oauth/token",
			RedirectUrl:              redirectURL,
			Scope:                    scope.Join([]string{"unknown-scope"}),
			SendClientSecretInParams: true,
		}
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, false, false, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauthcallback",
			"error:invalid_scope",
		})
		clusterAdminClient.OAuthClientAuthorizations().Delete("harold:" + oauthClientConfig.ClientId)
	}

	{
		t.Log("Testing allowed scopes with failed API call")
		oauthClientConfig := &osincli.ClientConfig{
			ClientId:                 serviceaccount.MakeUsername(defaultSA.Namespace, defaultSA.Name),
			ClientSecret:             string(oauthSecret.Data[kapi.ServiceAccountTokenKey]),
			AuthorizeUrl:             clusterAdminClientConfig.Host + "/oauth/authorize",
			TokenUrl:                 clusterAdminClientConfig.Host + "/oauth/token",
			RedirectUrl:              redirectURL,
			Scope:                    scope.Join([]string{"user:info"}),
			SendClientSecretInParams: true,
		}
		// First time, the approval is needed
		// Second time, the approval is skipped
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, true, false, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauth/approve",
			"form",
			"POST /oauth/approve",
			"redirect to /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:" + oauthClientConfig.Scope,
		})
		runOAuthFlow(t, clusterAdminClientConfig, projectName, oauthClientConfig, nil, authorizationCodes, authorizationErrors, true, false, []string{
			"GET /oauth/authorize",
			"received challenge",
			"GET /oauth/authorize",
			"redirect to /oauthcallback",
			"code",
			"scope:" + oauthClientConfig.Scope,
		})
		clusterAdminClient.OAuthClientAuthorizations().Delete("harold:" + oauthClientConfig.ClientId)
	}
}