func setupRunOnceDurationTest(t *testing.T, pluginConfig *pluginapi.RunOnceDurationConfig, nsAnnotations map[string]string) kclient.Interface { testutil.RequireEtcd(t) masterConfig, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("error creating config: %v", err) } masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{ "RunOnceDuration": { Configuration: pluginConfig, }, } kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig) if err != nil { t.Fatalf("error starting server: %v", err) } kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile) if err != nil { t.Fatalf("error getting client: %v", err) } ns := &kapi.Namespace{} ns.Name = testutil.Namespace() ns.Annotations = nsAnnotations _, err = kubeClient.Namespaces().Create(ns) if err != nil { t.Fatalf("error creating namespace: %v", err) } if err := testserver.WaitForPodCreationServiceAccounts(kubeClient, testutil.Namespace()); err != nil { t.Errorf("unexpected error: %v", err) } return kubeClient }
// setupProjectRequestLimitTest starts a configured master with the
// ProjectRequestLimit admission plugin enabled and returns a cluster-admin
// kube client, an OpenShift client, and the admin REST config. Any setup
// failure aborts the test via t.Fatalf.
func setupProjectRequestLimitTest(t *testing.T, pluginConfig *requestlimit.ProjectRequestLimitConfig) (kclientset.Interface, client.Interface, *restclient.Config) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	// Install the plugin configuration on the origin admission chain.
	masterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{
		"ProjectRequestLimit": {
			Configuration: pluginConfig,
		},
	}
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	openshiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting openshift client: %v", err)
	}
	clientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client config: %v", err)
	}
	return kubeClient, openshiftClient, clientConfig
}
func TestAccessOriginWebConsole(t *testing.T) { testutil.RequireEtcd(t) masterOptions, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("unexpected error: %v", err) } if _, err = testserver.StartConfiguredMaster(masterOptions); err != nil { t.Fatalf("unexpected error: %v", err) } for endpoint, exp := range map[string]struct { statusCode int location string }{ "": {http.StatusFound, masterOptions.AssetConfig.PublicURL}, "healthz": {http.StatusOK, ""}, "login": {http.StatusOK, ""}, "oauth/token/request": {http.StatusFound, masterOptions.AssetConfig.MasterPublicURL + "/oauth/authorize"}, "console": {http.StatusMovedPermanently, "/console/"}, "console/": {http.StatusOK, ""}, "console/java": {http.StatusOK, ""}, } { url := masterOptions.AssetConfig.MasterPublicURL + "/" + endpoint tryAccessURL(t, url, exp.statusCode, exp.location) } }
// setupClusterResourceOverrideTest starts a master with the
// ClusterResourceOverride admission plugin configured, creates a test project
// owned by user "peon", waits for the default service account, and returns a
// cluster-admin kube client. Failures are reported through checkErr.
func setupClusterResourceOverrideTest(t *testing.T, pluginConfig *overrideapi.ClusterResourceOverrideConfig) kclient.Interface {
	masterConfig, err := testserver.DefaultMasterOptions()
	checkErr(t, err)
	// fill in possibly-empty config values
	if masterConfig.KubernetesMasterConfig == nil {
		masterConfig.KubernetesMasterConfig = &api.KubernetesMasterConfig{}
	}
	kubeMaster := masterConfig.KubernetesMasterConfig
	if kubeMaster.AdmissionConfig.PluginConfig == nil {
		kubeMaster.AdmissionConfig.PluginConfig = map[string]api.AdmissionPluginConfig{}
	}
	// set our config as desired
	kubeMaster.AdmissionConfig.PluginConfig[overrideapi.PluginName] = api.AdmissionPluginConfig{Configuration: pluginConfig}
	// start up a server and return useful clients to that server
	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig)
	checkErr(t, err)
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	checkErr(t, err)
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	checkErr(t, err)
	// need to create a project and return client for project admin
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	checkErr(t, err)
	_, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, testutil.Namespace(), "peon")
	checkErr(t, err)
	// Pods cannot be created until the default service account exists.
	checkErr(t, testserver.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}))
	return clusterAdminKubeClient
}
func setupProjectRequestLimitTest(t *testing.T, pluginConfig *requestlimit.ProjectRequestLimitConfig) (kclient.Interface, client.Interface, *kclient.Config) { masterConfig, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("error creating config: %v", err) } masterConfig.AdmissionConfig.PluginOrderOverride = []string{"OriginNamespaceLifecycle", "BuildByStrategy", "ProjectRequestLimit"} masterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{ "ProjectRequestLimit": { Configuration: runtime.EmbeddedObject{ Object: pluginConfig, }, }, } kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig) if err != nil { t.Fatalf("error starting server: %v", err) } kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile) if err != nil { t.Fatalf("error getting client: %v", err) } openshiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile) if err != nil { t.Fatalf("error getting openshift client: %v", err) } clientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile) if err != nil { t.Fatalf("error getting client config: %v", err) } return kubeClient, openshiftClient, clientConfig }
// setupRunOnceDurationTest starts a master with the RunOnceDuration admission
// plugin prepended to the kube admission plugin order and configured, creates
// the test namespace with the given annotations, waits for the default
// service account, and returns a cluster-admin kube client.
func setupRunOnceDurationTest(t *testing.T, pluginConfig *pluginapi.RunOnceDurationConfig, nsAnnotations map[string]string) kclient.Interface {
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	// Prepend our plugin so it runs before the standard kube plugins.
	plugins := append([]string{"RunOnceDuration"}, kubemaster.AdmissionPlugins...)
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride = plugins
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{
		"RunOnceDuration": {
			Configuration: runtime.EmbeddedObject{
				Object: pluginConfig,
			},
		},
	}
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	// Create the namespace the test will run in, carrying any
	// caller-supplied annotations.
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	ns.Annotations = nsAnnotations
	_, err = kubeClient.Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(kubeClient, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	return kubeClient
}
// setupUserPodNodeConstraintsTest starts a master with the PodNodeConstraints
// admission plugin configured on both the origin and kube admission chains,
// creates the test namespace, grants the named user the admin role there,
// and returns that user's OpenShift and kube clients.
func setupUserPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig, user string) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	// The same plugin config is installed on both admission chains.
	cfg := map[string]configapi.AdmissionPluginConfig{
		"PodNodeConstraints": {
			Configuration: pluginConfig,
		},
	}
	masterConfig.AdmissionConfig.PluginConfig = cfg
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = cfg
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	userClient, userkubeClientset, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, user)
	if err != nil {
		t.Fatalf("error getting user/kube client: %v", err)
	}
	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting kube client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(kubeClientset, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Grant the user the admin role in the test namespace so the returned
	// clients can create pods there.
	addUser := &policy.RoleModificationOptions{
		RoleNamespace:       ns.Name,
		RoleName:            bootstrappolicy.AdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(clusterAdminClient),
		Users:               []string{user},
	}
	if err := addUser.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	return userClient, userkubeClientset
}
func TestAlwaysPullImagesOn(t *testing.T) { testutil.RequireEtcd(t) defer testutil.DumpEtcdOnFailure(t) masterConfig, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("error creating config: %v", err) } masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{ "AlwaysPullImages": { Configuration: &configapi.DefaultAdmissionConfig{}, }, } kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig) if err != nil { t.Fatalf("error starting server: %v", err) } kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile) if err != nil { t.Fatalf("error getting client: %v", err) } ns := &kapi.Namespace{} ns.Name = testutil.Namespace() _, err = kubeClientset.Core().Namespaces().Create(ns) if err != nil { t.Fatalf("error creating namespace: %v", err) } if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil { t.Fatalf("error getting client config: %v", err) } testPod := &kapi.Pod{} testPod.GenerateName = "test" testPod.Spec.Containers = []kapi.Container{ { Name: "container", Image: "openshift/origin-pod:notlatest", ImagePullPolicy: kapi.PullNever, }, } actualPod, err := kubeClientset.Core().Pods(testutil.Namespace()).Create(testPod) if err != nil { t.Fatalf("unexpected error: %v", err) } if actualPod.Spec.Containers[0].ImagePullPolicy != kapi.PullAlways { t.Errorf("expected %v, got %v", kapi.PullAlways, actualPod.Spec.Containers[0].ImagePullPolicy) } }
func TestOAuthDisabled(t *testing.T) { testutil.RequireEtcd(t) defer testutil.DumpEtcdOnFailure(t) // Build master config masterOptions, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("unexpected error: %v", err) } // Disable OAuth masterOptions.OAuthConfig = nil // Start server clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterOptions) if err != nil { t.Fatalf("unexpected error: %v", err) } client, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig) if err != nil { t.Fatalf("unexpected error: %v", err) } clientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig) if err != nil { t.Fatalf("unexpected error: %v", err) } // Make sure cert auth still works namespaces, err := client.Namespaces().List(kapi.ListOptions{}) if err != nil { t.Fatalf("Unexpected error %v", err) } if len(namespaces.Items) == 0 { t.Errorf("Expected namespaces, got none") } // Use the server and CA info anonConfig := restclient.Config{} anonConfig.Host = clientConfig.Host anonConfig.CAFile = clientConfig.CAFile anonConfig.CAData = clientConfig.CAData // Make sure we can't authenticate using OAuth if _, err := tokencmd.RequestToken(&anonConfig, nil, "username", "password"); err == nil { t.Error("Expected error, got none") } }
func setupAdmissionTest(t *testing.T, setupConfig func(*configapi.MasterConfig)) (*kclient.Client, *client.Client) { masterConfig, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("error creating config: %v", err) } setupConfig(masterConfig) kubeConfigFile, err := testserver.StartConfiguredMasterAPI(masterConfig) if err != nil { t.Fatalf("error starting server: %v", err) } kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile) if err != nil { t.Fatalf("error getting client: %v", err) } openshiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile) if err != nil { t.Fatalf("error getting openshift client: %v", err) } return kubeClient, openshiftClient }
// setupBuildPodAdmissionTest starts a master with the given admission plugin
// configuration installed on the origin admission chain, creates the test
// namespace, waits for the builder and default service accounts, and returns
// cluster-admin OpenShift and kube clients.
func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]configapi.AdmissionPluginConfig) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	master, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("%v", err)
	}
	master.AdmissionConfig.PluginConfig = pluginConfig
	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(master)
	if err != nil {
		t.Fatalf("%v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("%v", err)
	}
	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("%v", err)
	}
	_, err = clusterAdminKubeClientset.Namespaces().Create(&kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
	})
	if err != nil {
		t.Fatalf("%v", err)
	}
	// Builds need both the builder and default service accounts to exist.
	err = testserver.WaitForServiceAccounts(
		clusterAdminKubeClientset,
		testutil.Namespace(),
		[]string{
			bootstrappolicy.BuilderServiceAccountName,
			bootstrappolicy.DefaultServiceAccountName,
		})
	if err != nil {
		t.Fatalf("%v", err)
	}
	return clusterAdminClient, clusterAdminKubeClientset
}
func setupAuditTest(t *testing.T) (*kclient.Client, *client.Client) { testutil.RequireEtcd(t) masterConfig, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("error creating config: %v", err) } masterConfig.AuditConfig.Enabled = true kubeConfigFile, err := testserver.StartConfiguredMasterAPI(masterConfig) if err != nil { t.Fatalf("error starting server: %v", err) } kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile) if err != nil { t.Fatalf("error getting client: %v", err) } openshiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile) if err != nil { t.Fatalf("error getting openshift client: %v", err) } return kubeClient, openshiftClient }
func TestAccessDisabledWebConsole(t *testing.T) { testutil.RequireEtcd(t) defer testutil.DumpEtcdOnFailure(t) masterOptions, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("unexpected error: %v", err) } masterOptions.DisabledFeatures.Add(configapi.FeatureWebConsole) if _, err := testserver.StartConfiguredMaster(masterOptions); err != nil { t.Fatalf("unexpected error: %v", err) } resp := tryAccessURL(t, masterOptions.AssetConfig.MasterPublicURL+"/", http.StatusOK, "", nil) body, err := ioutil.ReadAll(resp.Body) if err != nil { t.Errorf("failed to read reposponse's body: %v", err) } else { var value interface{} if err = json.Unmarshal(body, &value); err != nil { t.Errorf("expected json body which couldn't be parsed: %v, got: %s", err, body) } } for endpoint, exp := range map[string]struct { statusCode int location string }{ "healthz": {http.StatusOK, ""}, "login": {http.StatusOK, ""}, "oauth/token/request": {http.StatusFound, masterOptions.AssetConfig.MasterPublicURL + "/oauth/authorize"}, "console": {http.StatusForbidden, ""}, "console/": {http.StatusForbidden, ""}, "console/java": {http.StatusForbidden, ""}, } { url := masterOptions.AssetConfig.MasterPublicURL + "/" + endpoint tryAccessURL(t, url, exp.statusCode, exp.location, nil) } }
// setupClusterAdminPodNodeConstraintsTest starts a master with the
// PodNodeConstraints admission plugin configured on both the origin and kube
// admission chains, creates the test namespace, and returns cluster-admin
// OpenShift and kube clients.
func setupClusterAdminPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	// The same plugin config is installed on both admission chains.
	cfg := map[string]configapi.AdmissionPluginConfig{
		"PodNodeConstraints": {
			Configuration: pluginConfig,
		},
	}
	masterConfig.AdmissionConfig.PluginConfig = cfg
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = cfg
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	openShiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	return openShiftClient, kubeClientset
}
func TestAccessStandaloneOriginWebConsole(t *testing.T) { testutil.RequireEtcd(t) defer testutil.DumpEtcdOnFailure(t) masterOptions, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("unexpected error: %v", err) } addr, err := testserver.FindAvailableBindAddress(13000, 13999) if err != nil { t.Fatalf("unexpected error: %v", err) } masterOptions.AssetConfig.ServingInfo.BindAddress = addr assetBaseURL := "https://" + addr masterOptions.AssetConfig.PublicURL = assetBaseURL + "/console/" masterOptions.OAuthConfig.AssetPublicURL = assetBaseURL + "/console/" if _, err = testserver.StartConfiguredMaster(masterOptions); err != nil { t.Fatalf("unexpected error: %v", err) } for endpoint, exp := range map[string]struct { statusCode int location string }{ "": {http.StatusFound, "/console/"}, "blarg": {http.StatusFound, "/console/"}, "console": {http.StatusMovedPermanently, "/console/"}, "console/": {http.StatusOK, ""}, "console/java": {http.StatusOK, ""}, } { url := assetBaseURL + "/" + endpoint tryAccessURL(t, url, exp.statusCode, exp.location, nil) } }
// TestEndpointAdmission verifies the RestrictedEndpointsAdmission plugin:
// cluster admins and the endpoint controller service account may create
// endpoints with cluster/service-network IPs, while project admins may only
// use external addresses (though they can still update non-IP fields).
func TestEndpointAdmission(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{
		"RestrictedEndpointsAdmission": {
			Configuration: &configapi.DefaultAdmissionConfig{},
		},
	}
	// The plugin restricts endpoints that fall inside these ranges.
	masterConfig.NetworkConfig.ClusterNetworkCIDR = clusterNetworkCIDR
	masterConfig.NetworkConfig.ServiceNetworkCIDR = serviceNetworkCIDR
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting kube client: %v", err)
	}
	clusterAdminOSClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	clientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client config: %v", err)
	}

	// Cluster admin: allowed for every address class.
	testOne(t, clusterAdminKubeClient, "default", "cluster", true)
	testOne(t, clusterAdminKubeClient, "default", "service", true)
	testOne(t, clusterAdminKubeClient, "default", "external", true)

	// Endpoint controller service account: also allowed for every class.
	_, serviceAccountClient, _, err := testutil.GetClientForServiceAccount(clusterAdminKubeClient, *clientConfig, bootstrappolicy.DefaultOpenShiftInfraNamespace, bootstrappolicy.InfraEndpointControllerServiceAccountName)
	if err != nil {
		t.Fatalf("error getting endpoint controller service account: %v", err)
	}
	testOne(t, serviceAccountClient, "default", "cluster", true)
	testOne(t, serviceAccountClient, "default", "service", true)
	testOne(t, serviceAccountClient, "default", "external", true)

	// Project admin: only external addresses are allowed.
	_, err = testserver.CreateNewProject(clusterAdminOSClient, *clientConfig, "myproject", "myadmin")
	if err != nil {
		t.Fatalf("error creating project: %v", err)
	}
	_, projectAdminClient, _, err := testutil.GetClientForUser(*clientConfig, "myadmin")
	if err != nil {
		t.Fatalf("error getting project admin client: %v", err)
	}
	testOne(t, projectAdminClient, "myproject", "cluster", false)
	testOne(t, projectAdminClient, "myproject", "service", false)
	testOne(t, projectAdminClient, "myproject", "external", true)

	// User without restricted endpoint permission can't modify IPs but can still do other modifications
	ep := testOne(t, clusterAdminKubeClient, "myproject", "cluster", true)
	ep.Annotations = map[string]string{"foo": "bar"}
	ep, err = projectAdminClient.Endpoints("myproject").Update(ep)
	if err != nil {
		t.Fatalf("unexpected error updating endpoint annotation: %v", err)
	}
	// Changing the IP to a restricted (service-network) address must be
	// rejected for the project admin.
	ep.Subsets[0].Addresses[0].IP = exampleAddresses["service"]
	ep, err = projectAdminClient.Endpoints("myproject").Update(ep)
	if err == nil {
		t.Fatalf("unexpected success modifying endpoint")
	}
}
// runStorageTest creates Jobs and HorizontalPodAutoscalers in project ns and
// checks, by reading the raw objects back out of etcd, that they were
// persisted with the expected storage API versions, and that they remain
// readable through both the batch/extensions (Jobs) and
// autoscaling/extensions (HPAs) client APIs.
func runStorageTest(t *testing.T, ns string, autoscalingVersion, batchVersion, extensionsVersion unversioned.GroupVersion) {
	etcdServer := testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	keys := etcd.NewKeysAPI(etcdServer.Client)
	// getGVKFromEtcd reads the raw stored object for name under the given
	// resource prefix and decodes just its group/version/kind.
	getGVKFromEtcd := func(prefix, name string) (*unversioned.GroupVersionKind, error) {
		key := path.Join(masterConfig.EtcdStorageConfig.KubernetesStoragePrefix, prefix, ns, name)
		resp, err := keys.Get(context.TODO(), key, nil)
		if err != nil {
			return nil, err
		}
		_, gvk, err := runtime.UnstructuredJSONScheme.Decode([]byte(resp.Node.Value), nil, nil)
		return gvk, err
	}

	// TODO: Set storage versions for API groups
	// masterConfig.EtcdStorageConfig.StorageVersions[autoscaling.GroupName] = autoscalingVersion.String()
	// masterConfig.EtcdStorageConfig.StorageVersions[batch.GroupName] = batchVersion.String()
	// masterConfig.EtcdStorageConfig.StorageVersions[extensions.GroupName] = extensionsVersion.String()

	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// create the containing project
	if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, ns, "admin"); err != nil {
		t.Fatalf("unexpected error creating the project: %v", err)
	}
	projectAdminClient, projectAdminKubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "admin")
	if err != nil {
		t.Fatalf("unexpected error getting project admin client: %v", err)
	}
	// Wait until the project admin's policy actually grants HPA access.
	if err := testutil.WaitForPolicyUpdate(projectAdminClient, ns, "get", extensions.Resource("horizontalpodautoscalers"), true); err != nil {
		t.Fatalf("unexpected error waiting for policy update: %v", err)
	}

	jobTestcases := map[string]struct {
		creator kclient.JobInterface
	}{
		"batch": {creator: projectAdminKubeClient.Batch().Jobs(ns)},
	}
	for name, testcase := range jobTestcases {
		job := batch.Job{
			ObjectMeta: kapi.ObjectMeta{Name: name + "-job"},
			Spec: batch.JobSpec{
				Template: kapi.PodTemplateSpec{
					Spec: kapi.PodSpec{
						RestartPolicy: kapi.RestartPolicyNever,
						Containers:    []kapi.Container{{Name: "containername", Image: "containerimage"}},
					},
				},
			},
		}
		// Create a Job
		if _, err := testcase.creator.Create(&job); err != nil {
			t.Fatalf("%s: unexpected error creating Job: %v", name, err)
		}
		// Ensure it is persisted correctly
		if gvk, err := getGVKFromEtcd("jobs", job.Name); err != nil {
			t.Fatalf("%s: unexpected error reading Job: %v", name, err)
		} else if *gvk != batchVersion.WithKind("Job") {
			t.Fatalf("%s: expected api version %s in etcd, got %s reading Job", name, batchVersion, gvk)
		}
		// Ensure it is accessible from both APIs
		if _, err := projectAdminKubeClient.Batch().Jobs(ns).Get(job.Name); err != nil {
			t.Errorf("%s: Error reading Job from the batch client: %#v", name, err)
		}
		if _, err := projectAdminKubeClient.Extensions().Jobs(ns).Get(job.Name); err != nil {
			t.Errorf("%s: Error reading Job from the extensions client: %#v", name, err)
		}
	}

	hpaTestcases := map[string]struct {
		creator kclient.HorizontalPodAutoscalerInterface
	}{
		"autoscaling": {creator: projectAdminKubeClient.Autoscaling().HorizontalPodAutoscalers(ns)},
		"extensions":  {creator: projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(ns)},
	}
	for name, testcase := range hpaTestcases {
		hpa := extensions.HorizontalPodAutoscaler{
			ObjectMeta: kapi.ObjectMeta{Name: name + "-hpa"},
			Spec: extensions.HorizontalPodAutoscalerSpec{
				MaxReplicas: 1,
				ScaleRef:    extensions.SubresourceReference{Kind: "ReplicationController", Name: "myrc", Subresource: "scale"},
			},
		}
		// Create an HPA
		if _, err := testcase.creator.Create(&hpa); err != nil {
			t.Fatalf("%s: unexpected error creating HPA: %v", name, err)
		}
		// Make sure it is persisted correctly
		if gvk, err := getGVKFromEtcd("horizontalpodautoscalers", hpa.Name); err != nil {
			t.Fatalf("%s: unexpected error reading HPA: %v", name, err)
		} else if *gvk != autoscalingVersion.WithKind("HorizontalPodAutoscaler") {
			t.Fatalf("%s: expected api version %s in etcd, got %s reading HPA", name, autoscalingVersion, gvk)
		}
		// Make sure it is available from both APIs
		if _, err := projectAdminKubeClient.Autoscaling().HorizontalPodAutoscalers(ns).Get(hpa.Name); err != nil {
			t.Errorf("%s: Error reading HPA.autoscaling from the autoscaling/v1 API: %#v", name, err)
		}
		if _, err := projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(ns).Get(hpa.Name); err != nil {
			t.Errorf("%s: Error reading HPA.extensions from the extensions/v1beta1 API: %#v", name, err)
		}
	}
}
func TestOAuthLDAP(t *testing.T) { var ( randomSuffix = string(kutil.NewUUID()) providerName = "myldapprovider" bindDN = "uid=admin,ou=company,ou=" + randomSuffix bindPassword = "******" + randomSuffix searchDN = "ou=company,ou=" + randomSuffix searchAttr = "myuid" + randomSuffix searchScope = "one" // must be "one","sub", or "base" searchFilter = "(myAttr=myValue)" // must be a valid LDAP filter format nameAttr1 = "missing-name-attr" nameAttr2 = "a-display-name" + randomSuffix idAttr1 = "missing-id-attr" idAttr2 = "dn" // "dn" is a special value, so don't add a random suffix to make sure we handle it correctly emailAttr1 = "missing-attr" emailAttr2 = "c-mail" + randomSuffix loginAttr1 = "missing-attr" loginAttr2 = "d-mylogin" + randomSuffix myUserUID = "myuser" myUserName = "******" myUserEmail = "*****@*****.**" myUserDN = searchAttr + "=" + myUserUID + "," + searchDN myUserPassword = "******" + randomSuffix ) expectedAttributes := [][]byte{} for _, attr := range kutil.NewStringSet(searchAttr, nameAttr1, nameAttr2, idAttr1, idAttr2, emailAttr1, emailAttr2, loginAttr1, loginAttr2).List() { expectedAttributes = append(expectedAttributes, []byte(attr)) } expectedSearchRequest := ldapserver.SearchRequest{ BaseObject: []byte(searchDN), Scope: ldapserver.SearchRequestSingleLevel, DerefAliases: 0, SizeLimit: 2, TimeLimit: 0, TypesOnly: false, Attributes: expectedAttributes, Filter: fmt.Sprintf("(&%s(%s=%s))", searchFilter, searchAttr, myUserUID), } // Start LDAP server ldapAddress, err := testserver.FindAvailableBindAddress(8389, 8400) if err != nil { t.Fatalf("could not allocate LDAP bind address: %v", err) } ldapServer := testutil.NewTestLDAPServer() ldapServer.SetPassword(bindDN, bindPassword) ldapServer.Start(ldapAddress) defer ldapServer.Stop() masterOptions, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("unexpected error: %v", err) } masterOptions.OAuthConfig.IdentityProviders[0] = configapi.IdentityProvider{ Name: providerName, 
UseAsChallenger: true, UseAsLogin: true, Provider: runtime.EmbeddedObject{ Object: &configapi.LDAPPasswordIdentityProvider{ URL: fmt.Sprintf("ldap://%s/%s?%s?%s?%s", ldapAddress, searchDN, searchAttr, searchScope, searchFilter), BindDN: bindDN, BindPassword: bindPassword, Insecure: true, CA: "", Attributes: configapi.LDAPAttributeMapping{ ID: []string{idAttr1, idAttr2}, PreferredUsername: []string{loginAttr1, loginAttr2}, Name: []string{nameAttr1, nameAttr2}, Email: []string{emailAttr1, emailAttr2}, }, }, }, } clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterOptions) if err != nil { t.Fatalf("unexpected error: %v", err) } clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig) if err != nil { t.Errorf("unexpected error: %v", err) } clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig) if err != nil { t.Errorf("unexpected error: %v", err) } // Use the server and CA info anonConfig := kclient.Config{} anonConfig.Host = clusterAdminClientConfig.Host anonConfig.CAFile = clusterAdminClientConfig.CAFile anonConfig.CAData = clusterAdminClientConfig.CAData // Make sure we can't authenticate as a missing user ldapServer.ResetRequests() if _, err := tokencmd.RequestToken(&anonConfig, nil, myUserUID, myUserPassword); err == nil { t.Error("Expected error, got none") } if len(ldapServer.BindRequests) != 1 { t.Errorf("Expected a single bind request for the search phase, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests) } if len(ldapServer.SearchRequests) != 1 { t.Errorf("Expected a single search request, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests) } // Add user ldapServer.SetPassword(myUserDN, myUserPassword) ldapServer.AddSearchResult(myUserDN, map[string]string{emailAttr2: myUserEmail, nameAttr2: myUserName, loginAttr2: myUserUID}) // Make sure we can't authenticate with a bad password ldapServer.ResetRequests() if _, err := 
tokencmd.RequestToken(&anonConfig, nil, myUserUID, "badpassword"); err == nil { t.Error("Expected error, got none") } if len(ldapServer.BindRequests) != 2 { t.Errorf("Expected a bind request for the search phase and a failed bind request for the auth phase, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests) } if len(ldapServer.SearchRequests) != 1 { t.Errorf("Expected a single search request, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests) } // Make sure we can get a token with a good password ldapServer.ResetRequests() accessToken, err := tokencmd.RequestToken(&anonConfig, nil, myUserUID, myUserPassword) if err != nil { t.Fatalf("Unexpected error: %v", err) } if len(accessToken) == 0 { t.Errorf("Expected access token, got none") } if len(ldapServer.BindRequests) != 2 { t.Errorf("Expected a bind request for the search phase and a failed bind request for the auth phase, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests) } if len(ldapServer.SearchRequests) != 1 { t.Errorf("Expected a single search request, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests) } if !reflect.DeepEqual(expectedSearchRequest.BaseObject, ldapServer.SearchRequests[0].BaseObject) { t.Errorf("Expected search base DN\n\t%#v\ngot\n\t%#v", string(expectedSearchRequest.BaseObject), string(ldapServer.SearchRequests[0].BaseObject), ) } if !reflect.DeepEqual(expectedSearchRequest.Filter, ldapServer.SearchRequests[0].Filter) { t.Errorf("Expected search filter\n\t%#v\ngot\n\t%#v", string(expectedSearchRequest.Filter), string(ldapServer.SearchRequests[0].Filter), ) } { expectedAttrs := []string{} for _, a := range expectedSearchRequest.Attributes { expectedAttrs = append(expectedAttrs, string(a)) } actualAttrs := []string{} for _, a := range ldapServer.SearchRequests[0].Attributes { actualAttrs = append(actualAttrs, string(a)) } if !reflect.DeepEqual(expectedAttrs, actualAttrs) { t.Errorf("Expected search 
attributes\n\t%#v\ngot\n\t%#v", expectedAttrs, actualAttrs) } } // Make sure we can use the token, and it represents who we expect userConfig := anonConfig userConfig.BearerToken = accessToken userClient, err := client.New(&userConfig) if err != nil { t.Fatalf("Unexpected error: %v", err) } user, err := userClient.Users().Get("~") if err != nil { t.Fatalf("Unexpected error: %v", err) } if user.Name != myUserUID { t.Fatalf("Expected %s as the user, got %v", myUserUID, user) } // Make sure the identity got created and contained the mapped attributes identity, err := clusterAdminClient.Identities().Get(fmt.Sprintf("%s:%s", providerName, myUserDN)) if err != nil { t.Fatalf("Unexpected error: %v", err) } if identity.ProviderUserName != myUserDN { t.Errorf("Expected %q, got %q", myUserDN, identity.ProviderUserName) } if v := identity.Extra[authapi.IdentityDisplayNameKey]; v != myUserName { t.Errorf("Expected %q, got %q", myUserName, v) } if v := identity.Extra[authapi.IdentityPreferredUsernameKey]; v != myUserUID { t.Errorf("Expected %q, got %q", myUserUID, v) } if v := identity.Extra[authapi.IdentityEmailKey]; v != myUserEmail { t.Errorf("Expected %q, got %q", myUserEmail, v) } }
func TestAuthProxyOnAuthorize(t *testing.T) { idp := configapi.IdentityProvider{} idp.Name = "front-proxy" idp.Provider = runtime.EmbeddedObject{&configapi.RequestHeaderIdentityProvider{Headers: []string{"X-Remote-User"}}} idp.MappingMethod = "claim" masterConfig, err := testserver.DefaultMasterOptions() checkErr(t, err) masterConfig.OAuthConfig.IdentityProviders = []configapi.IdentityProvider{idp} clusterAdminKubeConfig, err := testserver.StartConfiguredMasterAPI(masterConfig) checkErr(t, err) clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig) checkErr(t, err) clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig) checkErr(t, err) // set up a front proxy guarding the oauth server proxyHTTPHandler := NewBasicAuthChallenger("TestRegistryAndServer", validUsers, NewXRemoteUserProxyingHandler(clusterAdminClientConfig.Host)) proxyServer := httptest.NewServer(proxyHTTPHandler) defer proxyServer.Close() t.Logf("proxy server is on %v\n", proxyServer.URL) // need to prime clients so that we can get back a code. the client must be valid result := clusterAdminClient.RESTClient.Post().Resource("oAuthClients").Body(&oauthapi.OAuthClient{ObjectMeta: kapi.ObjectMeta{Name: "test"}, Secret: "secret", RedirectURIs: []string{clusterAdminClientConfig.Host}}).Do() checkErr(t, result.Error()) // our simple URL to get back a code. We want to go through the front proxy rawAuthorizeRequest := proxyServer.URL + origin.OpenShiftOAuthAPIPrefix + "/authorize?response_type=code&client_id=test" // the first request we make to the front proxy should challenge us for authentication info shouldBeAChallengeResponse, err := http.Get(rawAuthorizeRequest) if err != nil { t.Errorf("Unexpected error: %v", err) } if shouldBeAChallengeResponse.StatusCode != http.StatusUnauthorized { t.Errorf("Expected Unauthorized, but got %v", shouldBeAChallengeResponse.StatusCode) } // create an http.Client to make our next request. 
We need a custom Transport to authenticate us through our front proxy // and a custom CheckRedirect so that we can keep track of the redirect responses we're getting // OAuth requests a few redirects that we don't really care about checking, so this simpler than using a round tripper // and manually handling redirects and setting our auth information every time for the front proxy redirectedUrls := make([]url.URL, 10) httpClient := http.Client{ CheckRedirect: getRedirectMethod(t, &redirectedUrls), Transport: ktransport.NewBasicAuthRoundTripper("sanefarmer", "who?", insecureTransport()), } // make our authorize request again, but this time our transport has properly set the auth info for the front proxy req, err := http.NewRequest("GET", rawAuthorizeRequest, nil) _, err = httpClient.Do(req) if err != nil { t.Errorf("Unexpected error: %v", err) } // check the last redirect and see if we got a code foundCode := "" if len(redirectedUrls) > 0 { foundCode = redirectedUrls[len(redirectedUrls)-1].Query().Get("code") } if len(foundCode) == 0 { t.Errorf("Did not find code in any redirect: %v", redirectedUrls) } else { t.Logf("Found code %v\n", foundCode) } }
func TestCachingDiscoveryClient(t *testing.T) { testutil.RequireEtcd(t) defer testutil.DumpEtcdOnFailure(t) _, originKubeConfig, err := testserver.StartTestMasterAPI() if err != nil { t.Fatalf("unexpected error: %v", err) } originClient, err := testutil.GetClusterAdminClient(originKubeConfig) if err != nil { t.Fatalf("unexpected error: %v", err) } resourceType := "buildconfigs" originDiscoveryClient := client.NewDiscoveryClient(originClient.RESTClient) originUncachedMapper := clientcmd.NewShortcutExpander(originDiscoveryClient, nil) if !sets.NewString(originUncachedMapper.All...).Has(resourceType) { t.Errorf("expected %v, got: %v", resourceType, originUncachedMapper.All) } cacheDir, err := ioutil.TempDir("", "TestCachingDiscoveryClient") if err != nil { t.Fatalf("unexpected error: %v", err) } defer func() { if !t.Failed() { os.RemoveAll(cacheDir) } }() // this client should prime the cache originCachedDiscoveryClient := clientcmd.NewCachedDiscoveryClient(originDiscoveryClient, cacheDir, time.Duration(10*time.Minute)) originCachedMapper := clientcmd.NewShortcutExpander(originCachedDiscoveryClient, nil) if !sets.NewString(originCachedMapper.All...).Has(resourceType) { t.Errorf("expected %v, got: %v", resourceType, originCachedMapper.All) } // this client will fail if the cache fails unbackedDiscoveryClient := clientcmd.NewCachedDiscoveryClient(nil, cacheDir, time.Duration(10*time.Minute)) unbackedOriginCachedMapper := clientcmd.NewShortcutExpander(unbackedDiscoveryClient, nil) if !sets.NewString(unbackedOriginCachedMapper.All...).Has(resourceType) { t.Errorf("expected %v, got: %v", resourceType, unbackedOriginCachedMapper.All) } atomicConfig, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("unexpected error: %v", err) } atomicConfig.DisabledFeatures = configapi.AtomicDisabledFeatures atomicConfig.DNSConfig = nil atomicKubeConfig, err := testserver.StartConfiguredMasterAPI(atomicConfig) if err != nil { t.Fatalf("unexpected error: %v", err) } 
atomicClient, err := testutil.GetClusterAdminClient(atomicKubeConfig) if err != nil { t.Fatalf("unexpected error: %v", err) } atomicDiscoveryClient := client.NewDiscoveryClient(atomicClient.RESTClient) atomicUncachedMapper := clientcmd.NewShortcutExpander(atomicDiscoveryClient, nil) if sets.NewString(atomicUncachedMapper.All...).Has(resourceType) { t.Errorf("expected no %v, got: %v", resourceType, atomicUncachedMapper.All) } // this client will give different results if the cache fails conflictingDiscoveryClient := clientcmd.NewCachedDiscoveryClient(atomicDiscoveryClient, cacheDir, time.Duration(10*time.Minute)) conflictingCachedMapper := clientcmd.NewShortcutExpander(conflictingDiscoveryClient, nil) if !sets.NewString(conflictingCachedMapper.All...).Has(resourceType) { t.Errorf("expected %v, got: %v", resourceType, conflictingCachedMapper.All) } // this client should give different results as result of a live lookup expiredDiscoveryClient := clientcmd.NewCachedDiscoveryClient(atomicDiscoveryClient, cacheDir, time.Duration(-1*time.Second)) expiredAtomicCachedMapper := clientcmd.NewShortcutExpander(expiredDiscoveryClient, nil) if sets.NewString(expiredAtomicCachedMapper.All...).Has(resourceType) { t.Errorf("expected no %v, got: %v", resourceType, expiredAtomicCachedMapper.All) } }
// TestExtensionsAPIDisabled verifies that when the extensions, autoscaling,
// and batch API groups are all disabled in the master config, list and create
// requests against those resources return NotFound rather than being served.
func TestExtensionsAPIDisabled(t *testing.T) {
	const projName = "ext-disabled-proj"

	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Disable all extensions API versions
	masterConfig.KubernetesMasterConfig.DisabledAPIGroupVersions = map[string][]string{"extensions": {"*"}, "autoscaling": {"*"}, "batch": {"*"}}

	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// create the containing project
	if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, projName, "admin"); err != nil {
		t.Fatalf("unexpected error creating the project: %v", err)
	}
	projectAdminClient, projectAdminKubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "admin")
	if err != nil {
		t.Fatalf("unexpected error getting project admin client: %v", err)
	}
	// policy propagation is asynchronous; wait until the project admin can "get" HPAs
	if err := testutil.WaitForPolicyUpdate(projectAdminClient, projName, "get", expapi.Resource("horizontalpodautoscalers"), true); err != nil {
		t.Fatalf("unexpected error waiting for policy update: %v", err)
	}

	// make sure extensions API objects cannot be listed or created
	if _, err := projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(projName).List(kapi.ListOptions{}); !errors.IsNotFound(err) {
		t.Fatalf("expected NotFound error listing HPA, got %v", err)
	}
	if _, err := projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(projName).Create(&expapi.HorizontalPodAutoscaler{}); !errors.IsNotFound(err) {
		t.Fatalf("expected NotFound error creating HPA, got %v", err)
	}
	if _, err := projectAdminKubeClient.Extensions().Jobs(projName).List(kapi.ListOptions{}); !errors.IsNotFound(err) {
		t.Fatalf("expected NotFound error listing jobs, got %v", err)
	}
	if _, err := projectAdminKubeClient.Extensions().Jobs(projName).Create(&batch.Job{}); !errors.IsNotFound(err) {
		t.Fatalf("expected NotFound error creating job, got %v", err)
	}

	// Delete the containing project
	if err := testutil.DeleteAndWaitForNamespaceTermination(clusterAdminKubeClient, projName); err != nil {
		t.Fatalf("unexpected error: %#v", err)
	}
}
func TestEnforcingServiceAccount(t *testing.T) { masterConfig, err := testserver.DefaultMasterOptions() masterConfig.ServiceAccountConfig.LimitSecretReferences = false if err != nil { t.Fatalf("unexpected error: %v", err) } clusterAdminConfig, err := testserver.StartConfiguredMaster(masterConfig) if err != nil { t.Fatalf("unexpected error: %v", err) } clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminConfig) if err != nil { t.Fatalf("unexpected error: %v", err) } // Get a service account token saToken, err := waitForServiceAccountToken(clusterAdminKubeClient, api.NamespaceDefault, serviceaccountadmission.DefaultServiceAccountName, 20, time.Second) if err != nil { t.Errorf("unexpected error: %v", err) } if len(saToken) == 0 { t.Errorf("token was not created") } pod := &api.Pod{} pod.Name = "foo" pod.Namespace = api.NamespaceDefault pod.Spec.ServiceAccountName = serviceaccountadmission.DefaultServiceAccountName container := api.Container{} container.Name = "foo" container.Image = "openshift/hello-openshift" pod.Spec.Containers = []api.Container{container} secretVolume := api.Volume{} secretVolume.Name = "bar-vol" secretVolume.Secret = &api.SecretVolumeSource{} secretVolume.Secret.SecretName = "bar" pod.Spec.Volumes = []api.Volume{secretVolume} err = wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) { if _, err := clusterAdminKubeClient.Pods(api.NamespaceDefault).Create(pod); err != nil { // The SA admission controller cache seems to take forever to update. 
This check comes after the limit check, so until we get it sorted out // check if we're getting this particular error if strings.Contains(err.Error(), "no API token found for service account") { return true, nil } t.Log(err) return false, nil } return true, nil }) if err != nil { t.Errorf("unexpected error: %v", err) } clusterAdminKubeClient.Pods(api.NamespaceDefault).Delete(pod.Name, nil) sa, err := clusterAdminKubeClient.ServiceAccounts(api.NamespaceDefault).Get(bootstrappolicy.DeployerServiceAccountName) if err != nil { t.Fatalf("unexpected error: %v", err) } if sa.Annotations == nil { sa.Annotations = map[string]string{} } sa.Annotations[serviceaccountadmission.EnforceMountableSecretsAnnotation] = "true" time.Sleep(5) _, err = clusterAdminKubeClient.ServiceAccounts(api.NamespaceDefault).Update(sa) if err != nil { t.Fatalf("unexpected error: %v", err) } expectedMessage := "is not allowed because service account deployer does not reference that secret" pod.Spec.ServiceAccountName = bootstrappolicy.DeployerServiceAccountName err = wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) { if _, err := clusterAdminKubeClient.Pods(api.NamespaceDefault).Create(pod); err == nil || !strings.Contains(err.Error(), expectedMessage) { clusterAdminKubeClient.Pods(api.NamespaceDefault).Delete(pod.Name, nil) return false, nil } return true, nil }) if err != nil { t.Errorf("unexpected error: %v", err) } }
// TestServiceAccountAuthorization verifies that a service account token minted
// by one master ("cluster1") authenticates and authorizes correctly both
// against cluster1 directly and against a second master ("cluster2") that uses
// cluster1 as its external kubernetes, exercising both the etcd-based and the
// client-based token getters.
func TestServiceAccountAuthorization(t *testing.T) {
	saNamespace := api.NamespaceDefault
	saName := serviceaccountadmission.DefaultServiceAccountName
	saUsername := serviceaccount.MakeUsername(saNamespace, saName)

	// Start one OpenShift master as "cluster1" to play the external kube server
	cluster1MasterConfig, cluster1AdminConfigFile, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminConfig, err := testutil.GetClusterAdminClientConfig(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminKubeClient, err := testutil.GetClusterAdminKubeClient(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminOSClient, err := testutil.GetClusterAdminClient(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Get a service account token and build a client
	saToken, err := waitForServiceAccountToken(cluster1AdminKubeClient, saNamespace, saName, 20, time.Second)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(saToken) == 0 {
		t.Fatalf("token was not created")
	}
	// Bearer-token config trusting cluster1's CA; reused later for cluster2.
	cluster1SAClientConfig := kclient.Config{
		Host:        cluster1AdminConfig.Host,
		Prefix:      cluster1AdminConfig.Prefix,
		BearerToken: saToken,
		TLSClientConfig: kclient.TLSClientConfig{
			CAFile: cluster1AdminConfig.CAFile,
			CAData: cluster1AdminConfig.CAData,
		},
	}
	cluster1SAKubeClient, err := kclient.New(&cluster1SAClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the service account doesn't have access
	failNS := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-fail"}}
	if _, err := cluster1SAKubeClient.Namespaces().Create(failNS); !errors.IsForbidden(err) {
		t.Fatalf("expected forbidden error, got %v", err)
	}

	// Make the service account a cluster admin on cluster1
	addRoleOptions := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterAdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(cluster1AdminOSClient),
		Users:               []string{saUsername},
	}
	if err := addRoleOptions.AddRole(); err != nil {
		t.Fatalf("could not add role to service account")
	}

	// Give the policy cache a second to catch its breath
	time.Sleep(time.Second)

	// Make sure the service account now has access
	// This tests authentication using the etcd-based token getter
	passNS := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-pass"}}
	if _, err := cluster1SAKubeClient.Namespaces().Create(passNS); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Create a kubeconfig from the serviceaccount config
	cluster1SAKubeConfigFile, err := ioutil.TempFile(testutil.GetBaseDir(), "cluster1-service-account.kubeconfig")
	if err != nil {
		t.Fatalf("error creating tmpfile: %v", err)
	}
	defer os.Remove(cluster1SAKubeConfigFile.Name())
	if err := writeClientConfigToKubeConfig(cluster1SAClientConfig, cluster1SAKubeConfigFile.Name()); err != nil {
		t.Fatalf("error creating kubeconfig: %v", err)
	}

	// Set up cluster 2 to run against cluster 1 as external kubernetes
	cluster2MasterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Don't start kubernetes in process
	cluster2MasterConfig.KubernetesMasterConfig = nil
	// Connect to cluster1 using the service account credentials
	cluster2MasterConfig.MasterClients.ExternalKubernetesKubeConfig = cluster1SAKubeConfigFile.Name()
	// Don't start etcd
	cluster2MasterConfig.EtcdConfig = nil
	// Use the same credentials as cluster1 to connect to existing etcd
	cluster2MasterConfig.EtcdClientInfo = cluster1MasterConfig.EtcdClientInfo
	// Set a custom etcd prefix to make sure data is getting sent to cluster1
	cluster2MasterConfig.EtcdStorageConfig.KubernetesStoragePrefix += "2"
	cluster2MasterConfig.EtcdStorageConfig.OpenShiftStoragePrefix += "2"
	// Don't manage any names in cluster2
	cluster2MasterConfig.ServiceAccountConfig.ManagedNames = []string{}
	// Don't create any service account tokens in cluster2
	cluster2MasterConfig.ServiceAccountConfig.PrivateKeyFile = ""
	// Use the same public keys to validate tokens as cluster1
	cluster2MasterConfig.ServiceAccountConfig.PublicKeyFiles = cluster1MasterConfig.ServiceAccountConfig.PublicKeyFiles
	// don't try to start second dns server
	cluster2MasterConfig.DNSConfig = nil

	// Start cluster 2 (without clearing etcd) and get admin client configs and clients
	cluster2Options := testserver.TestOptions{DeleteAllEtcdKeys: false}
	cluster2AdminConfigFile, err := testserver.StartConfiguredMasterWithOptions(cluster2MasterConfig, cluster2Options)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster2AdminConfig, err := testutil.GetClusterAdminClientConfig(cluster2AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster2AdminOSClient, err := testutil.GetClusterAdminClient(cluster2AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Build a client to use the same service account token against cluster2
	cluster2SAClientConfig := cluster1SAClientConfig
	cluster2SAClientConfig.Host = cluster2AdminConfig.Host
	cluster2SAKubeClient, err := kclient.New(&cluster2SAClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the service account doesn't have access
	// A forbidden error makes sure the token was recognized, and policy denied us
	// This exercises the client-based token getter
	// It also makes sure we don't loop back through the cluster2 kube proxy which would cause an auth loop
	failNS2 := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-fail2"}}
	if _, err := cluster2SAKubeClient.Namespaces().Create(failNS2); !errors.IsForbidden(err) {
		t.Fatalf("expected forbidden error, got %v", err)
	}

	// Make the service account a cluster admin on cluster2
	addRoleOptions2 := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterAdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(cluster2AdminOSClient),
		Users:               []string{saUsername},
	}
	if err := addRoleOptions2.AddRole(); err != nil {
		t.Fatalf("could not add role to service account")
	}

	// Give the policy cache a second to catch its breath
	time.Sleep(time.Second)

	// Make sure the service account now has access to cluster2
	passNS2 := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-pass2"}}
	if _, err := cluster2SAKubeClient.Namespaces().Create(passNS2); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the ns actually got created in cluster1
	if _, err := cluster1SAKubeClient.Namespaces().Get(passNS2.Name); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
// TestExtensionsAPIDisabledAutoscaleBatchEnabled verifies that with only the
// extensions group disabled, HPA and Job objects are NotFound through the
// extensions endpoints but fully usable through the autoscaling and batch
// groups, and that namespace deletion cleans the created objects up.
func TestExtensionsAPIDisabledAutoscaleBatchEnabled(t *testing.T) {
	const projName = "ext-disabled-batch-enabled-proj"

	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Disable all extensions API versions
	// Leave autoscaling/batch APIs enabled
	masterConfig.KubernetesMasterConfig.DisabledAPIGroupVersions = map[string][]string{"extensions": {"*"}}

	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// create the containing project
	if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, projName, "admin"); err != nil {
		t.Fatalf("unexpected error creating the project: %v", err)
	}
	projectAdminClient, projectAdminKubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "admin")
	if err != nil {
		t.Fatalf("unexpected error getting project admin client: %v", err)
	}
	// policy propagation is asynchronous; wait until the project admin can "get" HPAs
	if err := testutil.WaitForPolicyUpdate(projectAdminClient, projName, "get", expapi.Resource("horizontalpodautoscalers"), true); err != nil {
		t.Fatalf("unexpected error waiting for policy update: %v", err)
	}

	// fixtures reused by both the extensions (should fail) and the
	// autoscaling/batch (should succeed) calls below
	validHPA := &expapi.HorizontalPodAutoscaler{
		ObjectMeta: kapi.ObjectMeta{Name: "myjob"},
		Spec: expapi.HorizontalPodAutoscalerSpec{
			ScaleRef:    expapi.SubresourceReference{Name: "foo", Kind: "ReplicationController", Subresource: "scale"},
			MaxReplicas: 1,
		},
	}
	validJob := &batch.Job{
		ObjectMeta: kapi.ObjectMeta{Name: "myjob"},
		Spec: batch.JobSpec{
			Template: kapi.PodTemplateSpec{
				Spec: kapi.PodSpec{
					Containers:    []kapi.Container{{Name: "mycontainer", Image: "myimage"}},
					RestartPolicy: kapi.RestartPolicyNever,
				},
			},
		},
	}

	// make sure extensions API objects cannot be listed or created
	if _, err := projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(projName).List(kapi.ListOptions{}); !errors.IsNotFound(err) {
		t.Fatalf("expected NotFound error listing HPA, got %v", err)
	}
	if _, err := projectAdminKubeClient.Extensions().HorizontalPodAutoscalers(projName).Create(validHPA); !errors.IsNotFound(err) {
		t.Fatalf("expected NotFound error creating HPA, got %v", err)
	}
	if _, err := projectAdminKubeClient.Extensions().Jobs(projName).List(kapi.ListOptions{}); !errors.IsNotFound(err) {
		t.Fatalf("expected NotFound error listing jobs, got %v", err)
	}
	if _, err := projectAdminKubeClient.Extensions().Jobs(projName).Create(validJob); !errors.IsNotFound(err) {
		t.Fatalf("expected NotFound error creating job, got %v", err)
	}

	// make sure autoscaling and batch API objects can be listed and created
	if _, err := projectAdminKubeClient.Autoscaling().HorizontalPodAutoscalers(projName).List(kapi.ListOptions{}); err != nil {
		t.Fatalf("unexpected error: %#v", err)
	}
	if _, err := projectAdminKubeClient.Autoscaling().HorizontalPodAutoscalers(projName).Create(validHPA); err != nil {
		t.Fatalf("unexpected error: %#v", err)
	}
	if _, err := projectAdminKubeClient.Batch().Jobs(projName).List(kapi.ListOptions{}); err != nil {
		t.Fatalf("unexpected error: %#v", err)
	}
	if _, err := projectAdminKubeClient.Batch().Jobs(projName).Create(validJob); err != nil {
		t.Fatalf("unexpected error: %#v", err)
	}

	// Delete the containing project
	if err := testutil.DeleteAndWaitForNamespaceTermination(clusterAdminKubeClient, projName); err != nil {
		t.Fatalf("unexpected error: %#v", err)
	}

	// recreate the containing project
	if _, err := testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, projName, "admin"); err != nil {
		t.Fatalf("unexpected error creating the project: %v", err)
	}
	projectAdminClient, projectAdminKubeClient, _, err = testutil.GetClientForUser(*clusterAdminClientConfig, "admin")
	if err != nil {
		t.Fatalf("unexpected error getting project admin client: %v", err)
	}
	if err := testutil.WaitForPolicyUpdate(projectAdminClient, projName, "get", expapi.Resource("horizontalpodautoscalers"), true); err != nil {
		t.Fatalf("unexpected error waiting for policy update: %v", err)
	}

	// make sure the created objects got cleaned up by namespace deletion
	if hpas, err := projectAdminKubeClient.Autoscaling().HorizontalPodAutoscalers(projName).List(kapi.ListOptions{}); err != nil {
		t.Fatalf("unexpected error: %#v", err)
	} else if len(hpas.Items) > 0 {
		t.Fatalf("expected 0 HPA objects, got %#v", hpas.Items)
	}
	if jobs, err := projectAdminKubeClient.Batch().Jobs(projName).List(kapi.ListOptions{}); err != nil {
		t.Fatalf("unexpected error: %#v", err)
	} else if len(jobs.Items) > 0 {
		t.Fatalf("expected 0 Job objects, got %#v", jobs.Items)
	}
}
// TestExternalKube starts a second OpenShift master ("cluster2") configured to
// use the first master ("cluster1") as its external kubernetes, then runs
// proxy health and watch checks across the pair.
func TestExternalKube(t *testing.T) {
	// Start one OpenShift master as "cluster1" to play the external kube server
	cluster1MasterConfig, cluster1AdminConfigFile, err := testserver.StartTestMasterAPI()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminKubeClient, err := testutil.GetClusterAdminKubeClient(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Copy admin.kubeconfig with a name-change to stop from over-writing it later
	persistentCluster1AdminConfigFile := cluster1AdminConfigFile + "old"
	err = copyFile(cluster1AdminConfigFile, persistentCluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Set up cluster 2 to run against cluster 1 as external kubernetes
	cluster2MasterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Don't start kubernetes in process
	cluster2MasterConfig.KubernetesMasterConfig = nil
	// Connect to cluster1 using the service account credentials
	cluster2MasterConfig.MasterClients.ExternalKubernetesKubeConfig = persistentCluster1AdminConfigFile
	// Don't start etcd
	cluster2MasterConfig.EtcdConfig = nil
	// Use the same credentials as cluster1 to connect to existing etcd
	cluster2MasterConfig.EtcdClientInfo = cluster1MasterConfig.EtcdClientInfo
	// Set a custom etcd prefix to make sure data is getting sent to cluster1
	cluster2MasterConfig.EtcdStorageConfig.KubernetesStoragePrefix += "2"
	cluster2MasterConfig.EtcdStorageConfig.OpenShiftStoragePrefix += "2"
	// Don't manage any names in cluster2
	cluster2MasterConfig.ServiceAccountConfig.ManagedNames = []string{}
	// Don't create any service account tokens in cluster2
	cluster2MasterConfig.ServiceAccountConfig.PrivateKeyFile = ""
	// Use the same public keys to validate tokens as cluster1
	cluster2MasterConfig.ServiceAccountConfig.PublicKeyFiles = cluster1MasterConfig.ServiceAccountConfig.PublicKeyFiles
	// Don't run controllers in the second cluster
	// NOTE(review): PauseControllers=true here vs EnableControllers:true in the
	// TestOptions below looks contradictory -- confirm which setting wins in testserver.
	cluster2MasterConfig.PauseControllers = true
	// don't try to start second dns server
	cluster2MasterConfig.DNSConfig = nil

	// Start cluster 2 (without clearing etcd) and get admin client configs and clients
	cluster2Options := testserver.TestOptions{DeleteAllEtcdKeys: false, EnableControllers: true}
	cluster2AdminConfigFile, err := testserver.StartConfiguredMasterWithOptions(cluster2MasterConfig, cluster2Options)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster2AdminKubeClient, err := testutil.GetClusterAdminKubeClient(cluster2AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	healthzProxyTest(cluster2MasterConfig, t)
	watchProxyTest(cluster1AdminKubeClient, cluster2AdminKubeClient, t)
}
// TestUnprivilegedNewProjectFromTemplate verifies that an unprivileged user's
// project request is fulfilled from the configured custom template (detected
// via a marker annotation), and that project requests fail with NotFound once
// that template is deleted.
func TestUnprivilegedNewProjectFromTemplate(t *testing.T) {
	namespace := "foo"
	templateName := "bar"

	masterOptions, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// point project requests at the custom template created below
	masterOptions.ProjectConfig.ProjectRequestTemplate = namespace + "/" + templateName

	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterOptions)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Strip every credential from a copy of the admin config so the token
	// request below authenticates as the unprivileged user "valerie".
	valerieClientConfig := *clusterAdminClientConfig
	valerieClientConfig.Username = ""
	valerieClientConfig.Password = ""
	valerieClientConfig.BearerToken = ""
	valerieClientConfig.CertFile = ""
	valerieClientConfig.KeyFile = ""
	valerieClientConfig.CertData = nil
	valerieClientConfig.KeyData = nil

	accessToken, err := tokencmd.RequestToken(&valerieClientConfig, nil, "valerie", "security!")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	valerieClientConfig.BearerToken = accessToken
	valerieOpenshiftClient, err := client.New(&valerieClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// create the project that will hold the project-request template
	if _, err := clusterAdminClient.Projects().Create(&projectapi.Project{ObjectMeta: kapi.ObjectMeta{Name: namespace}}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// customize the default template with a marker annotation so we can tell
	// the resulting project came from it
	template := projectrequeststorage.DefaultTemplate()
	template.Name = templateName
	template.Namespace = namespace
	template.Objects[0].(*projectapi.Project).Annotations["extra"] = "here"
	_, err = clusterAdminClient.Templates(namespace).Create(template)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	requestProject := oc.NewProjectOptions{
		ProjectName: "new-project",
		DisplayName: "display name here",
		Description: "the special description",
		Client:      valerieOpenshiftClient,
		Out:         ioutil.Discard,
	}

	if err := requestProject.Run(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	waitForProject(t, valerieOpenshiftClient, "new-project", 5*time.Second, 10)
	project, err := valerieOpenshiftClient.Projects().Get("new-project")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// the marker annotation proves the custom template was used
	if project.Annotations["extra"] != "here" {
		t.Errorf("unexpected project %#v", project)
	}

	if err := clusterAdminClient.Templates(namespace).Delete(templateName); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	requestProject.ProjectName = "different"
	// This should fail during the template retrieve
	if err := requestProject.Run(); !kapierrors.IsNotFound(err) {
		t.Fatalf("expected a not found error, but got %v", err)
	}
}
func TestAccessOriginWebConsoleMultipleIdentityProviders(t *testing.T) { testutil.RequireEtcd(t) masterOptions, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("unexpected error: %v", err) } // Replace the default IdentityProvider with an AllowAll provider masterOptions.OAuthConfig.IdentityProviders[0] = configapi.IdentityProvider{ Name: "foo", UseAsChallenger: true, UseAsLogin: true, MappingMethod: "claim", Provider: &configapi.AllowAllPasswordIdentityProvider{}, } // Set up a second AllowAll provider masterOptions.OAuthConfig.IdentityProviders = append(masterOptions.OAuthConfig.IdentityProviders, configapi.IdentityProvider{ Name: "bar", UseAsChallenger: true, UseAsLogin: true, MappingMethod: "claim", Provider: &configapi.AllowAllPasswordIdentityProvider{}, }) // Set up a third AllowAll provider with a space in the name and some unicode characters masterOptions.OAuthConfig.IdentityProviders = append(masterOptions.OAuthConfig.IdentityProviders, configapi.IdentityProvider{ Name: "Iñtërnâtiônàlizætiøn, !@#$^&*()", UseAsChallenger: true, UseAsLogin: true, MappingMethod: "claim", Provider: &configapi.AllowAllPasswordIdentityProvider{}, }) // Launch the configured server if _, err = testserver.StartConfiguredMaster(masterOptions); err != nil { t.Fatalf("unexpected error: %v", err) } // Create a map of URLs to test type urlResults struct { statusCode int location string } urlMap := make(map[string]urlResults) linkRegexps := make([]string, 0) // Verify that the plain /login URI is unavailable when multiple IDPs are in use. urlMap["/login"] = urlResults{http.StatusForbidden, ""} // Create the common base URLs escapedPublicURL := url.QueryEscape(masterOptions.OAuthConfig.AssetPublicURL) loginSelectorBase := "/oauth/authorize?client_id=openshift-web-console&response_type=token&state=%2F&redirect_uri=" + escapedPublicURL // Iterate through each of the providers and verify that they redirect to // the appropriate login page and that the login page exists. 
// This is done in a loop so that we can add an arbitrary additional set // of providers to test. for _, value := range masterOptions.OAuthConfig.IdentityProviders { // Query-encode the idp=<provider name> parameter name and value idpQueryParam := url.Values{"idp": []string{value.Name}}.Encode() // Construct a URL that will select that IDP providerSelectionURL := loginSelectorBase + "&" + idpQueryParam // URL-path-encode the idp name to construct the login page URL loginURL := (&url.URL{Path: path.Join("/login", value.Name)}).String() // Expect the providerSelectionURL to redirect to the loginURL urlMap[providerSelectionURL] = urlResults{http.StatusFound, loginURL} // Expect the loginURL to be valid urlMap[loginURL] = urlResults{http.StatusOK, ""} // escape the query param the way the template will templateIDPParam := templateEscapeHref(t, idpQueryParam) // quote for the regex regexIDPParam := regexp.QuoteMeta(templateIDPParam) // Expect to see a link to the provider selection page URL with the idp param linkRegexps = append(linkRegexps, fmt.Sprintf(`/oauth/authorize\?(.*&)?%s(&|")`, regexIDPParam)) } // Test the loginSelectorBase for links to all of the IDPs url := masterOptions.AssetConfig.MasterPublicURL + loginSelectorBase tryAccessURL(t, url, http.StatusOK, "", linkRegexps) // Test all of these URLs for endpoint, exp := range urlMap { url := masterOptions.AssetConfig.MasterPublicURL + endpoint tryAccessURL(t, url, exp.statusCode, exp.location, nil) } }
// TestOadmPodNetwork exercises the multi-tenant SDN pod-network admin
// operations on NetNamespaces: creating projects with unique non-zero
// network IDs (VNIDs), joining one project's network to another's, making
// a network global (VNID 0), and isolating it again.
func TestOadmPodNetwork(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)

	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	// The multi-tenant plugin is what assigns per-project VNIDs; without it
	// the join/global/isolate operations below are meaningless.
	masterConfig.NetworkConfig.NetworkPluginName = sdnplugin.MultiTenantPluginName
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	osClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	clientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client config: %v", err)
	}

	// Create three projects; each should get its own NetNamespace.
	origNetns1, err := createProject(osClient, clientConfig, "one")
	if err != nil {
		t.Fatalf("could not create namespace %q: %v", "one", err)
	}
	origNetns2, err := createProject(osClient, clientConfig, "two")
	if err != nil {
		t.Fatalf("could not create namespace %q: %v", "two", err)
	}
	origNetns3, err := createProject(osClient, clientConfig, "three")
	if err != nil {
		t.Fatalf("could not create namespace %q: %v", "three", err)
	}

	// NetID 0 is the global network; freshly created projects must get
	// non-zero, mutually distinct VNIDs.
	if origNetns1.NetID == 0 || origNetns2.NetID == 0 || origNetns3.NetID == 0 {
		t.Fatalf("expected non-0 NetIDs, got %d, %d, %d", origNetns1.NetID, origNetns2.NetID, origNetns3.NetID)
	}
	if origNetns1.NetID == origNetns2.NetID || origNetns1.NetID == origNetns3.NetID || origNetns2.NetID == origNetns3.NetID {
		t.Fatalf("expected unique NetIDs, got %d, %d, %d", origNetns1.NetID, origNetns2.NetID, origNetns3.NetID)
	}

	// Join "two" to "one": "two" should take on "one"'s VNID while "one"
	// itself stays unchanged.
	newNetns2, err := updateNetNamespace(osClient, origNetns2, sdnapi.JoinPodNetwork, "one")
	if err != nil {
		t.Fatalf("error updating namespace: %v", err)
	}
	if newNetns2.NetID != origNetns1.NetID {
		t.Fatalf("expected netns2 (%d) to be joined to netns1 (%d)", newNetns2.NetID, origNetns1.NetID)
	}
	newNetns1, err := osClient.NetNamespaces().Get("one")
	if err != nil {
		t.Fatalf("error getting refetching NetNamespace: %v", err)
	}
	if newNetns1.NetID != origNetns1.NetID {
		t.Fatalf("expected netns1 (%d) to be unchanged (%d)", newNetns1.NetID, origNetns1.NetID)
	}

	// Make "one" global: its VNID becomes 0. "two" keeps the VNID it got
	// from the earlier join (it is not dragged along to global).
	newNetns1, err = updateNetNamespace(osClient, origNetns1, sdnapi.GlobalPodNetwork, "")
	if err != nil {
		t.Fatalf("error updating namespace: %v", err)
	}
	if newNetns1.NetID != 0 {
		t.Fatalf("expected netns1 (%d) to be global", newNetns1.NetID)
	}
	newNetns2, err = osClient.NetNamespaces().Get("two")
	if err != nil {
		t.Fatalf("error getting refetching NetNamespace: %v", err)
	}
	if newNetns2.NetID != origNetns1.NetID {
		t.Fatalf("expected netns2 (%d) to be unchanged (%d)", newNetns2.NetID, origNetns1.NetID)
	}

	// Isolate "one" again: it should be assigned a fresh non-zero VNID that
	// collides with neither "two"'s nor "three"'s.
	newNetns1, err = updateNetNamespace(osClient, newNetns1, sdnapi.IsolatePodNetwork, "")
	if err != nil {
		t.Fatalf("error updating namespace: %v", err)
	}
	if newNetns1.NetID == 0 {
		t.Fatalf("expected netns1 (%d) to be non-global", newNetns1.NetID)
	}
	if newNetns1.NetID == newNetns2.NetID || newNetns1.NetID == origNetns3.NetID {
		t.Fatalf("expected netns1 (%d) to be unique (not %d, %d)", newNetns1.NetID, newNetns2.NetID, origNetns3.NetID)
	}
}
func TestOAuthBasicAuthPassword(t *testing.T) { remotePrefix := "remote" expectedLogin := "******" expectedPassword := "******" expectedAuthHeader := "Basic " + base64.StdEncoding.EncodeToString([]byte(expectedLogin+":"+expectedPassword)) expectedUsername := remotePrefix + expectedLogin // Create tempfiles with certs and keys we're going to use certNames := map[string]string{} for certName, certContents := range basicAuthCerts { f, err := ioutil.TempFile("", certName) if err != nil { t.Fatalf("unexpected error: %v", err) } defer os.Remove(f.Name()) if err := ioutil.WriteFile(f.Name(), certContents, os.FileMode(0600)); err != nil { t.Fatalf("unexpected error: %v", err) } certNames[certName] = f.Name() } // Build client cert pool clientCAs, err := util.CertPoolFromFile(certNames[basicAuthRemoteCACert]) if err != nil { t.Fatalf("unexpected error: %v", err) } // Build remote handler remoteHandler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { if req.TLS == nil { w.WriteHeader(http.StatusUnauthorized) t.Fatalf("Expected TLS") } if len(req.TLS.VerifiedChains) != 1 { w.WriteHeader(http.StatusUnauthorized) t.Fatalf("Expected peer cert verified by server") } if req.Header.Get("Authorization") != expectedAuthHeader { w.WriteHeader(http.StatusUnauthorized) t.Fatalf("Unexpected auth header: %s", req.Header.Get("Authorization")) } w.Header().Set("Content-Type", "application/json") w.Write([]byte(fmt.Sprintf(`{"sub":"%s"}`, expectedUsername))) }) // Start remote server remoteAddr, err := testserver.FindAvailableBindAddress(9443, 9999) if err != nil { t.Fatalf("Couldn't get free address for test server: %v", err) } remoteServer := &http.Server{ Addr: remoteAddr, Handler: remoteHandler, ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, MaxHeaderBytes: 1 << 20, TLSConfig: &tls.Config{ // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) MinVersion: tls.VersionTLS10, // RequireAndVerifyClientCert lets us limit requests to ones 
with a valid client certificate ClientAuth: tls.RequireAndVerifyClientCert, ClientCAs: clientCAs, }, } go func() { if err := remoteServer.ListenAndServeTLS(certNames[basicAuthRemoteServerCert], certNames[basicAuthRemoteServerKey]); err != nil { t.Fatalf("unexpected error: %v", err) } }() // Build master config masterOptions, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("unexpected error: %v", err) } masterOptions.OAuthConfig.IdentityProviders[0] = configapi.IdentityProvider{ Name: "basicauth", UseAsChallenger: true, UseAsLogin: true, Provider: runtime.EmbeddedObject{ Object: &configapi.BasicAuthPasswordIdentityProvider{ RemoteConnectionInfo: configapi.RemoteConnectionInfo{ URL: fmt.Sprintf("https://%s", remoteAddr), CA: certNames[basicAuthRemoteCACert], ClientCert: configapi.CertInfo{ CertFile: certNames[basicAuthClientCert], KeyFile: certNames[basicAuthClientKey], }, }, }, }, } // Start server clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterOptions) if err != nil { t.Fatalf("unexpected error: %v", err) } clientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig) if err != nil { t.Fatalf("unexpected error: %v", err) } // Use the server and CA info anonConfig := kclient.Config{} anonConfig.Host = clientConfig.Host anonConfig.CAFile = clientConfig.CAFile anonConfig.CAData = clientConfig.CAData // Make sure we can get a token accessToken, err := tokencmd.RequestToken(&anonConfig, nil, expectedLogin, expectedPassword) if err != nil { t.Fatalf("Unexpected error: %v", err) } if len(accessToken) == 0 { t.Errorf("Expected access token, got none") } // Make sure we can use the token, and it represents who we expect userConfig := anonConfig userConfig.BearerToken = accessToken userClient, err := client.New(&userConfig) if err != nil { t.Fatalf("Unexpected error: %v", err) } user, err := userClient.Users().Get("~") if err != nil { t.Fatalf("Unexpected error: %v", err) } if user.Name != expectedUsername { 
t.Fatalf("Expected username as the user, got %v", user) } }
func TestSNI(t *testing.T) { // Create tempfiles with certs and keys we're going to use certNames := map[string]string{} for certName, certContents := range sniCerts { f, err := ioutil.TempFile("", certName) if err != nil { t.Fatalf("unexpected error: %v", err) } defer os.Remove(f.Name()) if err := ioutil.WriteFile(f.Name(), certContents, os.FileMode(0600)); err != nil { t.Fatalf("unexpected error: %v", err) } certNames[certName] = f.Name() } // Build master config masterOptions, err := testserver.DefaultMasterOptions() if err != nil { t.Fatalf("unexpected error: %v", err) } // Set custom cert masterOptions.ServingInfo.NamedCertificates = []configapi.NamedCertificate{ { Names: []string{"customhost.com"}, CertInfo: configapi.CertInfo{ CertFile: certNames[sniServerCert], KeyFile: certNames[sniServerKey], }, }, { Names: []string{"*.wildcardhost.com"}, CertInfo: configapi.CertInfo{ CertFile: certNames[sniServerCert], KeyFile: certNames[sniServerKey], }, }, } // Start server _, err = testserver.StartConfiguredMaster(masterOptions) if err != nil { t.Fatalf("unexpected error: %v", err) } // Build transports sniRoots, err := util.CertPoolFromFile(certNames[sniCACert]) if err != nil { t.Fatalf("unexpected error: %v", err) } sniConfig := &tls.Config{RootCAs: sniRoots} generatedRoots, err := util.CertPoolFromFile(masterOptions.ServiceAccountConfig.MasterCA) if err != nil { t.Fatalf("unexpected error: %v", err) } generatedConfig := &tls.Config{RootCAs: generatedRoots} insecureConfig := &tls.Config{InsecureSkipVerify: true} tests := map[string]struct { Hostname string TLSConfig *tls.Config ExpectedOK bool }{ "sni client -> generated ip": { Hostname: "127.0.0.1", TLSConfig: sniConfig, }, "sni client -> generated hostname": { Hostname: "openshift", TLSConfig: sniConfig, }, "sni client -> sni host": { Hostname: "customhost.com", TLSConfig: sniConfig, ExpectedOK: true, }, "sni client -> sni wildcard host": { Hostname: "www.wildcardhost.com", TLSConfig: sniConfig, ExpectedOK: true, 
}, "sni client -> invalid ip": { Hostname: "10.10.10.10", TLSConfig: sniConfig, }, "sni client -> invalid host": { Hostname: "invalidhost.com", TLSConfig: sniConfig, }, "generated client -> generated ip": { Hostname: "127.0.0.1", TLSConfig: generatedConfig, ExpectedOK: true, }, "generated client -> generated hostname": { Hostname: "openshift", TLSConfig: generatedConfig, ExpectedOK: true, }, "generated client -> sni host": { Hostname: "customhost.com", TLSConfig: generatedConfig, }, "generated client -> sni wildcard host": { Hostname: "www.wildcardhost.com", TLSConfig: generatedConfig, }, "generated client -> invalid ip": { Hostname: "10.10.10.10", TLSConfig: generatedConfig, }, "generated client -> invalid host": { Hostname: "invalidhost.com", TLSConfig: generatedConfig, }, "insecure client -> generated ip": { Hostname: "127.0.0.1", TLSConfig: insecureConfig, ExpectedOK: true, }, "insecure client -> generated hostname": { Hostname: "openshift", TLSConfig: insecureConfig, ExpectedOK: true, }, "insecure client -> sni host": { Hostname: "customhost.com", TLSConfig: insecureConfig, ExpectedOK: true, }, "insecure client -> sni wildcard host": { Hostname: "www.wildcardhost.com", TLSConfig: insecureConfig, ExpectedOK: true, }, "insecure client -> invalid ip": { Hostname: "10.10.10.10", TLSConfig: insecureConfig, ExpectedOK: true, }, "insecure client -> invalid host": { Hostname: "invalidhost.com", TLSConfig: insecureConfig, ExpectedOK: true, }, } masterPublicURL, err := url.Parse(masterOptions.MasterPublicURL) if err != nil { t.Fatalf("unexpected error: %v", err) } for k, tc := range tests { u := *masterPublicURL if err != nil { t.Errorf("%s: unexpected error: %v", k, err) continue } u.Path = "/healthz" if _, port, err := net.SplitHostPort(u.Host); err == nil { u.Host = net.JoinHostPort(tc.Hostname, port) } else { u.Host = tc.Hostname } req, err := http.NewRequest("GET", u.String(), nil) if err != nil { t.Errorf("%s: unexpected error: %v", k, err) continue } transport := 
&http.Transport{ // Custom Dial func to always dial the real master, no matter what host is asked for Dial: func(network, addr string) (net.Conn, error) { // t.Logf("%s: Dialing for %s", k, addr) return net.Dial(network, masterPublicURL.Host) }, TLSClientConfig: tc.TLSConfig, } resp, err := transport.RoundTrip(req) if tc.ExpectedOK && err != nil { t.Errorf("%s: unexpected error: %v", k, err) continue } if !tc.ExpectedOK && err == nil { t.Errorf("%s: expected error, got none", k) continue } if err == nil { data, err := ioutil.ReadAll(resp.Body) if err != nil { t.Errorf("%s: unexpected error: %v", k, err) continue } if string(data) != "ok" { t.Errorf("%s: expected %q, got %q", k, "ok", string(data)) continue } } } }