func BuildAuthConfig(options configapi.MasterConfig) (*AuthConfig, error) {
	client, err := etcd.GetAndTestEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdHelper, err := NewEtcdHelper(client, options.EtcdStorageConfig.OpenShiftStorageVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	apiServerCAs, err := configapi.GetAPIServerCertCAPool(options)
	if err != nil {
		return nil, err
	}

	var sessionAuth *session.Authenticator
	if options.OAuthConfig.SessionConfig != nil {
		secure := isHTTPS(options.OAuthConfig.MasterPublicURL)
		auth, err := BuildSessionAuth(secure, options.OAuthConfig.SessionConfig)
		if err != nil {
			return nil, err
		}
		sessionAuth = auth
	}

	// Build the list of valid redirect_uri prefixes for a login using the openshift-web-console client to redirect to
	// TODO: allow configuring this
	// TODO: remove hard-coding of development UI server
	assetPublicURLs := []string{options.OAuthConfig.AssetPublicURL, "http://localhost:9000", "https://localhost:9000"}

	userStorage := useretcd.NewREST(etcdHelper)
	userRegistry := userregistry.NewRegistry(userStorage)
	identityStorage := identityetcd.NewREST(etcdHelper)
	identityRegistry := identityregistry.NewRegistry(identityStorage)

	ret := &AuthConfig{
		Options: *options.OAuthConfig,

		OpenshiftEnabled: options.OpenshiftEnabled,

		AssetPublicAddresses: assetPublicURLs,
		MasterRoots:          apiServerCAs,
		EtcdHelper:           etcdHelper,

		IdentityRegistry: identityRegistry,
		UserRegistry:     userRegistry,

		SessionAuth: sessionAuth,
	}

	return ret, nil
}
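// isHTTPS is called above but is not part of this excerpt. The sketch below is
// a plausible stand-in that only checks the URL scheme; it is an assumption,
// not necessarily the original implementation, and relies on the standard
// library "net/url" and "strings" packages being imported.
func isHTTPS(u string) bool {
	parsed, err := url.Parse(u)
	return err == nil && strings.EqualFold(parsed.Scheme, "https")
}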
func TestBootstrapPolicyOverwritePolicyCommand(t *testing.T) {
	masterConfig, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	client, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if err := client.ClusterPolicies().Delete(authorizationapi.PolicyName); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	// after the policy is deleted, we must wait for it to be cleared from the policy cache
	err = wait.Poll(10*time.Millisecond, 10*time.Second, func() (bool, error) {
		_, err := client.ClusterPolicies().List(labels.Everything(), fields.Everything())
		if err == nil {
			return false, nil
		}
		if !kapierror.IsForbidden(err) {
			t.Errorf("unexpected error: %v", err)
		}
		return true, nil
	})
	if err != nil {
		t.Errorf("timeout: %v", err)
	}

	etcdClient, err := etcd.GetAndTestEtcdClient(masterConfig.EtcdClientInfo)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	etcdHelper, err := origin.NewEtcdHelper(etcdClient, masterConfig.EtcdStorageConfig.OpenShiftStorageVersion, masterConfig.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if err := admin.OverwriteBootstrapPolicy(etcdHelper, masterConfig.PolicyConfig.BootstrapPolicyFile, admin.CreateBootstrapPolicyFileFullCommand, true, ioutil.Discard); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if _, err := client.ClusterPolicies().List(labels.Everything(), fields.Everything()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
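// The test above relies on wait.Poll to retry until the deleted policy drops
// out of the authorizer cache. The helper below sketches the same
// poll-until-condition idea using only the standard library ("time", "fmt");
// it is illustrative and not the upstream wait.Poll implementation.
func pollUntil(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err // condition reported a hard failure
		}
		if done {
			return nil // condition satisfied
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v", timeout)
		}
		time.Sleep(interval)
	}
}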
func (o OverwriteBootstrapPolicyOptions) OverwriteBootstrapPolicy() error {
	masterConfig, err := configapilatest.ReadAndResolveMasterConfig(o.MasterConfigFile)
	if err != nil {
		return err
	}

	// Connect and setup etcd interfaces
	etcdClient, err := etcd.GetAndTestEtcdClient(masterConfig.EtcdClientInfo)
	if err != nil {
		return err
	}
	etcdHelper, err := newEtcdHelper(etcdClient, masterConfig.EtcdStorageConfig.OpenShiftStorageVersion, masterConfig.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return err
	}

	return OverwriteBootstrapPolicy(etcdHelper, o.File, o.CreateBootstrapPolicyCommand, o.Force, o.Out)
}
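// Illustrative call site for the method above. The field names are taken from
// how the receiver is used in the method body; the values are placeholders,
// not defaults shipped with OpenShift.
func exampleOverwriteBootstrapPolicy() error {
	opts := OverwriteBootstrapPolicyOptions{
		MasterConfigFile:             "master-config.yaml",           // placeholder path to the master config
		File:                         "policy.json",                  // placeholder path to the bootstrap policy file
		CreateBootstrapPolicyCommand: "create-bootstrap-policy-file", // placeholder command name
		Force:                        true,                           // overwrite any existing policy
		Out:                          os.Stdout,                      // io.Writer for output (the test above passes ioutil.Discard)
	}
	return opts.OverwriteBootstrapPolicy()
}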
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	// Connect and setup etcd interfaces
	etcdClient, err := etcd.GetAndTestEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	ketcdHelper, err := master.NewEtcdHelper(etcdClient, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	server := app.NewAPIServer()
	server.EventTTL = 2 * time.Hour
	server.PortalNet = util.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePorts = *portRange
	server.AdmissionControl = strings.Join([]string{
		"NamespaceExists",
		"NamespaceLifecycle",
		"OriginPodNodeEnvironment",
		"LimitRanger",
		"ServiceAccount",
		"SecurityContextConstraint",
		"ResourceQuota",
	}, ",")

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cmserver := cmapp.NewCMServer()
	cmserver.PodEvictionTimeout = podEvictionTimeout

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}

	admissionController := admission.NewFromPlugins(kubeClient, strings.Split(server.AdmissionControl, ","), server.AdmissionControlConfigFile)

	m := &master.Config{
		PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
		ReadWritePort: port,
		ReadOnlyPort:  port,

		EtcdHelper: ketcdHelper,
		EventTTL:   server.EventTTL,
		//MinRequestTimeout: server.MinRequestTimeout,

		PortalNet:        (*net.IPNet)(&server.PortalNet),
		ServiceNodePorts: server.ServiceNodePorts,

		RequestContextMapper: requestContextMapper,

		KubeletClient: kubeletClient,
		APIPrefix:     KubeAPIPrefix,

		EnableCoreControllers: true,

		MasterCount: options.KubernetesMasterConfig.MasterCount,

		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admissionController,

		DisableV1Beta1: true,
		DisableV1Beta2: true,
		DisableV1Beta3: !configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1beta3"),
		EnableV1:       configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1"),
	}

	kmaster := &MasterConfig{
		Options: *options.KubernetesMasterConfig,

		KubeClient: kubeClient,

		Master:            m,
		ControllerManager: cmserver,
		CloudProvider:     cloud,
	}

	return kmaster, nil
}
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.GetAndTestEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdHelper, err := NewEtcdHelper(client, options.EtcdStorageConfig.OpenShiftStorageVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	policyCache, policyClient := newReadOnlyCacheAndClient(etcdHelper)
	requestContextMapper := kapi.NewRequestContextMapper()

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
	admissionControlPluginNames := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}
	admissionClient := admissionControlClient(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient)
	admissionController := admission.NewFromPlugins(admissionClient, admissionControlPluginNames, "")

	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, client)
	if err != nil {
		return nil, err
	}

	config := &MasterConfig{
		Options: options,

		OpenshiftEnabled: options.OpenshiftEnabled,

		Authenticator:                 newAuthenticator(options, etcdHelper, serviceAccountTokenGetter, apiClientCAs),
		Authorizer:                    newAuthorizer(policyClient, options.ProjectConfig.ProjectRequestMessage),
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		PolicyCache:               policyCache,
		ProjectAuthorizationCache: newProjectAuthorizationCache(privilegedLoopbackOpenShiftClient, privilegedLoopbackKubeClient, policyClient),

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug: plug.NewPlug(!options.PauseControllers),

		ImageFor:   imageTemplate.ExpandOrDie,
		EtcdHelper: etcdHelper,

		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

		BuildControllerServiceAccount:       bootstrappolicy.InfraBuildControllerServiceAccountName,
		DeploymentControllerServiceAccount:  bootstrappolicy.InfraDeploymentControllerServiceAccountName,
		ReplicationControllerServiceAccount: bootstrappolicy.InfraReplicationControllerServiceAccountName,
	}

	return config, nil
}
func TestUserInitialization(t *testing.T) {
	masterConfig, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	etcdClient, err := etcd.GetAndTestEtcdClient(masterConfig.EtcdClientInfo)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	etcdHelper, err := origin.NewEtcdHelper(etcdClient, masterConfig.EtcdStorageConfig.OpenShiftStorageVersion, masterConfig.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	userRegistry := userregistry.NewRegistry(useretcd.NewREST(etcdHelper))
	identityRegistry := identityregistry.NewRegistry(identityetcd.NewREST(etcdHelper))
	useridentityMappingRegistry := useridentitymapping.NewRegistry(useridentitymapping.NewREST(userRegistry, identityRegistry))

	lookup := identitymapper.NewLookupIdentityMapper(useridentityMappingRegistry, userRegistry)
	provisioner := identitymapper.NewAlwaysCreateUserIdentityToUserMapper(identityRegistry, userRegistry)

	testcases := map[string]struct {
		Identity authapi.UserIdentityInfo
		Mapper   authapi.UserIdentityMapper

		CreateIdentity *api.Identity
		CreateUser     *api.User
		CreateMapping  *api.UserIdentityMapping
		UpdateUser     *api.User

		ExpectedErr      error
		ExpectedUserName string
		ExpectedFullName string
	}{
		"lookup missing identity": {
			Identity:    makeIdentityInfo("idp", "bob", nil),
			Mapper:      lookup,
			ExpectedErr: kerrs.NewNotFound("UserIdentityMapping", "idp:bob"),
		},
		"lookup existing identity": {
			Identity:         makeIdentityInfo("idp", "bob", nil),
			Mapper:           lookup,
			CreateUser:       makeUser("mappeduser"),
			CreateIdentity:   makeIdentity("idp", "bob"),
			CreateMapping:    makeMapping("mappeduser", "idp:bob"),
			ExpectedUserName: "******",
		},
		"provision missing identity and user": {
			Identity:         makeIdentityInfo("idp", "bob", nil),
			Mapper:           provisioner,
			ExpectedUserName: "******",
		},
		"provision missing identity and user with preferred username and display name": {
			Identity:         makeIdentityInfo("idp", "bob", map[string]string{authapi.IdentityDisplayNameKey: "Bob, Sr.", authapi.IdentityPreferredUsernameKey: "admin"}),
			Mapper:           provisioner,
			ExpectedUserName: "******",
			ExpectedFullName: "Bob, Sr.",
		},
		"provision missing identity for existing user": {
			Identity:         makeIdentityInfo("idp", "bob", nil),
			Mapper:           provisioner,
			CreateUser:       makeUser("bob", "idp:bob"),
			ExpectedUserName: "******",
		},
		"provision missing identity with conflicting user": {
			Identity:         makeIdentityInfo("idp", "bob", nil),
			Mapper:           provisioner,
			CreateUser:       makeUser("bob"),
			ExpectedUserName: "******",
		},
		"provision missing identity with conflicting user and preferred username": {
			Identity:         makeIdentityInfo("idp", "bob", map[string]string{authapi.IdentityPreferredUsernameKey: "admin"}),
			Mapper:           provisioner,
			CreateUser:       makeUser("admin"),
			ExpectedUserName: "******",
		},
		"provision with existing unmapped identity": {
			Identity:       makeIdentityInfo("idp", "bob", nil),
			Mapper:         provisioner,
			CreateIdentity: makeIdentity("idp", "bob"),
			ExpectedErr:    kerrs.NewNotFound("UserIdentityMapping", "idp:bob"),
		},
		"provision with existing mapped identity with invalid user UID": {
			Identity:       makeIdentityInfo("idp", "bob", nil),
			Mapper:         provisioner,
			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentityWithUserReference("idp", "bob", "mappeduser", "invalidUID"),
			ExpectedErr:    kerrs.NewNotFound("UserIdentityMapping", "idp:bob"),
		},
		"provision with existing mapped identity without user backreference": {
			Identity:       makeIdentityInfo("idp", "bob", nil),
			Mapper:         provisioner,
			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),
			// Update user to a version which does not reference the identity
			UpdateUser:  makeUser("mappeduser"),
			ExpectedErr: kerrs.NewNotFound("UserIdentityMapping", "idp:bob"),
		},
		"provision returns existing mapping": {
			Identity:         makeIdentityInfo("idp", "bob", nil),
			Mapper:           provisioner,
			CreateUser:       makeUser("mappeduser"),
			CreateIdentity:   makeIdentity("idp", "bob"),
			CreateMapping:    makeMapping("mappeduser", "idp:bob"),
			ExpectedUserName: "******",
		},
	}

	for k, testcase := range testcases {
		// Cleanup
		if err := etcdHelper.Delete(useretcd.EtcdPrefix, true); err != nil && !tools.IsEtcdNotFound(err) {
			t.Fatalf("Could not clean up users: %v", err)
		}
		if err := etcdHelper.Delete(identityetcd.EtcdPrefix, true); err != nil && !tools.IsEtcdNotFound(err) {
			t.Fatalf("Could not clean up identities: %v", err)
		}

		// Pre-create items
		if testcase.CreateUser != nil {
			_, err := clusterAdminClient.Users().Create(testcase.CreateUser)
			if err != nil {
				t.Errorf("%s: Could not create user: %v", k, err)
				continue
			}
		}
		if testcase.CreateIdentity != nil {
			_, err := clusterAdminClient.Identities().Create(testcase.CreateIdentity)
			if err != nil {
				t.Errorf("%s: Could not create identity: %v", k, err)
				continue
			}
		}
		if testcase.CreateMapping != nil {
			_, err := clusterAdminClient.UserIdentityMappings().Update(testcase.CreateMapping)
			if err != nil {
				t.Errorf("%s: Could not create mapping: %v", k, err)
				continue
			}
		}
		if testcase.UpdateUser != nil {
			if testcase.UpdateUser.ResourceVersion == "" {
				existingUser, err := clusterAdminClient.Users().Get(testcase.UpdateUser.Name)
				if err != nil {
					t.Errorf("%s: Could not get user to update: %v", k, err)
					continue
				}
				testcase.UpdateUser.ResourceVersion = existingUser.ResourceVersion
			}
			_, err := clusterAdminClient.Users().Update(testcase.UpdateUser)
			if err != nil {
				t.Errorf("%s: Could not update user: %v", k, err)
				continue
			}
		}

		// Spawn 5 simultaneous mappers to test race conditions
		var wg sync.WaitGroup
		for i := 0; i < 5; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				userInfo, err := testcase.Mapper.UserFor(testcase.Identity)
				if err != nil {
					if testcase.ExpectedErr == nil {
						t.Errorf("%s: Expected success, got error '%v'", k, err)
					} else if err.Error() != testcase.ExpectedErr.Error() {
						t.Errorf("%s: Expected error %v, got '%v'", k, testcase.ExpectedErr.Error(), err)
					}
					return
				}
				if err == nil && testcase.ExpectedErr != nil {
					t.Errorf("%s: Expected error '%v', got none", k, testcase.ExpectedErr)
					return
				}

				if userInfo.GetName() != testcase.ExpectedUserName {
					t.Errorf("%s: Expected username %s, got %s", k, testcase.ExpectedUserName, userInfo.GetName())
					return
				}

				user, err := clusterAdminClient.Users().Get(userInfo.GetName())
				if err != nil {
					t.Errorf("%s: Error getting user: %v", k, err)
				}
				if user.FullName != testcase.ExpectedFullName {
					t.Errorf("%s: Expected full name %s, got %s", k, testcase.ExpectedFullName, user.FullName)
				}
			}()
		}
		wg.Wait()
	}
}
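// TestUserInitialization above drives each mapper from five goroutines behind
// a sync.WaitGroup to look for races in identity-to-user provisioning. The
// helper below shows that concurrency pattern in isolation; it uses only the
// standard library "sync" package and the helper name is illustrative.
func runConcurrently(n int, fn func()) {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fn() // each invocation runs in its own goroutine
		}()
	}
	wg.Wait() // block until every goroutine has finished
}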