func TestBootstrapPolicyOverwritePolicyCommand(t *testing.T) {
	testutil.RequireEtcd(t)
	masterConfig, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	client, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if err := client.ClusterPolicies().Delete(authorizationapi.PolicyName); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	// after the policy is deleted, we must wait for it to be cleared from the policy cache
	err = wait.Poll(10*time.Millisecond, 10*time.Second, func() (bool, error) {
		_, err := client.ClusterPolicies().List(kapi.ListOptions{})
		if err == nil {
			return false, nil
		}
		if !kapierror.IsForbidden(err) {
			t.Errorf("unexpected error: %v", err)
		}
		return true, nil
	})
	if err != nil {
		t.Errorf("timeout: %v", err)
	}

	etcdClient, err := etcd.MakeNewEtcdClient(masterConfig.EtcdClientInfo)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	storageVersion := unversioned.GroupVersion{Group: "", Version: masterConfig.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := origin.NewEtcdStorage(etcdClient, storageVersion, masterConfig.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if err := admin.OverwriteBootstrapPolicy(etcdHelper, masterConfig.PolicyConfig.BootstrapPolicyFile, admin.CreateBootstrapPolicyFileFullCommand, true, ioutil.Discard); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if _, err := client.ClusterPolicies().List(kapi.ListOptions{}); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
Example #2
func (g *configRESTOptionsGetter) GetRESTOptions(resource unversioned.GroupResource) (genericrest.RESTOptions, error) {
	g.restOptionsLock.Lock()
	defer g.restOptionsLock.Unlock()
	if resourceOptions, ok := g.restOptionsMap[resource]; ok {
		return resourceOptions, nil
	}

	if g.etcdHelper == nil {
		// TODO: choose destination etcd based on input resource
		etcdClient, err := etcd.MakeNewEtcdClient(g.masterOptions.EtcdClientInfo)
		if err != nil {
			return genericrest.RESTOptions{}, err
		}
		// TODO: choose destination group/version based on input group/resource
		// TODO: Tune the cache size
		groupVersion := unversioned.GroupVersion{Group: "", Version: g.masterOptions.EtcdStorageConfig.OpenShiftStorageVersion}
		g.etcdHelper = etcdstorage.NewEtcdStorage(etcdClient, kapi.Codecs.LegacyCodec(groupVersion), g.masterOptions.EtcdStorageConfig.OpenShiftStoragePrefix, false, genericapiserver.DefaultDeserializationCacheSize)
	}

	configuredCacheSize, specified := g.cacheSizes[resource]
	if !specified || configuredCacheSize < 0 {
		configuredCacheSize = g.defaultCacheSize
	}

	decorator := func(s storage.Interface, requestedSize int, objectType runtime.Object, resourcePrefix string, scopeStrategy rest.NamespaceScopedStrategy, newListFunc func() runtime.Object) storage.Interface {
		capacity := requestedSize
		if capacity == UseConfiguredCacheSize {
			capacity = configuredCacheSize
		}

		if capacity == 0 || !g.cacheEnabled {
			glog.V(5).Infof("using uncached watch storage for %s", resource.String())
			return genericrest.UndecoratedStorage(s, capacity, objectType, resourcePrefix, scopeStrategy, newListFunc)
		} else {
			glog.V(5).Infof("using watch cache storage (capacity=%d) for %s", capacity, resource.String())
			return registry.StorageWithCacher(s, capacity, objectType, resourcePrefix, scopeStrategy, newListFunc)
		}
	}

	resourceOptions := genericrest.RESTOptions{
		Storage:                 g.etcdHelper,
		Decorator:               decorator,
		DeleteCollectionWorkers: 1,
	}
	g.restOptionsMap[resource] = resourceOptions

	return resourceOptions, nil
}
Example #3
func (o OverwriteBootstrapPolicyOptions) OverwriteBootstrapPolicy() error {
	masterConfig, err := configapilatest.ReadAndResolveMasterConfig(o.MasterConfigFile)
	if err != nil {
		return err
	}

	// Connect and set up etcd interfaces
	_, err = etcd.GetAndTestEtcdClient(masterConfig.EtcdClientInfo)
	if err != nil {
		return err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(masterConfig.EtcdClientInfo)
	if err != nil {
		return err
	}

	storage, err := newStorage(etcdClient, masterConfig.EtcdStorageConfig.OpenShiftStorageVersion, masterConfig.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return err
	}

	return OverwriteBootstrapPolicy(storage, o.File, o.CreateBootstrapPolicyCommand, o.Force, o.Out)
}
Example #4
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	policyCache, policyClient := newReadOnlyCacheAndClient(etcdHelper)
	requestContextMapper := kapi.NewRequestContextMapper()

	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupstorage.NewREST(etcdHelper)))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
	admissionControlPluginNames := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}
	if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
		admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
	}

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient: privilegedLoopbackOpenShiftClient,
		ProjectCache:    projectCache,
	}
	plugins := []admission.Interface{}
	for _, pluginName := range admissionControlPluginNames {
		configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
		if err != nil {
			return nil, err
		}
		plugin := admission.InitPlugin(pluginName, privilegedLoopbackKubeClient, configFile)
		if plugin != nil {
			plugins = append(plugins, plugin)
		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	authorizer := newAuthorizer(policyClient, options.ProjectConfig.ProjectRequestMessage)

	config := &MasterConfig{
		Options: options,

		Authenticator:                 newAuthenticator(options, etcdHelper, serviceAccountTokenGetter, apiClientCAs, groupCache),
		Authorizer:                    authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		PolicyCache:               policyCache,
		GroupCache:                groupCache,
		ProjectAuthorizationCache: newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, policyClient),
		ProjectCache:              projectCache,

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug:      plug,
		ControllerPlugStart: plugStart,

		ImageFor:            imageTemplate.ExpandOrDie,
		EtcdHelper:          etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
	}

	return config, nil
}
Example #5
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, pluginInitializer oadmission.PluginInitializer) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	// Connect and set up etcd interfaces
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	// Defaults are tested in TestAPIServerDefaults
	server := apiserveroptions.NewAPIServer()
	// Adjust defaults
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.AdmissionControl = strings.Join(AdmissionPlugins, ",")
	server.EnableLogsSupport = false // don't expose server logs

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	if len(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride) > 0 {
		server.AdmissionControl = strings.Join(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride, ",")
	}

	// Defaults are tested in TestCMServerDefaults
	cmserver := cmapp.NewCMServer()
	// Adjust defaults
	cmserver.Address = "" // no healthz endpoint
	cmserver.Port = 0     // no healthz endpoint
	cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}

	plugins := []admission.Interface{}
	for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
		switch pluginName {
		case serviceadmit.ExternalIPPluginName:
			// this needs to be moved upstream to be part of core config
			reject, admit, err := serviceadmit.ParseCIDRRules(options.NetworkConfig.ExternalIPNetworkCIDRs)
			if err != nil {
				// should have been caught with validation
				return nil, err
			}
			plugins = append(plugins, serviceadmit.NewExternalIPRanger(reject, admit))
		case saadmit.PluginName:
			// we need to set some custom parameters on the service account admission controller, so create that one by hand
			saAdmitter := saadmit.NewServiceAccount(internalclientset.FromUnversionedClient(kubeClient))
			saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
			saAdmitter.Run()
			plugins = append(plugins, saAdmitter)

		default:
			configFile, err := pluginconfig.GetPluginConfigFile(options.KubernetesMasterConfig.AdmissionConfig.PluginConfig, pluginName, server.AdmissionControlConfigFile)
			if err != nil {
				return nil, err
			}
			plugin := admission.InitPlugin(pluginName, internalclientset.FromUnversionedClient(kubeClient), configFile)
			if plugin != nil {
				plugins = append(plugins, plugin)
			}

		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	var proxyClientCerts []tls.Certificate
	if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
		clientCert, err := tls.LoadX509KeyPair(
			options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
			options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
		)
		if err != nil {
			return nil, err
		}
		proxyClientCerts = append(proxyClientCerts, clientCert)
	}

	// TODO: you have to know every APIGroup you're enabling or upstream will panic; its alternative to panicking is calling Fatal.
	// It needs a refactor to return errors.
	storageDestinations := genericapiserver.NewStorageDestinations()
	// storageVersions is a map from API group to the storage version for that group; the value must be a version exposed by the REST API or it breaks.
	// We need to fix the upstream to stop using the storage version as a preferred api version.
	storageVersions := map[string]string{}

	enabledKubeVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupKube)
	if len(enabledKubeVersions) > 0 {
		kubeStorageVersion := unversioned.GroupVersion{Group: configapi.APIGroupKube, Version: options.EtcdStorageConfig.KubernetesStorageVersion}
		databaseStorage, err := NewEtcdStorage(etcdClient, kubeStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
		}
		storageDestinations.AddAPIGroup(configapi.APIGroupKube, databaseStorage)
		storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion
	}

	// enable this if extensions API is enabled (or batch or autoscaling, since they persist to extensions/v1beta1 for now)
	// TODO: replace this with a loop over configured storage versions
	extensionsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions)) > 0
	batchEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupBatch)) > 0
	autoscalingEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupAutoscaling)) > 0
	if extensionsEnabled || autoscalingEnabled || batchEnabled {
		// TODO: replace this with a configured storage version for extensions once configuration exposes this
		extensionsStorageVersion := unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"}
		databaseStorage, err := NewEtcdStorage(etcdClient, extensionsStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", err)
		}
		storageDestinations.AddAPIGroup(configapi.APIGroupExtensions, databaseStorage)
		storageVersions[configapi.APIGroupExtensions] = extensionsStorageVersion.String()
	}

	// Preserve previous behavior of using the first non-loopback address
	// TODO: Deprecate this behavior and just require a valid value to be passed in
	publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
	if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
		hostIP, err := knet.ChooseHostInterface()
		if err != nil {
			glog.Fatalf("Unable to find suitable network address.error='%v'. Set the masterIP directly to avoid this error.", err)
		}
		publicAddress = hostIP
		glog.Infof("Will report %v as public IP address.", publicAddress)
	}

	m := &master.Config{
		Config: &genericapiserver.Config{
			PublicAddress: publicAddress,
			ReadWritePort: port,

			Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
			AdmissionControl: admissionController,

			StorageDestinations: storageDestinations,
			StorageVersions:     storageVersions,

			ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
			ServiceNodePortRange:  server.ServiceNodePortRange,

			RequestContextMapper: requestContextMapper,

			APIGroupVersionOverrides: getAPIGroupVersionOverrides(options),
			APIPrefix:                KubeAPIPrefix,
			APIGroupPrefix:           KubeAPIGroupPrefix,

			MasterCount: options.KubernetesMasterConfig.MasterCount,

			// Set the TLS options for proxying to pods and services
			// Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
			ProxyTLSClientConfig: &tls.Config{
				// Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
				InsecureSkipVerify: true,
				Certificates:       proxyClientCerts,
			},

			Serializer: kapi.Codecs,
		},

		EventTTL: server.EventTTL,
		//MinRequestTimeout: server.MinRequestTimeout,

		KubeletClient: kubeletClient,

		EnableCoreControllers: true,
	}

	if options.DNSConfig != nil {
		_, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
		if err != nil {
			return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
		}
		dnsPort, err := strconv.Atoi(dnsPortStr)
		if err != nil {
			return nil, fmt.Errorf("invalid DNS port: %v", err)
		}
		m.ExtraServicePorts = append(m.ExtraServicePorts,
			kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
			kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
		)
		m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
			kapi.EndpointPort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP},
			kapi.EndpointPort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP},
		)
	}

	kmaster := &MasterConfig{
		Options:    *options.KubernetesMasterConfig,
		KubeClient: kubeClient,

		Master:            m,
		ControllerManager: cmserver,
		CloudProvider:     cloud,
	}

	return kmaster, nil
}
Example #6
func TestUserInitialization(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)
	masterConfig, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	optsGetter := originrest.StorageOptions(*masterConfig)

	userStorage, err := useretcd.NewREST(optsGetter)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	userRegistry := userregistry.NewRegistry(userStorage)

	identityStorage, err := identityetcd.NewREST(optsGetter)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	identityRegistry := identityregistry.NewRegistry(identityStorage)

	lookup, err := identitymapper.NewIdentityUserMapper(identityRegistry, userRegistry, identitymapper.MappingMethodLookup)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	generate, err := identitymapper.NewIdentityUserMapper(identityRegistry, userRegistry, identitymapper.MappingMethodGenerate)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	add, err := identitymapper.NewIdentityUserMapper(identityRegistry, userRegistry, identitymapper.MappingMethodAdd)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	claim, err := identitymapper.NewIdentityUserMapper(identityRegistry, userRegistry, identitymapper.MappingMethodClaim)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	testcases := map[string]struct {
		Identity authapi.UserIdentityInfo
		Mapper   authapi.UserIdentityMapper

		CreateIdentity *api.Identity
		CreateUser     *api.User
		CreateMapping  *api.UserIdentityMapping
		UpdateUser     *api.User

		ExpectedErr        error
		ExpectedUserName   string
		ExpectedFullName   string
		ExpectedIdentities []string
	}{
		"lookup missing identity": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   lookup,

			ExpectedErr: identitymapper.NewLookupError(makeIdentityInfo("idp", "bob", nil), kerrs.NewNotFound(api.Resource("useridentitymapping"), "idp:bob")),
		},
		"lookup existing identity": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   lookup,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"generate missing identity and user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   generate,

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"generate missing identity and user with preferred username and display name": {
			Identity: makeIdentityInfo("idp", "bob", map[string]string{authapi.IdentityDisplayNameKey: "Bob, Sr.", authapi.IdentityPreferredUsernameKey: "admin"}),
			Mapper:   generate,

			ExpectedUserName:   "******",
			ExpectedFullName:   "Bob, Sr.",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"generate missing identity for existing user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   generate,

			CreateUser: makeUser("bob", "idp:bob"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"generate missing identity with conflicting user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   generate,

			CreateUser: makeUser("bob"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"generate missing identity with conflicting user and preferred username": {
			Identity: makeIdentityInfo("idp", "bob", map[string]string{authapi.IdentityPreferredUsernameKey: "admin"}),
			Mapper:   generate,

			CreateUser: makeUser("admin"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"generate with existing unmapped identity": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   generate,

			CreateIdentity: makeIdentity("idp", "bob"),

			ExpectedErr: kerrs.NewNotFound(api.Resource("useridentitymapping"), "idp:bob"),
		},
		"generate with existing mapped identity with invalid user UID": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   generate,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentityWithUserReference("idp", "bob", "mappeduser", "invalidUID"),

			ExpectedErr:        kerrs.NewNotFound(api.Resource("useridentitymapping"), "idp:bob"),
			ExpectedIdentities: []string{"idp:bob"},
		},
		"generate with existing mapped identity without user backreference": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   generate,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),
			// Update user to a version which does not reference the identity
			UpdateUser: makeUser("mappeduser"),

			ExpectedErr: kerrs.NewNotFound(api.Resource("useridentitymapping"), "idp:bob"),
		},
		"generate returns existing mapping": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   generate,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},

		"add missing identity and user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   add,

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"add missing identity and user with preferred username and display name": {
			Identity: makeIdentityInfo("idp", "bob", map[string]string{authapi.IdentityDisplayNameKey: "Bob, Sr.", authapi.IdentityPreferredUsernameKey: "admin"}),
			Mapper:   add,

			ExpectedUserName:   "******",
			ExpectedFullName:   "Bob, Sr.",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"add missing identity for existing user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   add,

			CreateUser: makeUser("bob", "idp:bob"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"add missing identity with conflicting user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   add,

			CreateUser: makeUser("bob", "otheridp:otheruser"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"otheridp:otheruser", "idp:bob"},
		},
		"add missing identity with conflicting user and preferred username": {
			Identity: makeIdentityInfo("idp", "bob", map[string]string{authapi.IdentityPreferredUsernameKey: "admin"}),
			Mapper:   add,

			CreateUser: makeUser("admin", "otheridp:otheruser"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"otheridp:otheruser", "idp:bob"},
		},
		"add with existing unmapped identity": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   add,

			CreateIdentity: makeIdentity("idp", "bob"),

			ExpectedErr: kerrs.NewNotFound(api.Resource("useridentitymapping"), "idp:bob"),
		},
		"add with existing mapped identity with invalid user UID": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   add,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentityWithUserReference("idp", "bob", "mappeduser", "invalidUID"),

			ExpectedErr: kerrs.NewNotFound(api.Resource("useridentitymapping"), "idp:bob"),
		},
		"add with existing mapped identity without user backreference": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   add,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),
			// Update user to a version which does not reference the identity
			UpdateUser: makeUser("mappeduser"),

			ExpectedErr: kerrs.NewNotFound(api.Resource("useridentitymapping"), "idp:bob"),
		},
		"add returns existing mapping": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   add,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},

		"claim missing identity and user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   claim,

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"claim missing identity and user with preferred username and display name": {
			Identity: makeIdentityInfo("idp", "bob", map[string]string{authapi.IdentityDisplayNameKey: "Bob, Sr.", authapi.IdentityPreferredUsernameKey: "admin"}),
			Mapper:   claim,

			ExpectedUserName:   "******",
			ExpectedFullName:   "Bob, Sr.",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"claim missing identity for existing user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   claim,

			CreateUser: makeUser("bob", "idp:bob"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"claim missing identity with existing available user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   claim,

			CreateUser: makeUser("bob"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
		"claim missing identity with conflicting user": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   claim,

			CreateUser: makeUser("bob", "otheridp:otheruser"),

			ExpectedErr: identitymapper.NewClaimError(makeUser("bob", "otheridp:otheruser"), makeIdentity("idp", "bob")),
		},
		"claim missing identity with conflicting user and preferred username": {
			Identity: makeIdentityInfo("idp", "bob", map[string]string{authapi.IdentityPreferredUsernameKey: "admin"}),
			Mapper:   claim,

			CreateUser: makeUser("admin", "otheridp:otheruser"),

			ExpectedErr: identitymapper.NewClaimError(makeUser("admin", "otheridp:otheruser"), makeIdentity("idp", "bob")),
		},
		"claim with existing unmapped identity": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   claim,

			CreateIdentity: makeIdentity("idp", "bob"),

			ExpectedErr: kerrs.NewNotFound(api.Resource("useridentitymapping"), "idp:bob"),
		},
		"claim with existing mapped identity with invalid user UID": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   claim,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentityWithUserReference("idp", "bob", "mappeduser", "invalidUID"),

			ExpectedErr: kerrs.NewNotFound(api.Resource("useridentitymapping"), "idp:bob"),
		},
		"claim with existing mapped identity without user backreference": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   claim,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),
			// Update user to a version which does not reference the identity
			UpdateUser: makeUser("mappeduser"),

			ExpectedErr: kerrs.NewNotFound(api.Resource("useridentitymapping"), "idp:bob"),
		},
		"claim returns existing mapping": {
			Identity: makeIdentityInfo("idp", "bob", nil),
			Mapper:   claim,

			CreateUser:     makeUser("mappeduser"),
			CreateIdentity: makeIdentity("idp", "bob"),
			CreateMapping:  makeMapping("mappeduser", "idp:bob"),

			ExpectedUserName:   "******",
			ExpectedIdentities: []string{"idp:bob"},
		},
	}

	oldEtcdClient, err := etcd.MakeNewEtcdClient(masterConfig.EtcdClientInfo)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	etcdClient := etcdclient.NewKeysAPI(oldEtcdClient)

	for k, testcase := range testcases {
		// Cleanup
		if _, err := etcdClient.Delete(context.Background(), path.Join(masterConfig.EtcdStorageConfig.OpenShiftStoragePrefix, "/users"), &etcdclient.DeleteOptions{Recursive: true}); err != nil && !etcdutil.IsEtcdNotFound(err) {
			t.Fatalf("Could not clean up users: %v", err)
		}
		if _, err := etcdClient.Delete(context.Background(), path.Join(masterConfig.EtcdStorageConfig.OpenShiftStoragePrefix, "/identities"), &etcdclient.DeleteOptions{Recursive: true}); err != nil && !etcdutil.IsEtcdNotFound(err) {
			t.Fatalf("Could not clean up identities: %v", err)
		}

		// Pre-create items
		if testcase.CreateUser != nil {
			_, err := clusterAdminClient.Users().Create(testcase.CreateUser)
			if err != nil {
				t.Errorf("%s: Could not create user: %v", k, err)
				continue
			}
		}
		if testcase.CreateIdentity != nil {
			_, err := clusterAdminClient.Identities().Create(testcase.CreateIdentity)
			if err != nil {
				t.Errorf("%s: Could not create identity: %v", k, err)
				continue
			}
		}
		if testcase.CreateMapping != nil {
			_, err := clusterAdminClient.UserIdentityMappings().Update(testcase.CreateMapping)
			if err != nil {
				t.Errorf("%s: Could not create mapping: %v", k, err)
				continue
			}
		}
		if testcase.UpdateUser != nil {
			if testcase.UpdateUser.ResourceVersion == "" {
				existingUser, err := clusterAdminClient.Users().Get(testcase.UpdateUser.Name)
				if err != nil {
					t.Errorf("%s: Could not get user to update: %v", k, err)
					continue
				}
				testcase.UpdateUser.ResourceVersion = existingUser.ResourceVersion
			}
			_, err := clusterAdminClient.Users().Update(testcase.UpdateUser)
			if err != nil {
				t.Errorf("%s: Could not update user: %v", k, err)
				continue
			}
		}

		// Spawn 5 simultaneous mappers to test race conditions
		var wg sync.WaitGroup
		for i := 0; i < 5; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				userInfo, err := testcase.Mapper.UserFor(testcase.Identity)
				if err != nil {
					if testcase.ExpectedErr == nil {
						t.Errorf("%s: Expected success, got error '%v'", k, err)
					} else if err.Error() != testcase.ExpectedErr.Error() {
						t.Errorf("%s: Expected error %v, got '%v'", k, testcase.ExpectedErr.Error(), err)
					}
					return
				}
				if err == nil && testcase.ExpectedErr != nil {
					t.Errorf("%s: Expected error '%v', got none", k, testcase.ExpectedErr)
					return
				}

				if userInfo.GetName() != testcase.ExpectedUserName {
					t.Errorf("%s: Expected username %s, got %s", k, testcase.ExpectedUserName, userInfo.GetName())
					return
				}

				user, err := clusterAdminClient.Users().Get(userInfo.GetName())
				if err != nil {
					t.Errorf("%s: Error getting user: %v", k, err)
				}
				if user.FullName != testcase.ExpectedFullName {
					t.Errorf("%s: Expected full name %s, got %s", k, testcase.ExpectedFullName, user.FullName)
				}
				if !reflect.DeepEqual(user.Identities, testcase.ExpectedIdentities) {
					t.Errorf("%s: Expected identities %v, got %v", k, testcase.ExpectedIdentities, user.Identities)
				}
			}()
		}
		wg.Wait()
	}
}
Example #7
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	restOptsGetter := restoptions.NewConfigGetter(options)

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	customListerWatchers := shared.DefaultListerWatcherOverrides{}
	if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
		return nil, err
	}
	informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	requestContextMapper := kapi.NewRequestContextMapper()

	groupStorage, err := groupstorage.NewREST(restOptsGetter)
	if err != nil {
		return nil, err
	}
	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
	admissionControlPluginNames := []string{
		"ProjectRequestLimit",
		"OriginNamespaceLifecycle",
		"PodNodeConstraints",
		"JenkinsBootstrapper",
		"BuildByStrategy",
		imageadmission.PluginName,
		quotaadmission.PluginName,
	}
	if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
		admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
	}

	quotaRegistry := quota.NewOriginQuotaRegistry(privilegedLoopbackOpenShiftClient)
	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
		informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
		informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
	)
	authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient:       privilegedLoopbackOpenShiftClient,
		ProjectCache:          projectCache,
		OriginQuotaRegistry:   quotaRegistry,
		Authorizer:            authorizer,
		JenkinsPipelineConfig: options.JenkinsPipelineConfig,
		RESTClientConfig:      *privilegedLoopbackClientConfig,
	}

	plugins := []admission.Interface{}
	clientsetClient := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
	for _, pluginName := range admissionControlPluginNames {
		configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
		if err != nil {
			return nil, err
		}
		plugin := admission.InitPlugin(pluginName, clientsetClient, configFile)
		if plugin != nil {
			plugins = append(plugins, plugin)
		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	// TODO: look up storage by resource
	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
	if err != nil {
		return nil, err
	}

	authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		RESTOptionsGetter: restOptsGetter,

		RuleResolver:                  ruleResolver,
		Authenticator:                 authenticator,
		Authorizer:                    authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		GroupCache:                    groupCache,
		ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
		ProjectCache:                  projectCache,
		ClusterQuotaMappingController: clusterQuotaMappingController,

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug:      plug,
		ControllerPlugStart: plugStart,

		ImageFor:            imageTemplate.ExpandOrDie,
		EtcdHelper:          etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PluginInitializer: pluginInitializer,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
		Informers:                          informerFactory,
	}

	return config, nil
}
Example #8
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	restOptsGetter := restoptions.NewConfigGetter(options)

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	customListerWatchers := shared.DefaultListerWatcherOverrides{}
	if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
		return nil, err
	}
	informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	requestContextMapper := kapi.NewRequestContextMapper()

	groupStorage, err := groupstorage.NewREST(restOptsGetter)
	if err != nil {
		return nil, err
	}
	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	kubeClientSet := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
	quotaRegistry := quota.NewAllResourceQuotaRegistry(privilegedLoopbackOpenShiftClient, kubeClientSet)
	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
		informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
		informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
	)
	authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient:       privilegedLoopbackOpenShiftClient,
		ProjectCache:          projectCache,
		OriginQuotaRegistry:   quotaRegistry,
		Authorizer:            authorizer,
		JenkinsPipelineConfig: options.JenkinsPipelineConfig,
		RESTClientConfig:      *privilegedLoopbackClientConfig,
		Informers:             informerFactory,
		ClusterQuotaMapper:    clusterQuotaMappingController.GetClusterQuotaMapper(),
	}
	originAdmission, kubeAdmission, err := buildAdmissionChains(options, kubeClientSet, pluginInitializer)
	if err != nil {
		return nil, err
	}

	// TODO: look up storage by resource
	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
	if err != nil {
		return nil, err
	}

	authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		RESTOptionsGetter: restOptsGetter,

		RuleResolver:                  ruleResolver,
		Authenticator:                 authenticator,
		Authorizer:                    authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		GroupCache:                    groupCache,
		ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
		ProjectCache:                  projectCache,
		ClusterQuotaMappingController: clusterQuotaMappingController,

		RequestContextMapper: requestContextMapper,

		AdmissionControl:     originAdmission,
		KubeAdmissionControl: kubeAdmission,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug:      plug,
		ControllerPlugStart: plugStart,

		ImageFor:            imageTemplate.ExpandOrDie,
		EtcdHelper:          etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
		Informers:                          informerFactory,
	}

	return config, nil
}
Example #9
func BuildAuthConfig(options configapi.MasterConfig) (*AuthConfig, error) {
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	// Build a list of storage.Interface objects, each of which only speaks to one of the etcd backends
	etcdBackends := []storage.Interface{}
	for _, url := range options.EtcdClientInfo.URLs {
		backendClientInfo := options.EtcdClientInfo
		backendClientInfo.URLs = []string{url}
		backendClient, err := etcd.MakeNewEtcdClient(backendClientInfo)
		if err != nil {
			return nil, err
		}
		backendEtcdHelper, err := NewEtcdStorage(backendClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("Error setting up server storage: %v", err)
		}
		etcdBackends = append(etcdBackends, backendEtcdHelper)
	}

	var sessionAuth *session.Authenticator
	var sessionHandlerWrapper handlerWrapper
	if options.OAuthConfig.SessionConfig != nil {
		secure := isHTTPS(options.OAuthConfig.MasterPublicURL)
		auth, wrapper, err := buildSessionAuth(secure, options.OAuthConfig.SessionConfig)
		if err != nil {
			return nil, err
		}
		sessionAuth = auth
		sessionHandlerWrapper = wrapper
	}

	// Build the list of valid redirect_uri prefixes for a login using the openshift-web-console client to redirect to
	// TODO: allow configuring this
	// TODO: remove hard-coding of development UI server
	assetPublicURLs := []string{}
	if !options.DisabledFeatures.Has(configapi.FeatureWebConsole) {
		assetPublicURLs = []string{options.OAuthConfig.AssetPublicURL, "http://localhost:9000", "https://localhost:9000"}
	}

	userStorage := useretcd.NewREST(etcdHelper)
	userRegistry := userregistry.NewRegistry(userStorage)
	identityStorage := identityetcd.NewREST(etcdHelper)
	identityRegistry := identityregistry.NewRegistry(identityStorage)

	ret := &AuthConfig{
		Options: *options.OAuthConfig,

		AssetPublicAddresses: assetPublicURLs,
		EtcdHelper:           etcdHelper,
		EtcdBackends:         etcdBackends,

		IdentityRegistry: identityRegistry,
		UserRegistry:     userRegistry,

		SessionAuth: sessionAuth,

		HandlerWrapper: sessionHandlerWrapper,
	}

	return ret, nil
}
Example #10
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}

	restOptsGetter := originrest.StorageOptions(options)

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig, options.MasterClients.OpenShiftLoopbackClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig, options.MasterClients.OpenShiftLoopbackClientConnectionOverrides)
	if err != nil {
		return nil, err
	}

	customListerWatchers := shared.DefaultListerWatcherOverrides{}
	if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
		return nil, err
	}
	informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	defaultRegistry := env("OPENSHIFT_DEFAULT_REGISTRY", "${DOCKER_REGISTRY_SERVICE_HOST}:${DOCKER_REGISTRY_SERVICE_PORT}")
	svcCache := service.NewServiceResolverCache(privilegedLoopbackKubeClient.Services(kapi.NamespaceDefault).Get)
	defaultRegistryFunc, err := svcCache.Defer(defaultRegistry)
	if err != nil {
		return nil, fmt.Errorf("OPENSHIFT_DEFAULT_REGISTRY variable is invalid %q: %v", defaultRegistry, err)
	}

	requestContextMapper := kapi.NewRequestContextMapper()

	groupStorage, err := groupstorage.NewREST(restOptsGetter)
	if err != nil {
		return nil, err
	}
	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	kubeClientSet := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
	quotaRegistry := quota.NewAllResourceQuotaRegistry(privilegedLoopbackOpenShiftClient, kubeClientSet)
	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
		informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
		informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
	)
	authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient:       privilegedLoopbackOpenShiftClient,
		ProjectCache:          projectCache,
		OriginQuotaRegistry:   quotaRegistry,
		Authorizer:            authorizer,
		JenkinsPipelineConfig: options.JenkinsPipelineConfig,
		RESTClientConfig:      *privilegedLoopbackClientConfig,
		Informers:             informerFactory,
		ClusterQuotaMapper:    clusterQuotaMappingController.GetClusterQuotaMapper(),
		DefaultRegistryFn:     imageapi.DefaultRegistryFunc(defaultRegistryFunc),
	}
	originAdmission, kubeAdmission, err := buildAdmissionChains(options, kubeClientSet, pluginInitializer)
	if err != nil {
		return nil, err
	}

	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options)
	if err != nil {
		return nil, err
	}

	authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		RESTOptionsGetter: restOptsGetter,

		RuleResolver:                  ruleResolver,
		Authenticator:                 authenticator,
		Authorizer:                    authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		GroupCache:                    groupCache,
		ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
		ProjectCache:                  projectCache,
		ClusterQuotaMappingController: clusterQuotaMappingController,

		RequestContextMapper: requestContextMapper,

		AdmissionControl:     originAdmission,
		KubeAdmissionControl: kubeAdmission,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug:      plug,
		ControllerPlugStart: plugStart,

		ImageFor:       imageTemplate.ExpandOrDie,
		RegistryNameFn: imageapi.DefaultRegistryFunc(defaultRegistryFunc),

		// TODO: migration of versions of resources stored in annotations must be sorted out
		ExternalVersionCodec: kapi.Codecs.LegacyCodec(unversioned.GroupVersion{Group: "", Version: "v1"}),

		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
		Informers:                          informerFactory,
	}

	// ensure that the limit range informer will be started
	informer := config.Informers.LimitRanges().Informer()
	config.LimitVerifier = imageadmission.NewLimitVerifier(imageadmission.LimitRangesForNamespaceFunc(func(ns string) ([]*kapi.LimitRange, error) {
		list, err := config.Informers.LimitRanges().Lister().LimitRanges(ns).List(labels.Everything())
		if err != nil {
			return nil, err
		}
		// the verifier must return an error while limit ranges are still being loaded
		if len(list) == 0 && len(informer.LastSyncResourceVersion()) == 0 {
			glog.V(4).Infof("LimitVerifier still waiting for ranges to load: %#v", informer)
			forbiddenErr := kapierrors.NewForbidden(unversioned.GroupResource{Resource: "limitranges"}, "", fmt.Errorf("the server is still loading limit information"))
			forbiddenErr.ErrStatus.Details.RetryAfterSeconds = 1
			return nil, forbiddenErr
		}
		return list, nil
	}))

	return config, nil
}