// fakeMasterConfig creates a new fake master config with an empty kubelet config and dummy storage.
func fakeMasterConfig() *MasterConfig {
	informerFactory := shared.NewInformerFactory(testclient.NewSimpleFake(), otestclient.NewSimpleFake(), shared.DefaultListerWatcherOverrides{}, 1*time.Second)
	return &MasterConfig{
		KubeletClientConfig:           &kubeletclient.KubeletClientConfig{},
		RESTOptionsGetter:             restoptions.NewSimpleGetter(&storagebackend.Config{ServerList: []string{"localhost"}}),
		Informers:                     informerFactory,
		ClusterQuotaMappingController: clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas()),
	}
}
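
A config built this way can back lightweight unit tests. A minimal usage sketch, assuming it sits in the same test package as MasterConfig (the test name and assertions are illustrative only):
func TestFakeMasterConfigDefaults(t *testing.T) {
	config := fakeMasterConfig()
	if config.KubeletClientConfig == nil {
		t.Fatal("expected fakeMasterConfig to populate the kubelet client config")
	}
	if config.Informers == nil {
		t.Fatal("expected fakeMasterConfig to wire an informer factory")
	}
}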
Example #2
// fakeMasterConfig creates a new fake master config with an empty kubelet config and dummy storage.
func fakeMasterConfig() *MasterConfig {
	etcdHelper := etcdstorage.NewEtcdStorage(nil, api.Codecs.LegacyCodec(), "", false, genericapiserveroptions.DefaultDeserializationCacheSize)

	informerFactory := shared.NewInformerFactory(testclient.NewSimpleFake(), otestclient.NewSimpleFake(), shared.DefaultListerWatcherOverrides{}, 1*time.Second)
	return &MasterConfig{
		KubeletClientConfig:           &kubeletclient.KubeletClientConfig{},
		RESTOptionsGetter:             restoptions.NewSimpleGetter(etcdHelper),
		EtcdHelper:                    etcdHelper,
		Informers:                     informerFactory,
		ClusterQuotaMappingController: clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas()),
	}
}
Example #3
// TestCreatesAllowedDuringNamespaceDeletion checks to make sure that the resources in the whitelist are allowed
func TestCreatesAllowedDuringNamespaceDeletion(t *testing.T) {
	etcdHelper := etcdstorage.NewEtcdStorage(nil, kapi.Codecs.LegacyCodec(), "", false, genericapiserveroptions.DefaultDeserializationCacheSize)

	informerFactory := shared.NewInformerFactory(testclient.NewSimpleFake(), otestclient.NewSimpleFake(), shared.DefaultListerWatcherOverrides{}, 1*time.Second)
	config := &origin.MasterConfig{
		KubeletClientConfig:           &kubeletclient.KubeletClientConfig{},
		RESTOptionsGetter:             restoptions.NewSimpleGetter(etcdHelper),
		EtcdHelper:                    etcdHelper,
		Informers:                     informerFactory,
		ClusterQuotaMappingController: clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas()),
	}
	storageMap := config.GetRestStorage()
	resources := sets.String{}

	for resource := range storageMap {
		resources.Insert(strings.ToLower(resource))
	}

	for resource := range recommendedCreatableResources {
		if !resources.Has(resource) {
			t.Errorf("recommendedCreatableResources has resource %v, but that resource isn't registered.", resource)
		}
	}
}
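
The whitelist the loop checks against is a plain set keyed by lowercase resource name. A minimal sketch of its shape (the entries below are hypothetical, for illustration only):
var recommendedCreatableResources = map[string]struct{}{
	// hypothetical entries; the real list is maintained alongside GetRestStorage
	"subjectaccessreviews":  {},
	"resourceaccessreviews": {},
}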
Example #4
func runFuzzer(t *testing.T) {
	stopCh := make(chan struct{})
	defer close(stopCh)
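	// The test drives randomized add/update/delete events through fake watches
	// for cluster quotas and namespaces, records every action taken, and then
	// polls the controller until its mapping matches the recorded final state.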

	startingNamespaces := CreateStartingNamespaces()
	kubeclient := ktestclient.NewSimpleFake(startingNamespaces...)
	nsWatch := watch.NewFake()
	kubeclient.PrependWatchReactor("namespaces", ktestclient.DefaultWatchReactor(nsWatch, nil))

	startingQuotas := CreateStartingQuotas()
	originclient := testclient.NewSimpleFake(startingQuotas...)
	quotaWatch := watch.NewFake()
	originclient.AddWatchReactor("clusterresourcequotas", ktestclient.DefaultWatchReactor(quotaWatch, nil))

	informerFactory := shared.NewInformerFactory(kubeclient, originclient, shared.DefaultListerWatcherOverrides{}, 10*time.Minute)
	controller := NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())
	go controller.Run(5, stopCh)
	informerFactory.Start(stopCh)
	informerFactory.StartCore(stopCh)

	finalNamespaces := map[string]*kapi.Namespace{}
	finalQuotas := map[string]*quotaapi.ClusterResourceQuota{}
	quotaActions := map[string][]string{}
	namespaceActions := map[string][]string{}
	finishedNamespaces := make(chan struct{})
	finishedQuotas := make(chan struct{})

	for _, quota := range startingQuotas {
		name := quota.(*quotaapi.ClusterResourceQuota).Name
		quotaActions[name] = append(quotaActions[name], fmt.Sprintf("inserting %v to %v", name, quota.(*quotaapi.ClusterResourceQuota).Spec.Selector))
		finalQuotas[name] = quota.(*quotaapi.ClusterResourceQuota)
	}
	for _, namespace := range startingNamespaces {
		name := namespace.(*kapi.Namespace).Name
		namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("inserting %v to %v", name, namespace.(*kapi.Namespace).Labels))
		finalNamespaces[name] = namespace.(*kapi.Namespace)
	}

	go func() {
		for i := 0; i < 200; i++ {
			name := quotaNames[rand.Intn(len(quotaNames))]
			_, exists := finalQuotas[name]
			if rand.Intn(50) == 0 {
				if !exists {
					continue
				}
				// due to the compression race (see the big comment in the implementation), drain the pending events before deleting
				for {
					if len(quotaWatch.ResultChan()) == 0 {
						break
					}
					time.Sleep(10 * time.Millisecond)
				}

				quotaActions[name] = append(quotaActions[name], "deleting "+name)
				quotaWatch.Delete(finalQuotas[name])
				delete(finalQuotas, name)
				continue
			}

			quota := NewQuota(name)
			finalQuotas[name] = quota
			copied, err := kapi.Scheme.Copy(quota)
			if err != nil {
				t.Fatal(err)
			}
			if exists {
				quotaActions[name] = append(quotaActions[name], fmt.Sprintf("updating %v to %v", name, quota.Spec.Selector))
				quotaWatch.Modify(copied)
			} else {
				quotaActions[name] = append(quotaActions[name], fmt.Sprintf("adding %v to %v", name, quota.Spec.Selector))
				quotaWatch.Add(copied)
			}
		}
		close(finishedQuotas)
	}()

	go func() {
		for i := 0; i < 200; i++ {
			name := namespaceNames[rand.Intn(len(namespaceNames))]
			_, exists := finalNamespaces[name]
			if rand.Intn(50) == 0 {
				if !exists {
					continue
				}
				// due to the compression race (see the big comment in the implementation), drain the pending events before deleting
				for {
					if len(nsWatch.ResultChan()) == 0 {
						break
					}
					time.Sleep(10 * time.Millisecond)
				}

				namespaceActions[name] = append(namespaceActions[name], "deleting "+name)
				nsWatch.Delete(finalNamespaces[name])
				delete(finalNamespaces, name)
				continue
			}

			ns := NewNamespace(name)
			finalNamespaces[name] = ns
			copied, err := kapi.Scheme.Copy(ns)
			if err != nil {
				t.Fatal(err)
			}
			if exists {
				namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("updating %v to %v", name, ns.Labels))
				nsWatch.Modify(copied)
			} else {
				namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("adding %v to %v", name, ns.Labels))
				nsWatch.Add(copied)
			}
		}
		close(finishedNamespaces)
	}()

	<-finishedQuotas
	<-finishedNamespaces

	finalFailures := []string{}
	for i := 0; i < 200; i++ {
		// TODO: is there a better doneness signal than polling? For now, re-check the state a few times.
		time.Sleep(50 * time.Millisecond)

		finalFailures = checkState(controller, finalNamespaces, finalQuotas, t, quotaActions, namespaceActions)
		if len(finalFailures) == 0 {
			break
		}
	}

	if len(finalFailures) > 0 {
		t.Logf("%d quota events and %d namespace events still queued", len(quotaWatch.ResultChan()), len(nsWatch.ResultChan()))
		t.Fatalf("failed on \n%v", strings.Join(finalFailures, "\n"))
	}
}
Example #5
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	restOptsGetter := restoptions.NewConfigGetter(options)

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	customListerWatchers := shared.DefaultListerWatcherOverrides{}
	if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
		return nil, err
	}
	informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	requestContextMapper := kapi.NewRequestContextMapper()

	groupStorage, err := groupstorage.NewREST(restOptsGetter)
	if err != nil {
		return nil, err
	}
	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (Origin-only intercepts)
	admissionControlPluginNames := []string{
		"ProjectRequestLimit",
		"OriginNamespaceLifecycle",
		"PodNodeConstraints",
		"JenkinsBootstrapper",
		"BuildByStrategy",
		imageadmission.PluginName,
		quotaadmission.PluginName,
	}
	if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
		admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
	}
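	// A non-empty PluginOrderOverride replaces the default order above
	// wholesale; the two lists are not merged.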

	quotaRegistry := quota.NewOriginQuotaRegistry(privilegedLoopbackOpenShiftClient)
	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
		informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
		informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
	)
	authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient:       privilegedLoopbackOpenShiftClient,
		ProjectCache:          projectCache,
		OriginQuotaRegistry:   quotaRegistry,
		Authorizer:            authorizer,
		JenkinsPipelineConfig: options.JenkinsPipelineConfig,
		RESTClientConfig:      *privilegedLoopbackClientConfig,
	}

	plugins := []admission.Interface{}
	clientsetClient := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
	for _, pluginName := range admissionControlPluginNames {
		configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
		if err != nil {
			return nil, err
		}
		plugin := admission.InitPlugin(pluginName, clientsetClient, configFile)
		if plugin != nil {
			plugins = append(plugins, plugin)
		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	// TODO: look up storage by resource
	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
	if err != nil {
		return nil, err
	}

	authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		RESTOptionsGetter: restOptsGetter,

		RuleResolver:                  ruleResolver,
		Authenticator:                 authenticator,
		Authorizer:                    authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		GroupCache:                    groupCache,
		ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
		ProjectCache:                  projectCache,
		ClusterQuotaMappingController: clusterQuotaMappingController,

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug:      plug,
		ControllerPlugStart: plugStart,

		ImageFor:            imageTemplate.ExpandOrDie,
		EtcdHelper:          etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PluginInitializer: pluginInitializer,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
		Informers:                          informerFactory,
	}

	return config, nil
}
Example #6
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("error setting up server storage: %v", err)
	}

	restOptsGetter := restoptions.NewConfigGetter(options)

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	customListerWatchers := shared.DefaultListerWatcherOverrides{}
	if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
		return nil, err
	}
	informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	requestContextMapper := kapi.NewRequestContextMapper()

	groupStorage, err := groupstorage.NewREST(restOptsGetter)
	if err != nil {
		return nil, err
	}
	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	kubeClientSet := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
	quotaRegistry := quota.NewAllResourceQuotaRegistry(privilegedLoopbackOpenShiftClient, kubeClientSet)
	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
		informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
		informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
	)
	authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient:       privilegedLoopbackOpenShiftClient,
		ProjectCache:          projectCache,
		OriginQuotaRegistry:   quotaRegistry,
		Authorizer:            authorizer,
		JenkinsPipelineConfig: options.JenkinsPipelineConfig,
		RESTClientConfig:      *privilegedLoopbackClientConfig,
		Informers:             informerFactory,
		ClusterQuotaMapper:    clusterQuotaMappingController.GetClusterQuotaMapper(),
	}
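	// buildAdmissionChains returns two separate chains: one for the Origin
	// API server and one for the embedded kube API server.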
	originAdmission, kubeAdmission, err := buildAdmissionChains(options, kubeClientSet, pluginInitializer)
	if err != nil {
		return nil, err
	}

	// TODO: look up storage by resource
	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
	if err != nil {
		return nil, err
	}

	authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		RESTOptionsGetter: restOptsGetter,

		RuleResolver:                  ruleResolver,
		Authenticator:                 authenticator,
		Authorizer:                    authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		GroupCache:                    groupCache,
		ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
		ProjectCache:                  projectCache,
		ClusterQuotaMappingController: clusterQuotaMappingController,

		RequestContextMapper: requestContextMapper,

		AdmissionControl:     originAdmission,
		KubeAdmissionControl: kubeAdmission,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug:      plug,
		ControllerPlugStart: plugStart,

		ImageFor:            imageTemplate.ExpandOrDie,
		EtcdHelper:          etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
		Informers:                          informerFactory,
	}

	return config, nil
}
Example #7
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.MakeEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}

	restOptsGetter := originrest.StorageOptions(options)

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig, options.MasterClients.OpenShiftLoopbackClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig, options.MasterClients.OpenShiftLoopbackClientConnectionOverrides)
	if err != nil {
		return nil, err
	}

	customListerWatchers := shared.DefaultListerWatcherOverrides{}
	if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
		return nil, err
	}
	informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	defaultRegistry := env("OPENSHIFT_DEFAULT_REGISTRY", "${DOCKER_REGISTRY_SERVICE_HOST}:${DOCKER_REGISTRY_SERVICE_PORT}")
	svcCache := service.NewServiceResolverCache(privilegedLoopbackKubeClient.Services(kapi.NamespaceDefault).Get)
	defaultRegistryFunc, err := svcCache.Defer(defaultRegistry)
	if err != nil {
		return nil, fmt.Errorf("OPENSHIFT_DEFAULT_REGISTRY variable is invalid %q: %v", defaultRegistry, err)
	}
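	// Defer resolves the registry address lazily through the service resolver
	// cache, so the DOCKER_REGISTRY_SERVICE_* variables do not have to be
	// resolvable at the time the master config is built.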

	requestContextMapper := kapi.NewRequestContextMapper()

	groupStorage, err := groupstorage.NewREST(restOptsGetter)
	if err != nil {
		return nil, err
	}
	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	kubeClientSet := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
	quotaRegistry := quota.NewAllResourceQuotaRegistry(privilegedLoopbackOpenShiftClient, kubeClientSet)
	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
		informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
		informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
	)
	authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient:       privilegedLoopbackOpenShiftClient,
		ProjectCache:          projectCache,
		OriginQuotaRegistry:   quotaRegistry,
		Authorizer:            authorizer,
		JenkinsPipelineConfig: options.JenkinsPipelineConfig,
		RESTClientConfig:      *privilegedLoopbackClientConfig,
		Informers:             informerFactory,
		ClusterQuotaMapper:    clusterQuotaMappingController.GetClusterQuotaMapper(),
		DefaultRegistryFn:     imageapi.DefaultRegistryFunc(defaultRegistryFunc),
	}
	originAdmission, kubeAdmission, err := buildAdmissionChains(options, kubeClientSet, pluginInitializer)
	if err != nil {
		return nil, err
	}

	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options)
	if err != nil {
		return nil, err
	}

	authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		RESTOptionsGetter: restOptsGetter,

		RuleResolver:                  ruleResolver,
		Authenticator:                 authenticator,
		Authorizer:                    authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		GroupCache:                    groupCache,
		ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
		ProjectCache:                  projectCache,
		ClusterQuotaMappingController: clusterQuotaMappingController,

		RequestContextMapper: requestContextMapper,

		AdmissionControl:     originAdmission,
		KubeAdmissionControl: kubeAdmission,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug:      plug,
		ControllerPlugStart: plugStart,

		ImageFor:       imageTemplate.ExpandOrDie,
		RegistryNameFn: imageapi.DefaultRegistryFunc(defaultRegistryFunc),

		// TODO: migration of versions of resources stored in annotations must be sorted out
		ExternalVersionCodec: kapi.Codecs.LegacyCodec(unversioned.GroupVersion{Group: "", Version: "v1"}),

		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
		Informers:                          informerFactory,
	}

	// ensure that the limit range informer will be started
	informer := config.Informers.LimitRanges().Informer()
	config.LimitVerifier = imageadmission.NewLimitVerifier(imageadmission.LimitRangesForNamespaceFunc(func(ns string) ([]*kapi.LimitRange, error) {
		list, err := config.Informers.LimitRanges().Lister().LimitRanges(ns).List(labels.Everything())
		if err != nil {
			return nil, err
		}
		// until the informer has synced at least once, the verifier must return
		// a retryable error rather than treat an empty list as authoritative
		if len(list) == 0 && len(informer.LastSyncResourceVersion()) == 0 {
			glog.V(4).Infof("LimitVerifier still waiting for ranges to load: %#v", informer)
			forbiddenErr := kapierrors.NewForbidden(unversioned.GroupResource{Resource: "limitranges"}, "", fmt.Errorf("the server is still loading limit information"))
			forbiddenErr.ErrStatus.Details.RetryAfterSeconds = 1
			return nil, forbiddenErr
		}
		return list, nil
	}))

	return config, nil
}
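
A caller typically hands BuildMasterConfig an already-parsed and validated configapi.MasterConfig. A minimal usage sketch (the variable names here are illustrative only):
config, err := BuildMasterConfig(options)
if err != nil {
	glog.Fatalf("failed to build master config: %v", err)
}
// config now carries the wired-up authenticator, authorizer, admission
// chains, and informers for the master to run.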