func TestNodeAuth(t *testing.T) {
	// Server config
	masterConfig, nodeConfig, adminKubeConfigFile, err := testserver.StartTestAllInOne()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Cluster admin clients and client configs
	adminClient, err := testutil.GetClusterAdminKubeClient(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	originAdminClient, err := testutil.GetClusterAdminClient(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	adminConfig, err := testutil.GetClusterAdminClientConfig(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Client configs for lesser users
	masterKubeletClientConfig := configapi.GetKubeletClientConfig(*masterConfig)

	anonymousConfig := clientcmd.AnonymousClientConfig(*adminConfig)

	badTokenConfig := clientcmd.AnonymousClientConfig(*adminConfig)
	badTokenConfig.BearerToken = "bad-token"

	bobClient, _, bobConfig, err := testutil.GetClientForUser(*adminConfig, "bob")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, _, aliceConfig, err := testutil.GetClientForUser(*adminConfig, "alice")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	sa1Client, _, sa1Config, err := testutil.GetClientForServiceAccount(adminClient, *adminConfig, "default", "sa1")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, _, sa2Config, err := testutil.GetClientForServiceAccount(adminClient, *adminConfig, "default", "sa2")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Grant Bob system:node-reader, which should let them read metrics and stats
	addBob := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.NodeReaderRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(originAdminClient),
		Subjects:            []kapi.ObjectReference{{Kind: "User", Name: "bob"}},
	}
	if err := addBob.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Grant sa1 system:cluster-reader, which should let them read metrics and stats
	addSA1 := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterReaderRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(originAdminClient),
		Subjects:            []kapi.ObjectReference{{Kind: "ServiceAccount", Namespace: "default", Name: "sa1"}},
	}
	if err := addSA1.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Wait for policy cache
	if err := testutil.WaitForClusterPolicyUpdate(bobClient, "get", "nodes/metrics", true); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := testutil.WaitForClusterPolicyUpdate(sa1Client, "get", "nodes/metrics", true); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, nodePort, err := net.SplitHostPort(nodeConfig.ServingInfo.BindAddress)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	nodePortInt, err := strconv.ParseInt(nodePort, 0, 0)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	nodeTLS := configapi.UseTLS(nodeConfig.ServingInfo)

	kubeletClientConfig := func(config *kclient.Config) *kubeletclient.KubeletClientConfig {
		return &kubeletclient.KubeletClientConfig{
			Port:            uint(nodePortInt),
			EnableHttps:     nodeTLS,
			TLSClientConfig: config.TLSClientConfig,
			BearerToken:     config.BearerToken,
		}
	}

	testCases := map[string]struct {
		KubeletClientConfig *kubeletclient.KubeletClientConfig
		Forbidden           bool
		NodeViewer          bool
		NodeAdmin           bool
	}{
		"bad token": {
			KubeletClientConfig: kubeletClientConfig(&badTokenConfig),
		},
		"anonymous": {
			KubeletClientConfig: kubeletClientConfig(&anonymousConfig),
			Forbidden:           true,
		},
		"cluster admin": {
			KubeletClientConfig: kubeletClientConfig(adminConfig),
			NodeAdmin:           true,
		},
		"master kubelet client": {
			KubeletClientConfig: masterKubeletClientConfig,
			NodeAdmin:           true,
		},
		"bob": {
			KubeletClientConfig: kubeletClientConfig(bobConfig),
			NodeViewer:          true,
		},
		"alice": {
			KubeletClientConfig: kubeletClientConfig(aliceConfig),
			Forbidden:           true,
		},
		"sa1": {
			KubeletClientConfig: kubeletClientConfig(sa1Config),
			NodeViewer:          true,
		},
		"sa2": {
			KubeletClientConfig: kubeletClientConfig(sa2Config),
			Forbidden:           true,
		},
	}

	for k, tc := range testCases {
		var (
			// expected result for requests a viewer should be able to make
			viewResult int
			// expected result for requests an admin should be able to make (that can actually complete with a 200 in our tests)
			adminResultOK int
			// expected result for requests an admin should be able to make (that return a 404 in this test if the authn/authz layer is completed)
			adminResultMissing int
		)
		switch {
		case tc.NodeAdmin:
			viewResult = http.StatusOK
			adminResultOK = http.StatusOK
			adminResultMissing = http.StatusNotFound
		case tc.NodeViewer:
			viewResult = http.StatusOK
			adminResultOK = http.StatusForbidden
			adminResultMissing = http.StatusForbidden
		case tc.Forbidden:
			viewResult = http.StatusForbidden
			adminResultOK = http.StatusForbidden
			adminResultMissing = http.StatusForbidden
		default:
			viewResult = http.StatusUnauthorized
			adminResultOK = http.StatusUnauthorized
			adminResultMissing = http.StatusUnauthorized
		}

		requests := []testRequest{
			// Responses to invalid paths are the same for all users
			{"GET", "/", http.StatusNotFound},
			{"GET", "/stats", http.StatusMovedPermanently}, // ServeMux redirects to the directory
			{"GET", "/logs", http.StatusMovedPermanently},  // ServeMux redirects to the directory
			{"GET", "/invalid", http.StatusNotFound},

			// viewer requests
			{"GET", "/metrics", viewResult},
			{"GET", "/stats/", viewResult},
			{"POST", "/stats/", viewResult}, // stats requests can be POSTs which contain query options

			// successful admin requests
			{"GET", "/healthz", adminResultOK},
			{"GET", "/pods", adminResultOK},
			{"GET", "/logs/", adminResultOK},

			// not found admin requests
			{"GET", "/containerLogs/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/exec/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/run/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/attach/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/portForward/mynamespace/mypod/mycontainer", adminResultMissing},

			// GET is supported in origin on /exec and /attach for backwards compatibility
			// make sure node admin permissions are required
			{"GET", "/exec/mynamespace/mypod/mycontainer", adminResultMissing},
			{"GET", "/attach/mynamespace/mypod/mycontainer", adminResultMissing},
		}

		rt, err := kubeletclient.MakeTransport(tc.KubeletClientConfig)
		if err != nil {
			t.Errorf("%s: unexpected error: %v", k, err)
			continue
		}
		for _, r := range requests {
			// target the node's configured serving port rather than assuming the default 10250
			req, err := http.NewRequest(r.Method, "https://"+nodeConfig.NodeName+":"+nodePort+r.Path, nil)
			if err != nil {
				t.Errorf("%s: %s: unexpected error: %v", k, r.Path, err)
				continue
			}
			resp, err := rt.RoundTrip(req)
			if err != nil {
				t.Errorf("%s: %s: unexpected error: %v", k, r.Path, err)
				continue
			}
			resp.Body.Close()
			if resp.StatusCode != r.Result {
				t.Errorf("%s: %s: expected %d, got %d", k, r.Path, r.Result, resp.StatusCode)
				continue
			}
		}
	}
}
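// The loop above ranges over testRequest values defined elsewhere in the test package.
// A minimal sketch of the shape the test appears to assume (field names are inferred
// from how the struct is used, so treat this as illustrative rather than the exact source):
type testRequest struct {
	Method string // HTTP verb to send to the kubelet
	Path   string // kubelet API path
	Result int    // expected HTTP status code
}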
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	// Connect and setup etcd interfaces
	etcdClient, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	server := app.NewAPIServer()
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.AdmissionControl = strings.Join(AdmissionPlugins, ",")

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cmserver := cmapp.NewCMServer()
	cmserver.PodEvictionTimeout = podEvictionTimeout

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
	}

	plugins := []admission.Interface{}
	for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
		switch pluginName {
		case saadmit.PluginName:
			// we need to set some custom parameters on the service account admission controller, so create that one by hand
			saAdmitter := saadmit.NewServiceAccount(kubeClient)
			saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
			saAdmitter.Run()
			plugins = append(plugins, saAdmitter)

		default:
			plugin := admission.InitPlugin(pluginName, kubeClient, server.AdmissionControlConfigFile)
			if plugin != nil {
				plugins = append(plugins, plugin)
			}
		}
	}
	admissionController := admission.NewChainHandler(plugins...)

	var proxyClientCerts []tls.Certificate
	if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
		clientCert, err := tls.LoadX509KeyPair(
			options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
			options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
		)
		if err != nil {
			return nil, err
		}
		proxyClientCerts = append(proxyClientCerts, clientCert)
	}

	// TODO: you have to know every API group you're enabling or upstream will panic; its alternative to panicking is Fatal-ing.
	// It needs a refactor to return errors.
	storageDestinations := master.NewStorageDestinations()

	// storageVersions is a map from API group to allowed versions that must be a version exposed by the REST API or it breaks.
	// We need to fix the upstream to stop using the storage version as a preferred api version.
	storageVersions := map[string]string{}

	enabledKubeVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupKube)
	enabledKubeVersionSet := sets.NewString(enabledKubeVersions...)
	if len(enabledKubeVersions) > 0 {
		databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesForLegacyGroup, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
		}
		storageDestinations.AddAPIGroup(configapi.APIGroupKube, databaseStorage)
		storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion
	}

	enabledExtensionsVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions)
	if len(enabledExtensionsVersions) > 0 {
		groupMeta, err := kapilatest.Group(configapi.APIGroupExtensions)
		if err != nil {
			return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", err)
		}
		// TODO: expose storage version options for api groups
		databaseStorage, err := master.NewEtcdStorage(etcdClient, groupMeta.InterfacesFor, groupMeta.GroupVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", err)
		}
		storageDestinations.AddAPIGroup(configapi.APIGroupExtensions, databaseStorage)
		storageVersions[configapi.APIGroupExtensions] = enabledExtensionsVersions[0]
	}

	m := &master.Config{
		PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
		ReadWritePort: port,

		StorageDestinations: storageDestinations,
		StorageVersions:     storageVersions,

		EventTTL: server.EventTTL,
		//MinRequestTimeout: server.MinRequestTimeout,

		ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
		ServiceNodePortRange:  server.ServiceNodePortRange,

		RequestContextMapper: requestContextMapper,

		KubeletClient:  kubeletClient,
		APIPrefix:      KubeAPIPrefix,
		APIGroupPrefix: KubeAPIGroupPrefix,

		EnableCoreControllers: true,

		MasterCount: options.KubernetesMasterConfig.MasterCount,

		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admissionController,

		EnableExp: len(enabledExtensionsVersions) > 0,
		DisableV1: !enabledKubeVersionSet.Has("v1"),

		// Set the TLS options for proxying to pods and services
		// Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
		ProxyTLSClientConfig: &tls.Config{
			// Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
			InsecureSkipVerify: true,
			Certificates:       proxyClientCerts,
		},
	}

	// set for consistency -- Origin only uses m.EnableExp
	cmserver.EnableExperimental = m.EnableExp

	if options.DNSConfig != nil {
		_, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
		if err != nil {
			return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
		}
		dnsPort, err := strconv.Atoi(dnsPortStr)
		if err != nil {
			return nil, fmt.Errorf("invalid DNS port: %v", err)
		}
		m.ExtraServicePorts = append(m.ExtraServicePorts,
			kapi.ServicePort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP, TargetPort: util.NewIntOrStringFromInt(dnsPort)},
			kapi.ServicePort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP, TargetPort: util.NewIntOrStringFromInt(dnsPort)},
		)
		m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
			kapi.EndpointPort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP},
			kapi.EndpointPort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP},
		)
	}

	kmaster := &MasterConfig{
		Options:           *options.KubernetesMasterConfig,
		KubeClient:        kubeClient,
		Master:            m,
		ControllerManager: cmserver,
		CloudProvider:     cloud,
	}

	return kmaster, nil
}
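// Several of the builder functions in this file repeat the same bind-address parsing
// before wiring up ports. A small stand-alone sketch of that pattern using only the
// standard library (the helper name is illustrative and not part of the source):
func portFromBindAddress(bindAddress string) (int, error) {
	// "0.0.0.0:8443" -> ("0.0.0.0", "8443"); fails on addresses without a port
	_, portString, err := net.SplitHostPort(bindAddress)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(portString)
}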
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdHelper, err := NewEtcdStorage(client, options.EtcdStorageConfig.OpenShiftStorageVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	policyCache, policyClient := newReadOnlyCacheAndClient(etcdHelper)
	requestContextMapper := kapi.NewRequestContextMapper()

	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupstorage.NewREST(etcdHelper)))

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
	admissionControlPluginNames := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}
	admissionClient := admissionControlClient(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient)
	admissionController := admission.NewFromPlugins(admissionClient, admissionControlPluginNames, "")

	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, client)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		Authenticator: newAuthenticator(options, etcdHelper, serviceAccountTokenGetter, apiClientCAs, groupCache),
		Authorizer: newAuthorizer(policyClient, options.ProjectConfig.ProjectRequestMessage),
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		PolicyCache: policyCache,
		GroupCache: groupCache,
		ProjectAuthorizationCache: newProjectAuthorizationCache(privilegedLoopbackOpenShiftClient, privilegedLoopbackKubeClient, policyClient),

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug: plug,
		ControllerPlugStart: plugStart,

		ImageFor: imageTemplate.ExpandOrDie,
		EtcdHelper: etcdHelper,
		EtcdClient: client,

		KubeletClientConfig: kubeletClientConfig,

		ClientCAs: clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig: *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient: privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

		BuildControllerServiceAccount: bootstrappolicy.InfraBuildControllerServiceAccountName,
		DeploymentControllerServiceAccount: bootstrappolicy.InfraDeploymentControllerServiceAccountName,
		ReplicationControllerServiceAccount: bootstrappolicy.InfraReplicationControllerServiceAccountName,
	}

	return config, nil
}
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	policyCache, policyClient := newReadOnlyCacheAndClient(etcdHelper)
	requestContextMapper := kapi.NewRequestContextMapper()

	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupstorage.NewREST(etcdHelper)))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
	admissionControlPluginNames := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}
	if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
		admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
	}

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient: privilegedLoopbackOpenShiftClient,
		ProjectCache: projectCache,
	}

	plugins := []admission.Interface{}
	for _, pluginName := range admissionControlPluginNames {
		configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
		if err != nil {
			return nil, err
		}
		plugin := admission.InitPlugin(pluginName, privilegedLoopbackKubeClient, configFile)
		if plugin != nil {
			plugins = append(plugins, plugin)
		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	authorizer := newAuthorizer(policyClient, options.ProjectConfig.ProjectRequestMessage)

	config := &MasterConfig{
		Options: options,

		Authenticator: newAuthenticator(options, etcdHelper, serviceAccountTokenGetter, apiClientCAs, groupCache),
		Authorizer: authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		PolicyCache: policyCache,
		GroupCache: groupCache,
		ProjectAuthorizationCache: newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, policyClient),
		ProjectCache: projectCache,

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug: plug,
		ControllerPlugStart: plugStart,

		ImageFor: imageTemplate.ExpandOrDie,
		EtcdHelper: etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs: clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig: *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient: privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
	}

	return config, nil
}
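// Conceptual sketch of what admission.NewChainHandler provides: each configured plugin
// is consulted in order and the first rejection stops the request. The interface below
// is a local stand-in for illustration only, not the upstream admission.Interface.
type admitter interface {
	Admit(obj interface{}) error
}

type admissionChain []admitter

// Admit runs every plugin in order and returns the first error encountered.
func (c admissionChain) Admit(obj interface{}) error {
	for _, plugin := range c {
		if err := plugin.Admit(obj); err != nil {
			return err
		}
	}
	return nil
}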
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	// Connect and setup etcd interfaces
	etcdClient, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesFor, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	server := app.NewAPIServer()
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.AdmissionControl = strings.Join(AdmissionPlugins, ",")

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cmserver := cmapp.NewCMServer()
	cmserver.PodEvictionTimeout = podEvictionTimeout

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}

	plugins := []admission.Interface{}
	for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
		switch pluginName {
		case saadmit.PluginName:
			// we need to set some custom parameters on the service account admission controller, so create that one by hand
			saAdmitter := saadmit.NewServiceAccount(kubeClient)
			saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
			saAdmitter.Run()
			plugins = append(plugins, saAdmitter)

		default:
			plugin := admission.InitPlugin(pluginName, kubeClient, server.AdmissionControlConfigFile)
			if plugin != nil {
				plugins = append(plugins, plugin)
			}
		}
	}
	admissionController := admission.NewChainHandler(plugins...)

	var proxyClientCerts []tls.Certificate
	if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
		clientCert, err := tls.LoadX509KeyPair(
			options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
			options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
		)
		if err != nil {
			return nil, err
		}
		proxyClientCerts = append(proxyClientCerts, clientCert)
	}

	m := &master.Config{
		PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
		ReadWritePort: port,

		DatabaseStorage: databaseStorage,
		ExpDatabaseStorage: databaseStorage,

		EventTTL: server.EventTTL,
		//MinRequestTimeout: server.MinRequestTimeout,

		ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
		ServiceNodePortRange: server.ServiceNodePortRange,

		RequestContextMapper: requestContextMapper,

		KubeletClient: kubeletClient,
		APIPrefix: KubeAPIPrefix,

		EnableCoreControllers: true,

		MasterCount: options.KubernetesMasterConfig.MasterCount,

		Authorizer: apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admissionController,

		EnableV1Beta3: configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1beta3"),
		DisableV1: !configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1"),

		// Set the TLS options for proxying to pods and services
		// Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
		ProxyTLSClientConfig: &tls.Config{
			// Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
			InsecureSkipVerify: true,
			Certificates: proxyClientCerts,
		},
	}

	kmaster := &MasterConfig{
		Options: *options.KubernetesMasterConfig,
		KubeClient: kubeClient,
		Master: m,
		ControllerManager: cmserver,
		CloudProvider: cloud,
	}

	return kmaster, nil
}
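// Sketch of the proxy client-certificate wiring used by the builders above, assuming
// the cert and key paths come from ProxyClientInfo; standard library only, and the
// helper name is illustrative rather than part of the source:
func proxyTLSConfig(certFile, keyFile string) (*tls.Config, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	return &tls.Config{
		// pods and services are dialed on dynamically allocated IPs, so hostnames cannot be verified
		InsecureSkipVerify: true,
		Certificates:       []tls.Certificate{cert},
	}, nil
}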
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	restOptsGetter := restoptions.NewConfigGetter(options)

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	customListerWatchers := shared.DefaultListerWatcherOverrides{}
	if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
		return nil, err
	}
	informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	requestContextMapper := kapi.NewRequestContextMapper()

	groupStorage, err := groupstorage.NewREST(restOptsGetter)
	if err != nil {
		return nil, err
	}
	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
	admissionControlPluginNames := []string{
		"ProjectRequestLimit",
		"OriginNamespaceLifecycle",
		"PodNodeConstraints",
		"JenkinsBootstrapper",
		"BuildByStrategy",
		imageadmission.PluginName,
		quotaadmission.PluginName,
	}
	if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
		admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
	}

	quotaRegistry := quota.NewOriginQuotaRegistry(privilegedLoopbackOpenShiftClient)
	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
		informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
		informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
	)
	authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient: privilegedLoopbackOpenShiftClient,
		ProjectCache: projectCache,
		OriginQuotaRegistry: quotaRegistry,
		Authorizer: authorizer,
		JenkinsPipelineConfig: options.JenkinsPipelineConfig,
		RESTClientConfig: *privilegedLoopbackClientConfig,
	}

	plugins := []admission.Interface{}
	clientsetClient := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
	for _, pluginName := range admissionControlPluginNames {
		configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
		if err != nil {
			return nil, err
		}
		plugin := admission.InitPlugin(pluginName, clientsetClient, configFile)
		if plugin != nil {
			plugins = append(plugins, plugin)
		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	// TODO: look up storage by resource
	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
	if err != nil {
		return nil, err
	}

	authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		RESTOptionsGetter: restOptsGetter,

		RuleResolver: ruleResolver,
		Authenticator: authenticator,
		Authorizer: authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		GroupCache: groupCache,
		ProjectAuthorizationCache: newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
		ProjectCache: projectCache,
		ClusterQuotaMappingController: clusterQuotaMappingController,

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug: plug,
		ControllerPlugStart: plugStart,

		ImageFor: imageTemplate.ExpandOrDie,
		EtcdHelper: etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs: clientCAs,
		APIClientCAs: apiClientCAs,

		PluginInitializer: pluginInitializer,

		PrivilegedLoopbackClientConfig: *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient: privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

		Informers: informerFactory,
	}

	return config, nil
}
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, pluginInitializer oadmission.PluginInitializer) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	// Connect and setup etcd interfaces
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	// Defaults are tested in TestAPIServerDefaults
	server := apiserveroptions.NewAPIServer()
	// Adjust defaults
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.AdmissionControl = strings.Join(AdmissionPlugins, ",")
	server.EnableLogsSupport = false // don't expose server logs

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	if len(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride) > 0 {
		server.AdmissionControl = strings.Join(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride, ",")
	}

	// Defaults are tested in TestCMServerDefaults
	cmserver := cmapp.NewCMServer()
	// Adjust defaults
	cmserver.Address = "" // no healthz endpoint
	cmserver.Port = 0     // no healthz endpoint
	cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
	}

	plugins := []admission.Interface{}
	for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
		switch pluginName {
		case serviceadmit.ExternalIPPluginName:
			// this needs to be moved upstream to be part of core config
			reject, admit, err := serviceadmit.ParseCIDRRules(options.NetworkConfig.ExternalIPNetworkCIDRs)
			if err != nil {
				// should have been caught with validation
				return nil, err
			}
			plugins = append(plugins, serviceadmit.NewExternalIPRanger(reject, admit))

		case saadmit.PluginName:
			// we need to set some custom parameters on the service account admission controller, so create that one by hand
			saAdmitter := saadmit.NewServiceAccount(internalclientset.FromUnversionedClient(kubeClient))
			saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
			saAdmitter.Run()
			plugins = append(plugins, saAdmitter)

		default:
			configFile, err := pluginconfig.GetPluginConfigFile(options.KubernetesMasterConfig.AdmissionConfig.PluginConfig, pluginName, server.AdmissionControlConfigFile)
			if err != nil {
				return nil, err
			}
			plugin := admission.InitPlugin(pluginName, internalclientset.FromUnversionedClient(kubeClient), configFile)
			if plugin != nil {
				plugins = append(plugins, plugin)
			}
		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	var proxyClientCerts []tls.Certificate
	if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
		clientCert, err := tls.LoadX509KeyPair(
			options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
			options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
		)
		if err != nil {
			return nil, err
		}
		proxyClientCerts = append(proxyClientCerts, clientCert)
	}

	// TODO: you have to know every API group you're enabling or upstream will panic; its alternative to panicking is Fatal-ing.
	// It needs a refactor to return errors.
	storageDestinations := genericapiserver.NewStorageDestinations()

	// storageVersions is a map from API group to allowed versions that must be a version exposed by the REST API or it breaks.
	// We need to fix the upstream to stop using the storage version as a preferred api version.
	storageVersions := map[string]string{}

	enabledKubeVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupKube)
	if len(enabledKubeVersions) > 0 {
		kubeStorageVersion := unversioned.GroupVersion{Group: configapi.APIGroupKube, Version: options.EtcdStorageConfig.KubernetesStorageVersion}
		databaseStorage, err := NewEtcdStorage(etcdClient, kubeStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
		}
		storageDestinations.AddAPIGroup(configapi.APIGroupKube, databaseStorage)
		storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion
	}

	// enable this if extensions API is enabled (or batch or autoscaling, since they persist to extensions/v1beta1 for now)
	// TODO: replace this with a loop over configured storage versions
	extensionsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions)) > 0
	batchEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupBatch)) > 0
	autoscalingEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupAutoscaling)) > 0
	if extensionsEnabled || autoscalingEnabled || batchEnabled {
		// TODO: replace this with a configured storage version for extensions once configuration exposes this
		extensionsStorageVersion := unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"}
		databaseStorage, err := NewEtcdStorage(etcdClient, extensionsStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", err)
		}
		storageDestinations.AddAPIGroup(configapi.APIGroupExtensions, databaseStorage)
		storageVersions[configapi.APIGroupExtensions] = extensionsStorageVersion.String()
	}

	// Preserve previous behavior of using the first non-loopback address
	// TODO: Deprecate this behavior and just require a valid value to be passed in
	publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
	if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
		hostIP, err := knet.ChooseHostInterface()
		if err != nil {
			glog.Fatalf("Unable to find suitable network address.error='%v'. Set the masterIP directly to avoid this error.", err)
		}
		publicAddress = hostIP
		glog.Infof("Will report %v as public IP address.", publicAddress)
	}

	m := &master.Config{
		Config: &genericapiserver.Config{
			PublicAddress: publicAddress,
			ReadWritePort: port,

			Authorizer: apiserver.NewAlwaysAllowAuthorizer(),
			AdmissionControl: admissionController,

			StorageDestinations: storageDestinations,
			StorageVersions: storageVersions,

			ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
			ServiceNodePortRange: server.ServiceNodePortRange,

			RequestContextMapper: requestContextMapper,

			APIGroupVersionOverrides: getAPIGroupVersionOverrides(options),
			APIPrefix: KubeAPIPrefix,
			APIGroupPrefix: KubeAPIGroupPrefix,

			MasterCount: options.KubernetesMasterConfig.MasterCount,

			// Set the TLS options for proxying to pods and services
			// Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
			ProxyTLSClientConfig: &tls.Config{
				// Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
				InsecureSkipVerify: true,
				Certificates: proxyClientCerts,
			},

			Serializer: kapi.Codecs,
		},

		EventTTL: server.EventTTL,
		//MinRequestTimeout: server.MinRequestTimeout,

		KubeletClient: kubeletClient,

		EnableCoreControllers: true,
	}

	if options.DNSConfig != nil {
		_, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
		if err != nil {
			return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
		}
		dnsPort, err := strconv.Atoi(dnsPortStr)
		if err != nil {
			return nil, fmt.Errorf("invalid DNS port: %v", err)
		}
		m.ExtraServicePorts = append(m.ExtraServicePorts,
			kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
			kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
		)
		m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
			kapi.EndpointPort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP},
			kapi.EndpointPort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP},
		)
	}

	kmaster := &MasterConfig{
		Options: *options.KubernetesMasterConfig,
		KubeClient: kubeClient,
		Master: m,
		ControllerManager: cmserver,
		CloudProvider: cloud,
	}

	return kmaster, nil
}
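// The public-address fallback above relies on knet.ChooseHostInterface. A rough,
// standard-library-only approximation of that behavior, for illustration only (it is
// not the upstream helper and ignores interface flags the real one considers):
func firstNonLoopbackIP() (net.IP, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return nil, err
	}
	for _, addr := range addrs {
		if ipNet, ok := addr.(*net.IPNet); ok && !ipNet.IP.IsLoopback() && ipNet.IP.To4() != nil {
			return ipNet.IP, nil
		}
	}
	return nil, fmt.Errorf("no suitable non-loopback address found")
}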
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	restOptsGetter := restoptions.NewConfigGetter(options)

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	customListerWatchers := shared.DefaultListerWatcherOverrides{}
	if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
		return nil, err
	}
	informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	requestContextMapper := kapi.NewRequestContextMapper()

	groupStorage, err := groupstorage.NewREST(restOptsGetter)
	if err != nil {
		return nil, err
	}
	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	kubeClientSet := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
	quotaRegistry := quota.NewAllResourceQuotaRegistry(privilegedLoopbackOpenShiftClient, kubeClientSet)
	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
		informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
		informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
	)
	authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient: privilegedLoopbackOpenShiftClient,
		ProjectCache: projectCache,
		OriginQuotaRegistry: quotaRegistry,
		Authorizer: authorizer,
		JenkinsPipelineConfig: options.JenkinsPipelineConfig,
		RESTClientConfig: *privilegedLoopbackClientConfig,
		Informers: informerFactory,
		ClusterQuotaMapper: clusterQuotaMappingController.GetClusterQuotaMapper(),
	}

	originAdmission, kubeAdmission, err := buildAdmissionChains(options, kubeClientSet, pluginInitializer)
	if err != nil {
		return nil, err
	}

	// TODO: look up storage by resource
	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
	if err != nil {
		return nil, err
	}

	authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		RESTOptionsGetter: restOptsGetter,

		RuleResolver: ruleResolver,
		Authenticator: authenticator,
		Authorizer: authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		GroupCache: groupCache,
		ProjectAuthorizationCache: newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
		ProjectCache: projectCache,
		ClusterQuotaMappingController: clusterQuotaMappingController,

		RequestContextMapper: requestContextMapper,

		AdmissionControl: originAdmission,
		KubeAdmissionControl: kubeAdmission,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug: plug,
		ControllerPlugStart: plugStart,

		ImageFor: imageTemplate.ExpandOrDie,
		EtcdHelper: etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs: clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig: *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient: privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

		Informers: informerFactory,
	}

	return config, nil
}
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.MakeEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}

	restOptsGetter := originrest.StorageOptions(options)

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig, options.MasterClients.OpenShiftLoopbackClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig, options.MasterClients.OpenShiftLoopbackClientConnectionOverrides)
	if err != nil {
		return nil, err
	}

	customListerWatchers := shared.DefaultListerWatcherOverrides{}
	if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
		return nil, err
	}
	informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	defaultRegistry := env("OPENSHIFT_DEFAULT_REGISTRY", "${DOCKER_REGISTRY_SERVICE_HOST}:${DOCKER_REGISTRY_SERVICE_PORT}")
	svcCache := service.NewServiceResolverCache(privilegedLoopbackKubeClient.Services(kapi.NamespaceDefault).Get)
	defaultRegistryFunc, err := svcCache.Defer(defaultRegistry)
	if err != nil {
		return nil, fmt.Errorf("OPENSHIFT_DEFAULT_REGISTRY variable is invalid %q: %v", defaultRegistry, err)
	}

	requestContextMapper := kapi.NewRequestContextMapper()

	groupStorage, err := groupstorage.NewREST(restOptsGetter)
	if err != nil {
		return nil, err
	}
	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	kubeClientSet := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
	quotaRegistry := quota.NewAllResourceQuotaRegistry(privilegedLoopbackOpenShiftClient, kubeClientSet)
	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
		informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
		informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
	)
	authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient: privilegedLoopbackOpenShiftClient,
		ProjectCache: projectCache,
		OriginQuotaRegistry: quotaRegistry,
		Authorizer: authorizer,
		JenkinsPipelineConfig: options.JenkinsPipelineConfig,
		RESTClientConfig: *privilegedLoopbackClientConfig,
		Informers: informerFactory,
		ClusterQuotaMapper: clusterQuotaMappingController.GetClusterQuotaMapper(),
		DefaultRegistryFn: imageapi.DefaultRegistryFunc(defaultRegistryFunc),
	}

	originAdmission, kubeAdmission, err := buildAdmissionChains(options, kubeClientSet, pluginInitializer)
	if err != nil {
		return nil, err
	}

	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options)
	if err != nil {
		return nil, err
	}

	authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		RESTOptionsGetter: restOptsGetter,

		RuleResolver: ruleResolver,
		Authenticator: authenticator,
		Authorizer: authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		GroupCache: groupCache,
		ProjectAuthorizationCache: newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
		ProjectCache: projectCache,
		ClusterQuotaMappingController: clusterQuotaMappingController,

		RequestContextMapper: requestContextMapper,

		AdmissionControl: originAdmission,
		KubeAdmissionControl: kubeAdmission,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug: plug,
		ControllerPlugStart: plugStart,

		ImageFor: imageTemplate.ExpandOrDie,
		RegistryNameFn: imageapi.DefaultRegistryFunc(defaultRegistryFunc),

		// TODO: migration of versions of resources stored in annotations must be sorted out
		ExternalVersionCodec: kapi.Codecs.LegacyCodec(unversioned.GroupVersion{Group: "", Version: "v1"}),

		KubeletClientConfig: kubeletClientConfig,

		ClientCAs: clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig: *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient: privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

		Informers: informerFactory,
	}

	// ensure that the limit range informer will be started
	informer := config.Informers.LimitRanges().Informer()
	config.LimitVerifier = imageadmission.NewLimitVerifier(imageadmission.LimitRangesForNamespaceFunc(func(ns string) ([]*kapi.LimitRange, error) {
		list, err := config.Informers.LimitRanges().Lister().LimitRanges(ns).List(labels.Everything())
		if err != nil {
			return nil, err
		}
		// the verifier must return an error
		if len(list) == 0 && len(informer.LastSyncResourceVersion()) == 0 {
			glog.V(4).Infof("LimitVerifier still waiting for ranges to load: %#v", informer)
			forbiddenErr := kapierrors.NewForbidden(unversioned.GroupResource{Resource: "limitranges"}, "", fmt.Errorf("the server is still loading limit information"))
			forbiddenErr.ErrStatus.Details.RetryAfterSeconds = 1
			return nil, forbiddenErr
		}
		return list, nil
	}))

	return config, nil
}
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, informers shared.InformerFactory, admissionControl admission.Interface, originAuthenticator authenticator.Request) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	// Defaults are tested in TestCMServerDefaults
	cmserver := cmapp.NewCMServer()
	// Adjust defaults
	cmserver.Address = ""                   // no healthz endpoint
	cmserver.Port = 0                       // no healthz endpoint
	cmserver.EnableGarbageCollector = false // disabled until we add the controller
	cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}
	cmserver.VolumeConfiguration.EnableDynamicProvisioning = options.VolumeConfig.DynamicProvisioningEnabled

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	schedulerserver := scheduleroptions.NewSchedulerServer()
	schedulerserver.PolicyConfigFile = options.KubernetesMasterConfig.SchedulerConfigFile
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.SchedulerArguments, schedulerserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
	}

	var proxyClientCerts []tls.Certificate
	if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
		clientCert, err := tls.LoadX509KeyPair(
			options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
			options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
		)
		if err != nil {
			return nil, err
		}
		proxyClientCerts = append(proxyClientCerts, clientCert)
	}

	server, storageFactory, err := BuildDefaultAPIServer(options)
	if err != nil {
		return nil, err
	}

	// Preserve previous behavior of using the first non-loopback address
	// TODO: Deprecate this behavior and just require a valid value to be passed in
	publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
	if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
		hostIP, err := knet.ChooseHostInterface()
		if err != nil {
			glog.Fatalf("Unable to find suitable network address.error='%v'. Set the masterIP directly to avoid this error.", err)
		}
		publicAddress = hostIP
		glog.Infof("Will report %v as public IP address.", publicAddress)
	}

	m := &master.Config{
		Config: &genericapiserver.Config{
			PublicAddress: publicAddress,
			ReadWritePort: server.SecurePort,

			Authenticator: originAuthenticator, // this is used to fulfill the tokenreviews endpoint which is used by node authentication
			Authorizer: authorizer.NewAlwaysAllowAuthorizer(),
			AdmissionControl: admissionControl,

			StorageFactory: storageFactory,

			ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
			ServiceNodePortRange: server.ServiceNodePortRange,

			RequestContextMapper: requestContextMapper,

			APIResourceConfigSource: getAPIResourceConfig(options),
			APIPrefix: server.APIPrefix,
			APIGroupPrefix: server.APIGroupPrefix,

			MasterCount: server.MasterCount,

			// Set the TLS options for proxying to pods and services
			// Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
			ProxyTLSClientConfig: &tls.Config{
				// Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
				InsecureSkipVerify: true,
				Certificates: proxyClientCerts,
			},

			Serializer: kapi.Codecs,

			EnableLogsSupport: server.EnableLogsSupport,
			EnableProfiling: server.EnableProfiling,
			EnableWatchCache: server.EnableWatchCache,
			MasterServiceNamespace: server.MasterServiceNamespace,
			ExternalHost: server.ExternalHost,
			MinRequestTimeout: server.MinRequestTimeout,
			KubernetesServiceNodePort: server.KubernetesServiceNodePort,
		},

		EventTTL: server.EventTTL,

		KubeletClient: kubeletClient,

		EnableCoreControllers: true,

		DeleteCollectionWorkers: server.DeleteCollectionWorkers,
	}

	if server.EnableWatchCache {
		cachesize.SetWatchCacheSizes(server.WatchCacheSizes)
	}

	if options.DNSConfig != nil {
		_, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
		if err != nil {
			return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
		}
		dnsPort, err := strconv.Atoi(dnsPortStr)
		if err != nil {
			return nil, fmt.Errorf("invalid DNS port: %v", err)
		}
		m.ExtraServicePorts = append(m.ExtraServicePorts,
			kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
			kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
		)
		m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
			kapi.EndpointPort{Name: "dns", Port: int32(dnsPort), Protocol: kapi.ProtocolUDP},
			kapi.EndpointPort{Name: "dns-tcp", Port: int32(dnsPort), Protocol: kapi.ProtocolTCP},
		)
	}

	kmaster := &MasterConfig{
		Options: *options.KubernetesMasterConfig,
		KubeClient: kubeClient,
		Master: m,
		ControllerManager: cmserver,
		CloudProvider: cloud,
		SchedulerServer: schedulerserver,
		Informers: informers,
	}

	return kmaster, nil
}
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	// Connect and setup etcd interfaces
	etcdClient, err := etcd.GetAndTestEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesFor, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	server := app.NewAPIServer()
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = util.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.AdmissionControl = strings.Join([]string{
		"NamespaceExists",
		"NamespaceLifecycle",
		"OriginPodNodeEnvironment",
		"LimitRanger",
		"ServiceAccount",
		"SecurityContextConstraint",
		"ResourceQuota",
	}, ",")

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cmserver := cmapp.NewCMServer()
	cmserver.PodEvictionTimeout = podEvictionTimeout
	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}

	admissionController := admission.NewFromPlugins(kubeClient, strings.Split(server.AdmissionControl, ","), server.AdmissionControlConfigFile)

	m := &master.Config{
		PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
		ReadWritePort: port,

		DatabaseStorage:    databaseStorage,
		ExpDatabaseStorage: databaseStorage,

		EventTTL: server.EventTTL,
		//MinRequestTimeout: server.MinRequestTimeout,

		ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
		ServiceNodePortRange:  server.ServiceNodePortRange,

		RequestContextMapper: requestContextMapper,

		KubeletClient: kubeletClient,
		APIPrefix:     KubeAPIPrefix,

		EnableCoreControllers: true,
		MasterCount:           options.KubernetesMasterConfig.MasterCount,

		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admissionController,

		EnableV1Beta3: configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1beta3"),
		DisableV1:     !configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1"),
	}

	kmaster := &MasterConfig{
		Options: *options.KubernetesMasterConfig,

		KubeClient: kubeClient,

		Master:            m,
		ControllerManager: cmserver,
		CloudProvider:     cloud,
	}

	return kmaster, nil
}
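// The admission-control chain above is assembled as one comma-separated string
// and split again before being handed to admission.NewFromPlugins, so plugin
// order is exactly the order of the joined list. A small sketch of that round
// trip, standard library only (exampleAdmissionPluginOrder is an illustrative
// name, not part of the function above):
func exampleAdmissionPluginOrder() []string {
	joined := strings.Join([]string{
		"NamespaceExists", "NamespaceLifecycle", "OriginPodNodeEnvironment",
		"LimitRanger", "ServiceAccount", "SecurityContextConstraint", "ResourceQuota",
	}, ",")
	// Splitting restores the ordered slice; the PluginOrderOverride handling in
	// the variant below simply replaces the joined string before this split.
	return strings.Split(joined, ",")
}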
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, informers shared.InformerFactory, pluginInitializer oadmission.PluginInitializer) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	// Defaults are tested in TestAPIServerDefaults
	server := apiserveroptions.NewAPIServer()
	// Adjust defaults
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.AdmissionControl = strings.Join(AdmissionPlugins, ",")
	server.EnableLogsSupport = false // don't expose server logs
	server.EnableProfiling = false
	server.APIPrefix = KubeAPIPrefix
	server.APIGroupPrefix = KubeAPIGroupPrefix
	server.SecurePort = port
	server.MasterCount = options.KubernetesMasterConfig.MasterCount

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	if len(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride) > 0 {
		server.AdmissionControl = strings.Join(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride, ",")
	}

	// Defaults are tested in TestCMServerDefaults
	cmserver := cmapp.NewCMServer()
	// Adjust defaults
	cmserver.Address = "" // no healthz endpoint
	cmserver.Port = 0     // no healthz endpoint
	cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}
	cmserver.VolumeConfiguration.EnableDynamicProvisioning = options.VolumeConfig.DynamicProvisioningEnabled

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
	}

	plugins := []admission.Interface{}
	for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
		switch pluginName {
		case lifecycle.PluginName:
			// We need to include our infrastructure and shared resource namespaces in the immortal namespaces list
			immortalNamespaces := sets.NewString(kapi.NamespaceDefault)
			if len(options.PolicyConfig.OpenShiftSharedResourcesNamespace) > 0 {
				immortalNamespaces.Insert(options.PolicyConfig.OpenShiftSharedResourcesNamespace)
			}
			if len(options.PolicyConfig.OpenShiftInfrastructureNamespace) > 0 {
				immortalNamespaces.Insert(options.PolicyConfig.OpenShiftInfrastructureNamespace)
			}
			plugins = append(plugins, lifecycle.NewLifecycle(clientadapter.FromUnversionedClient(kubeClient), immortalNamespaces))

		case serviceadmit.ExternalIPPluginName:
			// this needs to be moved upstream to be part of core config
			reject, admit, err := serviceadmit.ParseCIDRRules(options.NetworkConfig.ExternalIPNetworkCIDRs)
			if err != nil {
				// should have been caught with validation
				return nil, err
			}
			plugins = append(plugins, serviceadmit.NewExternalIPRanger(reject, admit))

		case saadmit.PluginName:
			// we need to set some custom parameters on the service account admission controller, so create that one by hand
			saAdmitter := saadmit.NewServiceAccount(clientadapter.FromUnversionedClient(kubeClient))
			saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
			saAdmitter.Run()
			plugins = append(plugins, saAdmitter)

		default:
			configFile, err := pluginconfig.GetPluginConfigFile(options.KubernetesMasterConfig.AdmissionConfig.PluginConfig, pluginName, server.AdmissionControlConfigFile)
			if err != nil {
				return nil, err
			}
			plugin := admission.InitPlugin(pluginName, clientadapter.FromUnversionedClient(kubeClient), configFile)
			if plugin != nil {
				plugins = append(plugins, plugin)
			}
		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	var proxyClientCerts []tls.Certificate
	if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
		clientCert, err := tls.LoadX509KeyPair(
			options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
			options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
		)
		if err != nil {
			return nil, err
		}
		proxyClientCerts = append(proxyClientCerts, clientCert)
	}

	resourceEncodingConfig := genericapiserver.NewDefaultResourceEncodingConfig()
	resourceEncodingConfig.SetVersionEncoding(
		kapi.GroupName,
		unversioned.GroupVersion{Group: kapi.GroupName, Version: options.EtcdStorageConfig.KubernetesStorageVersion},
		kapi.SchemeGroupVersion,
	)
	resourceEncodingConfig.SetVersionEncoding(
		extensions.GroupName,
		unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"},
		extensions.SchemeGroupVersion,
	)
	resourceEncodingConfig.SetVersionEncoding(
		batch.GroupName,
		unversioned.GroupVersion{Group: batch.GroupName, Version: "v1"},
		batch.SchemeGroupVersion,
	)
	resourceEncodingConfig.SetVersionEncoding(
		autoscaling.GroupName,
		unversioned.GroupVersion{Group: autoscaling.GroupName, Version: "v1"},
		autoscaling.SchemeGroupVersion,
	)

	etcdConfig := storagebackend.Config{
		Prefix:     options.EtcdStorageConfig.KubernetesStoragePrefix,
		ServerList: options.EtcdClientInfo.URLs,
		KeyFile:    options.EtcdClientInfo.ClientCert.KeyFile,
		CertFile:   options.EtcdClientInfo.ClientCert.CertFile,
		CAFile:     options.EtcdClientInfo.CA,
		DeserializationCacheSize: genericapiserveroptions.DefaultDeserializationCacheSize,
	}
	storageFactory := genericapiserver.NewDefaultStorageFactory(etcdConfig, "", kapi.Codecs, resourceEncodingConfig, master.DefaultAPIResourceConfigSource())
	// the order here is important, it defines which version will be used for storage
	storageFactory.AddCohabitatingResources(extensions.Resource("jobs"), batch.Resource("jobs"))
	storageFactory.AddCohabitatingResources(extensions.Resource("horizontalpodautoscalers"), autoscaling.Resource("horizontalpodautoscalers"))

	// Preserve previous behavior of using the first non-loopback address
	// TODO: Deprecate this behavior and just require a valid value to be passed in
	publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
	if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
		hostIP, err := knet.ChooseHostInterface()
		if err != nil {
			glog.Fatalf("Unable to find suitable network address.error='%v'. Set the masterIP directly to avoid this error.", err)
		}
		publicAddress = hostIP
		glog.Infof("Will report %v as public IP address.", publicAddress)
	}

	m := &master.Config{
		Config: &genericapiserver.Config{
			PublicAddress: publicAddress,
			ReadWritePort: port,

			Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
			AdmissionControl: admissionController,

			StorageFactory: storageFactory,

			ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
			ServiceNodePortRange:  server.ServiceNodePortRange,

			RequestContextMapper: requestContextMapper,

			APIResourceConfigSource: getAPIResourceConfig(options),
			APIPrefix:               server.APIPrefix,
			APIGroupPrefix:          server.APIGroupPrefix,

			MasterCount: server.MasterCount,

			// Set the TLS options for proxying to pods and services
			// Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
			ProxyTLSClientConfig: &tls.Config{
				// Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
				InsecureSkipVerify: true,
				Certificates:       proxyClientCerts,
			},

			Serializer: kapi.Codecs,

			EnableLogsSupport:         server.EnableLogsSupport,
			EnableProfiling:           server.EnableProfiling,
			EnableWatchCache:          server.EnableWatchCache,
			MasterServiceNamespace:    server.MasterServiceNamespace,
			ExternalHost:              server.ExternalHost,
			MinRequestTimeout:         server.MinRequestTimeout,
			KubernetesServiceNodePort: server.KubernetesServiceNodePort,
		},

		EventTTL: server.EventTTL,

		KubeletClient: kubeletClient,

		EnableCoreControllers: true,

		DeleteCollectionWorkers: server.DeleteCollectionWorkers,
	}

	if server.EnableWatchCache {
		cachesize.SetWatchCacheSizes(server.WatchCacheSizes)
	}

	if options.DNSConfig != nil {
		_, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
		if err != nil {
			return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
		}
		dnsPort, err := strconv.Atoi(dnsPortStr)
		if err != nil {
			return nil, fmt.Errorf("invalid DNS port: %v", err)
		}
		m.ExtraServicePorts = append(m.ExtraServicePorts,
			kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
			kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
		)
		m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
			kapi.EndpointPort{Name: "dns", Port: int32(dnsPort), Protocol: kapi.ProtocolUDP},
			kapi.EndpointPort{Name: "dns-tcp", Port: int32(dnsPort), Protocol: kapi.ProtocolTCP},
		)
	}

	kmaster := &MasterConfig{
		Options: *options.KubernetesMasterConfig,

		KubeClient: kubeClient,

		Master:            m,
		ControllerManager: cmserver,
		CloudProvider:     cloud,
		Informers:         informers,
	}

	return kmaster, nil
}
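// The DNS wiring above always exposes service port 53 for "dns" and "dns-tcp",
// while the target/endpoint port is whatever port appears in
// DNSConfig.BindAddress. A minimal sketch of that parsing step, standard
// library only (parseDNSTargetPort is an illustrative name, not a helper used
// by the function above):
func parseDNSTargetPort(bindAddress string) (int, error) {
	// e.g. "0.0.0.0:8053" -> 8053; the host portion is ignored here.
	_, portStr, err := net.SplitHostPort(bindAddress)
	if err != nil {
		return 0, fmt.Errorf("unable to parse DNS bind address %s: %v", bindAddress, err)
	}
	return strconv.Atoi(portStr)
}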