// createTestingNS delegates to custom namespace creation functions if registered.
// Otherwise, it ensures that kubernetes e2e tests have their service accounts
// in the privileged and anyuid SCCs.
func createTestingNS(baseName string, c *kclient.Client, labels map[string]string) (*kapi.Namespace, error) {
    // If a custom function exists, call it
    if fn, exists := customCreateTestingNSFuncs[baseName]; exists {
        return fn(baseName, c, labels)
    }

    // Otherwise use the upstream default
    ns, err := e2e.CreateTestingNS(baseName, c, labels)
    if err != nil {
        return ns, err
    }

    // Add anyuid and privileged permissions for upstream tests
    if isKubernetesE2ETest() && !skipTestNamespaceCustomization() {
        e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
        // add to the "privileged" scc to ensure pods that explicitly
        // request extra capabilities are not rejected
        addE2EServiceAccountsToSCC(c, []kapi.Namespace{*ns}, "privileged")
        // add to the "anyuid" scc to ensure pods that don't specify a
        // uid don't get forced into a range (mimics upstream
        // behavior)
        addE2EServiceAccountsToSCC(c, []kapi.Namespace{*ns}, "anyuid")

        // The intra-pod test requires that the service account have
        // permission to retrieve service endpoints.
        osClient, _, err := configapi.GetOpenShiftClient(KubeConfigPath(), nil)
        if err != nil {
            return ns, err
        }
        addRoleToE2EServiceAccounts(osClient, []kapi.Namespace{*ns}, bootstrappolicy.ViewRoleName)
    }

    return ns, err
}
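// Below is a minimal, self-contained sketch (illustration only, not code from
// this repo) of the hook-registry pattern createTestingNS relies on: custom
// creation funcs are registered by test base name and consulted before the
// default path. The string result and trimmed signature are stand-ins for the
// real *kclient.Client and *kapi.Namespace types.
package main

import "fmt"

// createNSFunc mirrors the shape of the hooks stored in customCreateTestingNSFuncs.
type createNSFunc func(baseName string, labels map[string]string) (string, error)

// registry keyed by test base name, consulted before the default path.
var customCreateNSFuncs = map[string]createNSFunc{}

func createTestingNS(baseName string, labels map[string]string) (string, error) {
    // delegate to a custom hook when one is registered for this base name
    if fn, exists := customCreateNSFuncs[baseName]; exists {
        return fn(baseName, labels)
    }
    // otherwise fall back to the default creation path
    return "e2e-" + baseName, nil
}

func main() {
    customCreateNSFuncs["router"] = func(base string, _ map[string]string) (string, error) {
        return "custom-" + base, nil
    }
    ns, _ := createTestingNS("router", nil)
    fmt.Println(ns) // custom-router
}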
func RunSDNController(config *kubernetes.NodeConfig, nodeConfig configapi.NodeConfig) kubernetes.FilteringEndpointsConfigHandler {
    oclient, _, err := configapi.GetOpenShiftClient(nodeConfig.MasterKubeConfig)
    if err != nil {
        glog.Fatal("Failed to get kube client for SDN")
    }

    ch := make(chan struct{})
    controller, endpointFilter, err := factory.NewPlugin(nodeConfig.NetworkConfig.NetworkPluginName, oclient, config.Client, nodeConfig.NodeName, nodeConfig.NodeIP, ch)
    if err != nil {
        glog.Fatalf("SDN initialization failed: %v", err)
    }

    if controller != nil {
        config.KubeletConfig.StartUpdates = ch
        config.KubeletConfig.NetworkPlugins = append(config.KubeletConfig.NetworkPlugins, controller)
        go func() {
            err := controller.StartNode(nodeConfig.NetworkConfig.MTU)
            if err != nil {
                glog.Fatalf("SDN Node failed: %v", err)
            }
        }()
    }

    return endpointFilter
}
func RunSDNController(config *kubernetes.NodeConfig, nodeConfig configapi.NodeConfig) {
    if nodeConfig.NetworkPluginName != osdn.NetworkPluginName() {
        return
    }

    oclient, _, err := configapi.GetOpenShiftClient(nodeConfig.MasterKubeConfig)
    if err != nil {
        glog.Fatal("Failed to get kube client for SDN")
    }

    ch := make(chan struct{})
    config.KubeletConfig.StartUpdates = ch
    go osdn.Node(oclient, config.Client, nodeConfig.NodeName, "", ch)
}
func RunSDNController(config *kubernetes.NodeConfig, nodeConfig configapi.NodeConfig) {
    oclient, _, err := configapi.GetOpenShiftClient(nodeConfig.MasterKubeConfig)
    if err != nil {
        glog.Fatal("Failed to get kube client for SDN")
    }

    switch nodeConfig.NetworkPluginName {
    case flatsdn.NetworkPluginName():
        ch := make(chan struct{})
        config.KubeletConfig.StartUpdates = ch
        go flatsdn.Node(oclient, config.Client, nodeConfig.NodeName, "", ch)
    case multitenant.NetworkPluginName():
        ch := make(chan struct{})
        config.KubeletConfig.StartUpdates = ch
        plugin := multitenant.GetKubeNetworkPlugin()
        config.KubeletConfig.NetworkPlugins = append(config.KubeletConfig.NetworkPlugins, plugin)
        go multitenant.Node(oclient, config.Client, nodeConfig.NodeName, "", ch, plugin)
    }
}
func RunSDNController(config *kubernetes.NodeConfig, nodeConfig configapi.NodeConfig) kubernetes.FilteringEndpointsConfigHandler {
    oclient, _, err := configapi.GetOpenShiftClient(nodeConfig.MasterKubeConfig)
    if err != nil {
        glog.Fatal("Failed to get kube client for SDN")
    }

    registry := osdn.NewOsdnRegistryInterface(oclient, config.Client)

    switch nodeConfig.NetworkConfig.NetworkPluginName {
    case flatsdn.NetworkPluginName():
        ch := make(chan struct{})
        config.KubeletConfig.StartUpdates = ch
        go flatsdn.Node(registry, nodeConfig.NodeName, nodeConfig.NodeIP, ch, nodeConfig.NetworkConfig.MTU)
    case multitenant.NetworkPluginName():
        ch := make(chan struct{})
        config.KubeletConfig.StartUpdates = ch
        plugin := multitenant.GetKubeNetworkPlugin()
        config.KubeletConfig.NetworkPlugins = append(config.KubeletConfig.NetworkPlugins, plugin)
        go multitenant.Node(registry, nodeConfig.NodeName, nodeConfig.NodeIP, ch, plugin, nodeConfig.NetworkConfig.MTU)
        return registry
    }
    return nil
}
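// The RunSDNController variants above all dispatch on the configured network
// plugin name. Below is a minimal, self-contained sketch of that dispatch
// (illustration only; endpointsFilter and startSDN are stand-ins for
// kubernetes.FilteringEndpointsConfigHandler and the real wiring).
package main

import "fmt"

// endpointsFilter plays the role of kubernetes.FilteringEndpointsConfigHandler:
// a hook the proxy consults before programming service endpoints.
type endpointsFilter interface {
    Name() string
}

type multitenantFilter struct{}

func (multitenantFilter) Name() string { return "multitenant" }

// startSDN mimics the switch in the last variant: the flat (subnet) plugin
// starts the node side but installs no filter, while the multitenant plugin
// also returns a filter so the proxy ignores endpoints from other tenants.
func startSDN(pluginName string) endpointsFilter {
    switch pluginName {
    case "redhat/openshift-ovs-subnet":
        return nil
    case "redhat/openshift-ovs-multitenant":
        return multitenantFilter{}
    }
    return nil // unrecognized plugin: SDN not managed here
}

func main() {
    if f := startSDN("redhat/openshift-ovs-multitenant"); f != nil {
        fmt.Println("proxy will filter endpoints via:", f.Name())
    }
}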
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
    client, err := etcd.EtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }

    etcdHelper, err := NewEtcdStorage(client, options.EtcdStorageConfig.OpenShiftStorageVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
    if err != nil {
        return nil, fmt.Errorf("Error setting up server storage: %v", err)
    }

    clientCAs, err := configapi.GetClientCertCAPool(options)
    if err != nil {
        return nil, err
    }
    apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
    if err != nil {
        return nil, err
    }

    privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }
    privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    policyCache, policyClient := newReadOnlyCacheAndClient(etcdHelper)
    requestContextMapper := kapi.NewRequestContextMapper()

    groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupstorage.NewREST(etcdHelper)))

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)

    // in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
    admissionControlPluginNames := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}
    admissionClient := admissionControlClient(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient)
    admissionController := admission.NewFromPlugins(admissionClient, admissionControlPluginNames, "")

    serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, client)
    if err != nil {
        return nil, err
    }

    plug, plugStart := newControllerPlug(options, client)

    config := &MasterConfig{
        Options: options,

        Authenticator:                 newAuthenticator(options, etcdHelper, serviceAccountTokenGetter, apiClientCAs, groupCache),
        Authorizer:                    newAuthorizer(policyClient, options.ProjectConfig.ProjectRequestMessage),
        AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

        PolicyCache:               policyCache,
        GroupCache:                groupCache,
        ProjectAuthorizationCache: newProjectAuthorizationCache(privilegedLoopbackOpenShiftClient, privilegedLoopbackKubeClient, policyClient),

        RequestContextMapper: requestContextMapper,

        AdmissionControl: admissionController,

        TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

        ControllerPlug:      plug,
        ControllerPlugStart: plugStart,

        ImageFor:            imageTemplate.ExpandOrDie,
        EtcdHelper:          etcdHelper,
        EtcdClient:          client,
        KubeletClientConfig: kubeletClientConfig,

        ClientCAs:    clientCAs,
        APIClientCAs: apiClientCAs,

        PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
        PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
        PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

        BuildControllerServiceAccount:       bootstrappolicy.InfraBuildControllerServiceAccountName,
        DeploymentControllerServiceAccount:  bootstrappolicy.InfraDeploymentControllerServiceAccountName,
        ReplicationControllerServiceAccount: bootstrappolicy.InfraReplicationControllerServiceAccountName,
    }

    return config, nil
}
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
    client, err := etcd.EtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
    etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
    if err != nil {
        return nil, fmt.Errorf("Error setting up server storage: %v", err)
    }

    clientCAs, err := configapi.GetClientCertCAPool(options)
    if err != nil {
        return nil, err
    }
    apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
    if err != nil {
        return nil, err
    }

    privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }
    privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    policyCache, policyClient := newReadOnlyCacheAndClient(etcdHelper)
    requestContextMapper := kapi.NewRequestContextMapper()

    groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupstorage.NewREST(etcdHelper)))
    projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)

    // in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
    admissionControlPluginNames := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}
    if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
        admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
    }

    pluginInitializer := oadmission.PluginInitializer{
        OpenshiftClient: privilegedLoopbackOpenShiftClient,
        ProjectCache:    projectCache,
    }

    plugins := []admission.Interface{}
    for _, pluginName := range admissionControlPluginNames {
        configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
        if err != nil {
            return nil, err
        }
        plugin := admission.InitPlugin(pluginName, privilegedLoopbackKubeClient, configFile)
        if plugin != nil {
            plugins = append(plugins, plugin)
        }
    }
    pluginInitializer.Initialize(plugins)
    // ensure that plugins have been properly initialized
    if err := oadmission.Validate(plugins); err != nil {
        return nil, err
    }
    admissionController := admission.NewChainHandler(plugins...)

    serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
    if err != nil {
        return nil, err
    }

    plug, plugStart := newControllerPlug(options, client)

    authorizer := newAuthorizer(policyClient, options.ProjectConfig.ProjectRequestMessage)

    config := &MasterConfig{
        Options: options,

        Authenticator:                 newAuthenticator(options, etcdHelper, serviceAccountTokenGetter, apiClientCAs, groupCache),
        Authorizer:                    authorizer,
        AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

        PolicyCache:               policyCache,
        GroupCache:                groupCache,
        ProjectAuthorizationCache: newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, policyClient),
        ProjectCache:              projectCache,

        RequestContextMapper: requestContextMapper,

        AdmissionControl: admissionController,

        TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

        ControllerPlug:      plug,
        ControllerPlugStart: plugStart,

        ImageFor:            imageTemplate.ExpandOrDie,
        EtcdHelper:          etcdHelper,
        KubeletClientConfig: kubeletClientConfig,

        ClientCAs:    clientCAs,
        APIClientCAs: apiClientCAs,

        PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
        PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
        PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
    }

    return config, nil
}
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
    client, err := etcd.EtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
    etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
    if err != nil {
        return nil, fmt.Errorf("Error setting up server storage: %v", err)
    }

    restOptsGetter := restoptions.NewConfigGetter(options)

    clientCAs, err := configapi.GetClientCertCAPool(options)
    if err != nil {
        return nil, err
    }
    apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
    if err != nil {
        return nil, err
    }

    privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }
    privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }

    customListerWatchers := shared.DefaultListerWatcherOverrides{}
    if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
        return nil, err
    }
    informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    requestContextMapper := kapi.NewRequestContextMapper()

    groupStorage, err := groupstorage.NewREST(restOptsGetter)
    if err != nil {
        return nil, err
    }
    groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
    projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
    clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)

    // in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
    admissionControlPluginNames := []string{
        "ProjectRequestLimit",
        "OriginNamespaceLifecycle",
        "PodNodeConstraints",
        "JenkinsBootstrapper",
        "BuildByStrategy",
        imageadmission.PluginName,
        quotaadmission.PluginName,
    }
    if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
        admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
    }

    quotaRegistry := quota.NewOriginQuotaRegistry(privilegedLoopbackOpenShiftClient)
    ruleResolver := rulevalidation.NewDefaultRuleResolver(
        informerFactory.Policies().Lister(),
        informerFactory.PolicyBindings().Lister(),
        informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
        informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
    )
    authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

    pluginInitializer := oadmission.PluginInitializer{
        OpenshiftClient:       privilegedLoopbackOpenShiftClient,
        ProjectCache:          projectCache,
        OriginQuotaRegistry:   quotaRegistry,
        Authorizer:            authorizer,
        JenkinsPipelineConfig: options.JenkinsPipelineConfig,
        RESTClientConfig:      *privilegedLoopbackClientConfig,
    }

    plugins := []admission.Interface{}
    clientsetClient := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
    for _, pluginName := range admissionControlPluginNames {
        configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
        if err != nil {
            return nil, err
        }
        plugin := admission.InitPlugin(pluginName, clientsetClient, configFile)
        if plugin != nil {
            plugins = append(plugins, plugin)
        }
    }
    pluginInitializer.Initialize(plugins)
    // ensure that plugins have been properly initialized
    if err := oadmission.Validate(plugins); err != nil {
        return nil, err
    }
    admissionController := admission.NewChainHandler(plugins...)

    // TODO: look up storage by resource
    serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
    if err != nil {
        return nil, err
    }

    authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
    if err != nil {
        return nil, err
    }

    plug, plugStart := newControllerPlug(options, client)

    config := &MasterConfig{
        Options: options,

        RESTOptionsGetter: restOptsGetter,

        RuleResolver:                  ruleResolver,
        Authenticator:                 authenticator,
        Authorizer:                    authorizer,
        AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

        GroupCache:                    groupCache,
        ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
        ProjectCache:                  projectCache,
        ClusterQuotaMappingController: clusterQuotaMappingController,

        RequestContextMapper: requestContextMapper,

        AdmissionControl: admissionController,

        TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

        ControllerPlug:      plug,
        ControllerPlugStart: plugStart,

        ImageFor:            imageTemplate.ExpandOrDie,
        EtcdHelper:          etcdHelper,
        KubeletClientConfig: kubeletClientConfig,

        ClientCAs:    clientCAs,
        APIClientCAs: apiClientCAs,

        PluginInitializer: pluginInitializer,

        PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
        PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
        PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

        Informers: informerFactory,
    }

    return config, nil
}
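// Every BuildMasterConfig version above wires admission the same way: resolve
// an ordered plugin-name list, init each plugin (skipping unknown names), then
// chain them so any plugin can veto a request. Below is a self-contained
// sketch of that chain (illustration only; admissionPlugin stands in for
// admission.Interface, initPlugin for admission.InitPlugin).
package main

import "fmt"

type admissionPlugin interface {
    Admit(resource string) error
}

type namespaceLifecycle struct{}

func (namespaceLifecycle) Admit(string) error { return nil }

// initPlugin returns nil for unknown names; the builder skips those instead
// of failing, matching the `if plugin != nil` guard above.
func initPlugin(name string) admissionPlugin {
    if name == "OriginNamespaceLifecycle" {
        return namespaceLifecycle{}
    }
    return nil
}

// chain mirrors admission.NewChainHandler: plugins run in order, first error wins.
type chain []admissionPlugin

func (c chain) Admit(resource string) error {
    for _, p := range c {
        if err := p.Admit(resource); err != nil {
            return err
        }
    }
    return nil
}

func main() {
    names := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}
    var plugins []admissionPlugin
    for _, name := range names {
        if p := initPlugin(name); p != nil {
            plugins = append(plugins, p)
        }
    }
    admission := chain(plugins)
    fmt.Println("admit pods:", admission.Admit("pods")) // admit pods: <nil>
}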
func BuildKubernetesNodeConfig(options configapi.NodeConfig, enableProxy, enableDNS bool) (*NodeConfig, error) {
    originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
    if err != nil {
        return nil, err
    }
    _, kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
    if err != nil {
        return nil, err
    }
    // Make a separate client for event reporting, to avoid event QPS blocking node calls
    _, eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
    if err != nil {
        return nil, err
    }

    if options.NodeName == "localhost" {
        glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
    }

    clientCAs, err := kcrypto.CertPoolFromFile(options.ServingInfo.ClientCA)
    if err != nil {
        return nil, err
    }

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    var path string
    var fileCheckInterval int64
    if options.PodManifestConfig != nil {
        path = options.PodManifestConfig.Path
        fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
    }

    kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, fmt.Errorf("cannot parse node address: %v", err)
    }
    kubePort, err := strconv.Atoi(kubePortStr)
    if err != nil {
        return nil, fmt.Errorf("cannot parse node port: %v", err)
    }

    if err = validateNetworkPluginName(originClient, options.NetworkConfig.NetworkPluginName); err != nil {
        return nil, err
    }

    // Defaults are tested in TestKubeletDefaults
    server := kubeletoptions.NewKubeletServer()
    // Adjust defaults
    server.RequireKubeConfig = true
    server.PodManifestPath = path
    server.RootDirectory = options.VolumeDirectory
    server.NodeIP = options.NodeIP
    server.HostnameOverride = options.NodeName
    server.AllowPrivileged = true
    server.RegisterNode = true
    server.Address = kubeAddressStr
    server.Port = int32(kubePort)
    server.ReadOnlyPort = 0        // no read only access
    server.CAdvisorPort = 0        // no unsecured cadvisor access
    server.HealthzPort = 0         // no unsecured healthz access
    server.HealthzBindAddress = "" // no unsecured healthz access
    server.ClusterDNS = options.DNSIP
    server.ClusterDomain = options.DNSDomain
    server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
    server.HostNetworkSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
    server.HostPIDSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
    server.HostIPCSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
    server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
    server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
    server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
    server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
    server.MaxPods = 250
    server.PodsPerCore = 10
    server.SerializeImagePulls = false          // disable serialized image pulls by default
    server.EnableControllerAttachDetach = false // stay consistent with existing config, but admins should enable it
    if enableDNS {
        // if we are running local DNS, skydns will load the default recursive nameservers for us
        server.ResolverConfig = ""
    }
    server.DockerExecHandlerName = string(options.DockerConfig.ExecHandlerName)

    if sdnapi.IsOpenShiftNetworkPlugin(server.NetworkPluginName) {
        // set defaults for openshift-sdn
        server.HairpinMode = componentconfig.HairpinNone
        server.ConfigureCBR0 = false
    }

    // prevents kube from generating certs
    server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
    server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

    containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
    server.Containerized = containerized

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    proxyconfig, err := buildKubeProxyConfig(options)
    if err != nil {
        return nil, err
    }

    // Initialize SDN before building kubelet config so it can modify options
    iptablesSyncPeriod, err := time.ParseDuration(options.IPTablesSyncPeriod)
    if err != nil {
        return nil, fmt.Errorf("Cannot parse the provided ip-tables sync period (%s) : %v", options.IPTablesSyncPeriod, err)
    }
    sdnPlugin, err := sdnplugin.NewNodePlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP, iptablesSyncPeriod, options.NetworkConfig.MTU)
    if err != nil {
        return nil, fmt.Errorf("SDN initialization failed: %v", err)
    }
    if sdnPlugin != nil {
        // SDN plugin pod setup/teardown is implemented as a CNI plugin
        server.NetworkPluginName = kubeletcni.CNIPluginName
        server.NetworkPluginDir = kubeletcni.DefaultNetDir
        server.HairpinMode = componentconfig.HairpinNone
        server.ConfigureCBR0 = false
    }

    deps, err := kubeletapp.UnsecuredKubeletDeps(server)
    if err != nil {
        return nil, err
    }

    // Initialize cloud provider
    cloud, err := buildCloudProvider(server)
    if err != nil {
        return nil, err
    }
    deps.Cloud = cloud

    // Replace the kubelet-created CNI plugin with the SDN plugin
    // Kubelet must be initialized with NetworkPluginName="cni" but
    // the SDN plugin (if available) needs to be the only one used
    if sdnPlugin != nil {
        deps.NetworkPlugins = []kubeletnetwork.NetworkPlugin{sdnPlugin}
    }

    // provide any config overrides
    //deps.NodeName = options.NodeName
    deps.KubeClient = kubeClient
    deps.EventClient = eventClient

    // Setup auth
    authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
    if err != nil {
        return nil, err
    }
    authn, err := newAuthenticator(kubeClient.Authentication(), clientCAs, authnTTL, options.AuthConfig.AuthenticationCacheSize)
    if err != nil {
        return nil, err
    }

    authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
    if err != nil {
        return nil, err
    }

    authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
    if err != nil {
        return nil, err
    }
    authz, err := newAuthorizer(originClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
    if err != nil {
        return nil, err
    }

    deps.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

    // TODO: could be cleaner
    if configapi.UseTLS(options.ServingInfo) {
        extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
        if err != nil {
            return nil, err
        }
        deps.TLSOptions = &kubeletserver.TLSOptions{
            Config: crypto.SecureTLSConfig(&tls.Config{
                // RequestClientCert lets us request certs, but allow requests without client certs
                // Verification is done by the authn layer
                ClientAuth: tls.RequestClientCert,
                ClientCAs:  clientCAs,
                // Set SNI certificate func
                // Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
                // which we do not control when running with http.Server#ListenAndServeTLS
                GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
            }),
            CertFile: options.ServingInfo.ServerCert.CertFile,
            KeyFile:  options.ServingInfo.ServerCert.KeyFile,
        }
    } else {
        deps.TLSOptions = nil
    }

    sdnProxy, err := sdnplugin.NewProxyPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient)
    if err != nil {
        return nil, fmt.Errorf("SDN proxy initialization failed: %v", err)
    }

    config := &NodeConfig{
        BindAddress: options.ServingInfo.BindAddress,

        AllowDisabledDocker: options.AllowDisabledDocker,
        Containerized:       containerized,

        Client: kubeClient,

        VolumeDir: options.VolumeDirectory,

        KubeletServer: server,
        KubeletDeps:   deps,

        ServicesReady: make(chan struct{}),

        ProxyConfig:    proxyconfig,
        EnableUnidling: options.EnableUnidling,

        SDNPlugin: sdnPlugin,
        SDNProxy:  sdnProxy,
    }

    if enableDNS {
        dnsConfig, err := dns.NewServerDefaults()
        if err != nil {
            return nil, fmt.Errorf("DNS configuration was not possible: %v", err)
        }
        if len(options.DNSIP) > 0 {
            dnsConfig.DnsAddr = options.DNSIP + ":53"
        }
        dnsConfig.Domain = server.ClusterDomain + "."
        dnsConfig.Local = "openshift.default.svc." + dnsConfig.Domain

        services, serviceStore := dns.NewCachedServiceAccessorAndStore()
        endpoints, endpointsStore := dns.NewCachedEndpointsAccessorAndStore()
        if !enableProxy {
            endpoints = deps.KubeClient
            endpointsStore = nil
        }

        // TODO: use kubeletConfig.ResolverConfig as an argument to etcd in the event the
        // user sets it, instead of passing it to the kubelet.

        config.ServiceStore = serviceStore
        config.EndpointsStore = endpointsStore
        config.DNSServer = &dns.Server{
            Config:      dnsConfig,
            Services:    services,
            Endpoints:   endpoints,
            MetricsName: "node",
        }
    }

    return config, nil
}
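// The DNS block above derives its zone names from the kubelet's cluster
// domain. A tiny self-contained sketch of that derivation (illustration only;
// deriveDNSNames is a hypothetical helper, not part of this repo):
package main

import "fmt"

// deriveDNSNames mirrors the assignments in the enableDNS block: the zone is
// the cluster domain with a trailing dot, and the "local" marker is the
// default OpenShift service name inside that zone.
func deriveDNSNames(clusterDomain string) (domain, local string) {
    domain = clusterDomain + "."
    local = "openshift.default.svc." + domain
    return domain, local
}

func main() {
    domain, local := deriveDNSNames("cluster.local")
    fmt.Println(domain) // cluster.local.
    fmt.Println(local)  // openshift.default.svc.cluster.local.
}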
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
    kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
    if err != nil {
        return nil, err
    }

    if options.NodeName == "localhost" {
        glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
    }

    var dnsIP net.IP
    if len(options.DNSIP) > 0 {
        dnsIP = net.ParseIP(options.DNSIP)
        if dnsIP == nil {
            return nil, fmt.Errorf("Invalid DNS IP: %s", options.DNSIP)
        }
    }

    clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
    if err != nil {
        return nil, err
    }

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    var path string
    var fileCheckInterval int64
    if options.PodManifestConfig != nil {
        path = options.PodManifestConfig.Path
        fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
    }

    var dockerExecHandler dockertools.ExecHandler
    switch options.DockerConfig.ExecHandlerName {
    case configapi.DockerExecHandlerNative:
        dockerExecHandler = &dockertools.NativeExecHandler{}
    case configapi.DockerExecHandlerNsenter:
        dockerExecHandler = &dockertools.NsenterExecHandler{}
    }

    kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, fmt.Errorf("cannot parse node address: %v", err)
    }
    kubePort, err := strconv.Atoi(kubePortStr)
    if err != nil {
        return nil, fmt.Errorf("cannot parse node port: %v", err)
    }
    kubeAddress := net.ParseIP(kubeAddressStr)
    if kubeAddress == nil {
        return nil, fmt.Errorf("Invalid node address: %s", kubeAddressStr)
    }

    // declare the OpenShift defaults from config
    server := kapp.NewKubeletServer()
    server.Config = path
    server.RootDirectory = options.VolumeDirectory

    // kubelet finds the node IP address by doing net.ParseIP(hostname) and if that fails,
    // it does net.LookupIP(NodeName) and picks the first non-loopback address.
    // Pass node IP as hostname to make kubelet use the desired IP address.
    if len(options.NodeIP) > 0 {
        server.HostnameOverride = options.NodeIP
    } else {
        server.HostnameOverride = options.NodeName
    }
    server.AllowPrivileged = true
    server.RegisterNode = true
    server.Address = kubeAddress
    server.Port = uint(kubePort)
    server.ReadOnlyPort = 0 // no read only access
    server.CAdvisorPort = 0 // no unsecured cadvisor access
    server.HealthzPort = 0  // no unsecured healthz access
    server.ClusterDNS = dnsIP
    server.ClusterDomain = options.DNSDomain
    server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
    server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
    server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
    server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
    server.HTTPCheckFrequency = 0 // no remote HTTP pod creation access
    server.FileCheckFrequency = time.Duration(fileCheckInterval) * time.Second
    server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
    server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default

    // prevents kube from generating certs
    server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
    server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

    if value := cmdutil.Env("OPENSHIFT_CONTAINERIZED", ""); len(value) > 0 {
        server.Containerized = value == "true"
    }

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
        return nil, errors.NewAggregate(err)
    }

    cfg, err := server.UnsecuredKubeletConfig()
    if err != nil {
        return nil, err
    }

    // provide any config overrides
    cfg.NodeName = options.NodeName
    cfg.StreamingConnectionIdleTimeout = 5 * time.Minute // TODO: should be set
    cfg.KubeClient = kubeClient
    cfg.DockerExecHandler = dockerExecHandler

    // Setup auth
    osClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
    if err != nil {
        return nil, err
    }

    authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
    if err != nil {
        return nil, err
    }
    authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(*osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
    if err != nil {
        return nil, err
    }

    authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
    if err != nil {
        return nil, err
    }

    authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
    if err != nil {
        return nil, err
    }
    authz, err := newAuthorizer(osClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
    if err != nil {
        return nil, err
    }

    cfg.Auth = kubelet.NewKubeletAuth(authn, authzAttr, authz)

    // Make sure the node doesn't think it is in standalone mode
    // This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
    cfg.StandaloneMode = false

    // TODO: could be cleaner
    if configapi.UseTLS(options.ServingInfo) {
        extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
        if err != nil {
            return nil, err
        }
        cfg.TLSOptions = &kubelet.TLSOptions{
            Config: crypto.SecureTLSConfig(&tls.Config{
                // RequestClientCert lets us request certs, but allow requests without client certs
                // Verification is done by the authn layer
                ClientAuth: tls.RequestClientCert,
                ClientCAs:  clientCAs,
                // Set SNI certificate func
                // Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
                // which we do not control when running with http.Server#ListenAndServeTLS
                GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
            }),
            CertFile: options.ServingInfo.ServerCert.CertFile,
            KeyFile:  options.ServingInfo.ServerCert.KeyFile,
        }
    } else {
        cfg.TLSOptions = nil
    }

    // Prepare cloud provider
    cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
    }
    cfg.Cloud = cloud

    config := &NodeConfig{
        BindAddress: options.ServingInfo.BindAddress,

        AllowDisabledDocker: options.AllowDisabledDocker,

        Client: kubeClient,

        VolumeDir: options.VolumeDirectory,

        KubeletServer: server,
        KubeletConfig: cfg,

        IPTablesSyncPeriod: options.IPTablesSyncPeriod,
    }

    return config, nil
}
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
    client, err := etcd.EtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
    etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
    if err != nil {
        return nil, fmt.Errorf("Error setting up server storage: %v", err)
    }

    restOptsGetter := restoptions.NewConfigGetter(options)

    clientCAs, err := configapi.GetClientCertCAPool(options)
    if err != nil {
        return nil, err
    }
    apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
    if err != nil {
        return nil, err
    }

    privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }
    privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }

    customListerWatchers := shared.DefaultListerWatcherOverrides{}
    if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
        return nil, err
    }
    informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    requestContextMapper := kapi.NewRequestContextMapper()

    groupStorage, err := groupstorage.NewREST(restOptsGetter)
    if err != nil {
        return nil, err
    }
    groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
    projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
    clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)

    kubeClientSet := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
    quotaRegistry := quota.NewAllResourceQuotaRegistry(privilegedLoopbackOpenShiftClient, kubeClientSet)
    ruleResolver := rulevalidation.NewDefaultRuleResolver(
        informerFactory.Policies().Lister(),
        informerFactory.PolicyBindings().Lister(),
        informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
        informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
    )
    authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

    pluginInitializer := oadmission.PluginInitializer{
        OpenshiftClient:       privilegedLoopbackOpenShiftClient,
        ProjectCache:          projectCache,
        OriginQuotaRegistry:   quotaRegistry,
        Authorizer:            authorizer,
        JenkinsPipelineConfig: options.JenkinsPipelineConfig,
        RESTClientConfig:      *privilegedLoopbackClientConfig,
        Informers:             informerFactory,
        ClusterQuotaMapper:    clusterQuotaMappingController.GetClusterQuotaMapper(),
    }

    originAdmission, kubeAdmission, err := buildAdmissionChains(options, kubeClientSet, pluginInitializer)
    if err != nil {
        return nil, err
    }

    // TODO: look up storage by resource
    serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
    if err != nil {
        return nil, err
    }

    authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
    if err != nil {
        return nil, err
    }

    plug, plugStart := newControllerPlug(options, client)

    config := &MasterConfig{
        Options: options,

        RESTOptionsGetter: restOptsGetter,

        RuleResolver:                  ruleResolver,
        Authenticator:                 authenticator,
        Authorizer:                    authorizer,
        AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

        GroupCache:                    groupCache,
        ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
        ProjectCache:                  projectCache,
        ClusterQuotaMappingController: clusterQuotaMappingController,

        RequestContextMapper: requestContextMapper,

        AdmissionControl:     originAdmission,
        KubeAdmissionControl: kubeAdmission,

        TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

        ControllerPlug:      plug,
        ControllerPlugStart: plugStart,

        ImageFor:            imageTemplate.ExpandOrDie,
        EtcdHelper:          etcdHelper,
        KubeletClientConfig: kubeletClientConfig,

        ClientCAs:    clientCAs,
        APIClientCAs: apiClientCAs,

        PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
        PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
        PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

        Informers: informerFactory,
    }

    return config, nil
}
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
    client, err := etcd.MakeEtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }

    restOptsGetter := originrest.StorageOptions(options)

    clientCAs, err := configapi.GetClientCertCAPool(options)
    if err != nil {
        return nil, err
    }
    apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
    if err != nil {
        return nil, err
    }

    privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig, options.MasterClients.OpenShiftLoopbackClientConnectionOverrides)
    if err != nil {
        return nil, err
    }
    privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig, options.MasterClients.OpenShiftLoopbackClientConnectionOverrides)
    if err != nil {
        return nil, err
    }

    customListerWatchers := shared.DefaultListerWatcherOverrides{}
    if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
        return nil, err
    }
    informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    defaultRegistry := env("OPENSHIFT_DEFAULT_REGISTRY", "${DOCKER_REGISTRY_SERVICE_HOST}:${DOCKER_REGISTRY_SERVICE_PORT}")
    svcCache := service.NewServiceResolverCache(privilegedLoopbackKubeClient.Services(kapi.NamespaceDefault).Get)
    defaultRegistryFunc, err := svcCache.Defer(defaultRegistry)
    if err != nil {
        return nil, fmt.Errorf("OPENSHIFT_DEFAULT_REGISTRY variable is invalid %q: %v", defaultRegistry, err)
    }

    requestContextMapper := kapi.NewRequestContextMapper()

    groupStorage, err := groupstorage.NewREST(restOptsGetter)
    if err != nil {
        return nil, err
    }
    groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
    projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
    clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)

    kubeClientSet := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
    quotaRegistry := quota.NewAllResourceQuotaRegistry(privilegedLoopbackOpenShiftClient, kubeClientSet)
    ruleResolver := rulevalidation.NewDefaultRuleResolver(
        informerFactory.Policies().Lister(),
        informerFactory.PolicyBindings().Lister(),
        informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
        informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
    )
    authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

    pluginInitializer := oadmission.PluginInitializer{
        OpenshiftClient:       privilegedLoopbackOpenShiftClient,
        ProjectCache:          projectCache,
        OriginQuotaRegistry:   quotaRegistry,
        Authorizer:            authorizer,
        JenkinsPipelineConfig: options.JenkinsPipelineConfig,
        RESTClientConfig:      *privilegedLoopbackClientConfig,
        Informers:             informerFactory,
        ClusterQuotaMapper:    clusterQuotaMappingController.GetClusterQuotaMapper(),
        DefaultRegistryFn:     imageapi.DefaultRegistryFunc(defaultRegistryFunc),
    }

    originAdmission, kubeAdmission, err := buildAdmissionChains(options, kubeClientSet, pluginInitializer)
    if err != nil {
        return nil, err
    }

    serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options)
    if err != nil {
        return nil, err
    }

    authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
    if err != nil {
        return nil, err
    }

    plug, plugStart := newControllerPlug(options, client)

    config := &MasterConfig{
        Options: options,

        RESTOptionsGetter: restOptsGetter,

        RuleResolver:                  ruleResolver,
        Authenticator:                 authenticator,
        Authorizer:                    authorizer,
        AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

        GroupCache:                    groupCache,
        ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
        ProjectCache:                  projectCache,
        ClusterQuotaMappingController: clusterQuotaMappingController,

        RequestContextMapper: requestContextMapper,

        AdmissionControl:     originAdmission,
        KubeAdmissionControl: kubeAdmission,

        TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

        ControllerPlug:      plug,
        ControllerPlugStart: plugStart,

        ImageFor:       imageTemplate.ExpandOrDie,
        RegistryNameFn: imageapi.DefaultRegistryFunc(defaultRegistryFunc),

        // TODO: migration of versions of resources stored in annotations must be sorted out
        ExternalVersionCodec: kapi.Codecs.LegacyCodec(unversioned.GroupVersion{Group: "", Version: "v1"}),

        KubeletClientConfig: kubeletClientConfig,

        ClientCAs:    clientCAs,
        APIClientCAs: apiClientCAs,

        PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
        PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
        PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

        Informers: informerFactory,
    }

    // ensure that the limit range informer will be started
    informer := config.Informers.LimitRanges().Informer()
    config.LimitVerifier = imageadmission.NewLimitVerifier(imageadmission.LimitRangesForNamespaceFunc(func(ns string) ([]*kapi.LimitRange, error) {
        list, err := config.Informers.LimitRanges().Lister().LimitRanges(ns).List(labels.Everything())
        if err != nil {
            return nil, err
        }
        // the verifier must return an error
        if len(list) == 0 && len(informer.LastSyncResourceVersion()) == 0 {
            glog.V(4).Infof("LimitVerifier still waiting for ranges to load: %#v", informer)
            forbiddenErr := kapierrors.NewForbidden(unversioned.GroupResource{Resource: "limitranges"}, "", fmt.Errorf("the server is still loading limit information"))
            forbiddenErr.ErrStatus.Details.RetryAfterSeconds = 1
            return nil, forbiddenErr
        }
        return list, nil
    }))

    return config, nil
}
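// The LimitVerifier closure above treats an empty list specially: before the
// informer's first sync, "no limit ranges" is indistinguishable from "not
// loaded yet", so it returns a retryable Forbidden error instead. A
// self-contained sketch of that guard (illustration only; lister is a
// stand-in for the informer-backed LimitRange lister):
package main

import (
    "errors"
    "fmt"
)

// lister stands in for an informer-backed lister; synced reports whether the
// initial watch has delivered a resource version (the
// LastSyncResourceVersion check above).
type lister struct {
    synced bool
    items  []string
}

// listLimitRanges fails with a retryable error while unsynced and empty,
// rather than letting image admission proceed unchecked.
func listLimitRanges(l *lister) ([]string, error) {
    if len(l.items) == 0 && !l.synced {
        return nil, errors.New("the server is still loading limit information; retry in 1s")
    }
    return l.items, nil
}

func main() {
    l := &lister{}
    if _, err := listLimitRanges(l); err != nil {
        fmt.Println("before sync:", err)
    }
    l.synced = true
    items, _ := listLimitRanges(l)
    fmt.Println("after sync:", items) // an empty list now really means "none exist"
}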
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
    originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
    if err != nil {
        return nil, err
    }
    kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
    if err != nil {
        return nil, err
    }
    // Make a separate client for event reporting, to avoid event QPS blocking node calls
    eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
    if err != nil {
        return nil, err
    }

    if options.NodeName == "localhost" {
        glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
    }

    clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
    if err != nil {
        return nil, err
    }

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    var path string
    var fileCheckInterval int64
    if options.PodManifestConfig != nil {
        path = options.PodManifestConfig.Path
        fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
    }

    var dockerExecHandler dockertools.ExecHandler
    switch options.DockerConfig.ExecHandlerName {
    case configapi.DockerExecHandlerNative:
        dockerExecHandler = &dockertools.NativeExecHandler{}
    case configapi.DockerExecHandlerNsenter:
        dockerExecHandler = &dockertools.NsenterExecHandler{}
    }

    kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, fmt.Errorf("cannot parse node address: %v", err)
    }
    kubePort, err := strconv.Atoi(kubePortStr)
    if err != nil {
        return nil, fmt.Errorf("cannot parse node port: %v", err)
    }

    // declare the OpenShift defaults from config
    server := kubeletoptions.NewKubeletServer()
    server.Config = path
    server.RootDirectory = options.VolumeDirectory
    server.NodeIP = options.NodeIP
    server.HostnameOverride = options.NodeName
    server.AllowPrivileged = true
    server.RegisterNode = true
    server.Address = kubeAddressStr
    server.Port = uint(kubePort)
    server.ReadOnlyPort = 0 // no read only access
    server.CAdvisorPort = 0 // no unsecured cadvisor access
    server.HealthzPort = 0  // no unsecured healthz access
    server.ClusterDNS = options.DNSIP
    server.ClusterDomain = options.DNSDomain
    server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
    server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
    server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
    server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
    server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
    server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
    server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
    server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
    server.MaxPods = 110

    // prevents kube from generating certs
    server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
    server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

    containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
    server.Containerized = containerized

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    proxyconfig, err := buildKubeProxyConfig(options)
    if err != nil {
        return nil, err
    }

    cfg, err := kubeletapp.UnsecuredKubeletConfig(server)
    if err != nil {
        return nil, err
    }

    // Replace the standard k8s emptyDir volume plugin with a wrapper version
    // which offers XFS quota functionality, but only if the node config
    // specifies an empty dir quota to apply to projects:
    if options.VolumeConfig.LocalQuota.PerFSGroup != nil {
        glog.V(2).Info("Replacing empty-dir volume plugin with quota wrapper")
        wrappedEmptyDirPlugin := false

        quotaApplicator, err := empty_dir.NewQuotaApplicator(options.VolumeDirectory)
        if err != nil {
            return nil, err
        }

        // Create a volume spec with emptyDir we can use to search for the
        // emptyDir plugin with CanSupport:
        emptyDirSpec := &volume.Spec{
            Volume: &kapi.Volume{
                VolumeSource: kapi.VolumeSource{
                    EmptyDir: &kapi.EmptyDirVolumeSource{},
                },
            },
        }

        for idx, plugin := range cfg.VolumePlugins {
            // Can't really do type checking or use a constant here as they are not exported:
            if plugin.CanSupport(emptyDirSpec) {
                wrapper := empty_dir.EmptyDirQuotaPlugin{
                    Wrapped:         plugin,
                    Quota:           *options.VolumeConfig.LocalQuota.PerFSGroup,
                    QuotaApplicator: quotaApplicator,
                }
                cfg.VolumePlugins[idx] = &wrapper
                wrappedEmptyDirPlugin = true
            }
        }
        // Because we can't look for the k8s emptyDir plugin by any means that would
        // survive a refactor, error out if we couldn't find it:
        if !wrappedEmptyDirPlugin {
            return nil, errors.New("unable to wrap emptyDir volume plugin for quota support")
        }
    } else {
        glog.V(2).Info("Skipping replacement of empty-dir volume plugin with quota wrapper, no local fsGroup quota specified")
    }

    // provide any config overrides
    cfg.NodeName = options.NodeName
    cfg.KubeClient = internalclientset.FromUnversionedClient(kubeClient)
    cfg.EventClient = internalclientset.FromUnversionedClient(eventClient)
    cfg.DockerExecHandler = dockerExecHandler

    // docker-in-docker (dind) deployments are used for testing
    // networking plugins. Running openshift under dind won't work
    // with the real oom adjuster due to the state of the cgroups path
    // in a dind container that uses systemd for init. Similarly,
    // cgroup manipulation of the nested docker daemon doesn't work
    // properly under centos/rhel and should be disabled by setting
    // the name of the container to an empty string.
    //
    // This workaround should become unnecessary once user namespaces
    if value := cmdutil.Env("OPENSHIFT_DIND", ""); value == "true" {
        glog.Warningf("Using FakeOOMAdjuster for docker-in-docker compatibility")
        cfg.OOMAdjuster = oom.NewFakeOOMAdjuster()
    }

    // Setup auth
    osClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
    if err != nil {
        return nil, err
    }

    authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
    if err != nil {
        return nil, err
    }
    authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
    if err != nil {
        return nil, err
    }

    authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
    if err != nil {
        return nil, err
    }

    authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
    if err != nil {
        return nil, err
    }
    authz, err := newAuthorizer(osClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
    if err != nil {
        return nil, err
    }

    cfg.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

    // Make sure the node doesn't think it is in standalone mode
    // This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
    cfg.StandaloneMode = false

    // TODO: could be cleaner
    if configapi.UseTLS(options.ServingInfo) {
        extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
        if err != nil {
            return nil, err
        }
        cfg.TLSOptions = &kubeletserver.TLSOptions{
            Config: crypto.SecureTLSConfig(&tls.Config{
                // RequestClientCert lets us request certs, but allow requests without client certs
                // Verification is done by the authn layer
                ClientAuth: tls.RequestClientCert,
                ClientCAs:  clientCAs,
                // Set SNI certificate func
                // Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
                // which we do not control when running with http.Server#ListenAndServeTLS
                GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
            }),
            CertFile: options.ServingInfo.ServerCert.CertFile,
            KeyFile:  options.ServingInfo.ServerCert.KeyFile,
        }
    } else {
        cfg.TLSOptions = nil
    }

    // Prepare cloud provider
    cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
    }
    cfg.Cloud = cloud

    sdnPlugin, endpointFilter, err := factory.NewPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP)
    if err != nil {
        return nil, fmt.Errorf("SDN initialization failed: %v", err)
    }
    if sdnPlugin != nil {
        cfg.NetworkPlugins = append(cfg.NetworkPlugins, sdnPlugin)
    }

    config := &NodeConfig{
        BindAddress: options.ServingInfo.BindAddress,

        AllowDisabledDocker: options.AllowDisabledDocker,
        Containerized:       containerized,

        Client: kubeClient,

        VolumeDir: options.VolumeDirectory,

        KubeletServer: server,
        KubeletConfig: cfg,

        ProxyConfig: proxyconfig,

        MTU: options.NetworkConfig.MTU,

        SDNPlugin:                 sdnPlugin,
        FilteringEndpointsHandler: endpointFilter,
    }

    return config, nil
}
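// The quota block in the function above searches the kubelet's volume plugins
// for the one that supports emptyDir and swaps in a wrapper, erroring if none
// matched. Below is a self-contained sketch of that search-and-wrap
// (illustration only; volumePlugin stands in for the kubelet volume plugin
// interface, quotaWrapper for empty_dir.EmptyDirQuotaPlugin):
package main

import (
    "errors"
    "fmt"
)

type volumePlugin interface {
    CanSupport(volumeName string) bool
    Name() string
}

type emptyDirPlugin struct{}

func (emptyDirPlugin) CanSupport(v string) bool { return v == "emptyDir" }
func (emptyDirPlugin) Name() string             { return "kubernetes.io/empty-dir" }

// quotaWrapper delegates to the wrapped plugin, additionally applying a
// per-FSGroup quota (elided here).
type quotaWrapper struct{ wrapped volumePlugin }

func (w quotaWrapper) CanSupport(v string) bool { return w.wrapped.CanSupport(v) }
func (w quotaWrapper) Name() string             { return w.wrapped.Name() + " (quota)" }

// wrapEmptyDir mirrors the loop above: the plugin is located via CanSupport
// because its name constants are unexported, and a failed search is an error,
// since silently skipping the wrap would disable quota enforcement.
func wrapEmptyDir(plugins []volumePlugin) error {
    wrapped := false
    for i, p := range plugins {
        if p.CanSupport("emptyDir") {
            plugins[i] = quotaWrapper{wrapped: p}
            wrapped = true
        }
    }
    if !wrapped {
        return errors.New("unable to wrap emptyDir volume plugin for quota support")
    }
    return nil
}

func main() {
    plugins := []volumePlugin{emptyDirPlugin{}}
    if err := wrapEmptyDir(plugins); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(plugins[0].Name()) // kubernetes.io/empty-dir (quota)
}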
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
    originClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
    if err != nil {
        return nil, err
    }
    kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
    if err != nil {
        return nil, err
    }
    // Make a separate client for event reporting, to avoid event QPS blocking node calls
    eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
    if err != nil {
        return nil, err
    }

    if options.NodeName == "localhost" {
        glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
    }

    clientCAs, err := kcrypto.CertPoolFromFile(options.ServingInfo.ClientCA)
    if err != nil {
        return nil, err
    }

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    var path string
    var fileCheckInterval int64
    if options.PodManifestConfig != nil {
        path = options.PodManifestConfig.Path
        fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
    }

    var dockerExecHandler dockertools.ExecHandler
    switch options.DockerConfig.ExecHandlerName {
    case configapi.DockerExecHandlerNative:
        dockerExecHandler = &dockertools.NativeExecHandler{}
    case configapi.DockerExecHandlerNsenter:
        dockerExecHandler = &dockertools.NsenterExecHandler{}
    }

    kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, fmt.Errorf("cannot parse node address: %v", err)
    }
    kubePort, err := strconv.Atoi(kubePortStr)
    if err != nil {
        return nil, fmt.Errorf("cannot parse node port: %v", err)
    }

    // Defaults are tested in TestKubeletDefaults
    server := kubeletoptions.NewKubeletServer()
    // Adjust defaults
    server.Config = path
    server.RootDirectory = options.VolumeDirectory
    server.NodeIP = options.NodeIP
    server.HostnameOverride = options.NodeName
    server.AllowPrivileged = true
    server.RegisterNode = true
    server.Address = kubeAddressStr
    server.Port = uint(kubePort)
    server.ReadOnlyPort = 0        // no read only access
    server.CAdvisorPort = 0        // no unsecured cadvisor access
    server.HealthzPort = 0         // no unsecured healthz access
    server.HealthzBindAddress = "" // no unsecured healthz access
    server.ClusterDNS = options.DNSIP
    server.ClusterDomain = options.DNSDomain
    server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
    server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
    server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
    server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
    server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
    server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
    server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
    server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
    server.MaxPods = 110
    server.SerializeImagePulls = false // disable serial image pulls by default

    switch server.NetworkPluginName {
    case ovs.SingleTenantPluginName, ovs.MultiTenantPluginName:
        // set defaults for openshift-sdn
        server.HairpinMode = componentconfig.HairpinNone
        server.ConfigureCBR0 = false
    }

    // prevents kube from generating certs
    server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
    server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

    containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
    server.Containerized = containerized

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    proxyconfig, err := buildKubeProxyConfig(options)
    if err != nil {
        return nil, err
    }

    cfg, err := kubeletapp.UnsecuredKubeletConfig(server)
    if err != nil {
        return nil, err
    }

    // provide any config overrides
    cfg.NodeName = options.NodeName
    cfg.KubeClient = clientadapter.FromUnversionedClient(kubeClient)
    cfg.EventClient = clientadapter.FromUnversionedClient(eventClient)
    cfg.DockerExecHandler = dockerExecHandler

    // Setup auth
    authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
    if err != nil {
        return nil, err
    }
    authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
    if err != nil {
        return nil, err
    }

    authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
    if err != nil {
        return nil, err
    }

    authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
    if err != nil {
        return nil, err
    }
    authz, err := newAuthorizer(originClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
    if err != nil {
        return nil, err
    }

    cfg.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

    // Make sure the node doesn't think it is in standalone mode
    // This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
    cfg.StandaloneMode = false

    // TODO: could be cleaner
    if configapi.UseTLS(options.ServingInfo) {
        extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
        if err != nil {
            return nil, err
        }
        cfg.TLSOptions = &kubeletserver.TLSOptions{
            Config: crypto.SecureTLSConfig(&tls.Config{
                // RequestClientCert lets us request certs, but allow requests without client certs
                // Verification is done by the authn layer
                ClientAuth: tls.RequestClientCert,
                ClientCAs:  clientCAs,
                // Set SNI certificate func
                // Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
                // which we do not control when running with http.Server#ListenAndServeTLS
                GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
            }),
            CertFile: options.ServingInfo.ServerCert.CertFile,
            KeyFile:  options.ServingInfo.ServerCert.KeyFile,
        }
    } else {
        cfg.TLSOptions = nil
    }

    // Prepare cloud provider
    cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
    }
    cfg.Cloud = cloud

    sdnPlugin, err := factory.NewNodePlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP)
    if err != nil {
        return nil, fmt.Errorf("SDN initialization failed: %v", err)
    }
    if sdnPlugin != nil {
        cfg.NetworkPlugins = append(cfg.NetworkPlugins, sdnPlugin)
    }

    endpointFilter, err := factory.NewProxyPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient)
    if err != nil {
        return nil, fmt.Errorf("SDN proxy initialization failed: %v", err)
    }

    config := &NodeConfig{
        BindAddress: options.ServingInfo.BindAddress,

        AllowDisabledDocker: options.AllowDisabledDocker,
        Containerized:       containerized,

        Client: kubeClient,

        VolumeDir: options.VolumeDirectory,

        KubeletServer: server,
        KubeletConfig: cfg,

        ProxyConfig: proxyconfig,

        MTU: options.NetworkConfig.MTU,

        SDNPlugin:                 sdnPlugin,
        FilteringEndpointsHandler: endpointFilter,
    }

    return config, nil
}