func buildAuth(nodeName types.NodeName, client internalclientset.Interface, config componentconfig.KubeletConfiguration) (server.AuthInterface, error) {
	// Get clients, if provided
	var (
		tokenClient authenticationclient.TokenReviewInterface
		sarClient   authorizationclient.SubjectAccessReviewInterface
	)
	if client != nil && !reflect.ValueOf(client).IsNil() {
		tokenClient = client.Authentication().TokenReviews()
		sarClient = client.Authorization().SubjectAccessReviews()
	}

	authenticator, err := buildAuthn(tokenClient, config.Authentication)
	if err != nil {
		return nil, err
	}

	attributes := server.NewNodeAuthorizerAttributesGetter(nodeName)

	authorizer, err := buildAuthz(sarClient, config.Authorization)
	if err != nil {
		return nil, err
	}

	return server.NewKubeletAuth(authenticator, attributes, authorizer), nil
}
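// Note on the reflect-based nil check above: an interface value holding a
// typed nil pointer is itself non-nil, so a plain `client != nil` test would
// not catch a nil *Clientset handed in as internalclientset.Interface. A
// minimal illustration of that pitfall (not part of the original source):
func exampleTypedNilPitfall() {
	var concrete *internalclientset.Clientset        // nil pointer
	var iface internalclientset.Interface = concrete // interface now carries a type
	fmt.Println(iface == nil)                   // false: the interface itself is non-nil
	fmt.Println(reflect.ValueOf(iface).IsNil()) // true: the underlying pointer is nil
}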
func BuildKubernetesNodeConfig(options configapi.NodeConfig, enableProxy, enableDNS bool) (*NodeConfig, error) {
	originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	_, kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	_, eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	clientCAs, err := kcrypto.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	if err = validateNetworkPluginName(originClient, options.NetworkConfig.NetworkPluginName); err != nil {
		return nil, err
	}

	// Defaults are tested in TestKubeletDefaults
	server := kubeletoptions.NewKubeletServer()
	// Adjust defaults
	server.RequireKubeConfig = true
	server.PodManifestPath = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = int32(kubePort)
	server.ReadOnlyPort = 0        // no read only access
	server.CAdvisorPort = 0        // no unsecured cadvisor access
	server.HealthzPort = 0         // no unsecured healthz access
	server.HealthzBindAddress = "" // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HostPIDSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HostIPCSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 250
	server.PodsPerCore = 10
	server.SerializeImagePulls = false          // disable serialized image pulls by default
	server.EnableControllerAttachDetach = false // stay consistent with existing config, but admins should enable it
	if enableDNS {
		// if we are running local DNS, skydns will load the default recursive nameservers for us
		server.ResolverConfig = ""
	}
	server.DockerExecHandlerName = string(options.DockerConfig.ExecHandlerName)

	if sdnapi.IsOpenShiftNetworkPlugin(server.NetworkPluginName) {
		// set defaults for openshift-sdn
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	// Initialize SDN before building kubelet config so it can modify options
	iptablesSyncPeriod, err := time.ParseDuration(options.IPTablesSyncPeriod)
	if err != nil {
		return nil, fmt.Errorf("Cannot parse the provided ip-tables sync period (%s) : %v", options.IPTablesSyncPeriod, err)
	}
	sdnPlugin, err := sdnplugin.NewNodePlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP, iptablesSyncPeriod, options.NetworkConfig.MTU)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		// SDN plugin pod setup/teardown is implemented as a CNI plugin
		server.NetworkPluginName = kubeletcni.CNIPluginName
		server.NetworkPluginDir = kubeletcni.DefaultNetDir
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	deps, err := kubeletapp.UnsecuredKubeletDeps(server)
	if err != nil {
		return nil, err
	}

	// Initialize cloud provider
	cloud, err := buildCloudProvider(server)
	if err != nil {
		return nil, err
	}
	deps.Cloud = cloud

	// Replace the kubelet-created CNI plugin with the SDN plugin
	// Kubelet must be initialized with NetworkPluginName="cni" but
	// the SDN plugin (if available) needs to be the only one used
	if sdnPlugin != nil {
		deps.NetworkPlugins = []kubeletnetwork.NetworkPlugin{sdnPlugin}
	}

	// provide any config overrides
	//deps.NodeName = options.NodeName
	deps.KubeClient = kubeClient
	deps.EventClient = eventClient

	// Setup auth
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(kubeClient.Authentication(), clientCAs, authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}

	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(originClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	deps.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		deps.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allow requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		deps.TLSOptions = nil
	}

	sdnProxy, err := sdnplugin.NewProxyPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient)
	if err != nil {
		return nil, fmt.Errorf("SDN proxy initialization failed: %v", err)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletDeps:   deps,

		ServicesReady: make(chan struct{}),

		ProxyConfig:    proxyconfig,
		EnableUnidling: options.EnableUnidling,

		SDNPlugin: sdnPlugin,
		SDNProxy:  sdnProxy,
	}

	if enableDNS {
		dnsConfig, err := dns.NewServerDefaults()
		if err != nil {
			return nil, fmt.Errorf("DNS configuration was not possible: %v", err)
		}
		if len(options.DNSIP) > 0 {
			dnsConfig.DnsAddr = options.DNSIP + ":53"
		}
		dnsConfig.Domain = server.ClusterDomain + "."
		dnsConfig.Local = "openshift.default.svc." + dnsConfig.Domain

		services, serviceStore := dns.NewCachedServiceAccessorAndStore()
		endpoints, endpointsStore := dns.NewCachedEndpointsAccessorAndStore()
		if !enableProxy {
			endpoints = deps.KubeClient
			endpointsStore = nil
		}

		// TODO: use kubeletConfig.ResolverConfig as an argument to etcd in the event the
		// user sets it, instead of passing it to the kubelet.

		config.ServiceStore = serviceStore
		config.EndpointsStore = endpointsStore
		config.DNSServer = &dns.Server{
			Config:      dnsConfig,
			Services:    services,
			Endpoints:   endpoints,
			MetricsName: "node",
		}
	}

	return config, nil
}
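// A minimal usage sketch for the variant above; the nodeConfig value is
// assumed to have been read and validated elsewhere, and caller-side error
// handling is elided (illustrative only, not part of the original source):
func exampleBuildNodeConfig(nodeConfig configapi.NodeConfig) (*NodeConfig, error) {
	// enableProxy=true keeps the DNS server backed by the cached endpoints
	// accessor (otherwise it queries the API client directly); enableDNS=true
	// configures the node-local DNS server built at the end of the function.
	return BuildKubernetesNodeConfig(nodeConfig, true, true)
}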
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	// declare the OpenShift defaults from config
	server := kubeletoptions.NewKubeletServer()
	server.Config = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0 // no read only access
	server.CAdvisorPort = 0 // no unsecured cadvisor access
	server.HealthzPort = 0  // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 110

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	cfg, err := kubeletapp.UnsecuredKubeletConfig(server)
	if err != nil {
		return nil, err
	}

	// Replace the standard k8s emptyDir volume plugin with a wrapper version
	// which offers XFS quota functionality, but only if the node config
	// specifies an empty dir quota to apply to projects:
	if options.VolumeConfig.LocalQuota.PerFSGroup != nil {
		glog.V(2).Info("Replacing empty-dir volume plugin with quota wrapper")
		wrappedEmptyDirPlugin := false

		quotaApplicator, err := empty_dir.NewQuotaApplicator(options.VolumeDirectory)
		if err != nil {
			return nil, err
		}

		// Create a volume spec with emptyDir we can use to search for the
		// emptyDir plugin with CanSupport:
		emptyDirSpec := &volume.Spec{
			Volume: &kapi.Volume{
				VolumeSource: kapi.VolumeSource{
					EmptyDir: &kapi.EmptyDirVolumeSource{},
				},
			},
		}

		for idx, plugin := range cfg.VolumePlugins {
			// Can't really do type checking or use a constant here as they are not exported:
			if plugin.CanSupport(emptyDirSpec) {
				wrapper := empty_dir.EmptyDirQuotaPlugin{
					Wrapped:         plugin,
					Quota:           *options.VolumeConfig.LocalQuota.PerFSGroup,
					QuotaApplicator: quotaApplicator,
				}
				cfg.VolumePlugins[idx] = &wrapper
				wrappedEmptyDirPlugin = true
			}
		}
		// Because we can't look for the k8s emptyDir plugin by any means that would
		// survive a refactor, error out if we couldn't find it:
		if !wrappedEmptyDirPlugin {
			return nil, errors.New("unable to wrap emptyDir volume plugin for quota support")
		}
	} else {
		glog.V(2).Info("Skipping replacement of empty-dir volume plugin with quota wrapper, no local fsGroup quota specified")
	}

	// provide any config overrides
	cfg.NodeName = options.NodeName
	cfg.KubeClient = internalclientset.FromUnversionedClient(kubeClient)
	cfg.EventClient = internalclientset.FromUnversionedClient(eventClient)
	cfg.DockerExecHandler = dockerExecHandler

	// docker-in-docker (dind) deployments are used for testing
	// networking plugins. Running openshift under dind won't work
	// with the real oom adjuster due to the state of the cgroups path
	// in a dind container that uses systemd for init. Similarly,
	// cgroup manipulation of the nested docker daemon doesn't work
	// properly under centos/rhel and should be disabled by setting
	// the name of the container to an empty string.
	//
	// This workaround should become unnecessary once user namespaces
	// are available.
	if value := cmdutil.Env("OPENSHIFT_DIND", ""); value == "true" {
		glog.Warningf("Using FakeOOMAdjuster for docker-in-docker compatibility")
		cfg.OOMAdjuster = oom.NewFakeOOMAdjuster()
	}

	// Setup auth
	osClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}

	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(osClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	cfg.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// Make sure the node doesn't think it is in standalone mode
	// This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
	cfg.StandaloneMode = false

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		cfg.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allow requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	// Prepare cloud provider
	cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}
	cfg.Cloud = cloud

	sdnPlugin, endpointFilter, err := factory.NewPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		cfg.NetworkPlugins = append(cfg.NetworkPlugins, sdnPlugin)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,

		ProxyConfig: proxyconfig,

		MTU: options.NetworkConfig.MTU,

		SDNPlugin:                 sdnPlugin,
		FilteringEndpointsHandler: endpointFilter,
	}

	return config, nil
}
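// The emptyDir quota replacement above is a decorator: the upstream emptyDir
// plugin type is not exported, so the code probes each registered plugin with
// CanSupport and swaps the match for a wrapper that adds quota behavior while
// delegating everything else. A stripped-down sketch of the same pattern,
// using hypothetical stand-in types rather than the real volume.VolumePlugin
// interface:
type mountPlugin interface {
	CanSupport(spec string) bool
	Mount(path string) error
}

type quotaWrapper struct {
	wrapped mountPlugin // the original plugin; calls delegate to it
	quota   int64       // extra behavior layered on top of the wrapped plugin
}

func (w *quotaWrapper) CanSupport(spec string) bool { return w.wrapped.CanSupport(spec) }

func (w *quotaWrapper) Mount(path string) error {
	// apply the quota first, then fall through to the wrapped implementation
	if err := applyQuota(path, w.quota); err != nil {
		return err
	}
	return w.wrapped.Mount(path)
}

// applyQuota is a placeholder for the real work (e.g. invoking xfs_quota).
func applyQuota(path string, quota int64) error { return nil }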
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	originClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	clientCAs, err := kcrypto.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	// Defaults are tested in TestKubeletDefaults
	server := kubeletoptions.NewKubeletServer()
	// Adjust defaults
	server.Config = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0        // no read only access
	server.CAdvisorPort = 0        // no unsecured cadvisor access
	server.HealthzPort = 0         // no unsecured healthz access
	server.HealthzBindAddress = "" // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 110
	server.SerializeImagePulls = false // disable serial image pulls by default

	switch server.NetworkPluginName {
	case ovs.SingleTenantPluginName, ovs.MultiTenantPluginName:
		// set defaults for openshift-sdn
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	cfg, err := kubeletapp.UnsecuredKubeletConfig(server)
	if err != nil {
		return nil, err
	}

	// provide any config overrides
	cfg.NodeName = options.NodeName
	cfg.KubeClient = clientadapter.FromUnversionedClient(kubeClient)
	cfg.EventClient = clientadapter.FromUnversionedClient(eventClient)
	cfg.DockerExecHandler = dockerExecHandler

	// Setup auth
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}

	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(originClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	cfg.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// Make sure the node doesn't think it is in standalone mode
	// This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
	cfg.StandaloneMode = false

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		cfg.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allow requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	// Prepare cloud provider
	cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}
	cfg.Cloud = cloud

	sdnPlugin, err := factory.NewNodePlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		cfg.NetworkPlugins = append(cfg.NetworkPlugins, sdnPlugin)
	}

	endpointFilter, err := factory.NewProxyPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient)
	if err != nil {
		return nil, fmt.Errorf("SDN proxy initialization failed: %v", err)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,

		ProxyConfig: proxyconfig,

		MTU: options.NetworkConfig.MTU,

		SDNPlugin:                 sdnPlugin,
		FilteringEndpointsHandler: endpointFilter,
	}

	return config, nil
}
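// All three variants above parse ServingInfo.BindAddress the same way; the
// step is pulled out here as a self-contained helper for clarity (not present
// in the original code):
func splitBindAddress(bindAddress string) (string, int, error) {
	host, portStr, err := net.SplitHostPort(bindAddress) // e.g. "0.0.0.0:10250"
	if err != nil {
		return "", 0, fmt.Errorf("cannot parse node address: %v", err)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return "", 0, fmt.Errorf("cannot parse node port: %v", err)
	}
	return host, port, nil
}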