func RunSDNController(config *kubernetes.NodeConfig, nodeConfig configapi.NodeConfig) kubernetes.FilteringEndpointsConfigHandler { oclient, _, err := configapi.GetOpenShiftClient(nodeConfig.MasterKubeConfig) if err != nil { glog.Fatal("Failed to get kube client for SDN") } ch := make(chan struct{}) controller, endpointFilter, err := factory.NewPlugin(nodeConfig.NetworkConfig.NetworkPluginName, oclient, config.Client, nodeConfig.NodeName, nodeConfig.NodeIP, ch) if err != nil { glog.Fatalf("SDN initialization failed: %v", err) } if controller != nil { config.KubeletConfig.StartUpdates = ch config.KubeletConfig.NetworkPlugins = append(config.KubeletConfig.NetworkPlugins, controller) go func() { err := controller.StartNode(nodeConfig.NetworkConfig.MTU) if err != nil { glog.Fatalf("SDN Node failed: %v", err) } }() } return endpointFilter }
// RunSDNController runs openshift-sdn if the said network plugin is provided func (c *MasterConfig) RunSDNController() { oClient, kClient := c.SDNControllerClients() controller, _, err := factory.NewPlugin(c.Options.NetworkConfig.NetworkPluginName, oClient, kClient, "", "") if err != nil { glog.Fatalf("SDN initialization failed: %v", err) } if controller != nil { err = controller.StartMaster(c.Options.NetworkConfig.ClusterNetworkCIDR, c.Options.NetworkConfig.HostSubnetLength, c.Options.NetworkConfig.ServiceNetworkCIDR) if err != nil { glog.Fatalf("SDN initialization failed: %v", err) } } }
// BuildKubernetesNodeConfig assembles a complete NodeConfig from the node's
// configuration: API clients, kubelet server defaults and overrides, the
// quota-wrapping emptyDir volume plugin, kubelet auth, TLS options, the cloud
// provider, and the SDN plugin. It returns an error if any dependency cannot
// be constructed or validated.
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	// Static pod manifest path and poll interval are only set when a
	// PodManifestConfig is provided; otherwise both stay at zero values.
	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	// declare the OpenShift defaults from config
	server := kubeletoptions.NewKubeletServer()
	server.Config = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0 // no read only access
	server.CAdvisorPort = 0 // no unsecured cadvisor access
	server.HealthzPort = 0  // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 110

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	cfg, err := kubeletapp.UnsecuredKubeletConfig(server)
	if err != nil {
		return nil, err
	}

	// Replace the standard k8s emptyDir volume plugin with a wrapper version
	// which offers XFS quota functionality, but only if the node config
	// specifies an empty dir quota to apply to projects:
	if options.VolumeConfig.LocalQuota.PerFSGroup != nil {
		glog.V(2).Info("Replacing empty-dir volume plugin with quota wrapper")
		wrappedEmptyDirPlugin := false

		quotaApplicator, err := empty_dir.NewQuotaApplicator(options.VolumeDirectory)
		if err != nil {
			return nil, err
		}

		// Create a volume spec with emptyDir we can use to search for the
		// emptyDir plugin with CanSupport:
		emptyDirSpec := &volume.Spec{
			Volume: &kapi.Volume{
				VolumeSource: kapi.VolumeSource{
					EmptyDir: &kapi.EmptyDirVolumeSource{},
				},
			},
		}

		for idx, plugin := range cfg.VolumePlugins {
			// Can't really do type checking or use a constant here as they are not exported:
			if plugin.CanSupport(emptyDirSpec) {
				wrapper := empty_dir.EmptyDirQuotaPlugin{
					Wrapped:         plugin,
					Quota:           *options.VolumeConfig.LocalQuota.PerFSGroup,
					QuotaApplicator: quotaApplicator,
				}
				cfg.VolumePlugins[idx] = &wrapper
				wrappedEmptyDirPlugin = true
			}
		}
		// Because we can't look for the k8s emptyDir plugin by any means that would
		// survive a refactor, error out if we couldn't find it:
		if !wrappedEmptyDirPlugin {
			return nil, errors.New("unable to wrap emptyDir volume plugin for quota support")
		}
	} else {
		glog.V(2).Info("Skipping replacement of empty-dir volume plugin with quota wrapper, no local fsGroup quota specified")
	}

	// provide any config overrides
	cfg.NodeName = options.NodeName
	cfg.KubeClient = internalclientset.FromUnversionedClient(kubeClient)
	cfg.EventClient = internalclientset.FromUnversionedClient(eventClient)
	cfg.DockerExecHandler = dockerExecHandler

	// docker-in-docker (dind) deployments are used for testing
	// networking plugins. Running openshift under dind won't work
	// with the real oom adjuster due to the state of the cgroups path
	// in a dind container that uses systemd for init. Similarly,
	// cgroup manipulation of the nested docker daemon doesn't work
	// properly under centos/rhel and should be disabled by setting
	// the name of the container to an empty string.
	//
	// This workaround should become unnecessary once user namespaces
	if value := cmdutil.Env("OPENSHIFT_DIND", ""); value == "true" {
		glog.Warningf("Using FakeOOMAdjuster for docker-in-docker compatibility")
		cfg.OOMAdjuster = oom.NewFakeOOMAdjuster()
	}

	// Setup auth
	osClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}
	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(osClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	cfg.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// Make sure the node doesn't think it is in standalone mode
	// This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
	cfg.StandaloneMode = false

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		cfg.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allow requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	// Prepare cloud provider
	cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}
	cfg.Cloud = cloud

	// Create the SDN plugin and endpoint filter; a nil plugin means the
	// configured plugin name is not an openshift-sdn plugin.
	sdnPlugin, endpointFilter, err := factory.NewPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		cfg.NetworkPlugins = append(cfg.NetworkPlugins, sdnPlugin)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,

		ProxyConfig: proxyconfig,

		MTU: options.NetworkConfig.MTU,

		SDNPlugin:                 sdnPlugin,
		FilteringEndpointsHandler: endpointFilter,
	}

	return config, nil
}
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) { originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig) if err != nil { return nil, err } kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig) if err != nil { return nil, err } if options.NodeName == "localhost" { glog.Warningf(`Using "localhost" as node name will not resolve from all locations`) } var dnsIP net.IP if len(options.DNSIP) > 0 { dnsIP = net.ParseIP(options.DNSIP) if dnsIP == nil { return nil, fmt.Errorf("Invalid DNS IP: %s", options.DNSIP) } } clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA) if err != nil { return nil, err } imageTemplate := variable.NewDefaultImageTemplate() imageTemplate.Format = options.ImageConfig.Format imageTemplate.Latest = options.ImageConfig.Latest var path string var fileCheckInterval int64 if options.PodManifestConfig != nil { path = options.PodManifestConfig.Path fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds } var dockerExecHandler dockertools.ExecHandler switch options.DockerConfig.ExecHandlerName { case configapi.DockerExecHandlerNative: dockerExecHandler = &dockertools.NativeExecHandler{} case configapi.DockerExecHandlerNsenter: dockerExecHandler = &dockertools.NsenterExecHandler{} } kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress) if err != nil { return nil, fmt.Errorf("cannot parse node address: %v", err) } kubePort, err := strconv.Atoi(kubePortStr) if err != nil { return nil, fmt.Errorf("cannot parse node port: %v", err) } kubeAddress := net.ParseIP(kubeAddressStr) if kubeAddress == nil { return nil, fmt.Errorf("Invalid DNS IP: %s", kubeAddressStr) } // declare the OpenShift defaults from config server := kapp.NewKubeletServer() server.Config = path server.RootDirectory = options.VolumeDirectory // kubelet finds the node IP address by doing net.ParseIP(hostname) and if that fails, // it does 
net.LookupIP(NodeName) and picks the first non-loopback address. // Pass node IP as hostname to make kubelet use the desired IP address. if len(options.NodeIP) > 0 { server.HostnameOverride = options.NodeIP } else { server.HostnameOverride = options.NodeName } server.AllowPrivileged = true server.RegisterNode = true server.Address = kubeAddress server.Port = uint(kubePort) server.ReadOnlyPort = 0 // no read only access server.CAdvisorPort = 0 // no unsecured cadvisor access server.HealthzPort = 0 // no unsecured healthz access server.ClusterDNS = dnsIP server.ClusterDomain = options.DNSDomain server.NetworkPluginName = options.NetworkConfig.NetworkPluginName server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",") server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",") server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",") server.HTTPCheckFrequency = 0 // no remote HTTP pod creation access server.FileCheckFrequency = time.Duration(fileCheckInterval) * time.Second server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod") server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default // prevents kube from generating certs server.TLSCertFile = options.ServingInfo.ServerCert.CertFile server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile if value := cmdutil.Env("OPENSHIFT_CONTAINERIZED", ""); len(value) > 0 { server.Containerized = value == "true" } // resolve extended arguments // TODO: this should be done in config validation (along with the above) so we can provide // proper errors if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 { return nil, errors.NewAggregate(err) } cfg, err := server.UnsecuredKubeletConfig() if err != nil { return nil, err } // provide any config overrides cfg.NodeName = options.NodeName cfg.KubeClient = kubeClient 
cfg.DockerExecHandler = dockerExecHandler // docker-in-docker (dind) deployments are used for testing // networking plugins. Running openshift under dind won't work // with the real oom adjuster due to the state of the cgroups path // in a dind container that uses systemd for init. Similarly, // cgroup manipulation of the nested docker daemon doesn't work // properly under centos/rhel and should be disabled by setting // the name of the container to an empty string. // // This workaround should become unnecessary once user namespaces if value := cmdutil.Env("OPENSHIFT_DIND", ""); value == "true" { glog.Warningf("Using FakeOOMAdjuster for docker-in-docker compatibility") cfg.OOMAdjuster = oom.NewFakeOOMAdjuster() glog.Warningf("Disabling cgroup manipulation of nested docker daemon for docker-in-docker compatibility") cfg.DockerDaemonContainer = "" } // Setup auth osClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig) if err != nil { return nil, err } authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL) if err != nil { return nil, err } authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(*osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize) if err != nil { return nil, err } authzAttr, err := newAuthorizerAttributesGetter(options.NodeName) if err != nil { return nil, err } authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL) if err != nil { return nil, err } authz, err := newAuthorizer(osClient, authzTTL, options.AuthConfig.AuthorizationCacheSize) if err != nil { return nil, err } cfg.Auth = kubelet.NewKubeletAuth(authn, authzAttr, authz) // Make sure the node doesn't think it is in standalone mode // This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc cfg.StandaloneMode = false // TODO: could be cleaner if configapi.UseTLS(options.ServingInfo) { extraCerts, err := 
configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates) if err != nil { return nil, err } cfg.TLSOptions = &kubelet.TLSOptions{ Config: crypto.SecureTLSConfig(&tls.Config{ // RequestClientCert lets us request certs, but allow requests without client certs // Verification is done by the authn layer ClientAuth: tls.RequestClientCert, ClientCAs: clientCAs, // Set SNI certificate func // Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list, // which we do not control when running with http.Server#ListenAndServeTLS GetCertificate: cmdutil.GetCertificateFunc(extraCerts), }), CertFile: options.ServingInfo.ServerCert.CertFile, KeyFile: options.ServingInfo.ServerCert.KeyFile, } } else { cfg.TLSOptions = nil } // Prepare cloud provider cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile) if err != nil { return nil, err } if cloud != nil { glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile) } cfg.Cloud = cloud sdnPlugin, endpointFilter, err := factory.NewPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP) if err != nil { return nil, fmt.Errorf("SDN initialization failed: %v", err) } else if sdnPlugin != nil { cfg.NetworkPlugins = append(cfg.NetworkPlugins, sdnPlugin) } config := &NodeConfig{ BindAddress: options.ServingInfo.BindAddress, AllowDisabledDocker: options.AllowDisabledDocker, Client: kubeClient, VolumeDir: options.VolumeDirectory, KubeletServer: server, KubeletConfig: cfg, IPTablesSyncPeriod: options.IPTablesSyncPeriod, MTU: options.NetworkConfig.MTU, SDNPlugin: sdnPlugin, FilteringEndpointsHandler: endpointFilter, } return config, nil }