func (d *MasterNode) CanRun() (bool, error) { if d.KubeClient == nil || d.OsClient == nil { return false, errors.New("must have kube and os client") } if d.ServerUrl == "" { return false, errors.New("must have a server URL") } // If there is a master config file available, we'll perform an additional // check to see if an OVS network plugin is in use. If no master config, // we assume this is the case for now and let the check run anyhow. if len(d.MasterConfigFile) > 0 { // Parse the master config and check the network plugin name: masterCfg, masterErr := configapilatest.ReadAndResolveMasterConfig(d.MasterConfigFile) if masterErr != nil { return false, types.DiagnosticError{ID: "DClu3008", LogMessage: fmt.Sprintf("Master config provided but unable to parse: %s", masterErr), Cause: masterErr} } if !sdnapi.IsOpenShiftNetworkPlugin(masterCfg.NetworkConfig.NetworkPluginName) { return false, errors.New(fmt.Sprintf("Network plugin does not require master to also run node: %s", masterCfg.NetworkConfig.NetworkPluginName)) } } can, err := userCan(d.OsClient, authorizationapi.Action{ Verb: "list", Group: kapi.GroupName, Resource: "nodes", }) if err != nil { return false, types.DiagnosticError{ID: "DClu3000", LogMessage: fmt.Sprintf(clientErrorGettingNodes, err), Cause: err} } else if !can { return false, types.DiagnosticError{ID: "DClu3001", LogMessage: "Client does not have access to see node status", Cause: err} } return true, nil }
func GetOpenShiftNetworkPlugin(osClient *osclient.Client) (string, bool, error) { cn, err := osClient.ClusterNetwork().Get(api.ClusterNetworkDefault) if err != nil { if kerrors.IsNotFound(err) { return "", false, nil } return "", false, err } return cn.PluginName, sdnapi.IsOpenShiftNetworkPlugin(cn.PluginName), nil }
// Called by higher layers to create the plugin SDN node instance func NewNodePlugin(pluginName string, osClient *osclient.Client, kClient *kclient.Client, hostname string, selfIP string, iptablesSyncPeriod time.Duration, mtu uint32) (*OsdnNode, error) { if !osapi.IsOpenShiftNetworkPlugin(pluginName) { return nil, nil } log.Infof("Initializing SDN node of type %q with configured hostname %q (IP %q), iptables sync period %q", pluginName, hostname, selfIP, iptablesSyncPeriod.String()) if hostname == "" { output, err := kexec.New().Command("uname", "-n").CombinedOutput() if err != nil { return nil, err } hostname = strings.TrimSpace(string(output)) log.Infof("Resolved hostname to %q", hostname) } if selfIP == "" { var err error selfIP, err = netutils.GetNodeIP(hostname) if err != nil { log.V(5).Infof("Failed to determine node address from hostname %s; using default interface (%v)", hostname, err) var defaultIP net.IP defaultIP, err = kubeutilnet.ChooseHostInterface() if err != nil { return nil, err } selfIP = defaultIP.String() log.Infof("Resolved IP address to %q", selfIP) } } ovsif, err := ovs.New(kexec.New(), BR) if err != nil { return nil, err } plugin := &OsdnNode{ multitenant: osapi.IsOpenShiftMultitenantNetworkPlugin(pluginName), kClient: kClient, osClient: osClient, ovs: ovsif, localIP: selfIP, hostName: hostname, vnids: newNodeVNIDMap(), podNetworkReady: make(chan struct{}), kubeletInitReady: make(chan struct{}), iptablesSyncPeriod: iptablesSyncPeriod, mtu: mtu, egressPolicies: make(map[uint32][]*osapi.EgressNetworkPolicy), } if err := plugin.dockerPreCNICleanup(); err != nil { return nil, err } return plugin, nil }
func StartNode(nodeConfig configapi.NodeConfig, components *utilflags.ComponentFlag) error { config, err := kubernetes.BuildKubernetesNodeConfig(nodeConfig, components.Enabled(ComponentProxy), components.Enabled(ComponentDNS)) if err != nil { return err } if sdnapi.IsOpenShiftNetworkPlugin(config.KubeletServer.NetworkPluginName) { // TODO: SDN plugin depends on the Kubelet registering as a Node and doesn't retry cleanly, // and Kubelet also can't start the PodSync loop until the SDN plugin has loaded. if components.Enabled(ComponentKubelet) != components.Enabled(ComponentPlugins) { return fmt.Errorf("the SDN plugin must be run in the same process as the kubelet") } } if components.Enabled(ComponentKubelet) { glog.Infof("Starting node %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String()) } else { glog.Infof("Starting node networking %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String()) } _, _, kubeClientConfig, err := configapi.GetKubeClient(nodeConfig.MasterKubeConfig, nodeConfig.MasterClientConnectionOverrides) if err != nil { return err } glog.Infof("Connecting to API server %s", kubeClientConfig.Host) // preconditions if components.Enabled(ComponentKubelet) { config.EnsureKubeletAccess() config.EnsureVolumeDir() config.EnsureDocker(docker.NewHelper()) config.EnsureLocalQuota(nodeConfig) // must be performed after EnsureVolumeDir } if components.Enabled(ComponentKubelet) { config.RunKubelet() } if components.Enabled(ComponentPlugins) { config.RunPlugin() } if components.Enabled(ComponentProxy) { config.RunProxy() } if components.Enabled(ComponentDNS) { config.RunDNS() } config.RunServiceStores(components.Enabled(ComponentProxy), components.Enabled(ComponentDNS)) return nil }
func validateNetworkPluginName(originClient *osclient.Client, pluginName string) error { if sdnapi.IsOpenShiftNetworkPlugin(pluginName) { // Detect any plugin mismatches between node and master clusterNetwork, err := originClient.ClusterNetwork().Get(sdnapi.ClusterNetworkDefault) if kerrs.IsNotFound(err) { return fmt.Errorf("master has not created a default cluster network, network plugin %q can not start", pluginName) } else if err != nil { return fmt.Errorf("cannot fetch %q cluster network: %v", sdnapi.ClusterNetworkDefault, err) } if clusterNetwork.PluginName != strings.ToLower(pluginName) { if len(clusterNetwork.PluginName) != 0 { return fmt.Errorf("detected network plugin mismatch between OpenShift node(%q) and master(%q)", pluginName, clusterNetwork.PluginName) } else { // Do not return error in this case glog.Warningf(`either there is network plugin mismatch between OpenShift node(%q) and master or OpenShift master is running an older version where we did not persist plugin name`, pluginName) } } } return nil }
func StartMaster(networkConfig osconfigapi.MasterNetworkConfig, osClient *osclient.Client, kClient *kclientset.Clientset) error { if !osapi.IsOpenShiftNetworkPlugin(networkConfig.NetworkPluginName) { return nil } log.Infof("Initializing SDN master of type %q", networkConfig.NetworkPluginName) master := &OsdnMaster{ kClient: kClient, osClient: osClient, } var err error master.networkInfo, err = parseNetworkInfo(networkConfig.ClusterNetworkCIDR, networkConfig.ServiceNetworkCIDR) if err != nil { return err } createConfig := false updateConfig := false cn, err := master.osClient.ClusterNetwork().Get(osapi.ClusterNetworkDefault) if err == nil { if master.networkInfo.ClusterNetwork.String() != cn.Network || networkConfig.HostSubnetLength != cn.HostSubnetLength || master.networkInfo.ServiceNetwork.String() != cn.ServiceNetwork || networkConfig.NetworkPluginName != cn.PluginName { updateConfig = true } } else { cn = &osapi.ClusterNetwork{ TypeMeta: kapiunversioned.TypeMeta{Kind: "ClusterNetwork"}, ObjectMeta: kapi.ObjectMeta{Name: osapi.ClusterNetworkDefault}, } createConfig = true } if createConfig || updateConfig { if err = master.validateNetworkConfig(); err != nil { return err } size, len := master.networkInfo.ClusterNetwork.Mask.Size() if networkConfig.HostSubnetLength < 1 || networkConfig.HostSubnetLength >= uint32(len-size) { return fmt.Errorf("invalid HostSubnetLength %d for network %s (must be from 1 to %d)", networkConfig.HostSubnetLength, networkConfig.ClusterNetworkCIDR, len-size) } cn.Network = master.networkInfo.ClusterNetwork.String() cn.HostSubnetLength = networkConfig.HostSubnetLength cn.ServiceNetwork = master.networkInfo.ServiceNetwork.String() cn.PluginName = networkConfig.NetworkPluginName } if createConfig { cn, err := master.osClient.ClusterNetwork().Create(cn) if err != nil { return err } log.Infof("Created ClusterNetwork %s", clusterNetworkToString(cn)) } else if updateConfig { cn, err := master.osClient.ClusterNetwork().Update(cn) if err != nil { 
return err } log.Infof("Updated ClusterNetwork %s", clusterNetworkToString(cn)) } if err = master.SubnetStartMaster(master.networkInfo.ClusterNetwork, networkConfig.HostSubnetLength); err != nil { return err } if osapi.IsOpenShiftMultitenantNetworkPlugin(networkConfig.NetworkPluginName) { master.vnids = newMasterVNIDMap() if err = master.VnidStartMaster(); err != nil { return err } } return nil }
// BuildKubernetesNodeConfig assembles the complete NodeConfig used to run the
// node: it creates the OpenShift/Kube/event clients, derives a KubeletServer
// from the node config (with OpenShift-specific defaults), wires in the SDN
// plugin and proxy, sets up kubelet authn/authz and TLS, and optionally
// configures the in-process DNS server. The exact ordering below matters:
// SDN initialization must happen before the kubelet deps are built so the
// plugin can replace the kubelet's CNI plugin.
func BuildKubernetesNodeConfig(options configapi.NodeConfig, enableProxy, enableDNS bool) (*NodeConfig, error) {
	originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	_, kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	_, eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	// Client CA pool used both for kubelet TLS serving and request authn.
	clientCAs, err := kcrypto.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	// Static-pod manifest settings; zero values when no PodManifestConfig.
	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	// Fail early if the node's plugin name conflicts with the master's.
	if err = validateNetworkPluginName(originClient, options.NetworkConfig.NetworkPluginName); err != nil {
		return nil, err
	}

	// Defaults are tested in TestKubeletDefaults
	server := kubeletoptions.NewKubeletServer()
	// Adjust defaults
	server.RequireKubeConfig = true
	server.PodManifestPath = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP

	// Disable it to have the same behavior as before.
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = int32(kubePort)
	server.ReadOnlyPort = 0        // no read only access
	server.CAdvisorPort = 0        // no unsecured cadvisor access
	server.HealthzPort = 0         // no unsecured healthz access
	server.HealthzBindAddress = "" // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HostPIDSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HostIPCSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 250
	server.PodsPerCore = 10
	server.SerializeImagePulls = false          // disable serialized image pulls by default
	server.EnableControllerAttachDetach = false // stay consistent with existing config, but admins should enable it
	if enableDNS {
		// if we are running local DNS, skydns will load the default recursive nameservers for us
		server.ResolverConfig = ""
	}
	server.DockerExecHandlerName = string(options.DockerConfig.ExecHandlerName)

	if sdnapi.IsOpenShiftNetworkPlugin(server.NetworkPluginName) {
		// set defaults for openshift-sdn
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	// Initialize SDN before building kubelet config so it can modify options
	iptablesSyncPeriod, err := time.ParseDuration(options.IPTablesSyncPeriod)
	if err != nil {
		return nil, fmt.Errorf("Cannot parse the provided ip-tables sync period (%s) : %v", options.IPTablesSyncPeriod, err)
	}
	// NewNodePlugin returns (nil, nil) for non-OpenShift plugins; the nil
	// checks below keep standard kubelet networking in that case.
	sdnPlugin, err := sdnplugin.NewNodePlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP, iptablesSyncPeriod, options.NetworkConfig.MTU)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		// SDN plugin pod setup/teardown is implemented as a CNI plugin
		server.NetworkPluginName = kubeletcni.CNIPluginName
		server.NetworkPluginDir = kubeletcni.DefaultNetDir
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	deps, err := kubeletapp.UnsecuredKubeletDeps(server)
	if err != nil {
		return nil, err
	}

	// Initialize cloud provider
	cloud, err := buildCloudProvider(server)
	if err != nil {
		return nil, err
	}
	deps.Cloud = cloud

	// Replace the kubelet-created CNI plugin with the SDN plugin
	// Kubelet must be initialized with NetworkPluginName="cni" but
	// the SDN plugin (if available) needs to be the only one used
	if sdnPlugin != nil {
		deps.NetworkPlugins = []kubeletnetwork.NetworkPlugin{sdnPlugin}
	}

	// provide any config overrides
	//deps.NodeName = options.NodeName
	deps.KubeClient = kubeClient
	deps.EventClient = eventClient

	// Setup auth
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(kubeClient.Authentication(), clientCAs, authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}
	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(originClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	deps.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		deps.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allow requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		deps.TLSOptions = nil
	}

	sdnProxy, err := sdnplugin.NewProxyPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient)
	if err != nil {
		return nil, fmt.Errorf("SDN proxy initialization failed: %v", err)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletDeps:   deps,

		ServicesReady: make(chan struct{}),

		ProxyConfig:    proxyconfig,
		EnableUnidling: options.EnableUnidling,

		SDNPlugin: sdnPlugin,
		SDNProxy:  sdnProxy,
	}

	if enableDNS {
		dnsConfig, err := dns.NewServerDefaults()
		if err != nil {
			return nil, fmt.Errorf("DNS configuration was not possible: %v", err)
		}
		if len(options.DNSIP) > 0 {
			dnsConfig.DnsAddr = options.DNSIP + ":53"
		}
		dnsConfig.Domain = server.ClusterDomain + "."
		dnsConfig.Local = "openshift.default.svc." + dnsConfig.Domain

		services, serviceStore := dns.NewCachedServiceAccessorAndStore()
		endpoints, endpointsStore := dns.NewCachedEndpointsAccessorAndStore()
		if !enableProxy {
			// Without the proxy's shared caches, read endpoints directly
			// from the API via the kube client.
			endpoints = deps.KubeClient
			endpointsStore = nil
		}

		// TODO: use kubeletConfig.ResolverConfig as an argument to etcd in the event the
		// user sets it, instead of passing it to the kubelet.

		config.ServiceStore = serviceStore
		config.EndpointsStore = endpointsStore
		config.DNSServer = &dns.Server{
			Config:      dnsConfig,
			Services:    services,
			Endpoints:   endpoints,
			MetricsName: "node",
		}
	}

	return config, nil
}