// NewKubeletCommand provides a CLI handler for the 'kubelet' command
func NewKubeletCommand(name, fullName string, out io.Writer) *cobra.Command {
	s := app.NewKubeletServer()
	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch the Kubelet (kubelet)",
		Long:  kubeletLog,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()
			util.InitLogs()
			defer util.FlushLogs()

			if err := s.Run(nil); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	flags.SetNormalizeFunc(util.WordSepNormalizeFunc)
	flags.AddGoFlagSet(flag.CommandLine)
	s.AddFlags(flags)

	return cmd
}
// NewKubeletCommand provides a CLI handler for the 'kubelet' command
func NewKubeletCommand(name, fullName string, out io.Writer) *cobra.Command {
	s := app.NewKubeletServer()
	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch the Kubelet (kubelet)",
		Long:  kubeletLog,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()
			util.InitLogs()
			defer util.FlushLogs()

			if err := s.Run(nil); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	//TODO: uncomment after picking up a newer cobra
	//pflag.AddFlagSetToPFlagSet(flag, flags)
	s.AddFlags(flags)

	return cmd
}
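A minimal usage sketch of the command constructors above. The enclosing main, the import path, and the fullName argument are hypothetical, not from the source; cobra.Command.Execute parses the registered flags and invokes the Run closure.

package main

import (
	"fmt"
	"os"

	kubeletcmd "example.com/openshift/pkg/cmd/server/kubelet" // hypothetical import path
)

func main() {
	// Build the command with stdout as its output writer, then execute it.
	cmd := kubeletcmd.NewKubeletCommand("kubelet", "openshift kubelet", os.Stdout)
	if err := cmd.Execute(); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}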
func NewStatusUpdater(client *client.Client, relistPeriod time.Duration, nowFunc func() time.Time) *StatusUpdater {
	kubecfg := kubelet.NewKubeletServer() // only create to get the config, this is without side-effects
	return &StatusUpdater{
		client:          client,
		relistPeriod:    relistPeriod,
		heartBeatPeriod: kubecfg.NodeStatusUpdateFrequency,
		nowFunc:         nowFunc,
	}
}
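For context, a sketch of the StatusUpdater struct implied by the constructor above; the field types are inferred from the literal and may not match the actual definition.

// Inferred shape only, not the source definition.
type StatusUpdater struct {
	client          *client.Client   // API client used to update node status
	relistPeriod    time.Duration    // how often nodes are re-listed
	heartBeatPeriod time.Duration    // taken from the kubelet's NodeStatusUpdateFrequency default
	nowFunc         func() time.Time // injectable clock, useful in tests
}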
func NewKubeletExecutorServer() *KubeletExecutorServer {
	k := &KubeletExecutorServer{
		KubeletServer:  app.NewKubeletServer(),
		SuicideTimeout: config.DefaultSuicideTimeout,
	}
	if pwd, err := os.Getwd(); err != nil {
		log.Warningf("failed to determine current directory: %v", err)
	} else {
		k.RootDirectory = pwd // mesos sandbox dir
	}
	k.Address = net.ParseIP(defaultBindingAddress())
	return k
}
func Test_nodeWithUpdatedStatus(t *testing.T) {
	now := time.Now()
	testNode := func(d time.Duration, s api.ConditionStatus, r string) *api.Node {
		return &api.Node{
			Status: api.NodeStatus{
				Conditions: []api.NodeCondition{{
					Type: api.NodeOutOfDisk,
				}, {
					Type:               api.NodeReady,
					Status:             s,
					Reason:             r,
					Message:            "some message we don't care about here",
					LastTransitionTime: unversioned.Time{now.Add(-time.Minute)},
					LastHeartbeatTime:  unversioned.Time{now.Add(d)},
				}},
			},
		}
	}

	cm := app.NewCMServer()
	kubecfg := kubelet.NewKubeletServer()
	assert.True(t, kubecfg.NodeStatusUpdateFrequency*3 < cm.NodeMonitorGracePeriod) // sanity check for defaults

	n := testNode(0, api.ConditionTrue, "KubeletReady")
	su := NewStatusUpdater(nil, cm.NodeMonitorPeriod, func() time.Time { return now })
	_, updated, err := su.nodeWithUpdatedStatus(n)
	assert.NoError(t, err)
	assert.False(t, updated, "no update expected b/c kubelet updated heartbeat just now")

	n = testNode(-cm.NodeMonitorGracePeriod, api.ConditionTrue, "KubeletReady")
	n2, updated, err := su.nodeWithUpdatedStatus(n)
	assert.NoError(t, err)
	assert.True(t, updated, "update expected b/c kubelet's update is older than DefaultNodeMonitorGracePeriod")
	assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Reason, slaveReadyReason)
	assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Message, slaveReadyMessage)

	n = testNode(-kubecfg.NodeStatusUpdateFrequency, api.ConditionTrue, "KubeletReady")
	n2, updated, err = su.nodeWithUpdatedStatus(n)
	assert.NoError(t, err)
	assert.False(t, updated, "no update expected b/c kubelet's update was missed only once")

	n = testNode(-kubecfg.NodeStatusUpdateFrequency*3, api.ConditionTrue, "KubeletReady")
	n2, updated, err = su.nodeWithUpdatedStatus(n)
	assert.NoError(t, err)
	assert.True(t, updated, "update expected b/c kubelet's update is older than 3*DefaultNodeStatusUpdateFrequency")
	assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Reason, slaveReadyReason)
	assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Message, slaveReadyMessage)
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := app.NewKubeletServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func NewKubeletExecutorServer() *KubeletExecutorServer {
	k := &KubeletExecutorServer{
		KubeletServer:  app.NewKubeletServer(),
		SuicideTimeout: config.DefaultSuicideTimeout,
		cgroupPrefix:   config.DefaultCgroupPrefix,
	}
	if pwd, err := os.Getwd(); err != nil {
		log.Warningf("failed to determine current directory: %v", err)
	} else {
		k.RootDirectory = pwd // mesos sandbox dir
	}
	k.Address = net.ParseIP(defaultBindingAddress())
	k.ShutdownFD = -1 // indicates unspecified FD
	return k
}
// NewKubelet creates a new hyperkube Server object that includes the
// description and flags.
func NewKubelet() *Server {
	s := kubelet.NewKubeletServer()
	hks := Server{
		SimpleUsage: "kubelet",
		Long: `The kubelet binary is responsible for maintaining a set of containers on a
particular node. It syncs data from a variety of sources including a Kubernetes
API server, an etcd cluster, HTTP endpoint or local file. It then queries Docker
to see what is currently running. It synchronizes the configuration data with the
running set of containers by starting or stopping Docker containers.`,
		Run: func(_ *Server, args []string) error {
			return s.Run(args)
		},
	}
	s.AddFlags(hks.Flags())
	return &hks
}
func ValidateKubeletExtendedArguments(config api.ExtendedArguments) fielderrors.ValidationErrorList {
	return ValidateExtendedArguments(config, kapp.NewKubeletServer().AddFlags)
}
func ValidateKubeletExtendedArguments(config api.ExtendedArguments, fldPath *field.Path) field.ErrorList {
	return ValidateExtendedArguments(config, kapp.NewKubeletServer().AddFlags, fldPath)
}
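A rough sketch of the idea behind passing the kubelet's AddFlags into the validator: the flags are registered on a throwaway pflag.FlagSet, and each extended argument is looked up and parsed against that set. The helper below (validateAgainstFlags) is hypothetical and not the actual ValidateExtendedArguments implementation.

// Hypothetical illustration of the technique; errors are collected rather than
// aborting on the first failure, mirroring how extended-argument validation behaves.
func validateAgainstFlags(args map[string][]string, addFlags func(*pflag.FlagSet)) []error {
	var errs []error
	fs := pflag.NewFlagSet("extended", pflag.ContinueOnError)
	addFlags(fs) // e.g. kapp.NewKubeletServer().AddFlags

	for name, values := range args {
		f := fs.Lookup(name)
		if f == nil {
			errs = append(errs, fmt.Errorf("unknown argument: %s", name))
			continue
		}
		for _, value := range values {
			if err := f.Value.Set(value); err != nil {
				errs = append(errs, fmt.Errorf("invalid value %q for %s: %v", value, name, err))
			}
		}
	}
	return errs
}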
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	var dnsIP net.IP
	if len(options.DNSIP) > 0 {
		dnsIP = net.ParseIP(options.DNSIP)
		if dnsIP == nil {
			return nil, fmt.Errorf("Invalid DNS IP: %s", options.DNSIP)
		}
	}

	clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}
	kubeAddress := net.ParseIP(kubeAddressStr)
	if kubeAddress == nil {
		return nil, fmt.Errorf("Invalid DNS IP: %s", kubeAddressStr)
	}

	// declare the OpenShift defaults from config
	server := kapp.NewKubeletServer()
	server.Config = path
	server.RootDirectory = options.VolumeDirectory

	// kubelet finds the node IP address by doing net.ParseIP(hostname) and if that fails,
	// it does net.LookupIP(NodeName) and picks the first non-loopback address.
	// Pass node IP as hostname to make kubelet use the desired IP address.
	if len(options.NodeIP) > 0 {
		server.HostnameOverride = options.NodeIP
	} else {
		server.HostnameOverride = options.NodeName
	}
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddress
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0 // no read only access
	server.CAdvisorPort = 0 // no unsecured cadvisor access
	server.HealthzPort = 0  // no unsecured healthz access
	server.ClusterDNS = dnsIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HTTPCheckFrequency = 0 // no remote HTTP pod creation access
	server.FileCheckFrequency = time.Duration(fileCheckInterval) * time.Second
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	if value := cmdutil.Env("OPENSHIFT_CONTAINERIZED", ""); len(value) > 0 {
		server.Containerized = value == "true"
	}

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, errors.NewAggregate(err)
	}

	cfg, err := server.UnsecuredKubeletConfig()
	if err != nil {
		return nil, err
	}

	// provide any config overrides
	cfg.NodeName = options.NodeName
	cfg.StreamingConnectionIdleTimeout = 5 * time.Minute // TODO: should be set
	cfg.KubeClient = kubeClient
	cfg.DockerExecHandler = dockerExecHandler

	// Setup auth
	osClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(*osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}
	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(osClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	cfg.Auth = kubelet.NewKubeletAuth(authn, authzAttr, authz)

	// Make sure the node doesn't think it is in standalone mode
	// This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
	cfg.StandaloneMode = false

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		cfg.TLSOptions = &kubelet.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allow requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	// Prepare cloud provider
	cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}
	cfg.Cloud = cloud

	config := &NodeConfig{
		BindAddress:         options.ServingInfo.BindAddress,
		AllowDisabledDocker: options.AllowDisabledDocker,
		Client:              kubeClient,
		VolumeDir:           options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,

		IPTablesSyncPeriod: options.IPTablesSyncPeriod,
	}

	return config, nil
}
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	var dnsIP net.IP
	if len(options.DNSIP) > 0 {
		dnsIP = net.ParseIP(options.DNSIP)
		if dnsIP == nil {
			return nil, fmt.Errorf("Invalid DNS IP: %s", options.DNSIP)
		}
	}

	clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}
	kubeAddress := net.ParseIP(kubeAddressStr)
	if kubeAddress == nil {
		return nil, fmt.Errorf("Invalid DNS IP: %s", kubeAddressStr)
	}

	// declare the OpenShift defaults from config
	server := kapp.NewKubeletServer()
	server.Config = path
	server.RootDirectory = options.VolumeDirectory

	// kubelet finds the node IP address by doing net.ParseIP(hostname) and if that fails,
	// it does net.LookupIP(NodeName) and picks the first non-loopback address.
	// Pass node IP as hostname to make kubelet use the desired IP address.
	if len(options.NodeIP) > 0 {
		server.HostnameOverride = options.NodeIP
	} else {
		server.HostnameOverride = options.NodeName
	}
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddress
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0 // no read only access
	server.CadvisorPort = 0 // no unsecured cadvisor access
	server.HealthzPort = 0  // no unsecured healthz access
	server.ClusterDNS = dnsIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelet.ApiserverSource, kubelet.FileSource}, ",")
	server.HTTPCheckFrequency = 0 // no remote HTTP pod creation access
	server.FileCheckFrequency = time.Duration(fileCheckInterval) * time.Second
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	if value := cmdutil.Env("OPENSHIFT_CONTAINERIZED", ""); len(value) > 0 {
		server.Containerized = value == "true"
	}

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, errors.NewAggregate(err)
	}

	cfg, err := server.KubeletConfig()
	if err != nil {
		return nil, err
	}

	// provide any config overrides
	cfg.NodeName = options.NodeName
	cfg.StreamingConnectionIdleTimeout = 5 * time.Minute // TODO: should be set
	cfg.KubeClient = kubeClient
	cfg.DockerExecHandler = dockerExecHandler

	// Make sure the node doesn't think it is in standalone mode
	// This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
	cfg.StandaloneMode = false

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		cfg.TLSOptions = &kubelet.TLSOptions{
			Config: &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
				// RequireAndVerifyClientCert lets us limit requests to ones with a valid client certificate
				ClientAuth: tls.RequireAndVerifyClientCert,
				ClientCAs:  clientCAs,
			},
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	config := &NodeConfig{
		BindAddress:         options.ServingInfo.BindAddress,
		AllowDisabledDocker: options.AllowDisabledDocker,
		Client:              kubeClient,
		VolumeDir:           options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,

		IPTablesSyncPeriod: options.IPTablesSyncPeriod,
	}

	return config, nil
}
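For orientation, a sketch of the NodeConfig struct implied by the literal returned in both BuildKubernetesNodeConfig variants above; every field type is a guess inferred from the assignments, not the actual definition.

// Inferred shape only; the real NodeConfig differs in detail.
type NodeConfig struct {
	BindAddress         string              // host:port the node serves on (from ServingInfo.BindAddress)
	AllowDisabledDocker bool                // tolerate a missing or stopped Docker daemon
	Client              *client.Client      // Kubernetes API client (type guessed)
	VolumeDir           string              // root directory for volumes
	KubeletServer       *kapp.KubeletServer // flag-level kubelet settings
	KubeletConfig       *kapp.KubeletConfig // resolved kubelet runtime config (type guessed)
	IPTablesSyncPeriod  string              // proxy iptables resync period (type guessed)
}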