Example #1
// NewKubeletCommand provides a CLI handler for the 'kubelet' command
func NewKubeletCommand(name, fullName string, out io.Writer) *cobra.Command {
	kubeletOptions := kubeletoptions.NewKubeletServer()

	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch the Kubelet (kubelet)",
		Long:  kubeletLog,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()

			util.InitLogs()
			defer util.FlushLogs()

			if err := kubeletapp.Run(kubeletOptions, nil); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	flags.SetNormalizeFunc(util.WordSepNormalizeFunc)
	flags.AddGoFlagSet(flag.CommandLine)
	kubeletOptions.AddFlags(flags)

	return cmd
}
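
For context, here is a minimal sketch of how the returned *cobra.Command might be executed from a main function. The command name, the fullName argument, and the assumption that this main lives in the same package as NewKubeletCommand (with fmt and os imported) are all illustrative, not taken from the example itself.

func main() {
	// Build the kubelet command; the name/fullName values and the stdout
	// writer are placeholders chosen for this sketch.
	cmd := NewKubeletCommand("kubelet", "kubelet", os.Stdout)

	// cobra parses os.Args and invokes the Run closure defined above.
	if err := cmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}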
Example #2
func StartKubeletServer(lk LocalkubeServer) func() error {
	config := options.NewKubeletServer()

	// Master details
	config.APIServerList = []string{lk.GetAPIServerInsecureURL()}

	// Set containerized based on the flag
	config.Containerized = lk.Containerized

	config.AllowPrivileged = true
	config.Config = "/etc/kubernetes/manifests"

	// Networking
	config.ClusterDomain = lk.DNSDomain
	config.ClusterDNS = lk.DNSIP.String()

	config.NodeIP = lk.NodeIP.String()

	// Use the host's resolver config
	if lk.Containerized {
		config.ResolverConfig = "/rootfs/etc/resolv.conf"
	} else {
		config.ResolverConfig = "/etc/resolv.conf"
	}

	return func() error {
		return kubelet.Run(config, nil)
	}
}
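
As a usage note, kubelet.Run blocks until the kubelet exits, so the closure returned by StartKubeletServer is typically driven in a goroutine. The helper below is a hypothetical sketch under that assumption; runKubeletInBackground and errCh are names invented for illustration, and the LocalkubeServer value is assumed to be configured by the caller.

// runKubeletInBackground is a hypothetical helper: it starts the localkube
// kubelet in a goroutine and exposes its terminal error on a channel.
func runKubeletInBackground(lk LocalkubeServer) <-chan error {
	start := StartKubeletServer(lk)

	errCh := make(chan error, 1)
	go func() {
		// start() wraps kubelet.Run, which blocks until the kubelet stops.
		errCh <- start()
	}()
	return errCh
}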
Example #3
func main() {
	s := options.NewKubeletServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s, nil); err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
		os.Exit(1)
	}
}
Example #4
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := options.NewKubeletServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s, nil); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
Example #5
// NewKubelet creates a new hyperkube Server object that includes the
// description and flags.
func NewKubelet() *Server {
	s := options.NewKubeletServer()
	hks := Server{
		SimpleUsage: "kubelet",
		Long: `The kubelet binary is responsible for maintaining a set of containers on a
		particular node. It syncs data from a variety of sources including a
		Kubernetes API server, an etcd cluster, HTTP endpoint or local file. It then
		queries Docker to see what is currently running.  It synchronizes the
		configuration data, with the running set of containers by starting or stopping
		Docker containers.`,
		Run: func(_ *Server, _ []string) error {
			return app.Run(s, nil)
		},
	}
	s.AddFlags(hks.Flags())
	return &hks
}
Example #6
// RunKubelet starts the Kubelet.
func (c *NodeConfig) RunKubelet() {
	var clusterDNS net.IP
	if c.KubeletServer.ClusterDNS == "" {
		if service, err := c.Client.Services(kapi.NamespaceDefault).Get("kubernetes"); err == nil {
			if includesServicePort(service.Spec.Ports, 53, "dns") {
				// Use master service if service includes "dns" port 53.
				clusterDNS = net.ParseIP(service.Spec.ClusterIP)
			}
		}
	}
	if clusterDNS == nil {
		if endpoint, err := c.Client.Endpoints(kapi.NamespaceDefault).Get("kubernetes"); err == nil {
			if endpointIP, ok := firstEndpointIPWithNamedPort(endpoint, 53, "dns"); ok {
				// Use first endpoint if endpoint includes "dns" port 53.
				clusterDNS = net.ParseIP(endpointIP)
			} else if endpointIP, ok := firstEndpointIP(endpoint, 53); ok {
				// Test and use first endpoint if endpoint includes any port 53.
				if err := cmdutil.WaitForSuccessfulDial(false, "tcp", fmt.Sprintf("%s:%d", endpointIP, 53), 50*time.Millisecond, 0, 2); err == nil {
					clusterDNS = net.ParseIP(endpointIP)
				}
			}
		}
	}
	if clusterDNS != nil && !clusterDNS.IsUnspecified() {
		c.KubeletServer.ClusterDNS = clusterDNS.String()
	}

	c.KubeletDeps.DockerClient = c.DockerClient
	// updated by NodeConfig.EnsureVolumeDir
	c.KubeletServer.RootDirectory = c.VolumeDir

	// hook for overriding the cadvisor interface for integration tests
	c.KubeletDeps.CAdvisorInterface = defaultCadvisorInterface
	// hook for overriding the container manager interface for integration tests
	c.KubeletDeps.ContainerManager = defaultContainerManagerInterface

	go func() {
		glog.Fatal(kubeletapp.Run(c.KubeletServer, c.KubeletDeps))
	}()
}
Example #7
func (s *KubeletExecutorServer) runKubelet(
	nodeInfos <-chan executor.NodeInfo,
	kubeletDone chan<- struct{},
	staticPodsConfigPath string,
	apiclient *clientset.Clientset,
	podLW *cache.ListWatch,
	registry executor.Registry,
	executorDone <-chan struct{},
) (err error) {
	defer func() {
		if err != nil {
			// close the channel here. When Run returns without error, the executorKubelet is
			// responsible for doing so. If it returns with an error, we are responsible here.
			close(kubeletDone)
		}
	}()

	kcfg, err := kubeletapp.UnsecuredKubeletConfig(s.KubeletServer)
	if err != nil {
		return err
	}

	// apply Mesos specific settings
	kcfg.Builder = func(kc *kubeletapp.KubeletConfig) (kubeletapp.KubeletBootstrap, *kconfig.PodConfig, error) {
		k, pc, err := kubeletapp.CreateAndInitKubelet(kc)
		if err != nil {
			return k, pc, err
		}

		// decorate kubelet such that it shuts down when the executor is done
		decorated := &executorKubelet{
			Kubelet:      k.(*kubelet.Kubelet),
			kubeletDone:  kubeletDone,
			executorDone: executorDone,
		}

		return decorated, pc, nil
	}
	kcfg.DockerDaemonContainer = "" // don't move the docker daemon into a cgroup
	kcfg.Hostname = kcfg.HostnameOverride
	kcfg.KubeClient = apiclient

	// taken from KubeletServer#Run(*KubeletConfig)
	eventClientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
	if err != nil {
		return err
	}

	// make a separate client for events
	eventClientConfig.QPS = s.EventRecordQPS
	eventClientConfig.Burst = s.EventBurst
	kcfg.EventClient, err = clientset.NewForConfig(eventClientConfig)
	if err != nil {
		return err
	}

	kcfg.NodeName = kcfg.HostnameOverride
	kcfg.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kcfg.Recorder) // override the default pod source
	kcfg.StandaloneMode = false
	kcfg.SystemContainer = "" // don't take control over other system processes.
	if kcfg.Cloud != nil {
		// fail early and hard because having the cloud provider loaded would go unnoticed,
		// but would break bigger clusters because accessing the state.json from every slave kills the master.
		panic("cloud provider must not be set")
	}

	// create a custom cAdvisor interface which returns the resource values that Mesos reports
	ni := <-nodeInfos
	cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, s.CAdvisorPort)
	if err != nil {
		return err
	}

	kcfg.CAdvisorInterface = cAdvisorInterface
	kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, cAdvisorInterface)
	if err != nil {
		return err
	}

	go func() {
		for ni := range nodeInfos {
			// TODO(sttts): implement with MachineAllocable mechanism when https://github.com/kubernetes/kubernetes/issues/13984 is finished
			log.V(3).Infof("ignoring updated node resources: %v", ni)
		}
	}()

	// create main pod source, it will stop generating events once executorDone is closed
	newSourceMesos(executorDone, kcfg.PodConfig.Channel(mesosSource), podLW, registry)

	// create static-pods directory file source
	log.V(2).Infof("initializing static pods source factory, configured at path %q", staticPodsConfigPath)
	fileSourceUpdates := kcfg.PodConfig.Channel(kubetypes.FileSource)
	kconfig.NewSourceFile(staticPodsConfigPath, kcfg.HostnameOverride, kcfg.FileCheckFrequency, fileSourceUpdates)

	// run the kubelet
	// NOTE: because kcfg != nil holds, the upstream Run function will not
	//       initialize the cloud provider. We explicitly wouldn't want
	//       that because then every kubelet instance would query the master
	//       state.json which does not scale.
	err = kubeletapp.Run(s.KubeletServer, kcfg)
	return
}
Example #8
func (s *KubeletExecutorServer) runKubelet(
	nodeInfos <-chan executor.NodeInfo,
	kubeletDone chan<- struct{},
	staticPodsConfigPath string,
	apiclient *clientset.Clientset,
	podLW *cache.ListWatch,
	registry executor.Registry,
	executorDone <-chan struct{},
) (err error) {
	defer func() {
		if err != nil {
			// close the channel here. When Run returns without error, the executorKubelet is
			// responsible for doing so. If it returns with an error, we are responsible here.
			close(kubeletDone)
		}
	}()

	kubeDeps, err := kubeletapp.UnsecuredKubeletDeps(s.KubeletServer)
	if err != nil {
		return err
	}

	// apply Mesos specific settings
	kubeDeps.Builder = func(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, standaloneMode bool) (kubelet.KubeletBootstrap, error) {
		k, err := kubeletapp.CreateAndInitKubelet(kubeCfg, kubeDeps, standaloneMode)
		if err != nil {
			return k, err
		}

		// decorate kubelet such that it shuts down when the executor is done
		decorated := &executorKubelet{
			Kubelet:      k.(*kubelet.Kubelet),
			kubeletDone:  kubeletDone,
			executorDone: executorDone,
		}

		return decorated, nil
	}
	s.RuntimeCgroups = "" // don't move the docker daemon into a cgroup
	kubeDeps.KubeClient = apiclient

	// taken from KubeletServer#Run(*KubeletConfig)
	eventClientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
	if err != nil {
		return err
	}

	// make a separate client for events
	eventClientConfig.QPS = float32(s.EventRecordQPS)
	eventClientConfig.Burst = int(s.EventBurst)
	kubeDeps.EventClient, err = clientset.NewForConfig(eventClientConfig)
	if err != nil {
		return err
	}

	kubeDeps.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kubeDeps.Recorder) // override the default pod source

	s.SystemCgroups = "" // don't take control over other system processes.

	if kubeDeps.Cloud != nil {
		// fail early and hard because having the cloud provider loaded would go unnoticed,
		// but would break bigger clusters because accessing the state.json from every slave kills the master.
		panic("cloud provider must not be set")
	}

	// create a custom cAdvisor interface which returns the resource values that Mesos reports
	ni := <-nodeInfos
	cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, uint(s.CAdvisorPort), s.ContainerRuntime)
	if err != nil {
		return err
	}

	kubeDeps.CAdvisorInterface = cAdvisorInterface
	kubeDeps.ContainerManager, err = cm.NewContainerManager(kubeDeps.Mounter, cAdvisorInterface, cm.NodeConfig{
		RuntimeCgroupsName: s.RuntimeCgroups,
		SystemCgroupsName:  s.SystemCgroups,
		KubeletCgroupsName: s.KubeletCgroups,
		ContainerRuntime:   s.ContainerRuntime,
	})
	if err != nil {
		return err
	}

	go func() {
		for ni := range nodeInfos {
			// TODO(sttts): implement with MachineAllocable mechanism when https://github.com/kubernetes/kubernetes/issues/13984 is finished
			log.V(3).Infof("ignoring updated node resources: %v", ni)
		}
	}()

	// create main pod source, it will stop generating events once executorDone is closed
	var containerOptions []podsource.Option
	if s.containerID != "" {
		// tag all pod containers with the containerID so that they can be properly GC'd by Mesos
		containerOptions = append(containerOptions, podsource.ContainerEnvOverlay([]api.EnvVar{
			{Name: envContainerID, Value: s.containerID},
		}))
		kubeDeps.ContainerRuntimeOptions = append(kubeDeps.ContainerRuntimeOptions,
			dockertools.PodInfraContainerEnv(map[string]string{
				envContainerID: s.containerID,
			}))
	}

	podsource.Mesos(executorDone, kubeDeps.PodConfig.Channel(podsource.MesosSource), podLW, registry, containerOptions...)

	// create static-pods directory file source
	log.V(2).Infof("initializing static pods source factory, configured at path %q", staticPodsConfigPath)
	fileSourceUpdates := kubeDeps.PodConfig.Channel(kubetypes.FileSource)
	kconfig.NewSourceFile(staticPodsConfigPath, s.HostnameOverride, s.FileCheckFrequency.Duration, fileSourceUpdates)

	// run the kubelet
	// NOTE: because kubeDeps != nil holds, the upstream Run function will not
	//       initialize the cloud provider. We explicitly wouldn't want
	//       that because then every kubelet instance would query the master
	//       state.json which does not scale.
	s.KubeletServer.LockFilePath = "" // disable lock file
	err = kubeletapp.Run(s.KubeletServer, kubeDeps)
	return
}