func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig { // source of all configuration cfg := config.NewPodConfig(config.PodConfigNotificationIncremental, kc.Recorder) // define file config source if kc.ConfigFile != "" { glog.Infof("Adding manifest file: %v", kc.ConfigFile) config.NewSourceFile(kc.ConfigFile, kc.NodeName, kc.FileCheckFrequency, cfg.Channel(kubetypes.FileSource)) } // define url config source if kc.ManifestURL != "" { glog.Infof("Adding manifest url %q with HTTP header %v", kc.ManifestURL, kc.ManifestURLHeader) config.NewSourceURL(kc.ManifestURL, kc.ManifestURLHeader, kc.NodeName, kc.HTTPCheckFrequency, cfg.Channel(kubetypes.HTTPSource)) } if kc.KubeClient != nil { glog.Infof("Watching apiserver") config.NewSourceApiserver(kc.KubeClient, kc.NodeName, cfg.Channel(kubetypes.ApiserverSource)) } return cfg }
// createAndInitKubelet constructs the main kubelet together with the Mesos
// executor and the Mesos executor driver that feed it pod updates. Pods
// arrive through the MESOS_CFG_SOURCE channel of the returned PodConfig;
// a second file-based source for static pods is registered lazily from the
// executor's InitializeStaticPodsSource callback. Returns the (decorated)
// kubelet bootstrap, the shared PodConfig, and any construction error.
func (ks *KubeletExecutorServer) createAndInitKubelet(
	kc *app.KubeletConfig,
	hks hyperkube.Interface,
	clientConfig *client.Config,
) (app.KubeletBootstrap, *kconfig.PodConfig, error) {

	// TODO(k8s): block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations

	// TODO(k8s): KubeletConfig.KubeClient should be a client interface, but client interface misses certain methods
	// used by kubelet. Since NewMainKubelet expects a client interface, we need to make sure we are not passing
	// a nil pointer to it when what we really want is a nil interface.
	var kubeClient client.Interface
	if kc.KubeClient == nil {
		// Assign literal nil so NewMainKubelet sees a nil interface, not a
		// non-nil interface wrapping a typed nil pointer.
		kubeClient = nil
	} else {
		kubeClient = kc.KubeClient
	}

	// Container garbage-collection policy, copied straight from the config.
	gcPolicy := kubelet.ContainerGCPolicy{
		MinAge:             kc.MinimumGCAge,
		MaxPerPodContainer: kc.MaxPerPodContainerCount,
		MaxContainers:      kc.MaxContainerCount,
	}

	// Pod config shared between the kubelet and the executor; the executor
	// pushes Mesos-launched pods through the MESOS_CFG_SOURCE channel.
	pc := kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kc.Recorder)
	updates := pc.Channel(MESOS_CFG_SOURCE)

	// NOTE: argument order below is positional and must match the
	// NewMainKubelet signature exactly.
	klet, err := kubelet.NewMainKubelet(
		kc.Hostname,
		kc.NodeName,
		kc.DockerClient,
		kubeClient,
		kc.RootDirectory,
		kc.PodInfraContainerImage,
		kc.SyncFrequency,
		float32(kc.RegistryPullQPS),
		kc.RegistryBurst,
		kc.EventRecordQPS,
		kc.EventBurst,
		gcPolicy,
		pc.SeenAllSources,
		kc.RegisterNode,
		kc.StandaloneMode,
		kc.ClusterDomain,
		net.IP(kc.ClusterDNS),
		kc.MasterServiceNamespace,
		kc.VolumePlugins,
		kc.NetworkPlugins,
		kc.NetworkPluginName,
		kc.StreamingConnectionIdleTimeout,
		kc.Recorder,
		kc.CAdvisorInterface,
		kc.ImageGCPolicy,
		kc.DiskSpacePolicy,
		kc.Cloud,
		kc.NodeStatusUpdateFrequency,
		kc.ResourceContainer,
		kc.OSInterface,
		kc.CgroupRoot,
		kc.ContainerRuntime,
		kc.RktPath,
		kc.RktStage1Image,
		kc.Mounter,
		kc.Writer,
		kc.DockerDaemonContainer,
		kc.SystemContainer,
		kc.ConfigureCBR0,
		kc.PodCIDR,
		kc.MaxPods,
		kc.DockerExecHandler,
		kc.ResolverConfig,
		kc.CPUCFSQuota,
		&api.NodeDaemonEndpoints{
			KubeletEndpoint: api.DaemonEndpoint{Port: int(kc.Port)},
		},
		kc.OOMAdjuster,
	)
	if err != nil {
		return nil, nil, err
	}

	//TODO(jdef) either configure Watch here with something useful, or else
	// get rid of it from executor.Config

	// kubeletFinished is closed by the kubelet side to signal the executor.
	kubeletFinished := make(chan struct{})
	// Static pods are unpacked into a subdirectory of the kubelet root dir.
	staticPodsConfigPath := filepath.Join(kc.RootDirectory, "static-pods")
	exec := executor.New(executor.Config{
		Kubelet:           klet,
		Updates:           updates,
		SourceName:        MESOS_CFG_SOURCE,
		APIClient:         kc.KubeClient,
		Docker:            kc.DockerClient,
		SuicideTimeout:    ks.SuicideTimeout,
		LaunchGracePeriod: ks.LaunchGracePeriod,
		KubeletFinished:   kubeletFinished,
		ExitFunc:          os.Exit,
		PodStatusFunc: func(_ executor.KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return klet.GetRuntime().GetPodStatus(pod)
		},
		StaticPodsConfigPath: staticPodsConfigPath,
		PodLW:                cache.NewListWatchFromClient(kc.KubeClient, "pods", api.NamespaceAll, fields.OneTermEqualSelector(client.PodHost, kc.NodeName)),
	})

	go exec.InitializeStaticPodsSource(func() {
		// Create file source only when we are called back. Otherwise, it is never marked unseen.
		fileSourceUpdates := pc.Channel(kubelet.FileSource)

		kconfig.NewSourceFile(staticPodsConfigPath, kc.Hostname, kc.FileCheckFrequency, fileSourceUpdates)
	})

	// Wrap the kubelet with executor-specific lifecycle state.
	k := &kubeletExecutor{
		Kubelet:         klet,
		address:         ks.Address,
		dockerClient:    kc.DockerClient,
		hks:             hks,
		kubeletFinished: kubeletFinished,
		executorDone:    exec.Done(),
		clientConfig:    clientConfig,
	}

	dconfig := bindings.DriverConfig{
		Executor:         exec,
		HostnameOverride: ks.HostnameOverride,
		BindingAddress:   ks.Address,
	}
	if driver, err := bindings.NewMesosExecutorDriver(dconfig); err != nil {
		log.Fatalf("failed to create executor driver: %v", err)
	} else {
		k.driver = driver
	}

	log.V(2).Infof("Initialize executor driver...")

	k.BirthCry()
	exec.Init(k.driver)

	k.StartGarbageCollection()

	return k, pc, nil
}
// TestExecutorStaticPods test that the ExecutorInfo.data is parsed
// as a zip archive with pod definitions.
func TestExecutorStaticPods(t *testing.T) {
	// create some zip with static pod definition
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)

	// createStaticPodFile adds one pod manifest to the archive under
	// fileName, substituting id into metadata.name and name into the
	// container name.
	createStaticPodFile := func(fileName, id, name string) {
		w, err := zw.Create(fileName)
		assert.NoError(t, err)

		spod := `{
			"apiVersion": "v1",
			"kind": "Pod",
			"metadata": {
				"name": "%v",
				"labels": { "name": "foo", "cluster": "bar" }
			},
			"spec": {
				"containers": [{
					"name": "%v",
					"image": "library/nginx",
					"ports": [{ "containerPort": 80, "name": "http" }],
					"livenessProbe": {
						"enabled": true,
						"type": "http",
						"initialDelaySeconds": 30,
						"httpGet": { "path": "/", "port": 80 }
					}
				}]
			}
		}`
		_, err = w.Write([]byte(fmt.Sprintf(spod, id, name)))
		assert.NoError(t, err)
	}

	createStaticPodFile("spod.json", "spod-id-01", "spod-01")
	createStaticPodFile("spod2.json", "spod-id-02", "spod-02")
	createStaticPodFile("dir/spod.json", "spod-id-03", "spod-03") // same file name as first one to check for overwriting
	expectedStaticPodsNum := 2                                    // subdirectories are ignored by FileSource, hence only 2

	err := zw.Close()
	assert.NoError(t, err)

	// create fake apiserver
	testApiServer := NewTestServer(t, api.NamespaceDefault, nil)
	defer testApiServer.server.Close()

	// temporary directory which is normally located in the executor sandbox
	staticPodsConfigPath, err := ioutil.TempDir("/tmp", "executor-k8sm-archive")
	assert.NoError(t, err)
	defer os.RemoveAll(staticPodsConfigPath)

	mockDriver := &MockExecutorDriver{}
	// updates is where the static-pod file source publishes PodUpdates;
	// buffered generously so the source never blocks the test.
	updates := make(chan interface{}, 1024)
	config := Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: make(chan interface{}, 1), // allow kube-executor source to proceed past init
		APIClient: client.NewOrDie(&client.Config{
			Host:    testApiServer.server.URL,
			Version: testapi.Default.Version(),
		}),
		Kubelet: &kubelet.Kubelet{},
		// Report every pod as running so the executor is satisfied.
		PodStatusFunc: func(kl KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return &api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{
					{
						Name: "foo",
						State: api.ContainerState{
							Running: &api.ContainerStateRunning{},
						},
					},
				},
				Phase: api.PodRunning,
			}, nil
		},
		StaticPodsConfigPath: staticPodsConfigPath,
	}
	executor := New(config)
	hostname := "h1"
	go executor.InitializeStaticPodsSource(func() {
		// The callback fires after the archive has been unpacked; only then
		// is the file source created.
		kconfig.NewSourceFile(staticPodsConfigPath, hostname, 1*time.Second, updates)
	})

	// create ExecutorInfo with static pod zip in data field
	executorInfo := mesosutil.NewExecutorInfo(
		mesosutil.NewExecutorID("ex1"),
		mesosutil.NewCommandInfo("k8sm-executor"),
	)
	executorInfo.Data = buf.Bytes()

	// start the executor with the static pod data
	executor.Init(mockDriver)
	executor.Registered(mockDriver, executorInfo, nil, nil)

	// wait for static pod to start
	seenPods := map[string]struct{}{}
	timeout := time.After(time.Second)
	defer mockDriver.AssertExpectations(t)
	for {
		// filter by PodUpdate type
		select {
		case <-timeout:
			t.Fatalf("Executor should send pod updates for %v pods, only saw %v", expectedStaticPodsNum, len(seenPods))
		case update, ok := <-updates:
			if !ok {
				return
			}
			podUpdate, ok := update.(kubelet.PodUpdate)
			if !ok {
				continue
			}
			// Record every pod name seen; the test passes once all distinct
			// expected static pods have been observed.
			for _, pod := range podUpdate.Pods {
				seenPods[pod.Name] = struct{}{}
			}
			if len(seenPods) == expectedStaticPodsNum {
				return
			}
		}
	}
}
// runKubelet configures and runs a kubelet tailored to life under the Mesos
// executor: the apiserver source is replaced by a Mesos pod source plus a
// static-pods file source, the cloud provider is forbidden, and cAdvisor is
// swapped for one that reports the resources Mesos assigned to this node.
// On error the kubeletDone channel is closed here; on success the decorated
// executorKubelet owns that responsibility. Blocks in kubeletapp.Run.
func (s *KubeletExecutorServer) runKubelet(
	nodeInfos <-chan executor.NodeInfo,
	kubeletDone chan<- struct{},
	staticPodsConfigPath string,
	apiclient *clientset.Clientset,
	podLW *cache.ListWatch,
	registry executor.Registry,
	executorDone <-chan struct{},
) (err error) {
	defer func() {
		if err != nil {
			// close the channel here. When Run returns without error, the executorKubelet is
			// responsible to do this. If it returns with an error, we are responsible here.
			close(kubeletDone)
		}
	}()

	kcfg, err := kubeletapp.UnsecuredKubeletConfig(s.KubeletServer)
	if err != nil {
		return err
	}

	// apply Mesos specific settings
	kcfg.Builder = func(kc *kubeletapp.KubeletConfig) (kubeletapp.KubeletBootstrap, *kconfig.PodConfig, error) {
		k, pc, err := kubeletapp.CreateAndInitKubelet(kc)
		if err != nil {
			return k, pc, err
		}

		// decorate kubelet such that it shuts down when the executor is done
		decorated := &executorKubelet{
			Kubelet:      k.(*kubelet.Kubelet),
			kubeletDone:  kubeletDone,
			executorDone: executorDone,
		}

		return decorated, pc, nil
	}
	kcfg.DockerDaemonContainer = "" // don't move the docker daemon into a cgroup
	kcfg.Hostname = kcfg.HostnameOverride
	kcfg.KubeClient = apiclient

	// taken from KubeletServer#Run(*KubeletConfig)
	eventClientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
	if err != nil {
		return err
	}

	// make a separate client for events
	eventClientConfig.QPS = s.EventRecordQPS
	eventClientConfig.Burst = s.EventBurst
	kcfg.EventClient, err = clientset.NewForConfig(eventClientConfig)
	if err != nil {
		return err
	}

	kcfg.NodeName = kcfg.HostnameOverride
	kcfg.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kcfg.Recorder) // override the default pod source
	kcfg.StandaloneMode = false
	kcfg.SystemContainer = "" // don't take control over other system processes.

	if kcfg.Cloud != nil {
		// fail early and hard because having the cloud provider loaded would go unnoticed,
		// but break bigger cluster because accessing the state.json from every slave kills the master.
		panic("cloud provider must not be set")
	}

	// create custom cAdvisor interface which return the resource values that Mesos reports
	ni := <-nodeInfos
	cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, s.CAdvisorPort)
	if err != nil {
		return err
	}

	kcfg.CAdvisorInterface = cAdvisorInterface
	kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, cAdvisorInterface)
	if err != nil {
		return err
	}

	// Drain subsequent node-info updates; only the first one is consumed.
	go func() {
		for ni := range nodeInfos {
			// TODO(sttts): implement with MachineAllocable mechanism when https://github.com/kubernetes/kubernetes/issues/13984 is finished
			log.V(3).Infof("ignoring updated node resources: %v", ni)
		}
	}()

	// create main pod source, it will stop generating events once executorDone is closed
	newSourceMesos(executorDone, kcfg.PodConfig.Channel(mesosSource), podLW, registry)

	// create static-pods directory file source
	log.V(2).Infof("initializing static pods source factory, configured at path %q", staticPodsConfigPath)
	fileSourceUpdates := kcfg.PodConfig.Channel(kubetypes.FileSource)
	kconfig.NewSourceFile(staticPodsConfigPath, kcfg.HostnameOverride, kcfg.FileCheckFrequency, fileSourceUpdates)

	// run the kubelet
	// NOTE: because kcfg != nil holds, the upstream Run function will not
	//       initialize the cloud provider. We explicitly wouldn't want
	//       that because then every kubelet instance would query the master
	//       state.json which does not scale.
	err = kubeletapp.Run(s.KubeletServer, kcfg)
	return
}
func (s *KubeletExecutorServer) runKubelet(execUpdates <-chan kubetypes.PodUpdate, nodeInfos <-chan executor.NodeInfo, kubeletDone chan<- struct{}, staticPodsConfigPath string, apiclient *client.Client) error { kcfg, err := s.UnsecuredKubeletConfig() if err == nil { // apply Messo specific settings executorDone := make(chan struct{}) kcfg.Builder = func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) { k, pc, err := app.CreateAndInitKubelet(kc) if err != nil { return k, pc, err } klet := k.(*kubelet.Kubelet) s.kletLock.Lock() s.klet = klet s.kletLock.Unlock() // decorate kubelet such that it shuts down when the executor is decorated := &executorKubelet{ Kubelet: klet, kubeletDone: kubeletDone, executorDone: executorDone, } return decorated, pc, nil } kcfg.DockerDaemonContainer = "" // don't move the docker daemon into a cgroup kcfg.Hostname = kcfg.HostnameOverride kcfg.KubeClient = apiclient kcfg.NodeName = kcfg.HostnameOverride kcfg.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kcfg.Recorder) // override the default pod source kcfg.StandaloneMode = false kcfg.SystemContainer = "" // don't take control over other system processes. if kcfg.Cloud != nil { // fail early and hard because having the cloud provider loaded would go unnoticed, // but break bigger cluster because accessing the state.json from every slave kills the master. 
panic("cloud provider must not be set") } // create custom cAdvisor interface which return the resource values that Mesos reports ni := <-nodeInfos cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, s.CAdvisorPort) if err != nil { return err } kcfg.CAdvisorInterface = cAdvisorInterface go func() { for ni := range nodeInfos { // TODO(sttts): implement with MachineAllocable mechanism when https://github.com/kubernetes/kubernetes/issues/13984 is finished log.V(3).Infof("ignoring updated node resources: %v", ni) } }() // create main pod source updates := kcfg.PodConfig.Channel(MESOS_CFG_SOURCE) go func() { // execUpdates will be closed by the executor on shutdown defer close(executorDone) for u := range execUpdates { u.Source = MESOS_CFG_SOURCE updates <- u } }() // create static-pods directory file source log.V(2).Infof("initializing static pods source factory, configured at path %q", staticPodsConfigPath) fileSourceUpdates := kcfg.PodConfig.Channel(kubetypes.FileSource) kconfig.NewSourceFile(staticPodsConfigPath, kcfg.HostnameOverride, kcfg.FileCheckFrequency, fileSourceUpdates) // run the kubelet, until execUpdates is closed // NOTE: because kcfg != nil holds, the upstream Run function will not // initialize the cloud provider. We explicitly wouldn't want // that because then every kubelet instance would query the master // state.json which does not scale. err = s.KubeletServer.Run(kcfg) } if err != nil { // close the channel here. When Run returns without error, the executorKubelet is // responsible to do this. If it returns with an error, we are responsible here. close(kubeletDone) } return err }
// runKubelet configures and runs a kubelet under the Mesos executor using the
// KubeletDeps-based upstream API: the default pod sources are replaced by a
// Mesos pod source plus a static-pods file source, the cloud provider is
// forbidden, cAdvisor is swapped for one reporting Mesos-assigned resources,
// and pod infra containers are tagged with the executor's container ID so
// Mesos can garbage-collect them. On error kubeletDone is closed here; on
// success the decorated executorKubelet owns that. Blocks in kubeletapp.Run.
func (s *KubeletExecutorServer) runKubelet(
	nodeInfos <-chan executor.NodeInfo,
	kubeletDone chan<- struct{},
	staticPodsConfigPath string,
	apiclient *clientset.Clientset,
	podLW *cache.ListWatch,
	registry executor.Registry,
	executorDone <-chan struct{},
) (err error) {
	defer func() {
		if err != nil {
			// close the channel here. When Run returns without error, the executorKubelet is
			// responsible to do this. If it returns with an error, we are responsible here.
			close(kubeletDone)
		}
	}()

	kubeDeps, err := kubeletapp.UnsecuredKubeletDeps(s.KubeletServer)
	if err != nil {
		return err
	}

	// apply Mesos specific settings
	kubeDeps.Builder = func(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, standaloneMode bool) (kubelet.KubeletBootstrap, error) {
		k, err := kubeletapp.CreateAndInitKubelet(kubeCfg, kubeDeps, standaloneMode)
		if err != nil {
			return k, err
		}

		// decorate kubelet such that it shuts down when the executor is done
		decorated := &executorKubelet{
			Kubelet:      k.(*kubelet.Kubelet),
			kubeletDone:  kubeletDone,
			executorDone: executorDone,
		}

		return decorated, nil
	}
	s.RuntimeCgroups = "" // don't move the docker daemon into a cgroup
	kubeDeps.KubeClient = apiclient

	// taken from KubeletServer#Run(*KubeletConfig)
	eventClientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
	if err != nil {
		return err
	}

	// make a separate client for events
	eventClientConfig.QPS = float32(s.EventRecordQPS)
	eventClientConfig.Burst = int(s.EventBurst)
	kubeDeps.EventClient, err = clientset.NewForConfig(eventClientConfig)
	if err != nil {
		return err
	}

	kubeDeps.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kubeDeps.Recorder) // override the default pod source

	s.SystemCgroups = "" // don't take control over other system processes.

	if kubeDeps.Cloud != nil {
		// fail early and hard because having the cloud provider loaded would go unnoticed,
		// but break bigger cluster because accessing the state.json from every slave kills the master.
		panic("cloud provider must not be set")
	}

	// create custom cAdvisor interface which return the resource values that Mesos reports
	ni := <-nodeInfos
	cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, uint(s.CAdvisorPort), s.ContainerRuntime)
	if err != nil {
		return err
	}

	kubeDeps.CAdvisorInterface = cAdvisorInterface
	kubeDeps.ContainerManager, err = cm.NewContainerManager(kubeDeps.Mounter, cAdvisorInterface, cm.NodeConfig{
		RuntimeCgroupsName: s.RuntimeCgroups,
		SystemCgroupsName:  s.SystemCgroups,
		KubeletCgroupsName: s.KubeletCgroups,
		ContainerRuntime:   s.ContainerRuntime,
	})
	if err != nil {
		return err
	}

	// Drain subsequent node-info updates; only the first one is consumed.
	go func() {
		for ni := range nodeInfos {
			// TODO(sttts): implement with MachineAllocable mechanism when https://github.com/kubernetes/kubernetes/issues/13984 is finished
			log.V(3).Infof("ignoring updated node resources: %v", ni)
		}
	}()

	// create main pod source, it will stop generating events once executorDone is closed
	var containerOptions []podsource.Option
	if s.containerID != "" {
		// tag all pod containers with the containerID so that they can be properly GC'd by Mesos
		containerOptions = append(containerOptions, podsource.ContainerEnvOverlay([]api.EnvVar{
			{Name: envContainerID, Value: s.containerID},
		}))
		kubeDeps.ContainerRuntimeOptions = append(kubeDeps.ContainerRuntimeOptions, dockertools.PodInfraContainerEnv(map[string]string{
			envContainerID: s.containerID,
		}))
	}

	podsource.Mesos(executorDone, kubeDeps.PodConfig.Channel(podsource.MesosSource), podLW, registry, containerOptions...)

	// create static-pods directory file source
	log.V(2).Infof("initializing static pods source factory, configured at path %q", staticPodsConfigPath)
	fileSourceUpdates := kubeDeps.PodConfig.Channel(kubetypes.FileSource)
	kconfig.NewSourceFile(staticPodsConfigPath, s.HostnameOverride, s.FileCheckFrequency.Duration, fileSourceUpdates)

	// run the kubelet
	// NOTE: because kubeDeps != nil holds, the upstream Run function will not
	//       initialize the cloud provider. We explicitly wouldn't want
	//       that because then every kubelet instance would query the master
	//       state.json which does not scale.
	s.KubeletServer.LockFilePath = "" // disable lock file
	err = kubeletapp.Run(s.KubeletServer, kubeDeps)
	return
}