// Runs containerGCTest using the docker runtime.
func dockerContainerGCTest(f *framework.Framework, test testRun) {
	var runtime docker.DockerInterface
	BeforeEach(func() {
		runtime = docker.ConnectToDockerOrDie(defaultDockerEndpoint, defaultRuntimeRequestTimeoutDuration, defaultImagePullProgressDeadline)
	})

	for _, pod := range test.testPods {
		// Initialize the getContainerNames function to use the dockertools api
		thisPrefix := pod.containerPrefix
		pod.getContainerNames = func() ([]string, error) {
			relevantContainers := []string{}
			dockerContainers, err := docker.GetKubeletDockerContainers(runtime, true)
			if err != nil {
				return relevantContainers, err
			}
			for _, container := range dockerContainers {
				// only look for containers from this testspec
				if strings.Contains(container.Names[0], thisPrefix) {
					relevantContainers = append(relevantContainers, container.Names[0])
				}
			}
			return relevantContainers, nil
		}
	}
	containerGCTest(f, test)
}
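The `thisPrefix := pod.containerPrefix` copy above matters: the closure must capture a per-iteration value rather than the loop variable itself. A minimal, self-contained sketch of the pitfall it avoids (hypothetical names, not from the test suite):

package main

import "fmt"

func main() {
	prefixes := []string{"a", "b", "c"}

	var broken []func() string
	for _, p := range prefixes {
		// All closures share the single loop variable p (pre-Go 1.22
		// semantics), so each one observes its final value.
		broken = append(broken, func() string { return p })
	}

	var fixed []func() string
	for _, p := range prefixes {
		p := p // per-iteration copy, the same trick as thisPrefix above
		fixed = append(fixed, func() string { return p })
	}

	fmt.Println(broken[0](), fixed[0]()) // pre-Go 1.22: "c a"; Go 1.22+: "a a"
}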
// TestExecutorRegister ensures that the executor thinks it is connected
// after Register is called.
func TestExecutorRegister(t *testing.T) {
	mockDriver := &MockExecutorDriver{}
	updates := make(chan interface{}, 1024)
	executor := New(Config{
		Docker:     dockertools.ConnectToDockerOrDie("fake://"),
		Updates:    updates,
		SourceName: "executor_test",
	})

	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	initialPodUpdate := kubelet.PodUpdate{
		Pods:   []*api.Pod{},
		Op:     kubelet.SET,
		Source: executor.sourcename,
	}
	receivedInitialPodUpdate := false
	select {
	case m := <-updates:
		update, ok := m.(kubelet.PodUpdate)
		if ok {
			if reflect.DeepEqual(initialPodUpdate, update) {
				receivedInitialPodUpdate = true
			}
		}
	case <-time.After(time.Second):
	}
	assert.Equal(t, true, receivedInitialPodUpdate,
		"executor should have sent an initial PodUpdate "+
			"to the updates chan upon registration")

	assert.Equal(t, true, executor.isConnected(), "executor should be connected")
	mockDriver.AssertExpectations(t)
}
// TestExecutorShutdown ensures that the executor properly shuts down
// when Shutdown is called.
func TestExecutorShutdown(t *testing.T) {
	var (
		mockDriver      = &MockExecutorDriver{}
		kubeletFinished = make(chan struct{})
		exitCalled      = int32(0)
		executor        = New(Config{
			Docker:    dockertools.ConnectToDockerOrDie("fake://", 0),
			NodeInfos: make(chan NodeInfo, 1),
			ShutdownAlert: func() {
				close(kubeletFinished)
			},
			KubeletFinished: kubeletFinished,
			ExitFunc: func(_ int) {
				atomic.AddInt32(&exitCalled, 1)
			},
			Registry: newFakeRegistry(),
		})
	)

	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	mockDriver.On("Stop").Return(mesosproto.Status_DRIVER_STOPPED, nil).Once()

	executor.Shutdown(mockDriver)

	assert.Equal(t, false, executor.isConnected(),
		"executor should not be connected after Shutdown")
	assert.Equal(t, true, executor.isDone(),
		"executor should be in Done state after Shutdown")
	assert.Equal(t, true, atomic.LoadInt32(&exitCalled) > 0,
		"the executor should call its ExitFunc when it is ready to close down")

	mockDriver.AssertExpectations(t)
}
func NewTestKubernetesExecutor() (*KubernetesExecutor, chan kubetypes.PodUpdate) {
	updates := make(chan kubetypes.PodUpdate, 1024)
	return New(Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: updates,
		PodLW:   &NewMockPodsListWatch(api.PodList{}).ListWatch,
	}), updates
}
func (s *KubeletExecutorServer) runExecutor(
	nodeInfos chan<- executor.NodeInfo,
	kubeletFinished <-chan struct{},
	staticPodsConfigPath string,
	apiclient *clientset.Clientset,
	registry executor.Registry,
) (<-chan struct{}, error) {
	staticPodFilters := podutil.Filters{
		// annotate the pod with BindingHostKey so that the scheduler will ignore the pod
		// once it appears in the pod registry. the stock kubelet sets the pod host in order
		// to accomplish the same; we do this because the k8sm scheduler works differently.
		podutil.Annotator(map[string]string{
			meta.BindingHostKey: s.HostnameOverride,
		}),
	}
	if s.containerID != "" {
		// tag all pod containers with the containerID so that they can be properly GC'd by Mesos
		staticPodFilters = append(staticPodFilters, podutil.Environment([]api.EnvVar{
			{Name: envContainerID, Value: s.containerID},
		}))
	}

	exec := executor.New(executor.Config{
		Registry:        registry,
		APIClient:       apiclient,
		Docker:          dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		SuicideTimeout:  s.SuicideTimeout,
		KubeletFinished: kubeletFinished,
		ExitFunc:        os.Exit,
		NodeInfos:       nodeInfos,
		Options: []executor.Option{
			executor.StaticPods(staticPodsConfigPath, staticPodFilters),
		},
	})

	// initialize driver and initialize the executor with it
	dconfig := bindings.DriverConfig{
		Executor:         exec,
		HostnameOverride: s.HostnameOverride,
		BindingAddress:   net.ParseIP(s.Address),
	}
	driver, err := bindings.NewMesosExecutorDriver(dconfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create executor driver: %v", err)
	}
	log.V(2).Infof("Initialize executor driver...")
	exec.Init(driver)

	// start the driver
	go func() {
		if _, err := driver.Run(); err != nil {
			log.Fatalf("executor driver failed: %v", err)
		}
		log.Info("executor Run completed")
	}()

	return exec.Done(), nil
}
// GetKubeClient returns the Kubernetes Docker client.
func (_ *Helper) GetKubeClient() (*KubeDocker, string, error) {
	var endpoint string
	if len(os.Getenv("DOCKER_HOST")) > 0 {
		endpoint = os.Getenv("DOCKER_HOST")
	} else {
		endpoint = "unix:///var/run/docker.sock"
	}
	client := dockertools.ConnectToDockerOrDie(endpoint)
	originClient := &KubeDocker{client}
	return originClient, endpoint, nil
}
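The endpoint selection above is a plain environment-variable fallback. A standalone sketch of the same pattern (the helper name is hypothetical, not part of the package above):

package main

import (
	"fmt"
	"os"
)

// dockerEndpoint returns DOCKER_HOST if set, else the default Unix socket,
// mirroring the fallback in GetKubeClient above.
func dockerEndpoint() string {
	if host := os.Getenv("DOCKER_HOST"); host != "" {
		return host
	}
	return "unix:///var/run/docker.sock"
}

func main() {
	fmt.Println(dockerEndpoint())
}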
// TestExecutorShutdown ensures that the executor properly shuts down
// when Shutdown is called.
func TestExecutorShutdown(t *testing.T) {
	mockDriver := &MockExecutorDriver{}
	kubeletFinished := make(chan struct{})
	var exitCalled int32 = 0
	updates := make(chan kubetypes.PodUpdate, 1024)
	config := Config{
		Docker:    dockertools.ConnectToDockerOrDie("fake://"),
		Updates:   updates,
		NodeInfos: make(chan NodeInfo, 1),
		ShutdownAlert: func() {
			close(kubeletFinished)
		},
		KubeletFinished: kubeletFinished,
		ExitFunc: func(_ int) {
			atomic.AddInt32(&exitCalled, 1)
		},
		PodLW: &NewMockPodsListWatch(api.PodList{}).ListWatch,
	}
	executor := New(config)

	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	mockDriver.On("Stop").Return(mesosproto.Status_DRIVER_STOPPED, nil).Once()

	executor.Shutdown(mockDriver)

	assert.Equal(t, false, executor.isConnected(),
		"executor should not be connected after Shutdown")
	assert.Equal(t, true, executor.isDone(),
		"executor should be in Done state after Shutdown")

	// channel should be closed now, only a constant number of updates left
	num := len(updates)
drainLoop:
	for {
		select {
		case _, ok := <-updates:
			if !ok {
				break drainLoop
			}
			num -= 1
		default:
			t.Fatal("Updates chan should be closed after Shutdown")
		}
	}
	assert.Equal(t, num, 0, "Updates chan should get no new updates after Shutdown")

	assert.Equal(t, true, atomic.LoadInt32(&exitCalled) > 0,
		"the executor should call its ExitFunc when it is ready to close down")

	mockDriver.AssertExpectations(t)
}
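The labeled drain loop relies on two Go channel properties: receiving from a closed channel never blocks and reports ok == false, and a select with a default case never blocks. A self-contained sketch of the same drain pattern:

package main

import "fmt"

// drainClosed consumes whatever is buffered in ch and reports how many
// values were drained and whether the channel had been closed.
func drainClosed(ch chan int) (buffered int, wasClosed bool) {
	for {
		select {
		case _, ok := <-ch:
			if !ok {
				return buffered, true // closed and fully drained
			}
			buffered++
		default:
			return buffered, false // still open: a receive would block
		}
	}
}

func main() {
	ch := make(chan int, 4)
	ch <- 1
	ch <- 2
	close(ch)
	fmt.Println(drainClosed(ch)) // 2 true
}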
//TODO: do not expose kubelet implementation details after we refactor the runtime API.
func dockerRuntime() kubecontainer.Runtime {
	dockerClient := dockertools.ConnectToDockerOrDie("")
	pm := kubepod.NewBasicPodManager(nil)
	dm := dockertools.NewDockerManager(
		dockerClient,
		nil, nil, nil, pm, nil,
		"", 0, 0, "",
		nil, nil, nil, nil, nil, nil, nil,
		false, nil, true, false, false,
	)
	return dm
}
func (s *KubeletExecutorServer) runExecutor(
	nodeInfos chan<- executor.NodeInfo,
	kubeletFinished <-chan struct{},
	staticPodsConfigPath string,
	apiclient *clientset.Clientset,
	registry executor.Registry,
) (<-chan struct{}, error) {
	exec := executor.New(executor.Config{
		Registry:             registry,
		APIClient:            apiclient,
		Docker:               dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		SuicideTimeout:       s.SuicideTimeout,
		KubeletFinished:      kubeletFinished,
		ExitFunc:             os.Exit,
		StaticPodsConfigPath: staticPodsConfigPath,
		NodeInfos:            nodeInfos,
	})

	// initialize driver and initialize the executor with it
	dconfig := bindings.DriverConfig{
		Executor:         exec,
		HostnameOverride: s.HostnameOverride,
		BindingAddress:   net.ParseIP(s.Address),
	}
	driver, err := bindings.NewMesosExecutorDriver(dconfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create executor driver: %v", err)
	}
	log.V(2).Infof("Initialize executor driver...")
	exec.Init(driver)

	// start the driver
	go func() {
		if _, err := driver.Run(); err != nil {
			log.Fatalf("executor driver failed: %v", err)
		}
		log.Info("executor Run completed")
	}()

	return exec.Done(), nil
}
// UnsecuredKubeletDeps returns a KubeletDeps suitable for being run, or an error if the server setup
// is not valid. It will not start any background processes, and does not include authentication/authorization
func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error) {
	// Initialize the TLS Options
	tlsOptions, err := InitializeTLS(&s.KubeletConfiguration)
	if err != nil {
		return nil, err
	}

	mounter := mount.New(s.ExperimentalMounterPath)
	var writer kubeio.Writer = &kubeio.StdWriter{}
	if s.Containerized {
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
		writer = &kubeio.NsenterWriter{}
	}

	var dockerClient dockertools.DockerInterface
	if s.ContainerRuntime == "docker" {
		dockerClient = dockertools.ConnectToDockerOrDie(s.DockerEndpoint, s.RuntimeRequestTimeout.Duration,
			s.ImagePullProgressDeadline.Duration)
	} else {
		dockerClient = nil
	}

	return &kubelet.KubeletDeps{
		Auth:               nil, // default does not enforce auth[nz]
		CAdvisorInterface:  nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here
		Cloud:              nil, // cloud provider might start background processes
		ContainerManager:   nil,
		DockerClient:       dockerClient,
		KubeClient:         nil,
		ExternalKubeClient: nil,
		Mounter:            mounter,
		NetworkPlugins:     ProbeNetworkPlugins(s.NetworkPluginDir, s.CNIConfDir, s.CNIBinDir),
		OOMAdjuster:        oom.NewOOMAdjuster(),
		OSInterface:        kubecontainer.RealOS{},
		Writer:             writer,
		VolumePlugins:      ProbeVolumePlugins(s.VolumePluginDir),
		TLSOptions:         tlsOptions,
	}, nil
}
// TestExecutorShutdown ensures that the executor properly shuts down
// when Shutdown is called.
func TestExecutorShutdown(t *testing.T) {
	mockDriver := &MockExecutorDriver{}
	kubeletFinished := make(chan struct{})
	var exitCalled int32 = 0
	config := Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: make(chan interface{}, 1024),
		ShutdownAlert: func() {
			close(kubeletFinished)
		},
		KubeletFinished: kubeletFinished,
		ExitFunc: func(_ int) {
			atomic.AddInt32(&exitCalled, 1)
		},
	}
	executor := New(config)

	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	mockDriver.On("Stop").Return(mesosproto.Status_DRIVER_STOPPED, nil).Once()

	executor.Shutdown(mockDriver)

	assert.Equal(t, false, executor.isConnected(),
		"executor should not be connected after Shutdown")
	assert.Equal(t, true, executor.isDone(),
		"executor should be in Done state after Shutdown")

	select {
	case <-executor.Done():
	default:
		t.Fatal("done channel should be closed after shutdown")
	}

	assert.Equal(t, true, atomic.LoadInt32(&exitCalled) > 0,
		"the executor should call its ExitFunc when it is ready to close down")

	mockDriver.AssertExpectations(t)
}
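The non-blocking select on executor.Done() is the idiomatic way to assert that a signal channel has been closed. A standalone sketch of that check:

package main

import "fmt"

// closedNow reports whether done has been closed, without blocking.
// Only valid for signal channels that are never sent on, only closed.
func closedNow(done <-chan struct{}) bool {
	select {
	case <-done:
		return true
	default:
		return false
	}
}

func main() {
	done := make(chan struct{})
	fmt.Println(closedNow(done)) // false
	close(done)
	fmt.Println(closedNow(done)) // true
}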
// New creates a new implementation of the STI Docker interface
func New(config *api.DockerConfig, auth api.AuthConfig) (Docker, error) {
	var client *dockerapi.Client
	var httpClient *http.Client
	if config.CertFile != "" && config.KeyFile != "" && config.CAFile != "" {
		tlscOptions := tlsconfig.Options{
			CAFile:   config.CAFile,
			CertFile: config.CertFile,
			KeyFile:  config.KeyFile,
		}
		tlsc, tlsErr := tlsconfig.Client(tlscOptions)
		if tlsErr != nil {
			return nil, tlsErr
		}
		httpClient = &http.Client{
			Transport: k8snet.SetTransportDefaults(&http.Transport{
				TLSClientConfig: tlsc,
			}),
		}
	}

	client, err := dockerapi.NewClient(config.Endpoint, "", httpClient, nil)
	if err != nil {
		return nil, err
	}
	k8sDocker := dockertools.ConnectToDockerOrDie(config.Endpoint, 0)
	return &stiDocker{
		kubeDockerClient: k8sDocker,
		client:           client,
		httpClient:       httpClient,
		dialer:           &net.Dialer{},
		pullAuth: dockertypes.AuthConfig{
			Username:      auth.Username,
			Password:      auth.Password,
			Email:         auth.Email,
			ServerAddress: auth.ServerAddress,
		},
		endpoint: config.Endpoint,
	}, nil
}
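A hedged usage sketch of New: the field names come straight from the function above, but the endpoint, file paths, and credentials are illustrative assumptions, and the snippet lives in the same package as New:

// hypothetical caller in the same package as New above
func exampleNew() {
	cfg := &api.DockerConfig{
		Endpoint: "tcp://127.0.0.1:2376",
		// With all three TLS fields set, New builds a TLS transport;
		// these paths are placeholders only.
		CAFile:   "/etc/docker/ca.pem",
		CertFile: "/etc/docker/cert.pem",
		KeyFile:  "/etc/docker/key.pem",
	}
	auth := api.AuthConfig{Username: "user", Password: "secret"}

	d, err := New(cfg, auth)
	if err != nil {
		log.Fatalf("docker client setup failed: %v", err)
	}
	_ = d // ready for authenticated pulls/builds
}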
func NewTestKubernetesExecutor() *Executor {
	return New(Config{
		Docker:   dockertools.ConnectToDockerOrDie("fake://", 0),
		Registry: newFakeRegistry(),
	})
}
// TestExecutorFrameworkMessage ensures that the executor is able to
// handle messages from the framework, specifically about lost tasks
// and Kamikaze. When a task is lost, the executor needs to clean up
// its state. When a Kamikaze message is received, the executor should
// attempt suicide.
func TestExecutorFrameworkMessage(t *testing.T) {
	// create fake apiserver
	podListWatch := NewMockPodsListWatch(api.PodList{})
	testApiServer := NewTestServer(t, api.NamespaceDefault, &podListWatch.list)
	defer testApiServer.server.Close()

	// create and start executor
	mockDriver := &MockExecutorDriver{}
	kubeletFinished := make(chan struct{})
	config := Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: make(chan interface{}, 1024),
		APIClient: client.NewOrDie(&client.Config{
			Host:    testApiServer.server.URL,
			Version: testapi.Default.Version(),
		}),
		Kubelet: &fakeKubelet{
			Kubelet: &kubelet.Kubelet{},
			hostIP:  net.IPv4(127, 0, 0, 1),
		},
		PodStatusFunc: func(kl KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return &api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{
					{
						Name: "foo",
						State: api.ContainerState{
							Running: &api.ContainerStateRunning{},
						},
					},
				},
				Phase: api.PodRunning,
			}, nil
		},
		ShutdownAlert: func() {
			close(kubeletFinished)
		},
		KubeletFinished: kubeletFinished,
	}
	executor := New(config)

	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	executor.FrameworkMessage(mockDriver, "test framework message")

	// set up a pod to then lose
	pod := NewTestPod(1)
	podTask, _ := podtask.New(api.NewDefaultContext(), "foo", *pod, &mesosproto.ExecutorInfo{})

	taskInfo := podTask.BuildTaskInfo()
	data, _ := testapi.Default.Codec().Encode(pod)
	taskInfo.Data = data

	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_STARTING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Once()

	called := make(chan struct{})
	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_RUNNING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()

	executor.LaunchTask(mockDriver, taskInfo)

	// waiting until the pod is really running b/c otherwise a TASK_FAILED could be
	// triggered by the asynchronously running _launchTask, __launchTask methods
	// when removing the task from k.tasks through the "task-lost:foo" message below.
	select {
	case <-called:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for SendStatusUpdate for the running task")
	}

	// send task-lost message for it
	called = make(chan struct{})
	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_LOST,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()

	executor.FrameworkMessage(mockDriver, "task-lost:foo")
	assertext.EventuallyTrue(t, 5*time.Second, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return len(executor.tasks) == 0 && len(executor.pods) == 0
	}, "executor must be able to kill a created task and pod")

	select {
	case <-called:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for SendStatusUpdate")
	}

	mockDriver.On("Stop").Return(mesosproto.Status_DRIVER_STOPPED, nil).Once()

	executor.FrameworkMessage(mockDriver, messages.Kamikaze)
	assert.Equal(t, true, executor.isDone(),
		"executor should have shut down after receiving a Kamikaze message")

	mockDriver.AssertExpectations(t)
}
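These tests drive testify's mock package: .On declares an expected call, .Return fixes its result, .Run attaches a side effect (here, closing a signal channel), and .Once bounds the call count. A self-contained sketch with a hypothetical Greeter interface, not the MockExecutorDriver itself:

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// Greeter is a hypothetical dependency we want to mock.
type Greeter interface {
	Greet(name string) string
}

// MockGreeter embeds mock.Mock, like MockExecutorDriver above.
type MockGreeter struct {
	mock.Mock
}

func (m *MockGreeter) Greet(name string) string {
	args := m.Called(name)
	return args.String(0)
}

func main() {
	g := &MockGreeter{}
	called := 0
	// Expect exactly one call with "world"; run a side effect, then return.
	g.On("Greet", "world").Run(func(_ mock.Arguments) { called++ }).Return("hello world").Once()

	fmt.Println(g.Greet("world"), called) // hello world 1
	// In a test, g.AssertExpectations(t) would verify the .Once() bound.
}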
// TestExecutorStaticPods tests that the ExecutorInfo.data is parsed
// as a zip archive with pod definitions.
func TestExecutorStaticPods(t *testing.T) {
	// create some zip with static pod definition
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	createStaticPodFile := func(fileName, id, name string) {
		w, err := zw.Create(fileName)
		assert.NoError(t, err)
		spod := `{
			"apiVersion": "v1",
			"kind": "Pod",
			"metadata": {
				"name": "%v",
				"labels": { "name": "foo", "cluster": "bar" }
			},
			"spec": {
				"containers": [{
					"name": "%v",
					"image": "library/nginx",
					"ports": [{ "containerPort": 80, "name": "http" }],
					"livenessProbe": {
						"enabled": true,
						"type": "http",
						"initialDelaySeconds": 30,
						"httpGet": { "path": "/", "port": 80 }
					}
				}]
			}
		}`
		_, err = w.Write([]byte(fmt.Sprintf(spod, id, name)))
		assert.NoError(t, err)
	}
	createStaticPodFile("spod.json", "spod-id-01", "spod-01")
	createStaticPodFile("spod2.json", "spod-id-02", "spod-02")
	createStaticPodFile("dir/spod.json", "spod-id-03", "spod-03") // same file name as first one to check for overwriting
	expectedStaticPodsNum := 2                                    // subdirectories are ignored by FileSource, hence only 2

	err := zw.Close()
	assert.NoError(t, err)

	// create fake apiserver
	testApiServer := NewTestServer(t, api.NamespaceDefault, nil)
	defer testApiServer.server.Close()

	// temporary directory which is normally located in the executor sandbox
	staticPodsConfigPath, err := ioutil.TempDir("/tmp", "executor-k8sm-archive")
	assert.NoError(t, err)
	defer os.RemoveAll(staticPodsConfigPath)

	mockDriver := &MockExecutorDriver{}
	updates := make(chan interface{}, 1024)
	config := Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: make(chan interface{}, 1), // allow kube-executor source to proceed past init
		APIClient: client.NewOrDie(&client.Config{
			Host:    testApiServer.server.URL,
			Version: testapi.Default.Version(),
		}),
		Kubelet: &kubelet.Kubelet{},
		PodStatusFunc: func(kl KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return &api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{
					{
						Name: "foo",
						State: api.ContainerState{
							Running: &api.ContainerStateRunning{},
						},
					},
				},
				Phase: api.PodRunning,
			}, nil
		},
		StaticPodsConfigPath: staticPodsConfigPath,
	}
	executor := New(config)
	hostname := "h1"
	go executor.InitializeStaticPodsSource(func() {
		kconfig.NewSourceFile(staticPodsConfigPath, hostname, 1*time.Second, updates)
	})

	// create ExecutorInfo with static pod zip in data field
	executorInfo := mesosutil.NewExecutorInfo(
		mesosutil.NewExecutorID("ex1"),
		mesosutil.NewCommandInfo("k8sm-executor"),
	)
	executorInfo.Data = buf.Bytes()

	// start the executor with the static pod data
	executor.Init(mockDriver)
	executor.Registered(mockDriver, executorInfo, nil, nil)

	// wait for static pod to start
	seenPods := map[string]struct{}{}
	timeout := time.After(time.Second)
	defer mockDriver.AssertExpectations(t)
	for {
		// filter by PodUpdate type
		select {
		case <-timeout:
			t.Fatalf("Executor should send pod updates for %v pods, only saw %v", expectedStaticPodsNum, len(seenPods))
		case update, ok := <-updates:
			if !ok {
				return
			}
			podUpdate, ok := update.(kubelet.PodUpdate)
			if !ok {
				continue
			}
			for _, pod := range podUpdate.Pods {
				seenPods[pod.Name] = struct{}{}
			}
			if len(seenPods) == expectedStaticPodsNum {
				return
			}
		}
	}
}
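The test builds its fixture entirely in memory with archive/zip. A self-contained sketch of that write-then-read round trip, independent of the executor types:

package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"
	"log"
)

func main() {
	// Write two entries into an in-memory zip archive.
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	for _, name := range []string{"spod.json", "spod2.json"} {
		w, err := zw.Create(name)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := io.WriteString(w, `{"kind":"Pod"}`); err != nil {
			log.Fatal(err)
		}
	}
	if err := zw.Close(); err != nil { // flushes the central directory
		log.Fatal(err)
	}

	// Read it back, as the executor would from ExecutorInfo.Data.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range zr.File {
		fmt.Println(f.Name)
	}
}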
// TestExecutorLaunchAndKillTask ensures that the executor is able to launch
// and kill tasks while properly bookkeeping its tasks.
func TestExecutorLaunchAndKillTask(t *testing.T) {
	// create a fake pod watch. We use that below to submit new pods to the scheduler
	podListWatch := NewMockPodsListWatch(api.PodList{})

	// create fake apiserver
	testApiServer := NewTestServer(t, api.NamespaceDefault, &podListWatch.list)
	defer testApiServer.server.Close()

	mockDriver := &MockExecutorDriver{}
	updates := make(chan interface{}, 1024)
	config := Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: updates,
		APIClient: client.NewOrDie(&client.Config{
			Host:    testApiServer.server.URL,
			Version: testapi.Default.Version(),
		}),
		Kubelet: &fakeKubelet{
			Kubelet: &kubelet.Kubelet{},
			hostIP:  net.IPv4(127, 0, 0, 1),
		},
		PodStatusFunc: func(kl KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return &api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{
					{
						Name: "foo",
						State: api.ContainerState{
							Running: &api.ContainerStateRunning{},
						},
					},
				},
				Phase: api.PodRunning,
			}, nil
		},
	}
	executor := New(config)

	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	select {
	case <-updates:
	case <-time.After(time.Second):
		t.Fatalf("Executor should send an initial update on Registration")
	}

	pod := NewTestPod(1)
	podTask, err := podtask.New(api.NewDefaultContext(), "", *pod, &mesosproto.ExecutorInfo{})
	assert.Equal(t, nil, err, "must be able to create a task from a pod")

	taskInfo := podTask.BuildTaskInfo()
	data, err := testapi.Default.Codec().Encode(pod)
	assert.Equal(t, nil, err, "must be able to encode a pod's spec data")
	taskInfo.Data = data

	var statusUpdateCalls sync.WaitGroup
	statusUpdateDone := func(_ mock.Arguments) { statusUpdateCalls.Done() }

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_STARTING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_RUNNING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	executor.LaunchTask(mockDriver, taskInfo)

	assertext.EventuallyTrue(t, 5*time.Second, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return len(executor.tasks) == 1 && len(executor.pods) == 1
	}, "executor must be able to create a task and a pod")

	gotPodUpdate := false
	select {
	case m := <-updates:
		update, ok := m.(kubelet.PodUpdate)
		if ok && len(update.Pods) == 1 {
			gotPodUpdate = true
		}
	case <-time.After(time.Second):
	}
	assert.Equal(t, true, gotPodUpdate,
		"the executor should send an update about a new pod to "+
			"the updates chan when creating a new one.")

	// Allow some time for asynchronous requests to the driver.
	finished := kmruntime.After(statusUpdateCalls.Wait)
	select {
	case <-finished:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for status update calls to finish")
	}

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_KILLED,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	executor.KillTask(mockDriver, taskInfo.TaskId)

	assertext.EventuallyTrue(t, 5*time.Second, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return len(executor.tasks) == 0 && len(executor.pods) == 0
	}, "executor must be able to kill a created task and pod")

	// Allow some time for asynchronous requests to the driver.
	finished = kmruntime.After(statusUpdateCalls.Wait)
	select {
	case <-finished:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for status update calls to finish")
	}
	mockDriver.AssertExpectations(t)
}
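kmruntime.After converts a blocking call into a channel so it can participate in a select against a timeout. A minimal sketch of such a helper (the real kmruntime implementation may differ in details):

package main

import (
	"fmt"
	"sync"
	"time"
)

// after runs f in a goroutine and returns a channel that is closed
// once f returns, so callers can select on completion vs. a timeout.
func after(f func()) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		defer close(done)
		f()
	}()
	return done
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		time.Sleep(10 * time.Millisecond)
		wg.Done()
	}()

	select {
	case <-after(wg.Wait):
		fmt.Println("all status updates arrived")
	case <-time.After(time.Second):
		fmt.Println("timed out")
	}
}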
// UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
// is not valid. It will not start any background processes, and does not include authentication/authorization
func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
	hostNetworkSources, err := kubetypes.GetValidatedSources(s.HostNetworkSources)
	if err != nil {
		return nil, err
	}

	hostPIDSources, err := kubetypes.GetValidatedSources(s.HostPIDSources)
	if err != nil {
		return nil, err
	}

	hostIPCSources, err := kubetypes.GetValidatedSources(s.HostIPCSources)
	if err != nil {
		return nil, err
	}

	mounter := mount.New()
	var writer io.Writer = &io.StdWriter{}
	if s.Containerized {
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
		writer = &io.NsenterWriter{}
	}

	tlsOptions, err := InitializeTLS(s)
	if err != nil {
		return nil, err
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	imageGCPolicy := images.ImageGCPolicy{
		MinAge:               s.ImageMinimumGCAge.Duration,
		HighThresholdPercent: int(s.ImageGCHighThresholdPercent),
		LowThresholdPercent:  int(s.ImageGCLowThresholdPercent),
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: int(s.LowDiskSpaceThresholdMB),
		RootFreeDiskMB:   int(s.LowDiskSpaceThresholdMB),
	}

	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return nil, fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	reservation, err := parseReservation(s.KubeReserved, s.SystemReserved)
	if err != nil {
		return nil, err
	}

	thresholds, err := eviction.ParseThresholdConfig(s.EvictionHard, s.EvictionSoft, s.EvictionSoftGracePeriod, s.EvictionMinimumReclaim)
	if err != nil {
		return nil, err
	}
	evictionConfig := eviction.Config{
		PressureTransitionPeriod: s.EvictionPressureTransitionPeriod.Duration,
		MaxPodGracePeriodSeconds: int64(s.EvictionMaxPodGracePeriod),
		Thresholds:               thresholds,
	}

	return &KubeletConfig{
		Address:                      net.ParseIP(s.Address),
		AllowPrivileged:              s.AllowPrivileged,
		Auth:                         nil, // default does not enforce auth[nz]
		CAdvisorInterface:            nil, // launches background processes, not set here
		VolumeStatsAggPeriod:         s.VolumeStatsAggPeriod.Duration,
		CgroupRoot:                   s.CgroupRoot,
		Cloud:                        nil, // cloud provider might start background processes
		ClusterDNS:                   net.ParseIP(s.ClusterDNS),
		ClusterDomain:                s.ClusterDomain,
		ConfigFile:                   s.Config,
		ConfigureCBR0:                s.ConfigureCBR0,
		ContainerManager:             nil,
		ContainerRuntime:             s.ContainerRuntime,
		RuntimeRequestTimeout:        s.RuntimeRequestTimeout.Duration,
		CPUCFSQuota:                  s.CPUCFSQuota,
		DiskSpacePolicy:              diskSpacePolicy,
		DockerClient:                 dockertools.ConnectToDockerOrDie(s.DockerEndpoint, s.RuntimeRequestTimeout.Duration), // TODO(random-liu): Set RuntimeRequestTimeout for rkt.
		RuntimeCgroups:               s.RuntimeCgroups,
		DockerExecHandler:            dockerExecHandler,
		EnableControllerAttachDetach: s.EnableControllerAttachDetach,
		EnableCustomMetrics:          s.EnableCustomMetrics,
		EnableDebuggingHandlers:      s.EnableDebuggingHandlers,
		CgroupsPerQOS:                s.CgroupsPerQOS,
		EnableServer:                 s.EnableServer,
		EventBurst:                   int(s.EventBurst),
		EventRecordQPS:               float32(s.EventRecordQPS),
		FileCheckFrequency:           s.FileCheckFrequency.Duration,
		HostnameOverride:             s.HostnameOverride,
		HostNetworkSources:           hostNetworkSources,
		HostPIDSources:               hostPIDSources,
		HostIPCSources:               hostIPCSources,
		HTTPCheckFrequency:           s.HTTPCheckFrequency.Duration,
		ImageGCPolicy:                imageGCPolicy,
		KubeClient:                   nil,
		ManifestURL:                  s.ManifestURL,
		ManifestURLHeader:            manifestURLHeader,
		MasterServiceNamespace:       s.MasterServiceNamespace,
		MaxContainerCount:            int(s.MaxContainerCount),
		MaxOpenFiles:                 uint64(s.MaxOpenFiles),
		MaxPerPodContainerCount:      int(s.MaxPerPodContainerCount),
		MaxPods:                      int(s.MaxPods),
		NvidiaGPUs:                   int(s.NvidiaGPUs),
		MinimumGCAge:                 s.MinimumGCAge.Duration,
		Mounter:                      mounter,
		NetworkPluginName:            s.NetworkPluginName,
		NetworkPlugins:               ProbeNetworkPlugins(s.NetworkPluginDir),
		NodeLabels:                   s.NodeLabels,
		NodeStatusUpdateFrequency:    s.NodeStatusUpdateFrequency.Duration,
		NonMasqueradeCIDR:            s.NonMasqueradeCIDR,
		OOMAdjuster:                  oom.NewOOMAdjuster(),
		OSInterface:                  kubecontainer.RealOS{},
		PodCIDR:                      s.PodCIDR,
		ReconcileCIDR:                s.ReconcileCIDR,
		PodInfraContainerImage:       s.PodInfraContainerImage,
		Port:                         uint(s.Port),
		ReadOnlyPort:                 uint(s.ReadOnlyPort),
		RegisterNode:                 s.RegisterNode,
		RegisterSchedulable:          s.RegisterSchedulable,
		RegistryBurst:                int(s.RegistryBurst),
		RegistryPullQPS:              float64(s.RegistryPullQPS),
		ResolverConfig:               s.ResolverConfig,
		Reservation:                  *reservation,
		KubeletCgroups:               s.KubeletCgroups,
		RktPath:                      s.RktPath,
		RktAPIEndpoint:               s.RktAPIEndpoint,
		RktStage1Image:               s.RktStage1Image,
		RootDirectory:                s.RootDirectory,
		SeccompProfileRoot:           s.SeccompProfileRoot,
		Runonce:                      s.RunOnce,
		SerializeImagePulls:          s.SerializeImagePulls,
		StandaloneMode:               (len(s.APIServerList) == 0),
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout.Duration,
		SyncFrequency:                  s.SyncFrequency.Duration,
		SystemCgroups:                  s.SystemCgroups,
		TLSOptions:                     tlsOptions,
		Writer:                         writer,
		VolumePlugins:                  ProbeVolumePlugins(s.VolumePluginDir),
		OutOfDiskTransitionFrequency:   s.OutOfDiskTransitionFrequency.Duration,
		HairpinMode:                    s.HairpinMode,
		BabysitDaemons:                 s.BabysitDaemons,
		ExperimentalFlannelOverlay:     s.ExperimentalFlannelOverlay,
		NodeIP:                         net.ParseIP(s.NodeIP),
		EvictionConfig:                 evictionConfig,
		PodsPerCore:                    int(s.PodsPerCore),
	}, nil
}
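Note that the manifest-url-header parsing above rejects any value containing a second ':'. If header values with embedded colons (e.g. a URL) ever needed to be accepted, strings.SplitN splits on the first separator only; a standalone sketch of that alternative, not the kubelet's actual behavior:

package main

import (
	"fmt"
	"strings"
)

func main() {
	raw := "X-Manifest-Source: https://example.com/manifests"

	// SplitN(…, 2) keeps everything after the first ':' intact,
	// so colon-bearing values survive.
	pieces := strings.SplitN(raw, ":", 2)
	if len(pieces) != 2 {
		fmt.Printf("malformed header %q\n", raw)
		return
	}
	key := strings.TrimSpace(pieces[0])
	value := strings.TrimSpace(pieces[1])
	fmt.Printf("%s=%s\n", key, value)
}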
// Run runs the specified KubeletExecutorServer.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
	rand.Seed(time.Now().UTC().UnixNano())

	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		log.Info(err)
	}

	// derive the executor cgroup and use it as docker container cgroup root
	mesosCgroup := findMesosCgroup(s.cgroupPrefix)
	s.cgroupRoot = mesosCgroup
	log.V(2).Infof("passing cgroup %q to the kubelet as cgroup root", s.cgroupRoot)

	// empty string for the docker and system containers (= cgroup paths). This
	// stops the kubelet taking any control over other system processes.
	s.SystemContainer = ""
	s.DockerDaemonContainer = ""

	// We set kubelet container to its own cgroup below the executor cgroup.
	// In contrast to the docker and system container, this has no other
	// undesired side-effects.
	s.ResourceContainer = mesosCgroup + "/kubelet"

	// create apiserver client
	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil {
		// required for k8sm since we need to send api.Binding information
		// back to the apiserver
		log.Fatalf("No API client: %v", err)
	}

	log.Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	shutdownCloser, err := s.syncExternalShutdownWatcher()
	if err != nil {
		return err
	}

	cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	//TODO(jdef) intentionally NOT initializing a cloud provider here since:
	//(a) the kubelet doesn't actually use it
	//(b) we don't need to create N-kubelet connections to zookeeper for no good reason
	//cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	//log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}

	mounter := mount.New()
	if s.Containerized {
		log.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = &mount.NsenterMounter{}
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		log.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	kcfg := app.KubeletConfig{
		Address:            s.Address,
		AllowPrivileged:    s.AllowPrivileged,
		HostNetworkSources: hostNetworkSources,
		HostnameOverride:   s.HostnameOverride,
		RootDirectory:      s.RootDirectory,
		// ConfigFile: ""
		// ManifestURL: ""
		FileCheckFrequency: s.FileCheckFrequency,
		// HTTPCheckFrequency
		PodInfraContainerImage:  s.PodInfraContainerImage,
		SyncFrequency:           s.SyncFrequency,
		RegistryPullQPS:         s.RegistryPullQPS,
		RegistryBurst:           s.RegistryBurst,
		MinimumGCAge:            s.MinimumGCAge,
		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
		MaxContainerCount:       s.MaxContainerCount,
		RegisterNode:            s.RegisterNode,
		// StandaloneMode: false
		ClusterDomain:                  s.ClusterDomain,
		ClusterDNS:                     s.ClusterDNS,
		Runonce:                        s.RunOnce,
		Port:                           s.Port,
		ReadOnlyPort:                   s.ReadOnlyPort,
		CadvisorInterface:              cadvisorInterface,
		EnableServer:                   s.EnableServer,
		EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
		DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		KubeClient:                     apiclient,
		MasterServiceNamespace:         s.MasterServiceNamespace,
		VolumePlugins:                  app.ProbeVolumePlugins(),
		NetworkPlugins:                 app.ProbeNetworkPlugins(s.NetworkPluginDir),
		NetworkPluginName:              s.NetworkPluginName,
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		TLSOptions:                     tlsOptions,
		ImageGCPolicy:                  imageGCPolicy,
		DiskSpacePolicy:                diskSpacePolicy,
		Cloud:                          nil, // TODO(jdef) Cloud, specifying null here because we don't want all kubelets polling mesos-master; need to account for this in the cloudprovider impl
		NodeStatusUpdateFrequency:      s.NodeStatusUpdateFrequency,
		ResourceContainer:              s.ResourceContainer,
		CgroupRoot:                     s.cgroupRoot,
		ContainerRuntime:               s.ContainerRuntime,
		Mounter:                        mounter,
		DockerDaemonContainer:          s.DockerDaemonContainer,
		SystemContainer:                s.SystemContainer,
		ConfigureCBR0:                  s.ConfigureCBR0,
		MaxPods:                        s.MaxPods,
		DockerExecHandler:              dockerExecHandler,
	}

	kcfg.NodeName = kcfg.Hostname

	err = app.RunKubelet(&kcfg, app.KubeletBuilder(func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
		return s.createAndInitKubelet(kc, hks, clientConfig, shutdownCloser)
	}))
	if err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				log.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}

	// block until executor is shut down or commits shutdown
	select {}
}
func (s *KubeletExecutorServer) runExecutor(execUpdates chan<- kubetypes.PodUpdate, nodeInfos chan<- executor.NodeInfo, kubeletFinished <-chan struct{},
	staticPodsConfigPath string, apiclient *client.Client) error {
	exec := executor.New(executor.Config{
		Updates:         execUpdates,
		APIClient:       apiclient,
		Docker:          dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		SuicideTimeout:  s.SuicideTimeout,
		KubeletFinished: kubeletFinished,
		ExitFunc:        os.Exit,
		PodStatusFunc: func(pod *api.Pod) (*api.PodStatus, error) {
			s.kletLock.Lock()
			defer s.kletLock.Unlock()

			if s.klet == nil {
				return nil, fmt.Errorf("PodStatusFunc called before kubelet is initialized")
			}

			status, err := s.klet.GetRuntime().GetPodStatus(pod)
			if err != nil {
				return nil, err
			}

			status.Phase = kubelet.GetPhase(&pod.Spec, status.ContainerStatuses)
			hostIP, err := s.klet.GetHostIP()
			if err != nil {
				log.Errorf("Cannot get host IP: %v", err)
			} else {
				status.HostIP = hostIP.String()
			}
			return status, nil
		},
		StaticPodsConfigPath: staticPodsConfigPath,
		PodLW: cache.NewListWatchFromClient(apiclient, "pods", api.NamespaceAll,
			fields.OneTermEqualSelector(client.PodHost, s.HostnameOverride),
		),
		NodeInfos: nodeInfos,
	})

	// initialize driver and initialize the executor with it
	dconfig := bindings.DriverConfig{
		Executor:         exec,
		HostnameOverride: s.HostnameOverride,
		BindingAddress:   s.Address,
	}
	driver, err := bindings.NewMesosExecutorDriver(dconfig)
	if err != nil {
		return fmt.Errorf("failed to create executor driver: %v", err)
	}
	log.V(2).Infof("Initialize executor driver...")

	exec.Init(driver)

	// start the driver
	go func() {
		if _, err := driver.Run(); err != nil {
			log.Fatalf("executor driver failed: %v", err)
		}
		log.Info("executor Run completed")
	}()

	return nil
}
// TestExecutorLaunchAndKillTask ensures that the executor is able to launch tasks and generates
// appropriate status messages for mesos. It then kills the task and validates that appropriate
// actions are taken by the executor.
func TestExecutorLaunchAndKillTask(t *testing.T) {
	var (
		mockDriver = &MockExecutorDriver{}
		registry   = newFakeRegistry()
		executor   = New(Config{
			Docker:    dockertools.ConnectToDockerOrDie("fake://", 0),
			NodeInfos: make(chan NodeInfo, 1),
			Registry:  registry,
		})
		mockKubeAPI  = &mockKubeAPI{}
		pod          = NewTestPod(1)
		executorinfo = &mesosproto.ExecutorInfo{}
	)
	executor.kubeAPI = mockKubeAPI
	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	podTask, err := podtask.New(
		api.NewDefaultContext(),
		podtask.Config{
			Prototype:        executorinfo,
			HostPortStrategy: hostport.StrategyWildcard,
		},
		pod,
	)
	assert.Equal(t, nil, err, "must be able to create a task from a pod")

	pod.Annotations = map[string]string{
		"k8s.mesosphere.io/taskId": podTask.ID,
	}

	podTask.Spec = &podtask.Spec{Executor: executorinfo}
	taskInfo, err := podTask.BuildTaskInfo()
	assert.Equal(t, nil, err, "must be able to build task info")

	data, err := runtime.Encode(testapi.Default.Codec(), pod)
	assert.Equal(t, nil, err, "must be able to encode a pod's spec data")

	taskInfo.Data = data
	var statusUpdateCalls sync.WaitGroup
	statusUpdateCalls.Add(1)
	statusUpdateDone := func(_ mock.Arguments) { statusUpdateCalls.Done() }

	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_STARTING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_RUNNING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	executor.LaunchTask(mockDriver, taskInfo)

	assertext.EventuallyTrue(t, wait.ForeverTestTimeout, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return !registry.empty()
	}, "executor must be able to create a task and a pod")

	// simulate a pod source update; normally this update is generated when binding a pod
	err = registry.phaseChange(pod, api.PodPending)
	assert.NoError(t, err)

	// simulate a pod source update; normally this update is generated by the kubelet once the pod is healthy
	err = registry.phaseChange(pod, api.PodRunning)
	assert.NoError(t, err)

	// Allow some time for asynchronous requests to the driver.
	finished := kmruntime.After(statusUpdateCalls.Wait)
	select {
	case <-finished:
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timed out waiting for status update calls to finish")
	}

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_KILLED,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	// simulate what happens when the apiserver is told to delete a pod
	mockKubeAPI.On("killPod", pod.Namespace, pod.Name).Return(nil).Run(func(_ mock.Arguments) {
		registry.Remove(podTask.ID)
	})

	executor.KillTask(mockDriver, taskInfo.TaskId)

	assertext.EventuallyTrue(t, wait.ForeverTestTimeout, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return registry.empty()
	}, "executor must be able to kill a created task and pod")

	// Allow some time for asynchronous requests to the driver.
	finished = kmruntime.After(statusUpdateCalls.Wait)
	select {
	case <-finished:
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timed out waiting for status update calls to finish")
	}

	mockDriver.AssertExpectations(t)
	mockKubeAPI.AssertExpectations(t)
}
// TestExecutorFrameworkMessage ensures that the executor is able to
// handle messages from the framework, specifically about lost tasks
// and Kamikaze. When a task is lost, the executor needs to clean up
// its state. When a Kamikaze message is received, the executor should
// attempt suicide.
func TestExecutorFrameworkMessage(t *testing.T) {
	// TODO(jdef): Fix the unexpected call in the mocking system.
	t.Skip("This test started failing when panic catching was disabled.")
	var (
		mockDriver      = &MockExecutorDriver{}
		kubeletFinished = make(chan struct{})
		registry        = newFakeRegistry()
		executor        = New(Config{
			Docker:    dockertools.ConnectToDockerOrDie("fake://", 0),
			NodeInfos: make(chan NodeInfo, 1),
			ShutdownAlert: func() {
				close(kubeletFinished)
			},
			KubeletFinished: kubeletFinished,
			Registry:        registry,
		})
		pod         = NewTestPod(1)
		mockKubeAPI = &mockKubeAPI{}
	)

	executor.kubeAPI = mockKubeAPI
	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)
	executor.FrameworkMessage(mockDriver, "test framework message")

	// set up a pod to then lose
	executorinfo := &mesosproto.ExecutorInfo{}
	podTask, _ := podtask.New(
		api.NewDefaultContext(),
		podtask.Config{
			ID:               "foo",
			Prototype:        executorinfo,
			HostPortStrategy: hostport.StrategyWildcard,
		},
		pod,
	)
	pod.Annotations = map[string]string{
		"k8s.mesosphere.io/taskId": podTask.ID,
	}
	podTask.Spec = &podtask.Spec{
		Executor: executorinfo,
	}

	taskInfo, err := podTask.BuildTaskInfo()
	assert.Equal(t, nil, err, "must be able to build task info")

	data, _ := runtime.Encode(testapi.Default.Codec(), pod)
	taskInfo.Data = data

	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_STARTING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Once()

	called := make(chan struct{})
	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_RUNNING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()

	executor.LaunchTask(mockDriver, taskInfo)

	// must wait for this otherwise phase changes may not apply
	assertext.EventuallyTrue(t, wait.ForeverTestTimeout, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return !registry.empty()
	}, "executor must be able to create a task and a pod")

	err = registry.phaseChange(pod, api.PodPending)
	assert.NoError(t, err)
	err = registry.phaseChange(pod, api.PodRunning)
	assert.NoError(t, err)

	// waiting until the pod is really running b/c otherwise a TASK_FAILED could be
	// triggered by the asynchronously running executor methods when removing the task
	// from k.tasks through the "task-lost:foo" message below.
	select {
	case <-called:
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timed out waiting for SendStatusUpdate for the running task")
	}

	// send task-lost message for it
	called = make(chan struct{})
	mockDriver.On(
		"SendStatusUpdate", mesosproto.TaskState_TASK_LOST,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()

	// simulate what happens when the apiserver is told to delete a pod
	mockKubeAPI.On("killPod", pod.Namespace, pod.Name).Return(nil).Run(func(_ mock.Arguments) {
		registry.Remove(podTask.ID)
	})

	executor.FrameworkMessage(mockDriver, "task-lost:foo")

	assertext.EventuallyTrue(t, wait.ForeverTestTimeout, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return registry.empty()
	}, "executor must be able to kill a created task and pod")

	select {
	case <-called:
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timed out waiting for SendStatusUpdate")
	}

	mockDriver.On("Stop").Return(mesosproto.Status_DRIVER_STOPPED, nil).Once()

	executor.FrameworkMessage(mockDriver, messages.Kamikaze)
	assert.Equal(t, true, executor.isDone(),
		"executor should have shut down after receiving a Kamikaze message")

	mockDriver.AssertExpectations(t)
	mockKubeAPI.AssertExpectations(t)
}
// Run runs the specified KubeletExecutorServer.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
	rand.Seed(time.Now().UTC().UnixNano())

	oomAdjuster := oom.NewOOMAdjuster()
	if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil {
		log.Info(err)
	}

	// empty string for the docker and system containers (= cgroup paths). This
	// stops the kubelet taking any control over other system processes.
	s.SystemContainer = ""
	s.DockerDaemonContainer = ""

	// create apiserver client
	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil {
		// required for k8sm since we need to send api.Binding information
		// back to the apiserver
		log.Fatalf("No API client: %v", err)
	}

	log.Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	cAdvisorInterface, err := cadvisor.New(s.CAdvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	//TODO(jdef) intentionally NOT initializing a cloud provider here since:
	//(a) the kubelet doesn't actually use it
	//(b) we don't need to create N-kubelet connections to zookeeper for no good reason
	//cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	//log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	hostPIDSources, err := kubelet.GetValidatedSources(strings.Split(s.HostPIDSources, ","))
	if err != nil {
		return err
	}

	hostIPCSources, err := kubelet.GetValidatedSources(strings.Split(s.HostIPCSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}

	mounter := mount.New()
	if s.Containerized {
		log.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = &mount.NsenterMounter{}
	}

	var writer utilio.Writer = &utilio.StdWriter{}
	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		writer = &utilio.NsenterWriter{}
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		log.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	kcfg := app.KubeletConfig{
		Address:           s.Address,
		AllowPrivileged:   s.AllowPrivileged,
		CAdvisorInterface: cAdvisorInterface,
		CgroupRoot:        s.CgroupRoot,
		Cloud:             nil, // TODO(jdef) Cloud, specifying null here because we don't want all kubelets polling mesos-master; need to account for this in the cloudprovider impl
		ClusterDNS:        s.ClusterDNS,
		ClusterDomain:     s.ClusterDomain,
		// ConfigFile: ""
		ConfigureCBR0:           s.ConfigureCBR0,
		ContainerRuntime:        s.ContainerRuntime,
		CPUCFSQuota:             s.CPUCFSQuota,
		DiskSpacePolicy:         diskSpacePolicy,
		DockerClient:            dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		DockerDaemonContainer:   s.DockerDaemonContainer,
		DockerExecHandler:       dockerExecHandler,
		EnableDebuggingHandlers: s.EnableDebuggingHandlers,
		EnableServer:            s.EnableServer,
		EventBurst:              s.EventBurst,
		EventRecordQPS:          s.EventRecordQPS,
		FileCheckFrequency:      s.FileCheckFrequency,
		HostnameOverride:        s.HostnameOverride,
		HostNetworkSources:      hostNetworkSources,
		HostPIDSources:          hostPIDSources,
		HostIPCSources:          hostIPCSources,
		// HTTPCheckFrequency
		ImageGCPolicy: imageGCPolicy,
		KubeClient:    apiclient,
		// ManifestURL: ""
		ManifestURLHeader:         manifestURLHeader,
		MasterServiceNamespace:    s.MasterServiceNamespace,
		MaxContainerCount:         s.MaxContainerCount,
		MaxOpenFiles:              s.MaxOpenFiles,
		MaxPerPodContainerCount:   s.MaxPerPodContainerCount,
		MaxPods:                   s.MaxPods,
		MinimumGCAge:              s.MinimumGCAge,
		Mounter:                   mounter,
		NetworkPluginName:         s.NetworkPluginName,
		NetworkPlugins:            app.ProbeNetworkPlugins(s.NetworkPluginDir),
		NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
		OOMAdjuster:               oomAdjuster,
		OSInterface:               kubecontainer.RealOS{},
		PodCIDR:                   s.PodCIDR,
		PodInfraContainerImage:    s.PodInfraContainerImage,
		Port:                      s.Port,
		ReadOnlyPort:              s.ReadOnlyPort,
		RegisterNode:              s.RegisterNode,
		RegistryBurst:             s.RegistryBurst,
		RegistryPullQPS:           s.RegistryPullQPS,
		ResolverConfig:            s.ResolverConfig,
		ResourceContainer:         s.ResourceContainer,
		RootDirectory:             s.RootDirectory,
		Runonce:                   s.RunOnce,
		// StandaloneMode: false
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		SyncFrequency:                  s.SyncFrequency,
		SystemContainer:                s.SystemContainer,
		TLSOptions:                     tlsOptions,
		VolumePlugins:                  app.ProbeVolumePlugins(),
		Writer:                         writer,
	}

	kcfg.NodeName = kcfg.Hostname
	kcfg.Builder = app.KubeletBuilder(func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
		return s.createAndInitKubelet(kc, hks, clientConfig)
	})

	err = app.RunKubelet(&kcfg)
	if err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				log.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}

	// block until executor is shut down or commits shutdown
	select {}
}
// UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
// is not valid. It will not start any background processes, and does not include authentication/authorization
func (s *KubeletServer) UnsecuredKubeletConfig() (*KubeletConfig, error) {
	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return nil, err
	}

	hostPIDSources, err := kubelet.GetValidatedSources(strings.Split(s.HostPIDSources, ","))
	if err != nil {
		return nil, err
	}

	hostIPCSources, err := kubelet.GetValidatedSources(strings.Split(s.HostIPCSources, ","))
	if err != nil {
		return nil, err
	}

	mounter := mount.New()
	if s.Containerized {
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return nil, err
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return nil, fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	return &KubeletConfig{
		Address:                        s.Address,
		AllowPrivileged:                s.AllowPrivileged,
		CadvisorInterface:              nil, // launches background processes, not set here
		Auth:                           nil, // default does not enforce auth[nz]
		CgroupRoot:                     s.CgroupRoot,
		Cloud:                          nil, // cloud provider might start background processes
		ClusterDNS:                     s.ClusterDNS,
		ClusterDomain:                  s.ClusterDomain,
		ConfigFile:                     s.Config,
		ConfigureCBR0:                  s.ConfigureCBR0,
		ContainerRuntime:               s.ContainerRuntime,
		CPUCFSQuota:                    s.CPUCFSQuota,
		DiskSpacePolicy:                diskSpacePolicy,
		DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		DockerDaemonContainer:          s.DockerDaemonContainer,
		DockerExecHandler:              dockerExecHandler,
		EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
		EnableServer:                   s.EnableServer,
		EventBurst:                     s.EventBurst,
		EventRecordQPS:                 s.EventRecordQPS,
		FileCheckFrequency:             s.FileCheckFrequency,
		HostnameOverride:               s.HostnameOverride,
		HostNetworkSources:             hostNetworkSources,
		HostPIDSources:                 hostPIDSources,
		HostIPCSources:                 hostIPCSources,
		HTTPCheckFrequency:             s.HTTPCheckFrequency,
		ImageGCPolicy:                  imageGCPolicy,
		KubeClient:                     nil,
		ManifestURL:                    s.ManifestURL,
		ManifestURLHeader:              manifestURLHeader,
		MasterServiceNamespace:         s.MasterServiceNamespace,
		MaxContainerCount:              s.MaxContainerCount,
		MaxPerPodContainerCount:        s.MaxPerPodContainerCount,
		MaxPods:                        s.MaxPods,
		MinimumGCAge:                   s.MinimumGCAge,
		Mounter:                        mounter,
		NetworkPluginName:              s.NetworkPluginName,
		NetworkPlugins:                 ProbeNetworkPlugins(s.NetworkPluginDir),
		NodeStatusUpdateFrequency:      s.NodeStatusUpdateFrequency,
		OSInterface:                    kubecontainer.RealOS{},
		PodCIDR:                        s.PodCIDR,
		PodInfraContainerImage:         s.PodInfraContainerImage,
		Port:                           s.Port,
		ReadOnlyPort:                   s.ReadOnlyPort,
		RegisterNode:                   s.RegisterNode,
		RegistryBurst:                  s.RegistryBurst,
		RegistryPullQPS:                s.RegistryPullQPS,
		ResolverConfig:                 s.ResolverConfig,
		ResourceContainer:              s.ResourceContainer,
		RktPath:                        s.RktPath,
		RktStage1Image:                 s.RktStage1Image,
		RootDirectory:                  s.RootDirectory,
		Runonce:                        s.RunOnce,
		StandaloneMode:                 (len(s.APIServerList) == 0),
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		SyncFrequency:                  s.SyncFrequency,
		SystemContainer:                s.SystemContainer,
		TLSOptions:                     tlsOptions,
		VolumePlugins:                  ProbeVolumePlugins(),
	}, nil
}
// Run runs the specified KubeletServer. This should never exit.
func (s *KubeletServer) Run(_ []string) error {
	util.ReallyCrash = s.ReallyCrashForTesting
	rand.Seed(time.Now().UTC().UnixNano())

	// TODO(vmarmol): Do this through container config.
	if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.Warning(err)
	}

	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil && len(s.APIServerList) > 0 {
		glog.Warningf("No API client: %v", err)
	}

	glog.V(2).Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		return err
	}
	glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}

	mounter := mount.New()
	if s.Containerized {
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	kcfg := KubeletConfig{
		Address:                        s.Address,
		AllowPrivileged:                s.AllowPrivileged,
		HostNetworkSources:             hostNetworkSources,
		HostnameOverride:               s.HostnameOverride,
		RootDirectory:                  s.RootDirectory,
		ConfigFile:                     s.Config,
		ManifestURL:                    s.ManifestURL,
		ManifestURLHeader:              manifestURLHeader,
		FileCheckFrequency:             s.FileCheckFrequency,
		HTTPCheckFrequency:             s.HTTPCheckFrequency,
		PodInfraContainerImage:         s.PodInfraContainerImage,
		SyncFrequency:                  s.SyncFrequency,
		RegistryPullQPS:                s.RegistryPullQPS,
		RegistryBurst:                  s.RegistryBurst,
		MinimumGCAge:                   s.MinimumGCAge,
		MaxPerPodContainerCount:        s.MaxPerPodContainerCount,
		MaxContainerCount:              s.MaxContainerCount,
		RegisterNode:                   s.RegisterNode,
		StandaloneMode:                 (len(s.APIServerList) == 0),
		ClusterDomain:                  s.ClusterDomain,
		ClusterDNS:                     s.ClusterDNS,
		Runonce:                        s.RunOnce,
		Port:                           s.Port,
		ReadOnlyPort:                   s.ReadOnlyPort,
		CadvisorInterface:              cadvisorInterface,
		EnableServer:                   s.EnableServer,
		EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
		DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		KubeClient:                     apiclient,
		MasterServiceNamespace:         s.MasterServiceNamespace,
		VolumePlugins:                  ProbeVolumePlugins(),
		NetworkPlugins:                 ProbeNetworkPlugins(s.NetworkPluginDir),
		NetworkPluginName:              s.NetworkPluginName,
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		TLSOptions:                     tlsOptions,
		ImageGCPolicy:                  imageGCPolicy,
		DiskSpacePolicy:                diskSpacePolicy,
		Cloud:                          cloud,
		NodeStatusUpdateFrequency:      s.NodeStatusUpdateFrequency,
		ResourceContainer:              s.ResourceContainer,
		CgroupRoot:                     s.CgroupRoot,
		ContainerRuntime:               s.ContainerRuntime,
		Mounter:                        mounter,
		DockerDaemonContainer:          s.DockerDaemonContainer,
		SystemContainer:                s.SystemContainer,
		ConfigureCBR0:                  s.ConfigureCBR0,
		PodCIDR:                        s.PodCIDR,
		MaxPods:                        s.MaxPods,
		DockerExecHandler:              dockerExecHandler,
	}

	if err := RunKubelet(&kcfg, nil); err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Forever(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	if s.RunOnce {
		return nil
	}

	// run forever
	select {}
}
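util.Forever above keeps restarting a function with a fixed delay between runs, which is why a failing ListenAndServe is simply retried. A minimal sketch of that loop using only the standard library (the real util.Forever may additionally recover from panics in f):

package main

import (
	"fmt"
	"time"
)

// forever runs f, waits period, and runs it again, indefinitely.
func forever(f func(), period time.Duration) {
	for {
		f()
		time.Sleep(period)
	}
}

func main() {
	attempts := 0
	go forever(func() {
		attempts++
		fmt.Printf("serve attempt %d failed, retrying\n", attempts)
	}, 10*time.Millisecond)

	time.Sleep(50 * time.Millisecond) // let a few retries happen
}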