func TestDeleteOne_Running(t *testing.T) {
	assert := assert.New(t)
	obj := &MockScheduler{}
	reg := podtask.NewInMemoryRegistry()
	obj.On("tasks").Return(reg)

	pod := &Pod{Pod: &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			UID:       "foo0",
			Namespace: api.NamespaceDefault,
		}}}

	task, err := reg.Register(podtask.New(api.NewDefaultContext(), "bar", *pod.Pod, &mesos.ExecutorInfo{}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	task.Set(podtask.Launched)
	err = reg.Update(task)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// preconditions
	qr := newQueuer(nil)
	qr.podQueue.Add(pod, queue.ReplaceExisting)
	assert.Equal(1, len(qr.podQueue.List()))
	_, found := qr.podQueue.Get("default/foo")
	assert.True(found)

	obj.On("killTask", task.ID).Return(nil)

	// exec & post conditions
	d := &deleter{
		api: obj,
		qr:  qr,
	}
	err = d.deleteOne(pod)
	assert.Nil(err)
	// the queue is keyed by namespace/name (see the precondition check above),
	// so look up the same key when asserting the pod is gone
	_, found = qr.podQueue.Get("default/foo")
	assert.False(found)
	assert.Equal(0, len(qr.podQueue.List()))
	obj.AssertExpectations(t)
}
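// For reference, a minimal sketch of how a MockScheduler like the one used
// above can be wired with testify's mock package. The real MockScheduler is
// defined elsewhere in this package, so the method set below is an
// assumption, not the actual implementation.
type sketchMockScheduler struct {
	mock.Mock
}

// tasks returns the canned registry configured via obj.On("tasks").Return(reg).
func (m *sketchMockScheduler) tasks() podtask.Registry {
	args := m.Called()
	return args.Get(0).(podtask.Registry)
}

// killTask records the call so AssertExpectations(t) can verify it happened.
func (m *sketchMockScheduler) killTask(taskId string) error {
	args := m.Called(taskId)
	return args.Error(0)
}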
// TestExecutorFrameworkMessage ensures that the executor is able to
// handle messages from the framework, specifically about lost tasks
// and Kamikaze. When a task is lost, the executor needs to clean up
// its state. When a Kamikaze message is received, the executor should
// attempt suicide.
func TestExecutorFrameworkMessage(t *testing.T) {
	// create fake apiserver
	podListWatch := NewMockPodsListWatch(api.PodList{})
	testApiServer := NewTestServer(t, api.NamespaceDefault, &podListWatch.list)
	defer testApiServer.server.Close()

	// create and start executor
	mockDriver := &MockExecutorDriver{}
	kubeletFinished := make(chan struct{})
	config := Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: make(chan interface{}, 1024),
		APIClient: client.NewOrDie(&client.Config{
			Host:    testApiServer.server.URL,
			Version: testapi.Version(),
		}),
		Kubelet: &fakeKubelet{
			Kubelet: &kubelet.Kubelet{},
			hostIP:  net.IPv4(127, 0, 0, 1),
		},
		PodStatusFunc: func(kl KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return &api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{
					{
						Name: "foo",
						State: api.ContainerState{
							Running: &api.ContainerStateRunning{},
						},
					},
				},
				Phase: api.PodRunning,
			}, nil
		},
		ShutdownAlert: func() {
			close(kubeletFinished)
		},
		KubeletFinished: kubeletFinished,
	}
	executor := New(config)

	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	executor.FrameworkMessage(mockDriver, "test framework message")

	// set up a pod to then lose
	pod := NewTestPod(1)
	podTask, _ := podtask.New(api.NewDefaultContext(), "foo", *pod, &mesosproto.ExecutorInfo{})
	taskInfo := podTask.BuildTaskInfo()
	data, _ := testapi.Codec().Encode(pod)
	taskInfo.Data = data

	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_STARTING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Once()

	called := make(chan struct{})
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_RUNNING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()

	executor.LaunchTask(mockDriver, taskInfo)

	// waiting until the pod is really running b/c otherwise a TASK_FAILED could be
	// triggered by the asynchronously running _launchTask, __launchTask methods
	// when removing the task from k.tasks through the "task-lost:foo" message below.
	select {
	case <-called:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for SendStatusUpdate for the running task")
	}

	// send task-lost message for it
	called = make(chan struct{})
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_LOST,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()

	executor.FrameworkMessage(mockDriver, "task-lost:foo")

	assertext.EventuallyTrue(t, 5*time.Second, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return len(executor.tasks) == 0 && len(executor.pods) == 0
	}, "executor must be able to kill a created task and pod")

	select {
	case <-called:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for SendStatusUpdate")
	}

	mockDriver.On("Stop").Return(mesosproto.Status_DRIVER_STOPPED, nil).Once()

	executor.FrameworkMessage(mockDriver, messages.Kamikaze)
	assert.Equal(t, true, executor.isDone(), "executor should have shut down after receiving a Kamikaze message")

	mockDriver.AssertExpectations(t)
}
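// The test above relies on a simple string convention for framework
// messages: "task-lost:<name>" identifies a lost task, and messages.Kamikaze
// asks the executor to shut itself down. A self-contained sketch of that
// parsing convention, for illustration only; the real dispatch happens
// inside the executor's FrameworkMessage.
func parseFrameworkMessageSketch(msg string) (lostTask string, kamikaze bool) {
	const lostPrefix = "task-lost:"
	if strings.HasPrefix(msg, lostPrefix) {
		return strings.TrimPrefix(msg, lostPrefix), false
	}
	return "", msg == messages.Kamikaze
}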
// TestExecutorLaunchAndKillTask ensures that the executor is able to launch
// and kill tasks while properly bookkeeping its tasks.
func TestExecutorLaunchAndKillTask(t *testing.T) {
	// create a fake pod watch. We use that below to submit new pods to the scheduler
	podListWatch := NewMockPodsListWatch(api.PodList{})

	// create fake apiserver
	testApiServer := NewTestServer(t, api.NamespaceDefault, &podListWatch.list)
	defer testApiServer.server.Close()

	mockDriver := &MockExecutorDriver{}
	updates := make(chan interface{}, 1024)
	config := Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: updates,
		APIClient: client.NewOrDie(&client.Config{
			Host:    testApiServer.server.URL,
			Version: testapi.Version(),
		}),
		Kubelet: &fakeKubelet{
			Kubelet: &kubelet.Kubelet{},
			hostIP:  net.IPv4(127, 0, 0, 1),
		},
		PodStatusFunc: func(kl KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return &api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{
					{
						Name: "foo",
						State: api.ContainerState{
							Running: &api.ContainerStateRunning{},
						},
					},
				},
				Phase: api.PodRunning,
			}, nil
		},
	}
	executor := New(config)

	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	select {
	case <-updates:
	case <-time.After(time.Second):
		t.Fatalf("Executor should send an initial update on Registration")
	}

	pod := NewTestPod(1)
	podTask, err := podtask.New(api.NewDefaultContext(), "", *pod, &mesosproto.ExecutorInfo{})
	assert.Equal(t, nil, err, "must be able to create a task from a pod")

	taskInfo := podTask.BuildTaskInfo()
	data, err := testapi.Codec().Encode(pod)
	assert.Equal(t, nil, err, "must be able to encode a pod's spec data")
	taskInfo.Data = data

	var statusUpdateCalls sync.WaitGroup
	statusUpdateDone := func(_ mock.Arguments) { statusUpdateCalls.Done() }

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_STARTING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_RUNNING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	executor.LaunchTask(mockDriver, taskInfo)

	assertext.EventuallyTrue(t, 5*time.Second, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return len(executor.tasks) == 1 && len(executor.pods) == 1
	}, "executor must be able to create a task and a pod")

	gotPodUpdate := false
	select {
	case m := <-updates:
		update, ok := m.(kubelet.PodUpdate)
		if ok && len(update.Pods) == 1 {
			gotPodUpdate = true
		}
	case <-time.After(time.Second):
	}
	assert.Equal(t, true, gotPodUpdate,
		"the executor should send an update about a new pod to "+
			"the updates chan when creating a new one.")

	// Allow some time for asynchronous requests to the driver.
	finished := kmruntime.After(statusUpdateCalls.Wait)
	select {
	case <-finished:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for status update calls to finish")
	}

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_KILLED,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	executor.KillTask(mockDriver, taskInfo.TaskId)

	assertext.EventuallyTrue(t, 5*time.Second, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return len(executor.tasks) == 0 && len(executor.pods) == 0
	}, "executor must be able to kill a created task and pod")

	// Allow some time for asynchronous requests to the driver.
	finished = kmruntime.After(statusUpdateCalls.Wait)
	select {
	case <-finished:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for status update calls to finish")
	}

	mockDriver.AssertExpectations(t)
}
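// kmruntime.After, used above, adapts a blocking call (here
// statusUpdateCalls.Wait) into a channel so it can participate in a select
// with a timeout. A minimal version of that helper could look like the
// sketch below; the project's real implementation may differ in detail.
func afterSketch(f func()) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		defer close(done)
		f() // blocks until f returns, then the channel is closed
	}()
	return done
}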
// createPodTask wraps podtask.New behind the scheduler interface, passing an
// empty ID so that a fresh task ID is generated.
func (k *k8smScheduler) createPodTask(ctx api.Context, pod *api.Pod) (*podtask.T, error) {
	return podtask.New(ctx, "", *pod, k.internal.executor)
}
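// Hypothetical call site, for illustration only (not part of the real
// scheduler): createPodTask is the same pod-to-task seam the tests above
// exercise when they call podtask.New directly.
func scheduleOneSketch(k *k8smScheduler, pod *api.Pod) (*podtask.T, error) {
	task, err := k.createPodTask(api.NewDefaultContext(), pod)
	if err != nil {
		return nil, fmt.Errorf("failed to create task for pod %q: %v", pod.Name, err)
	}
	return task, nil
}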