func (s *KubeletExecutorServer) runExecutor(execUpdates chan<- kubetypes.PodUpdate, nodeInfos chan<- executor.NodeInfo,
	kubeletFinished <-chan struct{}, staticPodsConfigPath string, apiclient *client.Client) error {
	exec := executor.New(executor.Config{
		Updates:         execUpdates,
		APIClient:       apiclient,
		Docker:          dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		SuicideTimeout:  s.SuicideTimeout,
		KubeletFinished: kubeletFinished,
		ExitFunc:        os.Exit,
		PodStatusFunc: func(pod *api.Pod) (*api.PodStatus, error) {
			s.kletLock.Lock()
			defer s.kletLock.Unlock()

			if s.klet == nil {
				return nil, fmt.Errorf("PodStatusFunc called before kubelet is initialized")
			}

			status, err := s.klet.GetRuntime().GetPodStatus(pod)
			if err != nil {
				return nil, err
			}

			status.Phase = kubelet.GetPhase(&pod.Spec, status.ContainerStatuses)
			hostIP, err := s.klet.GetHostIP()
			if err != nil {
				log.Errorf("Cannot get host IP: %v", err)
			} else {
				status.HostIP = hostIP.String()
			}
			return status, nil
		},
		StaticPodsConfigPath: staticPodsConfigPath,
		PodLW: cache.NewListWatchFromClient(apiclient, "pods", api.NamespaceAll,
			fields.OneTermEqualSelector(client.PodHost, s.HostnameOverride),
		),
		NodeInfos: nodeInfos,
	})

	// initialize the driver, then initialize the executor with it
	dconfig := bindings.DriverConfig{
		Executor:         exec,
		HostnameOverride: s.HostnameOverride,
		BindingAddress:   s.Address,
	}
	driver, err := bindings.NewMesosExecutorDriver(dconfig)
	if err != nil {
		return fmt.Errorf("failed to create executor driver: %v", err)
	}
	log.V(2).Infof("Initialize executor driver...")
	exec.Init(driver)

	// start the driver
	go func() {
		if _, err := driver.Run(); err != nil {
			log.Fatalf("executor driver failed: %v", err)
		}
		log.Info("executor Run completed")
	}()

	return nil
}
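The PodStatusFunc closure above guards s.klet behind s.kletLock because the kubelet is created on a separate goroutine and may not exist yet when the executor first asks for a pod's status. Below is a minimal, self-contained sketch of that guard pattern; the types (server, kubeletStub) are stand-ins for illustration, not the project's real ones.

package main

import (
	"errors"
	"fmt"
	"sync"
)

type kubeletStub struct{}

func (k *kubeletStub) podStatus() string { return "Running" }

type server struct {
	mu   sync.Mutex
	klet *kubeletStub // nil until asynchronous initialization completes
}

// status mirrors the PodStatusFunc guard: lock, check for nil, then delegate.
func (s *server) status() (string, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.klet == nil {
		return "", errors.New("status requested before kubelet is initialized")
	}
	return s.klet.podStatus(), nil
}

func main() {
	s := &server{}
	if _, err := s.status(); err != nil {
		fmt.Println("before init:", err) // callers must tolerate this error
	}

	// simulate the kubelet goroutine finishing initialization
	s.mu.Lock()
	s.klet = &kubeletStub{}
	s.mu.Unlock()

	st, _ := s.status()
	fmt.Println("after init:", st)
}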
// async continuation of LaunchTask
func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId string, pod *api.Pod) {
	deleteTask := func() {
		k.lock.Lock()
		defer k.lock.Unlock()
		delete(k.tasks, taskId)
		k.resetSuicideWatch(driver)
	}

	// TODO(k8s): use Pods interface for binding once clusters are upgraded
	// return b.Pods(binding.Namespace).Bind(binding)
	if pod.Spec.NodeName == "" {
		//HACK(jdef): cloned binding construction from k8s plugin/pkg/scheduler/scheduler.go
		binding := &api.Binding{
			ObjectMeta: api.ObjectMeta{
				Namespace:   pod.Namespace,
				Name:        pod.Name,
				Annotations: make(map[string]string),
			},
			Target: api.ObjectReference{
				Kind: "Node",
				Name: pod.Annotations[meta.BindingHostKey],
			},
		}

		// forward the annotations that the scheduler wants to apply
		for k, v := range pod.Annotations {
			binding.Annotations[k] = v
		}

		// create binding on apiserver
		log.Infof("Binding '%v/%v' to '%v' with annotations %+v...",
			pod.Namespace, pod.Name, binding.Target.Name, binding.Annotations)
		ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
		err := k.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
		if err != nil {
			deleteTask()
			k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
				messages.CreateBindingFailure))
			return
		}
	} else {
		// post annotations update to apiserver
		patch := struct {
			Metadata struct {
				Annotations map[string]string `json:"annotations"`
			} `json:"metadata"`
		}{}
		patch.Metadata.Annotations = pod.Annotations
		patchJson, _ := json.Marshal(patch)
		log.V(4).Infof("Patching annotations %v of pod %v/%v: %v",
			pod.Annotations, pod.Namespace, pod.Name, string(patchJson))
		err := k.client.Patch(api.MergePatchType).RequestURI(pod.SelfLink).Body(patchJson).Do().Error()
		if err != nil {
			log.Errorf("Error updating annotations of ready-to-launch pod %v/%v: %v",
				pod.Namespace, pod.Name, err)
			deleteTask()
			k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
				messages.AnnotationUpdateFailure))
			return
		}
	}

	podFullName := container.GetPodFullName(pod)

	// allow a recently failed-over scheduler the chance to recover the task/pod binding:
	// it may have failed and recovered before the apiserver is able to report the updated
	// binding information. replays of this status event will signal to the scheduler that
	// the apiserver should be up-to-date.
	data, err := json.Marshal(api.PodStatusResult{
		ObjectMeta: api.ObjectMeta{
			Name:     podFullName,
			SelfLink: "/podstatusresult",
		},
	})
	if err != nil {
		deleteTask()
		log.Errorf("failed to marshal pod status result: %v", err)
		k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED, err.Error()))
		return
	}

	k.lock.Lock()
	defer k.lock.Unlock()

	// Add the task.
	task, found := k.tasks[taskId]
	if !found {
		log.V(1).Infof("task %v not found, probably killed: aborting launch, reporting lost", taskId)
		k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
		return
	}

	//TODO(jdef) check for duplicate pod name, if found send TASK_ERROR

	// from here on, we need to delete containers associated with the task
	// upon it going into a terminal state
	task.podName = podFullName
	k.pods[podFullName] = pod

	// send the new pod to the kubelet which will spin it up
	update := kubelet.PodUpdate{
		Op:   kubelet.ADD,
		Pods: []*api.Pod{pod},
	}
	k.updateChan <- update

	statusUpdate := &mesos.TaskStatus{
		TaskId:  mutil.NewTaskID(taskId),
		State:   mesos.TaskState_TASK_STARTING.Enum(),
		Message: proto.String(messages.CreateBindingSuccess),
		Data:    data,
	}
	k.sendStatus(driver, statusUpdate)

	// Delay reporting 'task running' until container is up.
	psf := podStatusFunc(func() (*api.PodStatus, error) {
		status, err := k.podStatusFunc(k.kl, pod)
		if err != nil {
			return nil, err
		}
		status.Phase = kubelet.GetPhase(&pod.Spec, status.ContainerStatuses)
		hostIP, err := k.kl.GetHostIP()
		if err != nil {
			log.Errorf("Cannot get host IP: %v", err)
		} else {
			status.HostIP = hostIP.String()
		}
		return status, nil
	})
	go k._launchTask(driver, taskId, podFullName, psf)
}
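The else branch of launchTask builds a JSON merge-patch whose only payload is metadata.annotations, so the apiserver updates the pod's annotations without replacing the rest of the object. Below is a minimal, standalone sketch of the document that anonymous struct marshals to; the annotation key and value are illustrative placeholders, not the project's real meta.BindingHostKey constant.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// same shape as the anonymous patch struct in launchTask
	patch := struct {
		Metadata struct {
			Annotations map[string]string `json:"annotations"`
		} `json:"metadata"`
	}{}
	patch.Metadata.Annotations = map[string]string{
		"example.io/binding-host": "node-1", // hypothetical annotation for illustration
	}

	body, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}

	// prints: {"metadata":{"annotations":{"example.io/binding-host":"node-1"}}}
	// this is the body sent with Content-Type application/merge-patch+json
	fmt.Println(string(body))
}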