// StatusUpdate is called when a status update message is sent to the scheduler.
func (k *KubernetesScheduler) StatusUpdate(driver bindings.SchedulerDriver, taskStatus *mesos.TaskStatus) {
	source, reason := "none", "none"
	if taskStatus.Source != nil {
		source = (*taskStatus.Source).String()
	}
	if taskStatus.Reason != nil {
		reason = (*taskStatus.Reason).String()
	}
	taskState := taskStatus.GetState()
	metrics.StatusUpdates.WithLabelValues(source, reason, taskState.String()).Inc()

	log.Infof(
		"task status update %q from %q for task %q on slave %q executor %q for reason %q",
		taskState.String(),
		source,
		taskStatus.TaskId.GetValue(),
		taskStatus.SlaveId.GetValue(),
		taskStatus.ExecutorId.GetValue(),
		reason)

	switch taskState {
	case mesos.TaskState_TASK_RUNNING, mesos.TaskState_TASK_FINISHED, mesos.TaskState_TASK_STARTING, mesos.TaskState_TASK_STAGING:
		if _, state := k.taskRegistry.UpdateStatus(taskStatus); state == podtask.StateUnknown {
			if taskState != mesos.TaskState_TASK_FINISHED {
				//TODO(jdef) what if I receive this after a TASK_LOST or TASK_KILLED?
				//I don't want to reincarnate then.. TASK_LOST is a special case because
				//the master is stateless and there are scenarios where I may get TASK_LOST
				//followed by TASK_RUNNING.
				//TODO(jdef) consider running this asynchronously since there are API server
				//calls that may be made
				k.reconcileNonTerminalTask(driver, taskStatus)
			} // else, we don't really care about FINISHED tasks that aren't registered
			return
		}
		if _, exists := k.slaves.getSlave(taskStatus.GetSlaveId().GetValue()); !exists {
			// a registered task has an update reported by a slave that we don't recognize.
			// this should never happen! So we don't reconcile it.
			log.Errorf("Ignore status %+v because the slave does not exist", taskStatus)
			return
		}
	case mesos.TaskState_TASK_FAILED:
		if task, _ := k.taskRegistry.UpdateStatus(taskStatus); task != nil {
			if task.Has(podtask.Launched) && !task.Has(podtask.Bound) {
				go k.plugin.reconcilePod(task.Pod)
				return
			}
		} else {
			// unknown task failed, not much we can do about it
			return
		}
		// last-ditch effort to reconcile our records
		fallthrough
	case mesos.TaskState_TASK_LOST, mesos.TaskState_TASK_KILLED:
		k.reconcileTerminalTask(driver, taskStatus)
	}
}
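// Note on the control flow above: the TASK_FAILED case ends with Go's
// fallthrough, so after the failed-task handling runs, execution drops
// straight into the TASK_LOST/TASK_KILLED case and calls
// reconcileTerminalTask as well. A minimal, self-contained sketch of that
// behavior (toy string values, not from the original codebase):
package main

import "fmt"

func main() {
	state := "TASK_FAILED"
	switch state {
	case "TASK_FAILED":
		fmt.Println("failed-specific handling")
		fallthrough // drops into the next case without re-evaluating its expression
	case "TASK_LOST", "TASK_KILLED":
		fmt.Println("terminal reconciliation")
	}
	// prints:
	// failed-specific handling
	// terminal reconciliation
}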
// StatusUpdate logs each incoming task status update, but only when verbose
// logging (glog level 1) is enabled.
func (sched *Scheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	if glog.V(1) {
		glog.Infof("status update from task %s in state %s under executor %s on slave %s: %s",
			status.GetTaskId().GetValue(),
			status.GetState(),
			status.GetExecutorId().GetValue(),
			status.GetSlaveId().GetValue(),
			status.GetMessage(),
		)
	}
}
// Status renders a task status as a one-line, human-readable summary: task ID
// and state, followed by the slave ID, the reason (for non-running tasks),
// and the status message when present. ID is a formatting helper defined
// elsewhere.
func Status(status *mesos.TaskStatus) string {
	var buffer bytes.Buffer
	buffer.WriteString(fmt.Sprintf("%s %s", status.GetTaskId().GetValue(), status.GetState().String()))
	if status.GetSlaveId().GetValue() != "" {
		buffer.WriteString(" slave: ")
		buffer.WriteString(ID(status.GetSlaveId().GetValue()))
	}
	if status.GetState() != mesos.TaskState_TASK_RUNNING {
		buffer.WriteString(" reason: ")
		buffer.WriteString(status.GetReason().String())
	}
	if status.GetMessage() != "" {
		buffer.WriteString(" message: ")
		buffer.WriteString(status.GetMessage())
	}
	return buffer.String()
}
// statusString builds the same kind of one-line summary as Status above,
// using plain string concatenation instead of a bytes.Buffer. idString is a
// formatting helper defined elsewhere.
func statusString(status *mesos.TaskStatus) string {
	s := fmt.Sprintf("%s %s slave: %s",
		status.GetTaskId().GetValue(),
		status.GetState().String(),
		idString(status.GetSlaveId().GetValue()))
	if status.GetState() != mesos.TaskState_TASK_RUNNING {
		s += " reason: " + status.GetReason().String()
	}
	if status.GetMessage() != "" {
		s += " message: " + status.GetMessage()
	}
	return s
}
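// A minimal sketch of how a helper like statusString could be used from a
// scheduler's StatusUpdate callback: log the one-line summary, then do any
// bookkeeping only for terminal states. The myScheduler type and forgetTask
// method are hypothetical placeholders, not part of the original source; the
// mesos.TaskState constants and status getters are the same ones used above.
func (s *myScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Println(statusString(status))

	switch status.GetState() {
	case mesos.TaskState_TASK_FINISHED, mesos.TaskState_TASK_FAILED,
		mesos.TaskState_TASK_KILLED, mesos.TaskState_TASK_LOST:
		// terminal update: stop tracking this task (hypothetical bookkeeping)
		s.forgetTask(status.GetTaskId().GetValue())
	}
}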