func (sched *ExampleScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Infoln("Status update: task", status.TaskId.GetValue(), "is in state", status.State.Enum().String())

	// Once a task reaches TASK_FINISHED, update the container-to-slave mapping
	// according to the task type carried in the status labels.
	if status.GetState() == mesos.TaskState_TASK_FINISHED {
		labels := status.GetLabels()

		taskType, err := shared.GetValueFromLabels(labels, shared.Tags.TASK_TYPE)
		if err != nil {
			log.Infof("ERROR: Malformed task info, discarding task with status: %v", status)
			return
		}
		acceptedHost, err := shared.GetValueFromLabels(labels, shared.Tags.ACCEPTED_HOST)
		if err != nil {
			log.Infof("ERROR: Malformed task info, discarding task with status: %v", status)
			return
		}
		containerName, err := shared.GetValueFromLabels(labels, shared.Tags.CONTAINER_NAME)
		if err != nil {
			log.Infof("ERROR: Malformed task info, discarding task with status: %v", status)
			return
		}

		switch taskType {
		case shared.TaskTypes.RUN_CONTAINER, shared.TaskTypes.RESTORE_CONTAINER:
			// The container is now running on the host that accepted the offer.
			sched.ContainerSlaveMap[containerName] = acceptedHost
		case shared.TaskTypes.CHECKPOINT_CONTAINER:
			// The container was checkpointed, so it no longer runs on any slave.
			delete(sched.ContainerSlaveMap, containerName)
		}
	}
}
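// Both the scheduler and the executor read task metadata through
// shared.GetValueFromLabels, whose implementation is not shown above.
// The following is only a minimal sketch of what it might look like,
// assuming it takes a *mesos.Labels and reports an error when the key
// is missing.
func GetValueFromLabels(labels *mesos.Labels, key string) (string, error) {
	if labels == nil {
		return "", fmt.Errorf("no labels attached to task")
	}
	for _, label := range labels.GetLabels() {
		if label.GetKey() == key {
			return label.GetValue(), nil
		}
	}
	return "", fmt.Errorf("label %q not found", key)
}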
func (mExecutor *migrationExecutor) LaunchTask(driver executor.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Printf("Launching task %v with data [%#x]\n", taskInfo.GetName(), taskInfo.Data)

	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}
	mExecutor.tasksLaunched++

	/*** run task ***/
	taskType, err := shared.GetValueFromLabels(taskInfo.Labels, shared.Tags.TASK_TYPE)
	if err != nil {
		fmt.Println("Got error", err)
	}
	url, err := shared.GetValueFromLabels(taskInfo.Labels, shared.Tags.FILESERVER_IP)
	if err != nil {
		fmt.Println("Got error", err)
	}
	containerName, err := shared.GetValueFromLabels(taskInfo.Labels, shared.Tags.CONTAINER_NAME)
	if err != nil {
		fmt.Println("Got error", err)
	}

	switch taskType {
	case shared.TaskTypes.RUN_CONTAINER:
		mExecutor.StartContainer(containerName, url)
	case shared.TaskTypes.CHECKPOINT_CONTAINER:
		mExecutor.CheckpointContainer(containerName, url)
	case shared.TaskTypes.RESTORE_CONTAINER:
		mExecutor.RestoreContainer(containerName, url)
	case shared.TaskTypes.TEST_TASK:
		mExecutor.TestRunAndKillContainer(containerName, url)
	case shared.TaskTypes.GET_LOGS:
		mExecutor.GetLogsFromContainer(containerName, url)
	}

	/*** finish task ***/
	fmt.Println("Finishing task", taskInfo.GetName())
	finStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		Labels: taskInfo.Labels,
		State:  mesos.TaskState_TASK_FINISHED.Enum(),
	}
	_, err = driver.SendStatusUpdate(finStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}
	fmt.Println("Task finished", taskInfo.GetName())
}
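// The shared.Tags and shared.TaskTypes values used by the scheduler and the
// executor are label keys and task-type identifiers defined elsewhere in the
// shared package. Their exact definitions are not shown; a plausible sketch
// (an assumption, not the actual definitions) is a pair of structs holding
// plain string values:
var Tags = struct {
	TASK_TYPE, TARGET_HOST, ACCEPTED_HOST, CONTAINER_NAME, FILESERVER_IP string
}{
	TASK_TYPE:      "TASK_TYPE",
	TARGET_HOST:    "TARGET_HOST",
	ACCEPTED_HOST:  "ACCEPTED_HOST",
	CONTAINER_NAME: "CONTAINER_NAME",
	FILESERVER_IP:  "FILESERVER_IP",
}

var TaskTypes = struct {
	RUN_CONTAINER, CHECKPOINT_CONTAINER, RESTORE_CONTAINER, TEST_TASK, GET_LOGS string
}{
	RUN_CONTAINER:        "RUN_CONTAINER",
	CHECKPOINT_CONTAINER: "CHECKPOINT_CONTAINER",
	RESTORE_CONTAINER:    "RESTORE_CONTAINER",
	TEST_TASK:            "TEST_TASK",
	GET_LOGS:             "GET_LOGS",
}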
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)
	log.Infof("received some offers, but do I care?")

	for _, offer := range offers {
		remainingCpus := getOfferCpu(offer)
		remainingMems := getOfferMem(offer)

		var tasks []*mesos.TaskInfo
		for sched.cpuPerTask <= remainingCpus &&
			sched.memPerTask <= remainingMems &&
			len(sched.TaskQueue) > 0 {

			log.Infof("Launched tasks: %d", sched.tasksLaunched)
			log.Infof("Tasks remaining to be launched: %d", len(sched.TaskQueue))

			sched.tasksLaunched++
			task := sched.popTask()

			taskType, err := shared.GetValueFromLabels(task.Labels, shared.Tags.TASK_TYPE)
			if err != nil {
				log.Infof("ERROR: Malformed task info, discarding task %v", task)
				return
			}
			targetHost, err := shared.GetValueFromLabels(task.Labels, shared.Tags.TARGET_HOST)
			if err != nil && taskType != shared.TaskTypes.RUN_CONTAINER {
				log.Infof("ERROR: Malformed task info, discarding task %v", task)
				return
			}
			containerName, err := shared.GetValueFromLabels(task.Labels, shared.Tags.CONTAINER_NAME)
			if err != nil {
				log.Infof("ERROR: Malformed task info, discarding task %v", task)
				return
			}

			// Decide whether this offer is suitable for the task.
			foundAMatch := false
			switch taskType {
			case shared.TaskTypes.RESTORE_CONTAINER:
				// A container can only be restored if it is not already running
				// somewhere, and only on the requested target host.
				if _, ok := sched.ContainerSlaveMap[containerName]; ok {
					log.Infof("ERROR: %s is already running", containerName)
					return
				}
				foundAMatch = targetHost == offer.GetHostname()
			case shared.TaskTypes.GET_LOGS, shared.TaskTypes.CHECKPOINT_CONTAINER:
				// These tasks must run on the slave that currently hosts the container.
				if targetHost == offer.GetHostname() {
					foundAMatch = sched.ContainerSlaveMap[containerName] == targetHost
				}
			default:
				foundAMatch = true
			}

			if foundAMatch {
				task.SlaveId = offer.SlaveId
				task.Labels.Labels = append(task.Labels.Labels,
					shared.CreateLabel(shared.Tags.ACCEPTED_HOST, *offer.Hostname))
				log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())
				tasks = append(tasks, task)
				remainingCpus -= sched.cpuPerTask
				remainingMems -= sched.memPerTask
			} else {
				// Re-queue the task once all offers have been processed.
				defer sched.pushTask(task)
			}
		}
		log.Infoln("Launching", len(tasks), "tasks for offer", offer.Id.GetValue(),
			"\nSlaveID:", offer.GetSlaveId(), "SlaveHostname:", offer.GetHostname())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
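// ResourceOffers also depends on a few helpers defined elsewhere:
// getOfferCpu/getOfferMem sum the scalar resources in an offer, and
// shared.CreateLabel builds a *mesos.Label. Their implementations are not
// shown above; a sketch using the mesos-go protobuf accessors might look
// like this (an illustration under those assumptions, not the actual code).
func getOfferScalar(offer *mesos.Offer, name string) float64 {
	var total float64
	for _, res := range offer.GetResources() {
		if res.GetName() == name {
			total += res.GetScalar().GetValue()
		}
	}
	return total
}

func getOfferCpu(offer *mesos.Offer) float64 { return getOfferScalar(offer, "cpus") }
func getOfferMem(offer *mesos.Offer) float64 { return getOfferScalar(offer, "mem") }

func CreateLabel(key, value string) *mesos.Label {
	return &mesos.Label{Key: proto.String(key), Value: proto.String(value)}
}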