func (exec *ExecutorCore) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	exec.lock.Lock()
	defer exec.lock.Unlock()
	fmt.Println("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetValue())
	os.Args[0] = fmt.Sprintf("executor - %s", taskInfo.TaskId.GetValue())
	//fmt.Println("Other hilarious facts: ", taskInfo)

	//
	// this is where one would perform the requested task
	//

	fmt.Println("Starting task")
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.TaskId,
		State:  mesos.TaskState_TASK_STARTING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		log.Panic("Got error", err)
	}

	if exec.riakNode != nil {
		log.Fatalf("Task started twice, existing task: %+v, new task: %+v", exec.riakNode, taskInfo)
	}

	exec.riakNode = NewRiakNode(taskInfo, exec)
	exec.riakNode.Run()
}
// mesos.Executor interface method.
// Invoked when a task has been launched on this executor.
func (this *TransformExecutor) LaunchTask(driver executor.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Printf("Launching task %s with command %s\n", taskInfo.GetName(), taskInfo.Command.GetValue())
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}

	if _, err := driver.SendStatusUpdate(runStatus); err != nil {
		fmt.Printf("Failed to send status update: %s\n", runStatus)
	}

	go func() {
		// this.avroDecoder = kafka.NewKafkaAvroDecoder(this.config.SchemaRegistryUrl)
		this.startHTTPServer()
		this.startProducer()
		<-this.close
		close(this.incoming)

		// finish task
		fmt.Printf("Finishing task %s\n", taskInfo.GetName())
		finStatus := &mesos.TaskStatus{
			TaskId: taskInfo.GetTaskId(),
			State:  mesos.TaskState_TASK_FINISHED.Enum(),
		}

		if _, err := driver.SendStatusUpdate(finStatus); err != nil {
			fmt.Printf("Failed to send status update: %s\n", finStatus)
		}
		fmt.Printf("Task %s has finished\n", taskInfo.GetName())
	}()
}
func (self *DatabaseExecutor) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	log.Infoln("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetValue())
	self.driver = driver
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		log.Infoln("Got error", err)
	}

	self.tasksLaunched++
	log.Infoln("Total tasks launched ", self.tasksLaunched)

	//
	// this is where one would perform the requested task
	//
	self.runProg()
	time.Sleep(time.Second * 40)

	// finish task
	log.Infoln("Finishing task", taskInfo.GetName())
	finStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_FINISHED.Enum(),
	}
	_, err = driver.SendStatusUpdate(finStatus)
	if err != nil {
		log.Infoln("Got error", err)
	}
	log.Infoln("Task finished", taskInfo.GetName())
}
func (exec *exampleExecutor) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Println("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetValue())
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}

	exec.tasksLaunched++
	fmt.Println("Total tasks launched ", exec.tasksLaunched)

	//
	// this is where one would perform the requested task
	//

	// finish task
	fmt.Println("Finishing task", taskInfo.GetName())
	finStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_FINISHED.Enum(),
	}
	_, err = driver.SendStatusUpdate(finStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}
	fmt.Println("Task finished", taskInfo.GetName())
}
func (c *config) Read(task *mesos.TaskInfo) {
	config := new(config)
	Logger.Debugf("Task data: %s", string(task.GetData()))
	err := json.Unmarshal(task.GetData(), config)
	if err != nil {
		Logger.Critical(err)
		os.Exit(1)
	}
	*c = *config
}
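// The counterpart on the scheduler side is to marshal the same configuration
// struct into TaskInfo.Data before launching the task, so that task.GetData()
// in Read yields the JSON bytes. A minimal sketch under that assumption, using
// the same imports as the surrounding snippets; the helper name setTaskConfig
// is illustrative, not taken from the snippet above.
func setTaskConfig(task *mesos.TaskInfo, cfg *config) error {
	data, err := json.Marshal(cfg)
	if err != nil {
		return err
	}
	task.Data = data // decoded by config.Read via json.Unmarshal(task.GetData(), ...)
	return nil
}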
func (exec *exampleExecutor) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Printf("Launching task %v with data [%#x]\n", taskInfo.GetName(), taskInfo.Data)
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}

	exec.tasksLaunched++
	fmt.Println("Total tasks launched ", exec.tasksLaunched)

	// Download image
	fileName, err := downloadImage(string(taskInfo.Data))
	if err != nil {
		fmt.Printf("Failed to download image with error: %v\n", err)
		return
	}
	fmt.Printf("Downloaded image: %v\n", fileName)

	// Process image
	fmt.Printf("Processing image: %v\n", fileName)
	outFile, err := procImage(fileName)
	if err != nil {
		fmt.Printf("Failed to process image with error: %v\n", err)
		return
	}

	// Upload image
	fmt.Printf("Uploading image: %v\n", outFile)
	if err = uploadImage("http://127.0.0.1:12345/", outFile); err != nil {
		fmt.Printf("Failed to upload image with error: %v\n", err)
		return
	} else {
		fmt.Printf("Uploaded image: %v\n", outFile)
	}

	// Finish task
	fmt.Println("Finishing task", taskInfo.GetName())
	finStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_FINISHED.Enum(),
	}
	_, err = driver.SendStatusUpdate(finStatus)
	if err != nil {
		fmt.Println("Got error", err)
		return
	}
	fmt.Println("Task finished", taskInfo.GetName())
}
func (exec *exampleExecutor) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Println("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetValue())
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}

	exec.tasksLaunched++
	fmt.Println("Total tasks launched ", exec.tasksLaunched)

	//
	// this is where one would perform the requested task
	//

	// rexray.UpdateLogLevel()
	// rexray.InitDriverManagers()
	// sdm := rexray.GetSdm()
	//
	// allVolumes, err := sdm.GetVolume("", "")
	// if err != nil {
	// 	log.Fatal(err)
	// }
	//
	// if len(allVolumes) > 0 {
	// 	yamlOutput, err := yaml.Marshal(&allVolumes)
	// 	if err != nil {
	// 		log.Fatal(err)
	// 	}
	// 	fmt.Printf(string(yamlOutput))
	// }

	client, _ := plugins.NewClient("unix:///run/mesos/executor/rexray.sock", tlsconfig.Options{InsecureSkipVerify: true})
	vd := volumeDriverProxy{client}
	err = vd.Create("test", nil)
	if err != nil {
		fmt.Println("Got error", err)
	}

	// finish task
	fmt.Println("Finishing task", taskInfo.GetName())
	finStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_FINISHED.Enum(),
	}
	_, err = driver.SendStatusUpdate(finStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}
	fmt.Println("Task finished", taskInfo.GetName())
}
func (e *Executor) LaunchTask(driver executor.ExecutorDriver, task *mesos.TaskInfo) {
	Logger.Infof("[LaunchTask] %s", task)

	Config.Read(task)

	runStatus := &mesos.TaskStatus{
		TaskId: task.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}

	if _, err := driver.SendStatusUpdate(runStatus); err != nil {
		Logger.Errorf("Failed to send status update: %s", runStatus)
	}

	go func() {
		e.producer = e.newSyslogProducer()
		e.producer.Start()
		<-e.close

		// finish task
		Logger.Infof("Finishing task %s", task.GetName())
		finStatus := &mesos.TaskStatus{
			TaskId: task.GetTaskId(),
			State:  mesos.TaskState_TASK_FINISHED.Enum(),
		}
		if _, err := driver.SendStatusUpdate(finStatus); err != nil {
			Logger.Errorf("Failed to send status update: %s", finStatus)
			os.Exit(1)
		}
		Logger.Infof("Task %s has finished", task.GetName())
		time.Sleep(time.Second)
		os.Exit(0)
	}()
}
func (e *Executor) LaunchTask(driver executor.ExecutorDriver, task *mesos.TaskInfo) {
	Logger.Infof("[LaunchTask] %s", task)

	runStatus := &mesos.TaskStatus{
		TaskId: task.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}

	if _, err := driver.SendStatusUpdate(runStatus); err != nil {
		Logger.Errorf("Failed to send status update: %s", runStatus)
		os.Exit(1) //TODO not sure if we should exit in this case, but probably yes
	}

	go func() {
		err := e.start()
		if err != nil {
			Logger.Errorf("Can't start executor: %s", err)
		}

		// finish task
		Logger.Infof("Finishing task %s", task.GetName())
		finStatus := &mesos.TaskStatus{
			TaskId: task.GetTaskId(),
			State:  mesos.TaskState_TASK_FINISHED.Enum(),
		}
		if _, err := driver.SendStatusUpdate(finStatus); err != nil {
			Logger.Errorf("Failed to send status update: %s", finStatus)
			os.Exit(1)
		}
		Logger.Infof("Task %s has finished", task.GetName())
	}()
}
func (exec *scraperExecutor) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Printf("Launching task %v with data [%#x]\n", taskInfo.GetName(), taskInfo.Data)
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}

	exec.tasksLaunched++
	fmt.Println("Total tasks launched ", exec.tasksLaunched)

	// Download html
	uri := string(taskInfo.Data)
	fileName, url, err := downloadHTML(uri)
	if err != nil {
		fmt.Printf("Failed to scrape html with error: %v\n", err)
		return
	}
	fmt.Printf("Scraped URI: %v\n", fileName)

	// Upload html
	path := base64.StdEncoding.EncodeToString([]byte(url))
	fmt.Printf("Uploading html: %v\n", fileName)
	if err = uploadImageToS3(path, fileName); err != nil {
		fmt.Printf("Failed to upload html with error: %v\n", err)
		return
	} else {
		fmt.Printf("Uploaded html: %v\n", fileName)
	}

	// Finish task
	fmt.Println("Finishing task", taskInfo.GetName())
	finStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_FINISHED.Enum(),
		Data:   []byte(uri),
	}
	_, err = driver.SendStatusUpdate(finStatus)
	if err != nil {
		fmt.Println("Got error", err)
		return
	}
	fmt.Println("Task finished", taskInfo.GetName())
}
// LaunchTask is called when the executor receives a request to launch a task.
// This happens when the k8sm scheduler has decided to schedule the pod
// (which corresponds to a Mesos Task) onto the node where this executor
// is running, but the binding is not recorded in the Kubernetes store yet.
// This function is invoked to tell the executor to record the binding in the
// Kubernetes store and start the pod via the Kubelet.
func (k *Executor) LaunchTask(driver bindings.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	if k.isDone() {
		return
	}
	log.Infof("Launch task %v\n", taskInfo)

	if !k.isConnected() {
		log.Errorf("Ignore launch task because the executor is disconnected\n")
		k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED, messages.ExecutorUnregistered))
		return
	}

	obj, err := api.Codec.Decode(taskInfo.GetData())
	if err != nil {
		log.Errorf("failed to extract yaml data from the taskInfo.data %v", err)
		k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED, messages.UnmarshalTaskDataFailure))
		return
	}
	pod, ok := obj.(*api.Pod)
	if !ok {
		log.Errorf("expected *api.Pod instead of %T: %+v", obj, obj)
		k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED, messages.UnmarshalTaskDataFailure))
		return
	}

	taskId := taskInfo.GetTaskId().GetValue()

	k.lock.Lock()
	defer k.lock.Unlock()

	if _, found := k.tasks[taskId]; found {
		log.Errorf("task already launched\n")
		// Don't send TASK_RUNNING back here, because this may be a duplicated
		// message or a duplicated task id.
		return
	}
	// remember this task so that:
	// (a) we ignore future launches for it
	// (b) we have a record of it so that we can kill it if needed
	// (c) we're leaving podName == "" for now, which indicates we don't need to delete containers
	k.tasks[taskId] = &kuberTask{
		mesosTaskInfo: taskInfo,
		launchTimer:   time.NewTimer(k.launchGracePeriod),
	}
	k.resetSuicideWatch(driver)

	go k.launchTask(driver, taskId, pod)
}
// LaunchTask is called when the executor launches tasks.
func (builder *ImageBuilder) LaunchTask(driver executor.ExecutorDriver, taskInfo *mesosproto.TaskInfo) {
	fmt.Printf("Launching task %v with ID %v\n", taskInfo.GetName(), taskInfo.GetTaskId().GetValue())
	status := &mesosproto.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesosproto.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(status)
	if err != nil {
		fmt.Println("Send task running status error: ", err)
	}

	// Download context tar file, using the data in the task info
	contextTar, err := utils.Download(string(taskInfo.Data))
	if err != nil {
		fmt.Printf("Download context error: %v\n", err)
	}

	// Untar context file (filename.tar -> filename)
	contextDir := strings.TrimSuffix(contextTar, ".tar")
	err = utils.UnTar(contextTar, contextDir)
	if err != nil {
		fmt.Printf("Untar context error: %v\n", err)
	}

	// Build image with context
	var buf bytes.Buffer
	opts := docker.BuildImageOptions{
		Name:           taskInfo.GetTaskId().GetValue(),
		ContextDir:     contextDir,
		SuppressOutput: false,
		OutputStream:   &buf,
	}
	err = builder.client.BuildImage(opts)
	if err != nil {
		fmt.Printf("Build image error: %v\n", err)
	}
	fmt.Println(buf.String())

	fmt.Println("Task finished", taskInfo.GetName())
	status.State = mesosproto.TaskState_TASK_FINISHED.Enum()
	_, err = driver.SendStatusUpdate(status)
	if err != nil {
		fmt.Println("Send task finished status error: ", err)
	}
}
// mesos.Executor interface method.
// Invoked when a task has been launched on this executor.
func (this *HttpMirrorExecutor) LaunchTask(driver executor.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	log.Logger.Info("Launching task %s with command %s", taskInfo.GetName(), taskInfo.Command.GetValue())
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}

	log.Logger.Debug(string(taskInfo.Data))
	config := consumer.NewPartitionConsumerConfig("syphon")
	json.Unmarshal(taskInfo.Data, config)
	log.Logger.Debug("%v", config)
	this.partitionConsumer = consumer.NewPartitionConsumer(*config)

	if _, err := driver.SendStatusUpdate(runStatus); err != nil {
		log.Logger.Warn("Failed to send status update: %s", runStatus)
	}
}
// mesos.Executor interface method.
// Invoked when a task has been launched on this executor.
func (this *HttpMirrorExecutor) LaunchTask(driver executor.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Printf("Launching task %s with command %s\n", taskInfo.GetName(), taskInfo.Command.GetValue())
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}

	fmt.Println(string(taskInfo.Data))
	config := &consumer.PartitionConsumerConfig{}
	json.Unmarshal(taskInfo.Data, config)
	fmt.Printf("%v\n", config)
	this.partitionConsumer = consumer.NewPartitionConsumer(*config)

	if _, err := driver.SendStatusUpdate(runStatus); err != nil {
		fmt.Printf("Failed to send status update: %s\n", runStatus)
	}
}
// LaunchTask is called when the executor receives a request to launch a task.
// This happens when the k8sm scheduler has decided to schedule the pod
// (which corresponds to a Mesos Task) onto the node where this executor
// is running, but the binding is not recorded in the Kubernetes store yet.
// This function is invoked to tell the executor to record the binding in the
// Kubernetes store and start the pod via the Kubelet.
func (k *Executor) LaunchTask(driver bindings.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	if k.isDone() {
		return
	}

	log.Infof("Launch task %v\n", taskInfo)

	taskID := taskInfo.GetTaskId().GetValue()
	if p := k.registry.pod(taskID); p != nil {
		log.Warningf("task %v already launched", taskID)
		// Don't send TASK_RUNNING or TASK_FAILED back here, because these
		// may be duplicated messages.
		return
	}

	if !k.isConnected() {
		log.Errorf("Ignore launch task because the executor is disconnected\n")
		k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED, messages.ExecutorUnregistered))
		return
	}

	obj, err := kruntime.Decode(api.Codecs.UniversalDecoder(), taskInfo.GetData())
	if err != nil {
		log.Errorf("failed to extract yaml data from the taskInfo.data %v", err)
		k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED, messages.UnmarshalTaskDataFailure))
		return
	}
	pod, ok := obj.(*api.Pod)
	if !ok {
		log.Errorf("expected *api.Pod instead of %T: %+v", obj, obj)
		k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED, messages.UnmarshalTaskDataFailure))
		return
	}

	k.resetSuicideWatch(driver)

	// run the next step async because it calls out to the apiserver and we don't want to block here
	go k.bindAndWatchTask(driver, taskInfo, time.NewTimer(k.launchGracePeriod), pod)
}
func (e *DiegoExecutor) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Println("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetValue())
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_STARTING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}

	var works rep.Work
	json.Unmarshal(taskInfo.Data, &works)
	e.cellClient.Perform(works)

	e.lock.Lock()
	defer e.lock.Unlock()
	e.taskStateMap[taskInfo.TaskId.GetValue()] = mesos.TaskState_TASK_STARTING
}
func (e *Executor) LaunchTask(driver executor.ExecutorDriver, task *mesos.TaskInfo) {
	Logger.Infof("[LaunchTask] %s", task)

	Config.Read(task)
	serializer := e.serializer(Config.Transform)
	producer, err := e.newProducer(serializer) //create producer before sending the running status
	if err != nil {
		Logger.Errorf("Failed to create producer: %s", err)
		os.Exit(1)
	}

	runStatus := &mesos.TaskStatus{
		TaskId: task.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}

	if _, err := driver.SendStatusUpdate(runStatus); err != nil {
		Logger.Errorf("Failed to send status update: %s", runStatus)
		os.Exit(1) //TODO not sure if we should exit in this case, but probably yes
	}

	go func() {
		//TODO configs should come from scheduler
		e.reporter = NewMetricsReporter(task.GetSlaveId().GetValue(), e.slaveInfo.GetHostname(), e.slaveInfo.GetPort(), Config.ReportingInterval, producer, Config.Topic, Config.Transform)
		e.reporter.Start()

		// finish task
		Logger.Infof("Finishing task %s", task.GetName())
		finStatus := &mesos.TaskStatus{
			TaskId: task.GetTaskId(),
			State:  mesos.TaskState_TASK_FINISHED.Enum(),
		}
		if _, err := driver.SendStatusUpdate(finStatus); err != nil {
			Logger.Errorf("Failed to send status update: %s", finStatus)
			os.Exit(1)
		}
		Logger.Infof("Task %s has finished", task.GetName())
	}()
}
func (exec *exampleExecutor) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	log.Info("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetValue())
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		log.Error("Got error", err)
	}

	exec.tasksLaunched++
	log.Info("Total tasks launched ", exec.tasksLaunched)

	log.Info("Executing drone-agent")
	s := string(taskInfo.Data)
	log.Info("Received data ", s)
	split := strings.Split(s, " ")
	droneCmd := exe.Command("drone-agent", split[0], split[1])
	droneCmd.Stdout = os.Stdout
	droneCmd.Stderr = os.Stderr
	err = droneCmd.Run()
	if err != nil {
		panic(err)
	}
	log.Info("Completed drone-agent")

	// finish task
	log.Info("Finishing task", taskInfo.GetName())
	finStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_FINISHED.Enum(),
	}
	_, err = driver.SendStatusUpdate(finStatus)
	if err != nil {
		log.Error("Got error", err)
	}
	log.Info("Task finished", taskInfo.GetName())
}
// LaunchTask is called when the executor launches tasks.
func (runner *TaskRunner) LaunchTask(driver executor.ExecutorDriver, taskInfo *mesosproto.TaskInfo) {
	fmt.Printf("Launching task %v with ID %v\n", taskInfo.GetName(), taskInfo.GetTaskId().GetValue())
	status := &mesosproto.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesosproto.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(status)
	if err != nil {
		fmt.Println("Send task running status error: ", err)
	}

	// TODO run job
	fmt.Println(taskInfo.GetData())

	fmt.Println("Task finished", taskInfo.GetName())
	status.State = mesosproto.TaskState_TASK_FINISHED.Enum()
	_, err = driver.SendStatusUpdate(status)
	if err != nil {
		fmt.Println("Send task finished status error: ", err)
	}
}
// LaunchTask implements the LaunchTask handler.
func (self *NebulaExecutor) LaunchTask(driver mesos_exec.ExecutorDriver, taskInfo *mesos_proto.TaskInfo) {
	fmt.Println("LaunchTask")
	runStatus := &mesos_proto.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos_proto.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}

	self.tasksLaunched++
	fmt.Println("Total tasks launched ", self.tasksLaunched)

	go func(task *mesos_proto.TaskInfo) {
		//
		// this is where one would perform the requested task
		//
		task_data_json := task.GetData()
		fmt.Printf("Received: %s\n", string(task_data_json))

		task_data := make(map[string]interface{})
		json.Unmarshal(task_data_json, &task_data)

		cmd_value := task_data["command_line"].(string)
		fmt.Printf("Running: %s\n", cmd_value)
		out, err := exec.Command("/bin/bash", "-c", cmd_value).Output()
		if err != nil {
			fmt.Println("Command error", err)
		}
		fmt.Print(string(out))

		// finish task
		fmt.Println("Finishing task", task.GetName())
		finStatus := &mesos_proto.TaskStatus{
			TaskId: task.GetTaskId(),
			State:  mesos_proto.TaskState_TASK_FINISHED.Enum(),
		}
		_, err = driver.SendStatusUpdate(finStatus)
		if err != nil {
			fmt.Println("Got error", err)
		}
	}(taskInfo)
}
func NewRiakNode(taskInfo *mesos.TaskInfo, executor *ExecutorCore) *RiakNode {
	taskData, err := common.DeserializeTaskData(taskInfo.Data)
	if err != nil {
		log.Panic("Got error", err)
	}
	log.Infof("Deserialized task data: %+v", taskData)

	mgr := metamgr.NewMetadataManager(taskData.FrameworkName, taskData.Zookeepers)

	killStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_FAILED.Enum(),
	}

	return &RiakNode{
		executor:        executor,
		taskInfo:        taskInfo,
		running:         false,
		metadataManager: mgr,
		taskData:        taskData,
		killStatus:      killStatus,
	}
}
func (e *MirrorMakerExecutor) LaunchTask(driver executor.ExecutorDriver, task *mesos.TaskInfo) {
	Logger.Infof("[LaunchTask] %s", task)

	err := json.Unmarshal(task.GetData(), &e.config)
	if err != nil {
		Logger.Errorf("Could not unmarshal json data: %s", err)
		panic(err)
	}
	Logger.Info(e.config)

	runStatus := &mesos.TaskStatus{
		TaskId: task.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}

	if _, err := driver.SendStatusUpdate(runStatus); err != nil {
		Logger.Errorf("Failed to send status update: %s", runStatus)
		os.Exit(1) //TODO not sure if we should exit in this case, but probably yes
	}

	go func() {
		e.startMirrorMaker()

		// finish task
		Logger.Infof("Finishing task %s", task.GetName())
		finStatus := &mesos.TaskStatus{
			TaskId: task.GetTaskId(),
			State:  mesos.TaskState_TASK_FINISHED.Enum(),
		}
		if _, err := driver.SendStatusUpdate(finStatus); err != nil {
			Logger.Errorf("Failed to send status update: %s", finStatus)
			os.Exit(1)
		}
		Logger.Infof("Task %s has finished", task.GetName())
	}()
}
func (e *Executor) LaunchTask(driver executor.ExecutorDriver, task *mesos.TaskInfo) {
	Logger.Infof("[LaunchTask] %s", task)

	Config.Read(task)
	transformFunc, exists := transformFunctions[Config.Transform]
	if !exists {
		Logger.Errorf("Invalid transformation mode: %s", Config.Transform)
		os.Exit(1)
	}
	transformSerializer := e.serializer(Config.Transform)
	producer, err := e.newProducer(transformSerializer) //create producer before sending the running status
	if err != nil {
		Logger.Errorf("Failed to create producer: %s", err)
		os.Exit(1)
	}

	runStatus := &mesos.TaskStatus{
		TaskId: task.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}

	if _, err := driver.SendStatusUpdate(runStatus); err != nil {
		Logger.Errorf("Failed to send status update: %s", runStatus)
		os.Exit(1) //TODO not sure if we should exit in this case, but probably yes
	}

	go func() {
		//TODO I know we want to listen to 8125 only in our case but still this should be configurable
		e.server = NewStatsDServer("0.0.0.0:8125", producer, transformFunc, e.Host)
		e.server.Start()

		// finish task
		Logger.Infof("Finishing task %s", task.GetName())
		finStatus := &mesos.TaskStatus{
			TaskId: task.GetTaskId(),
			State:  mesos.TaskState_TASK_FINISHED.Enum(),
		}
		if _, err := driver.SendStatusUpdate(finStatus); err != nil {
			Logger.Errorf("Failed to send status update: %s", finStatus)
			os.Exit(1)
		}
		Logger.Infof("Task %s has finished", task.GetName())
	}()
}
func (s *MinerScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for i, offer := range offers {
		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}
		portsResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "ports"
		})
		var ports uint64
		for _, res := range portsResources {
			port_ranges := res.GetRanges().GetRange()
			for _, port_range := range port_ranges {
				ports += port_range.GetEnd() - port_range.GetBegin()
			}
		}

		// If a miner server is running, we start a new miner daemon. Otherwise, we start a new miner server.
		tasks := make([]*mesos.TaskInfo, 0)
		if !s.minerServerRunning && mems >= MEM_PER_SERVER_TASK && cpus >= CPU_PER_SERVER_TASK && ports >= 2 {
			var taskId *mesos.TaskID
			var task *mesos.TaskInfo

			// we need two ports
			var p2pool_port uint64
			var worker_port uint64
			// A rather stupid algorithm for picking two ports
			// The difficulty here is that a range might only include one port,
			// in which case we will need to pick another port from another range.
			for _, res := range portsResources {
				r := res.GetRanges().GetRange()[0]
				begin := r.GetBegin()
				end := r.GetEnd()
				if p2pool_port == 0 {
					p2pool_port = begin
					if worker_port == 0 && (begin+1) <= end {
						worker_port = begin + 1
						break
					}
					continue
				}
				if worker_port == 0 {
					worker_port = begin
					break
				}
			}

			taskId = &mesos.TaskID{
				Value: proto.String("miner-server-" + strconv.Itoa(i)),
			}

			containerType := mesos.ContainerInfo_DOCKER
			task = &mesos.TaskInfo{
				Name:    proto.String("task-" + taskId.GetValue()),
				TaskId:  taskId,
				SlaveId: offer.SlaveId,
				Container: &mesos.ContainerInfo{
					Type: &containerType,
					Docker: &mesos.ContainerInfo_DockerInfo{
						Image: proto.String(MINER_SERVER_DOCKER_IMAGE),
					},
				},
				Command: &mesos.CommandInfo{
					Shell: proto.Bool(false),
					Arguments: []string{
						// these arguments will be passed to run_p2pool.py
						"--bitcoind-address", *bitcoindAddr,
						"--p2pool-port", strconv.Itoa(int(p2pool_port)),
						"-w", strconv.Itoa(int(worker_port)),
						s.rpc_user, s.rpc_pass,
					},
				},
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPU_PER_SERVER_TASK),
					util.NewScalarResource("mem", MEM_PER_SERVER_TASK),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			cpus -= CPU_PER_SERVER_TASK
			mems -= MEM_PER_SERVER_TASK

			// update state
			s.minerServerHostname = offer.GetHostname()
			s.minerServerRunning = true
			s.minerServerPort = int(worker_port)

			tasks = append(tasks, task)
		}

		if s.minerServerRunning && mems >= MEM_PER_DAEMON_TASK {
			var taskId *mesos.TaskID
			var task *mesos.TaskInfo

			taskId = &mesos.TaskID{
				Value: proto.String("miner-daemon-" + strconv.Itoa(i)),
			}

			containerType := mesos.ContainerInfo_DOCKER
			task = &mesos.TaskInfo{
				Name:    proto.String("task-" + taskId.GetValue()),
				TaskId:  taskId,
				SlaveId: offer.SlaveId,
				Container: &mesos.ContainerInfo{
					Type: &containerType,
					Docker: &mesos.ContainerInfo_DockerInfo{
						Image: proto.String(MINER_DAEMON_DOCKER_IMAGE),
					},
				},
				Command: &mesos.CommandInfo{
					Shell:     proto.Bool(false),
					Arguments: []string{"-o", s.minerServerHostname + ":" + strconv.Itoa(s.minerServerPort)},
				},
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", cpus),
					util.NewScalarResource("mem", MEM_PER_DAEMON_TASK),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
		}
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
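// The port-picking loop above admits its own limitation: it only looks at the
// first range of each "ports" resource, so it can miss free ports in later
// ranges. A minimal sketch of a more general helper, assuming the same
// mesos-go types as the scheduler above; pickPorts is a hypothetical name and
// is not part of that code.
//
// pickPorts walks every range of every "ports" resource and returns the first
// n ports it finds, or false if the offer cannot satisfy n.
func pickPorts(portsResources []*mesos.Resource, n int) ([]uint64, bool) {
	picked := make([]uint64, 0, n)
	for _, res := range portsResources {
		for _, r := range res.GetRanges().GetRange() {
			for p := r.GetBegin(); p <= r.GetEnd(); p++ {
				picked = append(picked, p)
				if len(picked) == n {
					return picked, true
				}
			}
		}
	}
	return picked, false
}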
func (exec *exampleExecutor) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Println("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetValue())
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}

	exec.tasksLaunched++
	fmt.Println("Total tasks launched ", exec.tasksLaunched)

	//
	// this is where one would perform the requested task
	//
	finishTask := func() {
		// finish task
		fmt.Println("Finishing task", taskInfo.GetName())
		finStatus := &mesos.TaskStatus{
			TaskId: taskInfo.GetTaskId(),
			State:  mesos.TaskState_TASK_FINISHED.Enum(),
		}
		if _, err := driver.SendStatusUpdate(finStatus); err != nil {
			fmt.Println("error sending FINISHED", err)
		}
		fmt.Println("Task finished", taskInfo.GetName())
	}

	if *slowTasks {
		starting := &mesos.TaskStatus{
			TaskId: taskInfo.GetTaskId(),
			State:  mesos.TaskState_TASK_STARTING.Enum(),
		}
		if _, err := driver.SendStatusUpdate(starting); err != nil {
			fmt.Println("error sending STARTING", err)
		}
		delay := time.Duration(rand.Intn(90)+10) * time.Second
		go func() {
			time.Sleep(delay) // TODO(jdef) add jitter
			finishTask()
		}()
	} else {
		finishTask()
	}
}
func (exec *ghsvisExecutor) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Println("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetValue())
	fmt.Println(taskInfo)
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}

	exec.tasksLaunched++
	fmt.Println("Total tasks launched ", exec.tasksLaunched)

	//
	// this is where one would perform the requested task
	//
	finishTask := func() {
		// finish task
		fmt.Println("Finishing task", taskInfo.GetName())
		finStatus := &mesos.TaskStatus{
			TaskId: taskInfo.GetTaskId(),
			State:  mesos.TaskState_TASK_FINISHED.Enum(),
		}
		if _, err := driver.SendStatusUpdate(finStatus); err != nil {
			fmt.Println("error sending FINISHED", err)
		}
		fmt.Println("Task finished", taskInfo.GetName())
	}

	starting := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_STARTING.Enum(),
	}
	if _, err := driver.SendStatusUpdate(starting); err != nil {
		fmt.Println("error sending STARTING", err)
	}

	go func() {
		running := &mesos.TaskStatus{
			TaskId: taskInfo.GetTaskId(),
			State:  mesos.TaskState_TASK_RUNNING.Enum(),
		}
		if _, err := driver.SendStatusUpdate(running); err != nil {
			fmt.Println("error sending RUNNING", err)
		}
		tick := time.Tick(5 * time.Second)
		for {
			select {
			case <-tick:
				fmt.Println("In loop", taskInfo.String())
				exec.Log(graph.GraphID{}, graph.AlgorithmID{}, taskInfo.GetTaskId().String(), "Hello")
			}
		}
		finishTask()
	}()
}
func (mExecutor *migrationExecutor) LaunchTask(driver executor.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	fmt.Printf("Launching task %v with data [%#x]\n", taskInfo.GetName(), taskInfo.Data)
	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}

	mExecutor.tasksLaunched++

	/*** run task ***/
	taskType, err := shared.GetValueFromLabels(taskInfo.Labels, shared.Tags.TASK_TYPE)
	if err != nil {
		fmt.Println("Got error", err)
	}
	url, err := shared.GetValueFromLabels(taskInfo.Labels, shared.Tags.FILESERVER_IP)
	if err != nil {
		fmt.Println("Got error", err)
	}
	containerName, err := shared.GetValueFromLabels(taskInfo.Labels, shared.Tags.CONTAINER_NAME)
	if err != nil {
		fmt.Println("Got error", err)
	}

	switch taskType {
	case shared.TaskTypes.RUN_CONTAINER:
		mExecutor.StartContainer(containerName, url)
	case shared.TaskTypes.CHECKPOINT_CONTAINER:
		mExecutor.CheckpointContainer(containerName, url)
	case shared.TaskTypes.RESTORE_CONTAINER:
		mExecutor.RestoreContainer(containerName, url)
	case shared.TaskTypes.TEST_TASK:
		mExecutor.TestRunAndKillContainer(containerName, url)
	case shared.TaskTypes.GET_LOGS:
		mExecutor.GetLogsFromContainer(containerName, url)
	}

	/*** finish task ***/
	fmt.Println("Finishing task", taskInfo.GetName())
	finStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		Labels: taskInfo.Labels,
		State:  mesos.TaskState_TASK_FINISHED.Enum(),
	}
	_, err = driver.SendStatusUpdate(finStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}
	fmt.Println("Task finished", taskInfo.GetName())
}
func (exec *exampleExecutor) LaunchTask(driver exec.ExecutorDriver, taskInfo *mesos.TaskInfo) {
	// *
	// Describes a task. Passed from the scheduler all the way to an
	// executor (see SchedulerDriver::launchTasks and
	// Executor::launchTask). Either ExecutorInfo or CommandInfo should be set.
	// A different executor can be used to launch this task, and subsequent tasks
	// meant for the same executor can reuse the same ExecutorInfo struct.
	// type TaskInfo struct {
	//
	// A TaskInfo must have either Executor or Command set.
	// In the example implementation Executor is set, but the Value does not seem
	// to be passed through correctly; it turned out to be inside the CommandInfo
	// within the Executor.
	// fmt.Println("<------------------ Executor start ------------------>")
	// https://github.com/mesos/mesos-go/blob/master/mesosproto/mesos.pb.go
	// see around type CommandInfo
	// fmt.Println("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetUris())
	// fmt.Println("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetValue())
	// fmt.Println("Launching task", taskInfo.GetName(), "with command", taskInfo.Command.GetShell())

	// ExecutorInfo
	cmd := taskInfo.Executor.GetCommand()
	fmt.Println("Launching task", taskInfo.GetName(), "with command[GetUris]", cmd.GetUris())
	fmt.Println("Launching task", taskInfo.GetName(), "with command[GetValue]", cmd.GetValue())
	fmt.Println("Launching task", taskInfo.GetName(), "with command[GetShell]", cmd.GetShell())
	fmt.Println("Launching task", taskInfo.GetName(), "with command[GetArguments]", cmd.GetArguments())
	fmt.Println("Launching task", taskInfo.GetName(), "with taskInfo.GetData()", string(taskInfo.GetData()))

	execcmd := string(taskInfo.GetData())
	fmt.Printf("execcmd = %s\n", execcmd)

	runStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_RUNNING.Enum(),
	}
	_, err := driver.SendStatusUpdate(runStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}

	exec.tasksLaunched++
	fmt.Println("Total tasks launched ", exec.tasksLaunched)

	//
	// this is where one would perform the requested task
	//
	output, err := osexec.Command("sh", "-c", execcmd).Output()
	if err != nil {
		fmt.Println("Command exec error", err)
	}
	fmt.Println("Exec output>")
	fmt.Println(string(output))

	// finish task
	fmt.Println("Finishing task", taskInfo.GetName())
	finStatus := &mesos.TaskStatus{
		TaskId: taskInfo.GetTaskId(),
		State:  mesos.TaskState_TASK_FINISHED.Enum(),
	}
	_, err = driver.SendStatusUpdate(finStatus)
	if err != nil {
		fmt.Println("Got error", err)
	}
	fmt.Println("Task finished", taskInfo.GetName())
	fmt.Println("<------------------ Executor finish ------------------>\n")
}
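// For context on the comments in the snippet above: the scheduler decides
// whether a TaskInfo carries a CommandInfo (command executor) or an
// ExecutorInfo (custom executor), and any free-form payload goes into Data.
// A minimal sketch of the custom-executor variant, assuming the mesos-go
// protobuf types and helpers already used in the scheduler snippet earlier;
// newCustomExecutorTask, executorInfo, and the resource values are
// illustrative, not taken from any of the snippets above.
func newCustomExecutorTask(i int, offer *mesos.Offer, executorInfo *mesos.ExecutorInfo, shellCmd string) *mesos.TaskInfo {
	taskId := &mesos.TaskID{Value: proto.String("exec-task-" + strconv.Itoa(i))}
	return &mesos.TaskInfo{
		Name:     proto.String("task-" + taskId.GetValue()),
		TaskId:   taskId,
		SlaveId:  offer.SlaveId,
		Executor: executorInfo,     // custom executor; its CommandInfo launches the executor binary
		Data:     []byte(shellCmd), // read back in the executor via taskInfo.GetData()
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", 0.1),
			util.NewScalarResource("mem", 32),
		},
	}
}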