Example #1
func (this *ElodinaTransportScheduler) tryKillTask(driver scheduler.SchedulerDriver, taskId *mesos.TaskID) error {
	log.Logger.Info("Trying to kill task %s", taskId.GetValue())
	var err error
	for i := 0; i <= this.config.KillTaskRetries; i++ {
		if _, err = driver.KillTask(taskId); err == nil {
			return nil
		}
	}
	return err
}
Example #2
func (this *TransformScheduler) tryKillTask(driver scheduler.SchedulerDriver, taskId *mesos.TaskID) error {
	fmt.Printf("Trying to kill task %s\n", taskId.GetValue())

	var err error
	for i := 0; i <= this.config.KillTaskRetries; i++ {
		if _, err = driver.KillTask(taskId); err == nil {
			return nil
		}
	}
	return err
}
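Examples #1 and #2 show the same bounded-retry pattern: KillTask is attempted up to KillTaskRetries+1 times and the last error is returned if every attempt fails. The sketch below is not taken from either project; it restates the pattern as a standalone helper with a short pause between attempts. The function name, the retries/delay parameters, and the use of time.Sleep are assumptions.

// Sketch only: retry driver.KillTask a bounded number of times with a fixed delay.
// Assumes the same mesos-go scheduler and mesosproto imports as the examples above.
func tryKillTaskWithDelay(driver scheduler.SchedulerDriver, taskId *mesos.TaskID, retries int, delay time.Duration) error {
	var err error
	for i := 0; i <= retries; i++ {
		if _, err = driver.KillTask(taskId); err == nil {
			return nil
		}
		time.Sleep(delay) // give the master a moment before the next attempt
	}
	return err
}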
Example #3
// KillTask is called when the executor receives a request to kill a task.
func (k *KubernetesExecutor) KillTask(driver bindings.ExecutorDriver, taskId *mesos.TaskID) {
	if k.isDone() {
		return
	}
	log.Infof("Kill task %v\n", taskId)

	if !k.isConnected() {
		//TODO(jdefelice) sent TASK_LOST here?
		log.Warningf("Ignore kill task because the executor is disconnected\n")
		return
	}

	k.lock.Lock()
	defer k.lock.Unlock()
	k.removePodTask(driver, taskId.GetValue(), messages.TaskKilled, mesos.TaskState_TASK_KILLED)
}
Example #4
// KillTask is called when the executor receives a request to kill a task.
func (k *Executor) KillTask(driver bindings.ExecutorDriver, taskId *mesos.TaskID) {
	k.killPodTask(driver, taskId.GetValue())
}
Example #5
func (e *Executor) KillTask(driver executor.ExecutorDriver, id *mesos.TaskID) {
	Logger.Infof("[KillTask] %s", id.GetValue())
	e.stop()
}
Example #6
func (this *ElodinaTransportScheduler) removeTask(id *mesos.TaskID) {
	delete(this.taskIdToTaskState, id.GetValue())
}
Example #7
func (e *Executor) KillTask(driver executor.ExecutorDriver, id *mesos.TaskID) {
	Logger.Infof("[KillTask] %s", id.GetValue())
	e.producer.Stop()
	e.close <- struct{}{}
}
Example #8
func (e *MirrorMakerExecutor) KillTask(driver executor.ExecutorDriver, id *mesos.TaskID) {
	Logger.Infof("[KillTask] %s", id.GetValue())
	e.mirrorMaker.Stop()
}
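The executor-side examples above (Examples #3 through #8) stop their local work inside KillTask but leave status reporting to other code paths. The sketch below is not from any of those projects; it shows a KillTask handler that also reports TASK_KILLED back to the driver. ExampleExecutor, e.stop(), and Logger are placeholders, and util here refers to mesos-go's mesosutil package (imported as util in Example #9).

// Sketch only: stop the task's work, then explicitly report TASK_KILLED.
func (e *ExampleExecutor) KillTask(driver executor.ExecutorDriver, id *mesos.TaskID) {
	Logger.Infof("[KillTask] %s", id.GetValue())
	e.stop() // placeholder for whatever shutdown the executor needs

	status := util.NewTaskStatus(id, mesos.TaskState_TASK_KILLED)
	if _, err := driver.SendStatusUpdate(status); err != nil {
		Logger.Infof("Failed to send TASK_KILLED update: %s", err)
	}
}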
Example #9
func (s *MinerScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for i, offer := range offers {
		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		portsResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "ports"
		})
		var ports uint64
		for _, res := range portsResources {
			port_ranges := res.GetRanges().GetRange()
			for _, port_range := range port_ranges {
				// Mesos port ranges are inclusive on both ends, so each range holds end-begin+1 ports.
				ports += port_range.GetEnd() - port_range.GetBegin() + 1
			}
		}

		// If a miner server is running, we start a new miner daemon.  Otherwise, we start a new miner server.
		tasks := make([]*mesos.TaskInfo, 0)
		if !s.minerServerRunning && mems >= MEM_PER_SERVER_TASK && cpus >= CPU_PER_SERVER_TASK && ports >= 2 {
			var taskId *mesos.TaskID
			var task *mesos.TaskInfo

			// we need two ports
			var p2pool_port uint64
			var worker_port uint64
			// A rather stupid algorithm for picking two ports
			// The difficulty here is that a range might only include one port,
			// in which case we will need to pick another port from another range.
			for _, res := range portsResources {
				r := res.GetRanges().GetRange()[0]
				begin := r.GetBegin()
				end := r.GetEnd()
				if p2pool_port == 0 {
					p2pool_port = begin
					if worker_port == 0 && (begin+1) <= end {
						worker_port = begin + 1
						break
					}
					continue
				}
				if worker_port == 0 {
					worker_port = begin
					break
				}
			}

			taskId = &mesos.TaskID{
				Value: proto.String("miner-server-" + strconv.Itoa(i)),
			}

			containerType := mesos.ContainerInfo_DOCKER
			task = &mesos.TaskInfo{
				Name:    proto.String("task-" + taskId.GetValue()),
				TaskId:  taskId,
				SlaveId: offer.SlaveId,
				Container: &mesos.ContainerInfo{
					Type: &containerType,
					Docker: &mesos.ContainerInfo_DockerInfo{
						Image: proto.String(MINER_SERVER_DOCKER_IMAGE),
					},
				},
				Command: &mesos.CommandInfo{
					Shell: proto.Bool(false),
					Arguments: []string{
						// these arguments will be passed to run_p2pool.py
						"--bitcoind-address", *bitcoindAddr,
						"--p2pool-port", strconv.Itoa(int(p2pool_port)),
						"-w", strconv.Itoa(int(worker_port)),
						s.rpc_user, s.rpc_pass,
					},
				},
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPU_PER_SERVER_TASK),
					util.NewScalarResource("mem", MEM_PER_SERVER_TASK),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			cpus -= CPU_PER_SERVER_TASK
			mems -= MEM_PER_SERVER_TASK

			// update state
			s.minerServerHostname = offer.GetHostname()
			s.minerServerRunning = true
			s.minerServerPort = int(worker_port)

			tasks = append(tasks, task)
		}

		if s.minerServerRunning && mems >= MEM_PER_DAEMON_TASK {
			var taskId *mesos.TaskID
			var task *mesos.TaskInfo

			taskId = &mesos.TaskID{
				Value: proto.String("miner-daemon-" + strconv.Itoa(i)),
			}

			containerType := mesos.ContainerInfo_DOCKER
			task = &mesos.TaskInfo{
				Name:    proto.String("task-" + taskId.GetValue()),
				TaskId:  taskId,
				SlaveId: offer.SlaveId,
				Container: &mesos.ContainerInfo{
					Type: &containerType,
					Docker: &mesos.ContainerInfo_DockerInfo{
						Image: proto.String(MINER_DAEMON_DOCKER_IMAGE),
					},
				},
				Command: &mesos.CommandInfo{
					Shell:     proto.Bool(false),
					Arguments: []string{"-o", s.minerServerHostname + ":" + strconv.Itoa(s.minerServerPort)},
				},
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", cpus),
					util.NewScalarResource("mem", MEM_PER_DAEMON_TASK),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
		}

		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
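The port-picking loop in Example #9 is described in its own comments as crude: it only inspects the first range of each ports resource and can miss a second port when a range holds a single value. The sketch below is not part of the example; it walks every offered range until two ports are collected. The function name is an assumption, and it relies only on the mesosproto range accessors already used above (ranges are inclusive on both ends).

// Sketch only: collect two ports from the offered "ports" resources.
func pickTwoPorts(portsResources []*mesos.Resource) (first, second uint64, ok bool) {
	picked := make([]uint64, 0, 2)
	for _, res := range portsResources {
		for _, r := range res.GetRanges().GetRange() {
			for p := r.GetBegin(); p <= r.GetEnd(); p++ {
				picked = append(picked, p)
				if len(picked) == 2 {
					return picked[0], picked[1], true
				}
			}
		}
	}
	return 0, 0, false
}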