Example #1
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {

	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with cpus=", cpus, " mem=", mems)

		remainingCpus := cpus
		remainingMems := mems

		var tasks []*mesos.TaskInfo
		for sched.tasksLaunched < sched.totalTasks &&
			CPUS_PER_TASK <= remainingCpus &&
			MEM_PER_TASK <= remainingMems {

			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			task := &mesos.TaskInfo{
				Name:     proto.String("go-task-" + taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: sched.executor,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPUS_PER_TASK),
					util.NewScalarResource("mem", MEM_PER_TASK),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= CPUS_PER_TASK
			remainingMems -= MEM_PER_TASK
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
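
Every example on this page leans on util.FilterResources from the mesos-go mesosutil package: it takes a slice of resources and a predicate and returns only the matching resources. As a point of reference, here is a minimal sketch of such a helper, assuming nothing beyond the signature visible in the calls above; the real library implementation may differ in detail.

// filterResources returns the resources for which the predicate reports true.
// Sketch only -- it mirrors how util.FilterResources is used in these examples
// and assumes the same mesos (mesosproto) import used throughout this page.
func filterResources(resources []*mesos.Resource, predicate func(*mesos.Resource) bool) []*mesos.Resource {
	matched := []*mesos.Resource{}
	for _, res := range resources {
		if predicate(res) {
			matched = append(matched, res)
		}
	}
	return matched
}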
Example #2
func (sched *ReflexScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		// CPUs
		cpuResources := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "cpus"
			},
		)
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		// Mem
		memResources := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "mem"
			},
		)
		mem := 0.0
		for _, res := range memResources {
			mem += res.GetScalar().GetValue()
		}

		logrus.WithFields(logrus.Fields{
			"cpus": cpus,
			"mem":  mem,
		}).Debug("got offer")

		for _, pair := range sched.waitingPairs {
			if pair.InProgress {
				continue
			}

			task := pair.Task
			if cpus >= task.CPU && mem >= task.Mem {
				logrus.WithField("task", task).Info("would have launched a task")
			}
		}

		driver.DeclineOffer(offer.GetId(), new(mesos.Filters))
	}
}
Example #3
func resourcesHaveVolume(resources []*mesos.Resource, persistenceId string) bool {
	filtered := util.FilterResources(resources, func(res *mesos.Resource) bool {
		return res.GetName() == "disk" &&
			res.Reservation != nil &&
			res.Disk != nil &&
			res.Disk.Persistence.GetId() == persistenceId
	})
	return len(filtered) > 0
}
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		taskId := &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("basicdocker-task-%d", time.Now().Unix())),
		}

		ports := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "ports"
			},
		)
		if len(ports) == 0 || len(ports[0].GetRanges().GetRange()) == 0 {
			return
		}
		task := &mesos.TaskInfo{
			Name:    proto.String(taskId.GetValue()),
			TaskId:  taskId,
			SlaveId: offer.SlaveId,
			Container: &mesos.ContainerInfo{
				Type:     mesos.ContainerInfo_DOCKER.Enum(),
				Volumes:  nil,
				Hostname: nil,
				Docker: &mesos.ContainerInfo_DockerInfo{
					Image:   &DOCKER_IMAGE_DEFAULT,
					Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
				},
			},
			Command: &mesos.CommandInfo{
				Shell: proto.Bool(true),
				Value: proto.String("set -x ; /bin/date ; /bin/hostname ; sleep 200 ; echo done"),
			},
			Executor: nil,
			Resources: []*mesos.Resource{
				util.NewScalarResource("cpus", getOfferCpu(offer)),
				util.NewScalarResource("mem", getOfferMem(offer)),
				util.NewRangesResource("ports", []*mesos.Value_Range{
					util.NewValueRange(
						*ports[0].GetRanges().GetRange()[0].Begin,
						*ports[0].GetRanges().GetRange()[0].Begin+1,
					),
				}),
			},
		}

		log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

		tasks := []*mesos.TaskInfo{task}
		log.Infoln("Launching ", len(tasks), " tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		sched.tasksLaunched++
		time.Sleep(time.Second)
	}
}
Example #5
func getScalarResources(offer *mesos.Offer, resourceName string) float64 {
	resources := 0.0
	filteredResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
		return res.GetName() == resourceName
	})
	for _, res := range filteredResources {
		resources += res.GetScalar().GetValue()
	}
	return resources
}
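
getScalarResources packages the filter-and-sum pattern from the schedulers above into a single helper. A hypothetical call site (offer is assumed to be an incoming *mesos.Offer, as in the other examples):

cpus := getScalarResources(offer, "cpus")
mems := getScalarResources(offer, "mem")
log.Infoln("Offer <", offer.Id.GetValue(), "> has cpus=", cpus, " mem=", mems)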
Example #6
func getPersistenceIds(resources []*mesos.Resource) []string {
	filtered := util.FilterResources(resources, func(res *mesos.Resource) bool {
		return res.GetName() == "disk" && res.Reservation != nil && res.Disk != nil
	})
	val := []string{}
	for _, res := range filtered {
		val = append(val, res.Disk.Persistence.GetId())
	}
	return val
}
Example #7
func FilterReservedVolumes(immutableResources []*mesos.Resource) []*mesos.Resource {
	return util.FilterResources(immutableResources, func(res *mesos.Resource) bool {
		if res.Reservation != nil &&
			res.Disk != nil &&
			res.GetName() == "disk" {
			return true
		}
		return false
	})
}
Example #8
func ScalarResourceVal(name string, resources []*mesos.Resource) float64 {
	scalarResources := util.FilterResources(resources, func(res *mesos.Resource) bool {
		return res.GetType() == mesos.Value_SCALAR && res.GetName() == name
	})
	sum := 0.0
	for _, res := range scalarResources {
		sum += res.GetScalar().GetValue()
	}
	return sum
}
Example #9
func getRangeResources(offer *mesos.Offer, resourceName string) []*mesos.Value_Range {
	resources := make([]*mesos.Value_Range, 0)
	filteredResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
		return res.GetName() == resourceName
	})
	for _, res := range filteredResources {
		resources = append(resources, res.GetRanges().GetRange()...)
	}
	return resources
}
Example #10
func portIter(resources []*mesos.Resource) chan int64 {
	ports := make(chan int64)
	go func() {
		defer close(ports)
		for _, resource := range util.FilterResources(resources, func(res *mesos.Resource) bool { return res.GetName() == "ports" }) {
			for _, port := range common.RangesToArray(resource.GetRanges().GetRange()) {
				ports <- port
			}
		}
	}()
	return ports
}
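
portIter turns the offered port ranges into a channel that the producing goroutine closes once every range has been drained, so a caller can simply range over it. A hypothetical consumer (offer is assumed, as elsewhere on this page):

total := 0
for port := range portIter(offer.Resources) {
	log.Infoln("offered port", port)
	total++
}
log.Infoln("offer contains", total, "ports")

Because the channel is unbuffered, breaking out of the loop early would leave the producing goroutine blocked on its next send, so consumers should drain the channel completely.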
Example #11
func getOfferScalar(offer *mesos.Offer, name string) float64 {
	resources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
		return res.GetName() == name
	})

	value := 0.0
	for _, res := range resources {
		value += res.GetScalar().GetValue()
	}

	return value
}
Example #12
func TestBadCPUAsk(t *testing.T) {
	assert := assert.New(t)
	offer2 := generateResourceOffer()
	offer := generateResourceOffer()
	askFun := AskForCPU(3.1)
	newOffer, _, success := askFun(offer)
	assert.Equal(false, success)
	assert.Equal(offer, offer2)
	cpuResources := util.FilterResources(newOffer, func(res *mesos.Resource) bool {
		return res.GetName() == "cpus"
	})
	assert.Equal(3.0, cpuResources[0].Scalar.GetValue())
}
Example #13
func getPorts(resources []*mesos.Resource, withReservation bool) []int64 {
	filtered := util.FilterResources(resources, func(res *mesos.Resource) bool {
		if withReservation {
			return res.GetName() == "ports" && res.Reservation != nil
		}
		return res.GetName() == "ports" && res.Reservation == nil
	})
	val := []int64{}
	for _, res := range filtered {
		val = append(val, RangesToArray(res.GetRanges().GetRange())...)
	}
	return val
}
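
Both portIter (via common.RangesToArray) and getPorts (via RangesToArray) assume a helper that expands Mesos value ranges into the individual int64 ports they cover. That helper is not shown on this page; below is a minimal sketch under the assumption that the ranges are inclusive on both ends, as in the Mesos protobufs.

// rangesToArray expands inclusive value ranges into the individual values they contain.
// Hypothetical stand-in for the RangesToArray helper used in the examples above.
func rangesToArray(ranges []*mesos.Value_Range) []int64 {
	values := []int64{}
	for _, r := range ranges {
		for v := r.GetBegin(); v <= r.GetEnd(); v++ {
			values = append(values, v)
		}
	}
	return values
}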
Example #14
func getResource(name string, resources []*mesos.Resource, withReservation bool) float64 {
	filtered := util.FilterResources(resources, func(res *mesos.Resource) bool {
		if withReservation {
			return res.GetName() == name && res.Reservation != nil
		}
		return res.GetName() == name && res.Reservation == nil
	})
	val := 0.0
	for _, res := range filtered {
		val += res.GetScalar().GetValue()
	}
	return val
}
Example #15
func TestGoodPortAsk(t *testing.T) {
	rand.Seed(10)
	assert := assert.New(t)
	offer := generateResourceOffer()
	askFun := AskForPorts(100)
	remaining, resourceAsk, success := askFun(offer)
	assert.Equal(true, success)
	assert.Equal(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31105, 31204)}), resourceAsk)
	remainingPorts := util.FilterResources(remaining, func(res *mesos.Resource) bool {
		return res.GetName() == "ports"
	})
	assert.Equal([]*mesos.Resource{util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 31104), util.NewValueRange(31205, 32000)})}, remainingPorts)

}
Example #16
func TestGoodMemoryAsk(t *testing.T) {
	assert := assert.New(t)
	offer2 := generateResourceOffer()
	offer := generateResourceOffer()
	askFun := AskForMemory(100)
	newOffer, resourceAsk, success := askFun(offer)
	assert.Equal(true, success)
	assert.Equal(util.NewScalarResource("mem", 100), resourceAsk)
	assert.Equal(offer, offer2)
	memResources := util.FilterResources(newOffer, func(res *mesos.Resource) bool {
		return res.GetName() == "mem"
	})
	assert.Equal(1885.0, memResources[0].Scalar.GetValue())
}
Example #17
func TestTotalCPUAsk(t *testing.T) {
	assert := assert.New(t)
	offer2 := generateResourceOffer()
	offer := generateResourceOffer()
	askFun := AskForCPU(3)
	newOffer, resourceAsk, success := askFun(offer)
	assert.Equal(true, success)
	assert.Equal(util.NewScalarResource("cpus", 3), resourceAsk)
	assert.Equal(offer, offer2)
	cpuResources := util.FilterResources(newOffer, func(res *mesos.Resource) bool {
		return res.GetName() == "cpus"
	})
	assert.Equal(0.0, cpuResources[0].Scalar.GetValue())
}
Example #18
func parseOffer(offer *mesos.Offer) OfferResources {
	getResources := func(resourceName string) []*mesos.Resource {
		return util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == resourceName
			},
		)
	}

	cpuResources := getResources("cpus")
	cpus := 0.0
	for _, res := range cpuResources {
		cpus += res.GetScalar().GetValue()
	}

	memResources := getResources("mem")
	mems := 0.0
	for _, res := range memResources {
		mems += res.GetScalar().GetValue()
	}

	portResources := getResources("ports")
	ports := make([]*mesos.Value_Range, 0, 10)
	for _, res := range portResources {
		ranges := res.GetRanges()
		ports = append(ports, ranges.GetRange()...)
	}

	diskResources := getResources("disk")
	disk := 0.0
	for _, res := range diskResources {
		disk += res.GetScalar().GetValue()
	}

	return OfferResources{
		cpus:  cpus,
		mems:  mems,
		disk:  disk,
		ports: ports,
	}
}
Example #19
File: scheduler.go Project: cbbs/swarm
func (driver *MesosSchedulerDriver) AcceptOffers(offerIds []*mesos.OfferID, operations []*mesos.Offer_Operation, filters *mesos.Filters) (mesos.Status, error) {
	driver.eventLock.Lock()
	defer driver.eventLock.Unlock()

	if stat := driver.status; stat != mesos.Status_DRIVER_RUNNING {
		return stat, fmt.Errorf("Unable to AcceptOffers, expected driver status %s, but got %s", mesos.Status_DRIVER_RUNNING, stat)
	}

	if !driver.connected {
		err := fmt.Errorf("Not connected to master.")
		for _, operation := range operations {
			if *operation.Type == mesos.Offer_Operation_LAUNCH {
				for _, task := range operation.Launch.TaskInfos {
					driver.pushLostTask(task, "Unable to launch tasks: "+err.Error())
				}
			}
		}
		log.Errorf("Failed to send LaunchTask message: %v\n", err)
		return driver.status, err
	}

	okOperations := make([]*mesos.Offer_Operation, 0, len(operations))

	for _, offerId := range offerIds {
		for _, operation := range operations {
			// Keep only the slave PIDs where we run tasks so we can send
			// framework messages directly.
			if !driver.cache.containsOffer(offerId) {
				log.Warningf("Attempting to accept offers with unknown offer %s\n", offerId.GetValue())
				continue
			}

			// Validate
			switch *operation.Type {
			case mesos.Offer_Operation_LAUNCH:
				tasks := []*mesos.TaskInfo{}
				// Set TaskInfo.executor.framework_id, if it's missing.
				for _, task := range operation.Launch.TaskInfos {
					newTask := *task
					if newTask.Executor != nil && newTask.Executor.FrameworkId == nil {
						newTask.Executor.FrameworkId = driver.frameworkInfo.Id
					}
					tasks = append(tasks, &newTask)
				}
				for _, task := range tasks {
					if driver.cache.getOffer(offerId).offer.SlaveId.Equal(task.SlaveId) {
						// cache the tasked slave, for future communication
						pid := driver.cache.getOffer(offerId).slavePid
						driver.cache.putSlavePid(task.SlaveId, pid)
					} else {
						log.Warningf("Attempting to launch task %s with the wrong slaveId offer %s\n", task.TaskId.GetValue(), task.SlaveId.GetValue())
					}
				}
				operation.Launch.TaskInfos = tasks
				okOperations = append(okOperations, operation)
			case mesos.Offer_Operation_RESERVE:
				// Only send reserved resources
				filtered := util.FilterResources(operation.Reserve.Resources, func(res *mesos.Resource) bool { return res.Reservation != nil })
				operation.Reserve.Resources = filtered
				okOperations = append(okOperations, operation)
			case mesos.Offer_Operation_UNRESERVE:
				// Only send reserved resources
				filtered := util.FilterResources(operation.Unreserve.Resources, func(res *mesos.Resource) bool { return res.Reservation != nil })
				operation.Unreserve.Resources = filtered
				okOperations = append(okOperations, operation)
			case mesos.Offer_Operation_CREATE:
				// Only send reserved disk resources with volumes
				filtered := util.FilterResources(operation.Create.Volumes, func(res *mesos.Resource) bool {
					return res.Reservation != nil && res.Disk != nil && res.GetName() == "disk"
				})
				operation.Create.Volumes = filtered
				okOperations = append(okOperations, operation)
			case mesos.Offer_Operation_DESTROY:
				// Only send reserved disk resources with volumes
				filtered := util.FilterResources(operation.Destroy.Volumes, func(res *mesos.Resource) bool {
					return res.Reservation != nil && res.Disk != nil && res.GetName() == "disk"
				})
				operation.Destroy.Volumes = filtered
				okOperations = append(okOperations, operation)
			}
		}

		driver.cache.removeOffer(offerId) // the offer has been processed; drop it from the cache
	}

	// Accept Offers
	message := &mesos.Call{
		FrameworkId: driver.frameworkInfo.Id,
		Type:        mesos.Call_ACCEPT.Enum(),
		Accept: &mesos.Call_Accept{
			OfferIds:   offerIds,
			Operations: okOperations,
			Filters:    filters,
		},
	}

	if err := driver.send(driver.masterPid, message); err != nil {
		for _, operation := range operations {
			if *operation.Type == mesos.Offer_Operation_LAUNCH {
				for _, task := range operation.Launch.TaskInfos {
					driver.pushLostTask(task, "Unable to launch tasks: "+err.Error())
				}
			}
		}
		log.Errorf("Failed to send LaunchTask message: %v\n", err)
		return driver.status, err
	}

	return driver.status, nil
}
Example #20
File: main.go Project: hirolovesbeer/sdc
func (sched *SdcScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	if sched.tasksLaunched >= sched.totalTasks {
		log.Info("decline all of the offers since all of our tasks are already launched")
		log.Infoln("sched.totalTasks ", sched.totalTasks)
		log.Infoln("sched.tasksFinished ", sched.tasksFinished)
		log.Infoln("sched.tasksLaunched ", sched.tasksLaunched)

		// Pop Command.Arguments from cmdQueue.
		// Eventually this should be replaced so the arguments are fetched from an external queue.
		if sched.totalTasks == 0 && sched.tasksFinished == 0 && sched.tasksLaunched == 0 && cmdQueue.Len() != 0 {
			execinfo := cmdQueue.Remove(cmdQueue.Front()).(*mesos.ExecutorInfo)
			log.Infoln("execinfo ", execinfo.Command.Arguments)

			sched.totalTasks = len(execinfo.Command.Arguments)
			sched.executor.Command.Arguments = execinfo.Command.Arguments
		}

		if sched.totalTasks == 0 && sched.tasksFinished == 0 && sched.tasksLaunched == 0 {
			ids := make([]*mesos.OfferID, len(offers))
			for i, offer := range offers {
				ids[i] = offer.Id
			}
			driver.LaunchTasks(ids, []*mesos.TaskInfo{}, &mesos.Filters{RefuseSeconds: proto.Float64(5)})
			return
		}
	}

	log.Info("prepare pass args: ", sched.executor.Command.Arguments)
	cmds := sched.executor.Command.Arguments
	for _, v := range cmds {
		fmt.Println("v = ", v)
	}

	// [/bin/cat /var/tmp/1.txt /var/tmp/2.txt /var/tmp/3.txt | /bin/grep abe > /var/tmp/grep-result.txt]
	//
	// rebuild args
	// 1. /bin/cat /var/tmp/1.txt >> /var/tmp/intermediate.txt
	// 2. /bin/cat /var/tmp/2.txt >> /var/tmp/intermediate.txt
	// 3. /bin/cat /var/tmp/3.txt >> /var/tmp/intermediate.txt
	// 4. /bin/cat /var/tmp/intermediate.txt | /bin/grep abe > /var/tmp/grep-result.txt
	// 5. /bin/rm /var/tmp/intermediate.txt

	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with cpus=", cpus, " mem=", mems)

		remainingCpus := cpus
		remainingMems := mems

		var tasks []*mesos.TaskInfo

		// For a pipeline like `$ cat 1.txt 2.txt 3.txt | wc -l`,
		// the downstream task may need to be brought up first.
		//
		// The proof-of-concept keeps things simple and uses the
		// intermediate-file approach, slow as it is.

		for sched.tasksLaunched < sched.totalTasks &&
			CPUS_PER_TASK <= remainingCpus &&
			MEM_PER_TASK <= remainingMems {

			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			// rewrite the executor id
			sched.executor.ExecutorId = util.NewExecutorID(taskId.GetValue())

			log.Infof("sched.tasksLaunched = %d\n", sched.tasksLaunched)
			log.Infof("sched.totalTasks = %d\n", sched.totalTasks)
			log.Infof("sched.executor.Command.Value = %s\n", sched.executor.Command.GetValue())
			log.Infof("sched.executor.GetExecutorId() = %s\n", sched.executor.GetExecutorId())

			// Rewriting sched.executor.Command.Arguments directly would not work: the executor is
			// a pointer, so when LaunchTasks bundles several tasks together they all end up seeing
			// whatever value was written last. To keep the Arguments from being shared across
			// tasks, build a separate ExecutorInfo object for each task before launching it.
			exec := &mesos.ExecutorInfo{
				ExecutorId: sched.executor.GetExecutorId(),
				Name:       proto.String(sched.executor.GetName()),
				Source:     proto.String(sched.executor.GetSource()),
				Command: &mesos.CommandInfo{
					Value: proto.String(sched.executor.Command.GetValue()),
					Uris:  sched.executor.Command.GetUris(),
				},
			}

			cmd := cmds[sched.tasksLaunched-1]
			log.Infof("cmd = %s\n", cmd)
			// Passing the command line via Arguments makes Mesos treat this as a different
			// executor, which causes all sorts of trouble. The following is an example of
			// what NOT to do:
			// exec.Command.Arguments = strings.Split(cmd, " ")

			task := &mesos.TaskInfo{
				Name:     proto.String("go-task-" + taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: exec,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPUS_PER_TASK),
					util.NewScalarResource("mem", MEM_PER_TASK),
				},
				// The command line to execute is passed via the Data parameter.
				Data: []byte(cmd),
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= CPUS_PER_TASK
			remainingMems -= MEM_PER_TASK
		}

		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(5)})
	}
}
Example #21
func (s *MinerScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for i, offer := range offers {
		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		portsResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "ports"
		})
		var ports uint64
		for _, res := range portsResources {
			port_ranges := res.GetRanges().GetRange()
			for _, port_range := range port_ranges {
				ports += port_range.GetEnd() - port_range.GetBegin() + 1 // ranges are inclusive
			}
		}

		// If a miner server is running, we start a new miner daemon.  Otherwise, we start a new miner server.
		tasks := make([]*mesos.TaskInfo, 0)
		if !s.minerServerRunning && mems >= MEM_PER_SERVER_TASK && cpus >= CPU_PER_SERVER_TASK && ports >= 2 {
			var taskId *mesos.TaskID
			var task *mesos.TaskInfo

			// we need two ports
			var p2pool_port uint64
			var worker_port uint64
			// A rather stupid algorithm for picking two ports
			// The difficulty here is that a range might only include one port,
			// in which case we will need to pick another port from another range.
			for _, res := range portsResources {
				r := res.GetRanges().GetRange()[0]
				begin := r.GetBegin()
				end := r.GetEnd()
				if p2pool_port == 0 {
					p2pool_port = begin
					if worker_port == 0 && (begin+1) <= end {
						worker_port = begin + 1
						break
					}
					continue
				}
				if worker_port == 0 {
					worker_port = begin
					break
				}
			}

			taskId = &mesos.TaskID{
				Value: proto.String("miner-server-" + strconv.Itoa(i)),
			}

			containerType := mesos.ContainerInfo_DOCKER
			task = &mesos.TaskInfo{
				Name:    proto.String("task-" + taskId.GetValue()),
				TaskId:  taskId,
				SlaveId: offer.SlaveId,
				Container: &mesos.ContainerInfo{
					Type: &containerType,
					Docker: &mesos.ContainerInfo_DockerInfo{
						Image: proto.String(MINER_SERVER_DOCKER_IMAGE),
					},
				},
				Command: &mesos.CommandInfo{
					Shell: proto.Bool(false),
					Arguments: []string{
						// these arguments will be passed to run_p2pool.py
						"--bitcoind-address", *bitcoindAddr,
						"--p2pool-port", strconv.Itoa(int(p2pool_port)),
						"-w", strconv.Itoa(int(worker_port)),
						s.rpc_user, s.rpc_pass,
					},
				},
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPU_PER_SERVER_TASK),
					util.NewScalarResource("mem", MEM_PER_SERVER_TASK),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			cpus -= CPU_PER_SERVER_TASK
			mems -= MEM_PER_SERVER_TASK

			// update state
			s.minerServerHostname = offer.GetHostname()
			s.minerServerRunning = true
			s.minerServerPort = int(worker_port)

			tasks = append(tasks, task)
		}

		if s.minerServerRunning && mems >= MEM_PER_DAEMON_TASK {
			var taskId *mesos.TaskID
			var task *mesos.TaskInfo

			taskId = &mesos.TaskID{
				Value: proto.String("miner-daemon-" + strconv.Itoa(i)),
			}

			containerType := mesos.ContainerInfo_DOCKER
			task = &mesos.TaskInfo{
				Name:    proto.String("task-" + taskId.GetValue()),
				TaskId:  taskId,
				SlaveId: offer.SlaveId,
				Container: &mesos.ContainerInfo{
					Type: &containerType,
					Docker: &mesos.ContainerInfo_DockerInfo{
						Image: proto.String(MINER_DAEMON_DOCKER_IMAGE),
					},
				},
				Command: &mesos.CommandInfo{
					Shell:     proto.Bool(false),
					Arguments: []string{"-o", s.minerServerHostname + ":" + strconv.Itoa(s.minerServerPort)},
				},
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", cpus),
					util.NewScalarResource("mem", MEM_PER_DAEMON_TASK),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
		}

		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
Example #22
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {

	if *slowLaunch {
		time.Sleep(3 * time.Second)
	}

	if (sched.tasksLaunched - sched.tasksErrored) >= sched.totalTasks {
		log.Info("decline all of the offers since all of our tasks are already launched")
		ids := make([]*mesos.OfferID, len(offers))
		for i, offer := range offers {
			ids[i] = offer.Id
		}
		driver.LaunchTasks(ids, []*mesos.TaskInfo{}, &mesos.Filters{RefuseSeconds: proto.Float64(120)})
		return
	}
	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with cpus=", cpus, " mem=", mems)

		remainingCpus := cpus
		remainingMems := mems

		// account for executor resources if there's not an executor already running on the slave
		if len(offer.ExecutorIds) == 0 {
			remainingCpus -= CPUS_PER_EXECUTOR
			remainingMems -= MEM_PER_EXECUTOR
		}

		var tasks []*mesos.TaskInfo
		for (sched.tasksLaunched-sched.tasksErrored) < sched.totalTasks &&
			CPUS_PER_TASK <= remainingCpus &&
			MEM_PER_TASK <= remainingMems {

			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			task := &mesos.TaskInfo{
				Name:     proto.String("go-task-" + taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: sched.executor,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPUS_PER_TASK),
					util.NewScalarResource("mem", MEM_PER_TASK),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= CPUS_PER_TASK
			remainingMems -= MEM_PER_TASK
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(5)})
	}
}
Example #23
func FilterReservedResources(immutableResources []*mesos.Resource) []*mesos.Resource {
	return util.FilterResources(immutableResources, func(res *mesos.Resource) bool {
		return res.Reservation != nil && res.GetRole() != ""
	})
}
Example #24
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {

	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with cpus=", cpus, " mem=", mems)

		remainingCpus := cpus
		remainingMems := mems
		var tasks []*mesos.TaskInfo
		queueItems := getQueue(sched)
		if len(queueItems) == 0 {
			fmt.Println("Queue is empty!")
		}

		// start a task of each item in the queue. before starting, check
		// if it has not already been launched.
		for i, item := range queueItems {
			if val, ok := launchedTasks[item.Commit.Sha]; ok {
				fmt.Println("item exists: ", val)
				continue
			}

			fmt.Println("Adding item: ", item.Commit.Sha)
			launchedTasks[item.Commit.Sha] = item.Commit.Sha

			if CPUS_PER_TASK <= remainingCpus && MEM_PER_TASK <= remainingMems {

				sched.tasksLaunched++

				taskId := &mesos.TaskID{
					Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
				}

				stringArray := []string{"-addr=" + *droneServerIP, "-token=1"}
				dataString := strings.Join(stringArray, " ")

				task := &mesos.TaskInfo{
					Name:     proto.String("go-task-" + taskId.GetValue()),
					TaskId:   taskId,
					SlaveId:  offer.SlaveId,
					Executor: sched.executor,
					Data:     []byte(dataString),
					Resources: []*mesos.Resource{
						util.NewScalarResource("cpus", CPUS_PER_TASK),
						util.NewScalarResource("mem", MEM_PER_TASK),
					},
				}

				log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

				tasks = append(tasks, task)
				remainingCpus -= CPUS_PER_TASK
				remainingMems -= MEM_PER_TASK
			}

		}

		//		for sched.tasksLaunched < sched.totalTasks &&
		//		CPUS_PER_TASK <= remainingCpus &&
		//		MEM_PER_TASK <= remainingMems {
		//
		//			sched.tasksLaunched++
		//
		//			taskId := &mesos.TaskID{
		//				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
		//			}
		//
		//			stringArray := []string{*droneServerIP, "-token=1"}
		//			dataString := strings.Join(stringArray, " ")
		//
		//			task := &mesos.TaskInfo{
		//				Name:     proto.String("go-task-" + taskId.GetValue()),
		//				TaskId:   taskId,
		//				SlaveId:  offer.SlaveId,
		//				Executor: sched.executor,
		//				Data: []byte(dataString),
		//				Resources: []*mesos.Resource{
		//					util.NewScalarResource("cpus", CPUS_PER_TASK),
		//					util.NewScalarResource("mem", MEM_PER_TASK),
		//				},
		//			}
		//
		//			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())
		//
		//			tasks = append(tasks, task)
		//			remainingCpus -= CPUS_PER_TASK
		//			remainingMems -= MEM_PER_TASK
		//		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
Example #25
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		operations := []*mesos.Offer_Operation{}
		resourcesToCreate := []*mesos.Resource{}
		resourcesToDestroy := []*mesos.Resource{}
		resourcesToReserve := []*mesos.Resource{}
		resourcesToUnreserve := []*mesos.Resource{}
		taskInfosToLaunch := []*mesos.TaskInfo{}
		totalUnreserved := 0

		unreservedCpus, unreservedMem, unreservedDisk := getUnreservedResources(offer.Resources)
		reservedCpus, reservedMem, reservedDisk := getReservedResources(offer.Resources)

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with unreserved cpus=", unreservedCpus, " mem=", unreservedMem, " disk=", unreservedDisk)
		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with reserved cpus=", reservedCpus, " mem=", reservedMem, " disk=", reservedDisk)

		for _, task := range sched.tasks {
			switch task.state {
			case InitState:
				if CPUS_PER_TASK <= unreservedCpus &&
					MEM_PER_TASK <= unreservedMem &&
					DISK_PER_TASK <= unreservedDisk {

					resourcesToReserve = append(resourcesToReserve, []*mesos.Resource{
						util.NewScalarResourceWithReservation("cpus", CPUS_PER_TASK, *mesosAuthPrincipal, *role),
						util.NewScalarResourceWithReservation("mem", MEM_PER_TASK, *mesosAuthPrincipal, *role),
						util.NewScalarResourceWithReservation("disk", DISK_PER_TASK, *mesosAuthPrincipal, *role),
					}...)
					resourcesToCreate = append(resourcesToCreate,
						util.NewVolumeResourceWithReservation(DISK_PER_TASK, task.containerPath, task.persistenceId, mesos.Volume_RW.Enum(), *mesosAuthPrincipal, *role))
					task.state = RequestedReservationState
					unreservedCpus = unreservedCpus - CPUS_PER_TASK
					unreservedMem = unreservedMem - MEM_PER_TASK
					unreservedDisk = unreservedDisk - DISK_PER_TASK
				}
			case RequestedReservationState:
				if CPUS_PER_TASK <= reservedCpus &&
					MEM_PER_TASK <= reservedMem &&
					DISK_PER_TASK <= reservedDisk &&
					resourcesHaveVolume(offer.Resources, task.persistenceId) {

					taskId := &mesos.TaskID{
						Value: proto.String(task.name),
					}

					taskInfo := &mesos.TaskInfo{
						Name:     proto.String("go-task-" + taskId.GetValue()),
						TaskId:   taskId,
						SlaveId:  offer.SlaveId,
						Executor: task.executor,
						Resources: []*mesos.Resource{
							util.NewScalarResourceWithReservation("cpus", CPUS_PER_TASK, *mesosAuthPrincipal, *role),
							util.NewScalarResourceWithReservation("mem", MEM_PER_TASK, *mesosAuthPrincipal, *role),
							util.NewVolumeResourceWithReservation(DISK_PER_TASK, task.containerPath, task.persistenceId, mesos.Volume_RW.Enum(), *mesosAuthPrincipal, *role),
						},
					}

					taskInfosToLaunch = append(taskInfosToLaunch, taskInfo)
					task.state = LaunchedState
					reservedCpus = reservedCpus - CPUS_PER_TASK
					reservedMem = reservedMem - MEM_PER_TASK
					reservedDisk = reservedDisk - DISK_PER_TASK

					log.Infof("Prepared task: %s with offer %s for launch\n", taskInfo.GetName(), offer.Id.GetValue())
				}
			case FinishedState:
				resourcesToDestroy = append(resourcesToDestroy,
					util.NewVolumeResourceWithReservation(DISK_PER_TASK, task.containerPath, task.persistenceId, mesos.Volume_RW.Enum(), *mesosAuthPrincipal, *role))
				resourcesToUnreserve = append(resourcesToUnreserve, []*mesos.Resource{
					util.NewScalarResourceWithReservation("cpus", CPUS_PER_TASK, *mesosAuthPrincipal, *role),
					util.NewScalarResourceWithReservation("mem", MEM_PER_TASK, *mesosAuthPrincipal, *role),
					util.NewScalarResourceWithReservation("disk", DISK_PER_TASK, *mesosAuthPrincipal, *role),
				}...)
				task.state = UnreservedState
			case UnreservedState:
				totalUnreserved = totalUnreserved + 1
			}
		}

		// Clean up reservations we no longer need
		if len(resourcesToReserve) == 0 && len(resourcesToCreate) == 0 && len(taskInfosToLaunch) == 0 {
			if reservedCpus > 0.0 {
				resourcesToUnreserve = append(resourcesToUnreserve, util.NewScalarResourceWithReservation("cpus", reservedCpus, *mesosAuthPrincipal, *role))
			}
			if reservedMem > 0.0 {
				resourcesToUnreserve = append(resourcesToUnreserve, util.NewScalarResourceWithReservation("mem", reservedMem, *mesosAuthPrincipal, *role))
			}
			if reservedDisk > 0.0 {
				filtered := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
					return res.GetName() == "disk" &&
						res.Reservation != nil &&
						res.Disk != nil
				})
				for _, volume := range filtered {
					resourcesToDestroy = append(resourcesToDestroy,
						util.NewVolumeResourceWithReservation(
							volume.GetScalar().GetValue(),
							volume.Disk.Volume.GetContainerPath(),
							volume.Disk.Persistence.GetId(),
							volume.Disk.Volume.Mode,
							*mesosAuthPrincipal,
							*role))
				}
				resourcesToUnreserve = append(resourcesToUnreserve, util.NewScalarResourceWithReservation("disk", reservedDisk, *mesosAuthPrincipal, *role))
			}
		}

		// Make a single operation per type
		if len(resourcesToReserve) > 0 {
			operations = append(operations, util.NewReserveOperation(resourcesToReserve))
		}
		if len(resourcesToCreate) > 0 {
			operations = append(operations, util.NewCreateOperation(resourcesToCreate))
		}
		if len(resourcesToUnreserve) > 0 {
			operations = append(operations, util.NewUnreserveOperation(resourcesToUnreserve))
		}
		if len(resourcesToDestroy) > 0 {
			operations = append(operations, util.NewDestroyOperation(resourcesToDestroy))
		}
		if len(taskInfosToLaunch) > 0 {
			operations = append(operations, util.NewLaunchOperation(taskInfosToLaunch))
		}

		log.Infoln("Accepting offers with ", len(operations), "operations for offer", offer.Id.GetValue())
		refuseSeconds := 5.0
		driver.AcceptOffers([]*mesos.OfferID{offer.Id}, operations, &mesos.Filters{RefuseSeconds: proto.Float64(refuseSeconds)})

		if totalUnreserved >= len(sched.tasks) {
			log.Infoln("Total tasks completed and unreserved, stopping framework.")
			driver.Stop(false)
		}
	}
}
Example #26
func (sched *MesosRunonceScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {

	if sched.tasksLaunched >= sched.totalTasks {
		log.V(1).Info("decline all of the offers since all of our tasks are already launched")
		ids := make([]*mesos.OfferID, len(offers))
		for i, offer := range offers {
			ids[i] = offer.Id
		}
		driver.LaunchTasks(ids, []*mesos.TaskInfo{}, &mesos.Filters{RefuseSeconds: proto.Float64(120)})
		return
	}
	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		log.V(1).Infoln("Received Offer <", offer.Id.GetValue(), "> on host ", *offer.Hostname, "with cpus=", cpus, " mem=", mems)

		remainingCpus := cpus
		remainingMems := mems

		dCpus := config.Task.Docker.Cpus
		dMem := config.Task.Docker.Mem

		var tasks []*mesos.TaskInfo
		for sched.tasksLaunched < sched.totalTasks &&
			dCpus <= remainingCpus &&
			dMem <= remainingMems {

			sched.tasksLaunched++

			tID := strconv.Itoa(sched.tasksLaunched)
			if config.Task.Id != "" {
				tID = config.Task.Id
			}

			tName := "mesos-runonce-" + tID
			if config.Task.Name != "" {
				tName = config.Task.Name
			}

			taskId := &mesos.TaskID{
				Value: proto.String(tID),
			}

			containerType := mesos.ContainerInfo_DOCKER
			task := &mesos.TaskInfo{
				Name:    proto.String(tName),
				TaskId:  taskId,
				SlaveId: offer.SlaveId,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", dCpus),
					util.NewScalarResource("mem", dMem),
				},
				Command: &mesos.CommandInfo{
					Shell: proto.Bool(false),
				},
				Container: &mesos.ContainerInfo{
					Type: &containerType,
					Docker: &mesos.ContainerInfo_DockerInfo{
						Image:          proto.String(config.Task.Docker.Image),
						ForcePullImage: proto.Bool(config.Task.Docker.ForcePullImage),
					},
				},
			}
			fmt.Printf("Prepared task: [%s] with offer [%s] for launch on host [%s]\n", task.GetName(), offer.Id.GetValue(), *offer.Hostname)

			if len(config.Task.Docker.Env) != 0 || config.Task.Docker.EnvString != "" {
				task.Command.Environment = config.EnvVars()
			}

			// Allow arbitrary commands, else just use whatever the image defines in CMD
			if config.Task.Docker.Cmd != "" {
				task.Command.Value = proto.String(config.Task.Docker.Cmd)
				task.Command.Shell = proto.Bool(true)
			}

			tasks = append(tasks, task)
			remainingCpus -= dCpus
			remainingMems -= dMem
		}
		log.V(1).Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(5)})
	}
}
Example #27
File: main.go Project: tcolgate/radia
func (s *visghsScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	if (s.nodesLaunched - s.nodesErrored) >= s.nodeTasks {
		log.Info("decline all of the offers since all of our tasks are already launched")
		ids := make([]*mesos.OfferID, len(offers))
		for i, offer := range offers {
			ids[i] = offer.Id
		}
		driver.LaunchTasks(ids, []*mesos.TaskInfo{}, &mesos.Filters{RefuseSeconds: proto.Float64(120)})
		return
	}
	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		portResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "ports"
		})
		ports := []*mesos.Value_Range{}
		portsCount := uint64(0)
		for _, res := range portResources {
			for _, rs := range res.GetRanges().GetRange() {
				ports = append(ports, rs)
				portsCount += 1 + rs.GetEnd() - rs.GetBegin()
			}
		}

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with cpus=", cpus, " mem=", mems, " ports=", ports)

		remainingCpus := cpus
		remainingMems := mems
		remainingPorts := ports
		remainingPortsCount := portsCount

		// account for executor resources if there's not an executor already running on the slave
		if len(offer.ExecutorIds) == 0 {
			remainingCpus -= CPUS_PER_EXECUTOR
			remainingMems -= MEM_PER_EXECUTOR
		}

		var tasks []*mesos.TaskInfo
		for (s.nodesLaunched-s.nodesErrored) < s.nodeTasks &&
			CPUS_PER_TASK <= remainingCpus &&
			MEM_PER_TASK <= remainingMems &&
			PORTS_PER_TASK <= remainingPortsCount {
			log.Infoln("Ports <", remainingPortsCount, remainingPorts)

			s.nodesLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(s.nodesLaunched)),
			}

			taskPorts := []*mesos.Value_Range{}
			leftOverPorts := []*mesos.Value_Range{}
			for t := 0; t < PORTS_PER_TASK; t++ {
				if len(remainingPorts) < 1 {
					// failed to allocate a port; stop rather than index into an empty slice below
					break
				}
				ps := remainingPorts[0]
				//take the first port from the first
				pb := ps.GetBegin()
				pe := ps.GetEnd()
				//Create one range per port we need, it's easier this way
				tp := mesos.Value_Range{}
				p := pb
				tp.Begin = &p
				tp.End = &p
				taskPorts = append(taskPorts, &tp)

				pb++
				if pb <= pe {
					rpb := pb
					rpe := pe
					rtp := mesos.Value_Range{Begin: &rpb, End: &rpe}
					leftOverPorts = append(leftOverPorts, &rtp)
				}
				for _, ps := range remainingPorts[1:] {
					leftOverPorts = append(leftOverPorts, ps)
				}
			}

			radiaPort := (uint32)(taskPorts[0].GetBegin())
			task := &mesos.TaskInfo{
				Name:     proto.String("visghs-node-" + taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: s.nexec,
				Discovery: &mesos.DiscoveryInfo{
					Name: proto.String("visghs"),
					//Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
					Visibility: mesos.DiscoveryInfo_FRAMEWORK.Enum(),
					Ports: &mesos.Ports{
						Ports: []*mesos.Port{
							{Protocol: proto.String("UDP"),
								Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
								Name:       proto.String("udpprobe"),
								Number:     proto.Uint32(radiaPort)},
							{Protocol: proto.String("TCP"),
								Visibility: mesos.DiscoveryInfo_EXTERNAL.Enum(),
								Name:       proto.String("radiarpc"),
								Number:     proto.Uint32(radiaPort)},
						},
					},
				},
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPUS_PER_TASK),
					util.NewScalarResource("mem", MEM_PER_TASK),
					util.NewRangesResource("ports", taskPorts),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= CPUS_PER_TASK
			remainingMems -= MEM_PER_TASK
			remainingPorts = leftOverPorts
			remainingPortsCount -= PORTS_PER_TASK
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(5)})
	}
}
Example #28
func (rf *ResourceFilter) FilterResources(offer *mesos.Offer, name string) []*mesos.Resource {
	return util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
		return res.GetName() == name && rf.filterRole(res.GetRole())
	})
}
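
ResourceFilter.FilterResources additionally checks each resource's role through rf.filterRole, which is not shown on this page. A hypothetical call site that sums the CPUs visible to the filter (rf and offer are assumed):

cpus := 0.0
for _, res := range rf.FilterResources(offer, "cpus") {
	cpus += res.GetScalar().GetValue()
}
log.Infoln("usable cpus for this role:", cpus)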