Example 1
0
// ResourceOffers handles resource offers from the Mesos master: it declines
// everything when there is no work, otherwise greedily packs tasks from the
// active jobs onto the (shuffled) offers and launches them.
func (s *_MesosScheduler) ResourceOffers(driver *mesos.SchedulerDriver,
	_offers []mesos.Offer) {

	offerSize := len(_offers)
	rf := mesos.Filters{}
	// No active jobs: refuse every offer for 5 minutes so the master does
	// not keep re-offering resources we cannot use right now.
	if len(s.activeJobs) == 0 {
		var seconds float64 = 60 * 5
		rf.RefuseSeconds = &seconds
		for _, o := range _offers {
			driver.LaunchTasks(o.Id, nil, rf) //TODO:when error
		}
		return
	}
	start := time.Now()
	// Shuffle the offers so tasks are spread across slaves instead of
	// always filling the first offers in master order.
	offers := make([]mesos.Offer, offerSize)
	for idx, i := range rand.Perm(offerSize) {
		offers[i] = _offers[idx]
	}
	// Remaining cpus/mem per offer. Memory for the executor itself is
	// reserved only on slaves that are not yet running one of our tasks.
	cpus := make([]float64, offerSize)
	mems := make([]float64, offerSize)
	for idx, o := range offers {
		sid := *o.SlaveId.Value
		cpus[idx] = *s.getResource(o.Resources, "cpus")
		var e_mem float64 = EXECUTOR_MEMORY
		if v, ok := s.slaveTasks[sid]; ok && v > 0 {
			e_mem = 0
		}
		mems[idx] = *s.getResource(o.Resources, "mem") - e_mem
	}

	glog.Infof("get %d offers (%.2f cpus, %.2f mem), %d jobs",
		len(offers), sumFloat64(cpus), sumFloat64(mems), len(s.activeJobs))

	// Greedily assign tasks, keyed by offer id. Keep looping over the
	// offers for each job until no offer can take another task.
	tasks := make(map[string][]*mesos.TaskInfo)
	for _, job := range s.activeJobsQueue {
		for {
			launchedTask := false
			for idx, o := range offers {
				slaveId := *o.SlaveId.Value
				// Skip offers outside the configured slave groups (if any).
				sgroup := s.getAttribute(o.Attributes, "group")
				group := "none"
				if sgroup != nil {
					group = *sgroup
				}
				next := false
				if len(s.group) > 0 {
					next = true
					for _, g := range s.group {
						if group == g {
							next = false
							break
						}
					}
				}
				if next {
					continue
				}
				// Skip slaves that failed too often or are already full.
				if v, ok := s.slaveFailed[slaveId]; ok && v >= MAX_FAILED {
					continue
				}
				if v, ok := s.slaveTasks[slaveId]; ok && v >= s.task_per_node {
					continue
				}
				// Skip offers without enough remaining resources
				// (1e-4 tolerates float rounding on the cpu count).
				if mems[idx] < s.mem || cpus[idx]+1e-4 < s.cpus {
					continue
				}
				t := job.slaveOffer(*o.Hostname, cpus[idx], mems[idx])
				if t == nil {
					continue
				}
				task := s.createTask(&o, job, t, cpus[idx])
				oid := *o.Id.Value
				tid := *task.TaskId.Value
				// append on a missing key starts from nil, so no
				// presence check is needed.
				tasks[oid] = append(tasks[oid], task)

				glog.Infof("dispatch %s into %s", t, *o.Hostname)
				s.taskIdToJobId[tid] = job.getId()
				s.taskIdToSlaveId[tid] = slaveId
				// Missing keys read as 0, so ++ is safe.
				s.slaveTasks[slaveId]++
				cpus[idx] -= math.Min(cpus[idx], t.getCpus())
				// BUG FIX: subtract the task's memory from the remaining
				// pool; the original overwrote it (mems[idx] = t.getMem()),
				// corrupting the accounting for subsequent tasks.
				mems[idx] -= math.Min(mems[idx], t.getMem())
				launchedTask = true
			}
			if !launchedTask {
				break
			}
		}
	}
	if used := time.Since(start); used.Seconds() > 10 {
		glog.Errorf("use too much time in slaveOffer: %0.2fs", used.Seconds())
	}
	// Reply to every offer: launch its assigned tasks, or nil to decline
	// an unused offer (tasks[oid] is nil when nothing was assigned).
	for _, o := range offers {
		if err := driver.LaunchTasks(o.Id, tasks[*o.Id.Value]); err != nil {
			glog.Fatal(err)
		}
	}
	tsTotal := 0
	for _, ts := range tasks {
		tsTotal += len(ts)
	}
	glog.Infof("reply with %d tasks, %.2f cpus %.2f mem left",
		tsTotal, sumFloat64(cpus), sumFloat64(mems))
}
Example 2
0
// main runs an example Mesos framework: it registers a scheduler that
// launches one task (carrying a large gob-encoded payload) per offer, and
// exits after 1000 tasks have finished.
func main() {
	taskLimit := 1000
	taskId := 0
	exit := make(chan bool)
	// Best-effort lookup of a local executor binary; on error the empty
	// default is still overridable via the -executor-uri flag.
	localExecutor, _ := executorPath()

	master := flag.String("master", "localhost:5050", "Location of leading Mesos master")
	executorUri := flag.String("executor-uri", localExecutor, "URI of executor executable")
	flag.Parse()

	executable := false
	executor := &mesos.ExecutorInfo{
		ExecutorId: &mesos.ExecutorID{Value: proto.String("default")},
		Command: &mesos.CommandInfo{
			Value: proto.String("./example_executor"),
			Uris: []*mesos.CommandInfo_URI{
				&mesos.CommandInfo_URI{Value: executorUri,
					Executable: &executable},
			},
		},
		Name:   proto.String("Test Executor (Go)"),
		Source: proto.String("go_test"),
	}
	str1 := "abcd"
	fmt.Println(str1)
	// Build a large payload (100000 * len(str1) bytes) to ship with each
	// task. Use a pre-grown buffer: the original `str = str + str1` loop
	// re-copied the whole string on every iteration (quadratic time).
	var sb bytes.Buffer
	sb.Grow(100000 * len(str1))
	for i := 0; i < 100000; i++ {
		sb.WriteString(str1)
	}
	str := sb.String()

	driver := mesos.SchedulerDriver{
		Master: *master,
		Framework: mesos.FrameworkInfo{
			Name: proto.String("GoFramework"),
			User: proto.String(""),
		},

		Scheduler: &mesos.Scheduler{
			// Launch one task per offer, each carrying the gob-encoded
			// payload built above.
			ResourceOffers: func(driver *mesos.SchedulerDriver, offers []mesos.Offer) {
				for _, offer := range offers {
					taskId++
					tt := &Data{str}
					var buf bytes.Buffer
					enc := gob.NewEncoder(&buf)
					if err := enc.Encode(tt); err != nil {
						panic(err)
					}
					datas := buf.Bytes()

					fmt.Printf("Launching task: %d data:%d\n", taskId, len(datas))

					tasks := []*mesos.TaskInfo{
						&mesos.TaskInfo{
							Name: proto.String("go-task"),
							TaskId: &mesos.TaskID{
								Value: proto.String("go-task-" + strconv.Itoa(taskId)),
							},
							SlaveId:  offer.SlaveId,
							Executor: executor,
							Data:     datas,
							Resources: []*mesos.Resource{
								mesos.ScalarResource("cpus", 1),
								mesos.ScalarResource("mem", 128),
							},
						},
					}

					driver.LaunchTasks(offer.Id, tasks)
				}
			},

			// Count down finished tasks and signal shutdown once the
			// limit is reached.
			StatusUpdate: func(driver *mesos.SchedulerDriver, status mesos.TaskStatus) {
				fmt.Println("Received task status: " + *status.Message)

				if *status.State == mesos.TaskState_TASK_FINISHED {
					taskLimit--
					if taskLimit <= 0 {
						exit <- true
					}
				}
			},
		},
	}

	driver.Init()
	defer driver.Destroy()

	driver.Start()
	<-exit
	driver.Stop(false)
}