Example #1
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)
	jobs, err := getLaunchableJobs()
	if err != nil {
		log.Errorf("Unable to get pending jobs! %s\n", err.Error())
		return
	}

	offersAndTasks, err := packJobsInOffers(jobs, offers)
	if err != nil {
		log.Errorf("Unable to pack jobs into offers! %s\n", err.Error())
		return
	}

	for _, ot := range offersAndTasks {
		if len(ot.Tasks) == 0 {
			log.Infof("Declining unused offer %s", ot.Offer.Id.GetValue())
			driver.DeclineOffer(ot.Offer.Id, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
			continue
		}
		log.Infof("Launching %d tasks for offer %s\n", len(ot.Tasks), ot.Offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{ot.Offer.Id}, ot.Tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		sched.tasksLaunched += len(ot.Tasks)
	}
}
Example #2
func (s *MemcacheScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	log.Printf("Received %d resource offers", len(offers))
	for _, offer := range offers {
		select {
		case <-s.shutdown:
			log.Println("Shutting down: declining offer on [", offer.Hostname, "]")
			driver.DeclineOffer(offer.Id, defaultFilter)
			if s.tasksRunning == 0 {
				close(s.done)
			}
			continue
		default:
		}

		tasks := []*mesos.TaskInfo{}
		if canLaunchNewTask(offer) && s.shouldLaunchNewTask() {
			fmt.Println("Accepting Offer: ", offer)
			task := s.newMemcacheTask(offer)
			tasks = append(tasks, task)
		}

		if len(tasks) == 0 {
			driver.DeclineOffer(offer.Id, defaultFilter)
		} else {
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter)
			s.lastTaskCreatedAt = time.Now()
			return // limit one at a time
		}
	}
}
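This and several later examples (#3, #4, #11, #12, #14) reference a package-level defaultFilter that the excerpts never define. A minimal sketch of what such a value usually looks like, assuming the classic mesos-go bindings (the one-second refusal window and the import paths are illustrative; some projects use golang/protobuf instead of gogo):

import (
	"github.com/gogo/protobuf/proto"
	mesos "github.com/mesos/mesos-go/mesosproto"
)

// A short refusal window keeps declined offers cycling back to the framework
// quickly; projects tune this value to their needs.
var defaultFilter = &mesos.Filters{RefuseSeconds: proto.Float64(1)}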
Example #3
// ResourceOffers handles the Resource Offers
func (s *Scheduler) ResourceOffers(driver mesossched.SchedulerDriver, offers []*mesosproto.Offer) {
	logrus.WithField("offers", len(offers)).Debug("Received offers")
	var offer *mesosproto.Offer

loop:
	for len(offers) > 0 {
		select {
		case <-s.shutdown:
			logrus.Info("Shutting down: declining offers")
			break loop
		case tid := <-s.tasks:
			logrus.WithField("task_id", tid).Debug("Trying to find offer to launch task with")
			t, _ := s.database.ReadUnmaskedTask(tid)

			if t.IsTerminating() {
				logrus.Debug("Dropping terminating task.")
				t.UpdateStatus(eremetic.Status{
					Status: eremetic.TaskKilled,
					Time:   time.Now().Unix(),
				})
				s.database.PutTask(&t)

				continue
			}
			offer, offers = matchOffer(t, offers)

			if offer == nil {
				logrus.WithField("task_id", tid).Warn("Unable to find a matching offer")
				tasksDelayed.Inc()
				go func() { s.tasks <- tid }()
				break loop
			}

			logrus.WithFields(logrus.Fields{
				"task_id":  tid,
				"offer_id": offer.Id.GetValue(),
			}).Debug("Preparing to launch task")

			t, task := createTaskInfo(t, offer)
			t.UpdateStatus(eremetic.Status{
				Status: eremetic.TaskStaging,
				Time:   time.Now().Unix(),
			})
			s.database.PutTask(&t)
			driver.LaunchTasks([]*mesosproto.OfferID{offer.Id}, []*mesosproto.TaskInfo{task}, defaultFilter)
			tasksLaunched.Inc()
			queueSize.Dec()

			continue
		default:
			break loop
		}
	}

	logrus.Debug("No tasks to launch. Declining offers.")
	for _, offer := range offers {
		driver.DeclineOffer(offer.Id, defaultFilter)
	}
}
Example #4
func (s *rancherScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	task := tasks.GetNextTask()
	if task == nil {
		for _, of := range offers {
			driver.DeclineOffer(of.Id, defaultFilter)
		}
		return
	}
	if task.RegistrationUrl == "" {
		tasks.AddTask(task)
		for _, of := range offers {
			driver.DeclineOffer(of.Id, defaultFilter)
		}
		return
	}
	taskBytes, err := task.Marshal()
	if err != nil {
		log.WithFields(log.Fields{
			"err": err,
		}).Error("Error Marshalling task")
		for _, of := range offers {
			driver.DeclineOffer(of.Id, defaultFilter)
		}
		return
	}
	for _, offer := range offers {
		inadequate := false
		for _, res := range offer.GetResources() {
			if res.GetName() == "cpus" && res.GetScalar().GetValue() < taskCPUs {
				inadequate = true
				break
			}
			if res.GetName() == "mem" && res.GetScalar().GetValue() < taskMem {
				inadequate = true
				break
			}
		}
		if inadequate {
			// Decline each inadequate offer exactly once.
			driver.DeclineOffer(offer.Id, defaultFilter)
			continue
		}
		mesosTask := &mesos.TaskInfo{
			TaskId: &mesos.TaskID{
				Value: proto.String(task.HostUuid),
			},
			SlaveId: offer.SlaveId,
			Resources: []*mesos.Resource{
				mesosutil.NewScalarResource("cpus", taskCPUs),
				mesosutil.NewScalarResource("mem", taskMem),
			},
			Data:     taskBytes,
			Name:     &task.Name,
			Executor: s.rancherExecutor,
		}
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{mesosTask}, defaultFilter)
	}
}
Example #5
func (s *StackDeployScheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	Logger.Debug("[ResourceOffers] %s", pretty.Offers(offers))

	for _, offer := range offers {
		declineReason := s.acceptOffer(driver, offer)
		if declineReason != "" {
			driver.DeclineOffer(offer.GetId(), &mesos.Filters{RefuseSeconds: proto.Float64(10)})
			Logger.Debug("Declined offer %s: %s", pretty.Offer(offer), declineReason)
		}
	}
}
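Examples #5, #8 and #13 share a convention: acceptOffer returns an empty string when the offer is used and a human-readable decline reason otherwise, leaving the caller to decline. The implementations are not shown; below is a sketch of the shape, with illustrative thresholds and helper names that do not come from the projects (the driver parameter is omitted for brevity):

// acceptOfferSketch mirrors the accept-or-reason pattern: an empty return
// means the offer was used, anything else is why it should be declined.
func acceptOfferSketch(offer *mesos.Offer) string {
	const minCPUs, minMem = 0.5, 128.0 // illustrative thresholds
	if scalarSum(offer, "cpus") < minCPUs {
		return "insufficient cpus"
	}
	if scalarSum(offer, "mem") < minMem {
		return "insufficient mem"
	}
	// ...build a TaskInfo and call driver.LaunchTasks here...
	return ""
}

// scalarSum totals the named scalar resource across an offer.
func scalarSum(offer *mesos.Offer, name string) float64 {
	total := 0.0
	for _, res := range offer.GetResources() {
		if res.GetName() == name {
			total += res.GetScalar().GetValue()
		}
	}
	return total
}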
Example #6
File: scheduler.go Project: elodina/syphon
// mesos.Scheduler interface method.
// Invoked when resources have been offered to this framework.
func (this *ElodinaTransportScheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	log.Logger.Info("Received offers")
	offersAndTasks := make(map[*mesos.Offer][]*mesos.TaskInfo)
	remainingPartitions, err := this.GetTopicPartitions()
	if err != nil {
		return
	}
	remainingPartitions.RemoveAll(this.TakenTopicPartitions.GetArray())
	log.Logger.Debug("%v", remainingPartitions)
	tps := remainingPartitions.GetArray()

	offersAndResources := this.wrapInOfferAndResources(offers)
	for !remainingPartitions.IsEmpty() {
		log.Logger.Debug("Iteration %v", remainingPartitions)
		if this.hasEnoughInstances() {
			for _, transfer := range this.taskIdToTaskState {
				if len(transfer.assignment) < this.config.ThreadsPerTask {
					transfer.assignment = append(transfer.assignment, tps[0])
					remainingPartitions.Remove(tps[0])
					this.TakenTopicPartitions.Add(tps[0])
					if len(tps) > 1 {
						tps = tps[1:]
					} else {
						tps = []consumer.TopicAndPartition{}
					}
				}
			}
		} else {
			log.Logger.Debug("Trying to launch new task")
			offer, task := this.launchNewTask(offersAndResources)
			if offer != nil && task != nil {
				offersAndTasks[offer] = append(offersAndTasks[offer], task)
			} else {
				for _, offer := range offers {
					if _, exists := offersAndTasks[offer]; !exists {
						offersAndTasks[offer] = make([]*mesos.TaskInfo, 0)
					}
				}
				break
			}
		}
	}

	this.assignPendingPartitions()

	for _, offer := range offers {
		if tasks, ok := offersAndTasks[offer]; ok {
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		} else {
			driver.DeclineOffer(offer.Id, &mesos.Filters{RefuseSeconds: proto.Float64(10)})
		}
	}
}
Example #7
// decline declines an offer.
func (s *EtcdScheduler) decline(
	driver scheduler.SchedulerDriver,
	offer *mesos.Offer,
) {
	log.V(2).Infof("Declining offer %s.", offer.Id.GetValue())
	driver.DeclineOffer(
		offer.Id,
		&mesos.Filters{
			// Decline offers for 5 seconds.
			RefuseSeconds: proto.Float64(float64(5)),
		},
	)
}
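The decline helper is the only part of EtcdScheduler shown here. A hypothetical call site, declining every offer the scheduler has no use for (wantsOffer is an assumed predicate for illustration, not part of the project):

// Sketch: filter the offer list through a predicate and decline the rest
// using the helper above.
func declineUnwanted(s *EtcdScheduler, driver scheduler.SchedulerDriver, offers []*mesos.Offer, wantsOffer func(*mesos.Offer) bool) {
	for _, offer := range offers {
		if !wantsOffer(offer) {
			s.decline(driver, offer)
		}
	}
}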
Example #8
func (s *Scheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	Logger.Debugf("[ResourceOffers] %s", pretty.Offers(offers))

	for _, offer := range offers {
		declineReason := s.acceptOffer(driver, offer)
		if declineReason != "" {
			driver.DeclineOffer(offer.GetId(), &mesos.Filters{RefuseSeconds: proto.Float64(10)})
			Logger.Debugf("Declined offer: %s", declineReason)
		}
	}

	s.reconcileTasks(false)
	s.cluster.Save()
}
Example #9
func (sched *ScraperScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)

	for _, offer := range offers {
		if sched.tasksLaunched >= sched.totalTasks || len(sched.urls) == 0 {
			log.Infof("Declining offer %s", offer.Id.GetValue())
			driver.DeclineOffer(offer.Id, &mesos.Filters{})
			continue
		}
		remainingCpus := getOfferCpu(offer)
		remainingMems := getOfferMem(offer)

		var tasks []*mesos.TaskInfo
		for sched.cpuPerTask <= remainingCpus &&
			sched.memPerTask <= remainingMems &&
			sched.tasksLaunched < sched.totalTasks {

			log.Infof("Processing url %v of %v\n", sched.tasksLaunched, sched.totalTasks)
			log.Infof("Total Tasks: %d", sched.totalTasks)
			log.Infof("Tasks Launched: %d", sched.tasksLaunched)
			uri := sched.urls[sched.tasksLaunched]
			log.Infof("URI: %s", uri)

			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			task := &mesos.TaskInfo{
				Name:     proto.String("go-task-" + taskId.GetValue()),
				TaskId:   taskId,
				SlaveId:  offer.SlaveId,
				Executor: sched.executor,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", sched.cpuPerTask),
					util.NewScalarResource("mem", sched.memPerTask),
				},
				Data: []byte(uri),
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= sched.cpuPerTask
			remainingMems -= sched.memPerTask
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
Example #10
func (sched *ReflexScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		// CPUs
		cpuResources := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "cpus"
			},
		)
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		// Mem
		memResources := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "mem"
			},
		)
		mem := 0.0
		for _, res := range memResources {
			mem += res.GetScalar().GetValue()
		}

		logrus.WithFields(logrus.Fields{
			"cpus": cpus,
			"mem":  mem,
		}).Debug("got offer")

		for _, pair := range sched.waitingPairs {
			if pair.InProgress {
				continue
			}

			task := pair.Task
			if cpus >= task.CPU && mem >= task.Mem {
				logrus.WithField("task", task).Info("would have launched a task")
			}
		}

		driver.DeclineOffer(offer.GetId(), new(mesos.Filters))
	}
}
Example #11
func (s *rendlerScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	log.Printf("Received %d resource offers", len(offers))
	for _, offer := range offers {
		select {
		case <-s.shutdown:
			log.Println("Shutting down: declining offer on [", offer.Hostname, "]")
			driver.DeclineOffer(offer.Id, defaultFilter)
			if s.tasksRunning == 0 {
				close(s.done)
			}
			continue
		default:
		}

		tasks := []*mesos.TaskInfo{}
		tasksToLaunch := maxTasksForOffer(offer)
		for tasksToLaunch > 0 {
			if s.crawlQueue.Front() != nil {
				url := s.crawlQueue.Front().Value.(string)
				s.crawlQueue.Remove(s.crawlQueue.Front())
				task := s.newCrawlTask(url, offer)
				tasks = append(tasks, task)
				tasksToLaunch--
			}
			if s.renderQueue.Front() != nil {
				url := s.renderQueue.Front().Value.(string)
				s.renderQueue.Remove(s.renderQueue.Front())
				task := s.newRenderTask(url, offer)
				tasks = append(tasks, task)
				tasksToLaunch--
			}
			if s.crawlQueue.Front() == nil && s.renderQueue.Front() == nil {
				break
			}
		}

		if len(tasks) == 0 {
			driver.DeclineOffer(offer.Id, defaultFilter)
		} else {
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter)
		}
	}
}
Example #12
File: scheduler.go Project: keis/eremetic
// ResourceOffers handles the Resource Offers
func (s *eremeticScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	log.Tracef("Received %d resource offers", len(offers))
	var offer *mesos.Offer

loop:
	for len(offers) > 0 {
		select {
		case <-s.shutdown:
			log.Info("Shutting down: declining offers")
			break loop
		case tid := <-s.tasks:
			log.Debugf("Trying to find offer to launch %s with", tid)
			t, _ := database.ReadTask(tid)
			offer, offers = matchOffer(t, offers)

			if offer == nil {
				log.Warnf("Could not find a matching offer for %s", tid)
				TasksDelayed.Inc()
				go func() { s.tasks <- tid }()
				break loop
			}

			log.Debugf("Preparing to launch task %s with offer %s", tid, offer.Id.GetValue())
			t, task := s.newTask(t, offer)
			database.PutTask(&t)
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{task}, defaultFilter)
			TasksLaunched.Inc()
			QueueSize.Dec()

			continue
		default:
			break loop
		}
	}

	log.Trace("No tasks to launch. Declining offers.")
	for _, offer := range offers {
		driver.DeclineOffer(offer.Id, defaultFilter)
	}
}
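matchOffer appears in both eremetic excerpts (#3 and #12) but is not shown. A sketch of its contract, reusing the scalarSum helper sketched under Example #5: pick the first offer that fits the task and return it together with the remaining offers. The TaskCPUs and TaskMem field names are assumptions about the eremetic task type, and a real matcher would also check ports, attributes, and so on:

// matchOfferSketch returns the first offer that can fit the task, plus the
// offers left over; nil if nothing fits.
func matchOfferSketch(t eremetic.Task, offers []*mesos.Offer) (*mesos.Offer, []*mesos.Offer) {
	for i, offer := range offers {
		if scalarSum(offer, "cpus") >= t.TaskCPUs && scalarSum(offer, "mem") >= t.TaskMem {
			rest := make([]*mesos.Offer, 0, len(offers)-1)
			rest = append(rest, offers[:i]...)
			rest = append(rest, offers[i+1:]...)
			return offer, rest
		}
	}
	return nil, offers
}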
Example #13
File: scheduler.go Project: elodina/syscol
func (s *Scheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	Logger.Debugf("[ResourceOffers] %s", offersString(offers))

	s.activeLock.Lock()
	defer s.activeLock.Unlock()

	if !s.active {
		Logger.Debug("Scheduler is inactive. Declining all offers.")
		for _, offer := range offers {
			driver.DeclineOffer(offer.GetId(), &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		}
		return
	}

	for _, offer := range offers {
		declineReason := s.acceptOffer(driver, offer)
		if declineReason != "" {
			driver.DeclineOffer(offer.GetId(), &mesos.Filters{RefuseSeconds: proto.Float64(1)})
			Logger.Debugf("Declined offer: %s", declineReason)
		}
	}
}
Example #14
// ResourceOffers handles the Resource Offers
func (s *eremeticScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	log.Tracef("Received %d resource offers", len(offers))
	for _, offer := range offers {
		select {
		case <-s.shutdown:
			log.Infof("Shutting down: declining offer on [%s]", offer.Hostname)
			driver.DeclineOffer(offer.Id, defaultFilter)
			continue
		case tid := <-s.tasks:
			log.Debugf("Preparing to launch task %s with offer %s", tid, offer.Id.GetValue())
			t, _ := database.ReadTask(tid)
			task := s.newTask(offer, &t)
			database.PutTask(&t)
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{task}, defaultFilter)
			continue
		default:
		}

		log.Trace("No tasks to launch. Declining offer.")
		driver.DeclineOffer(offer.Id, defaultFilter)
	}
}
Example #15
File: scheduler.go Project: kellrott/agro
func (self *MesosManager) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	log.Printf("ResourceOffers\n")
	computeCount := 0
	for _, offer := range offers {
		offer_cpus := 0
		for _, res := range offer.Resources {
			if *res.Name == "cpus" {
				computeCount += int(*res.Scalar.Value)
				offer_cpus += int(*res.Scalar.Value)
			}
		}

		tasks := make([]*mesos.TaskInfo, 0, computeCount)

		for offer_cpus_taken := 0; offer_cpus_taken < offer_cpus; {
			job := self.engine.GetJobToRun()
			if job != nil {
				log.Printf("Launch job: %s", job)
				mesos_taskinfo := self.BuildTaskInfo(job, offer)
				log.Printf("MesosTask: %s", mesos_taskinfo)
				tasks = append(tasks, mesos_taskinfo)
				offer_cpus_taken += 1
			} else {
				offer_cpus_taken = offer_cpus
			}
		}
		if len(tasks) > 0 {
			_, err := driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{})
			if err != nil {
				log.Printf("Error: %s", err)
			}
		} else {
			driver.DeclineOffer(offer.Id, &mesos.Filters{})
		}
	}
	self.computeCount = computeCount
	log.Printf("CPUsOffered: %d", self.computeCount)
}
Example #16
// ScheduleTasks handles Mesos resource offers and launches PENDING Tasks based on the received offers.
//
// It monitors the PENDING Task queue and tries to launch as many Tasks as possible for a single offer.
// If no PENDING Task becomes available within the QueueTimeout interval, it declines the offer and retries the queue read.
func (bw *BasicWorker) ScheduleTasks(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
ReadOffers:
	for _, offer := range offers {
		remainingCpus := taurus.ScalarResourceVal("cpus", offer.Resources)
		remainingMems := taurus.ScalarResourceVal("mem", offer.Resources)

		log.Println("Taurus Received Offer <", offer.Id.GetValue(), "> with cpus=", remainingCpus, " mem=", remainingMems)

		// Map to avoid launching duplicate tasks in the same batch slice
		launchTaskMap := make(map[string]bool)
		launchTasks := make([]*mesos.TaskInfo, 0)
		var taskCpu, taskMem float64
		var retryCount int
	ReadTasks:
		for {
			task, err := bw.pending.ReadTask(QueueTimeout)
			if err != nil {
				retryCount += 1
				switch {
				case bw.pending.TimedOut(err):
					log.Printf("No %s tasks available", taurus.PENDING)
				case bw.pending.Closed(err):
					break ReadTasks
				default:
					log.Printf("Failed to read from %s queue: %s", taurus.PENDING, err)
				}
				if retryCount == QueueRetry {
					break ReadTasks
				}
				continue ReadTasks
			}
			if task != nil {
				taskId := task.Info.TaskId.GetValue()
				// Don't add the same task twice to the launchTasks slice
				if launchTaskMap[taskId] {
					log.Printf("Skipping already queued Task %s", taskId)
					continue ReadTasks
				}
				taskCpu = taurus.ScalarResourceVal("cpus", task.Info.Resources)
				taskMem = taurus.ScalarResourceVal("mem", task.Info.Resources)
				if remainingCpus >= taskCpu && remainingMems >= taskMem {
					task.Info.SlaveId = offer.SlaveId
					launchTasks = append(launchTasks, task.Info)
					launchTaskMap[taskId] = true
					remainingCpus -= taskCpu
					remainingMems -= taskMem
				} else {
					break ReadTasks
				}
			}
		}

		if len(launchTasks) > 0 {
			log.Printf("Launching %d tasks for offer %s", len(launchTasks), offer.Id.GetValue())
			launchStatus, err := driver.LaunchTasks(
				[]*mesos.OfferID{offer.Id},
				launchTasks,
				&mesos.Filters{RefuseSeconds: proto.Float64(1)})
			if err != nil {
				log.Printf("Mesos status: %#v Failed to launch Tasks %s: %s", launchStatus, launchTasks, err)
				continue ReadOffers
			}
		} else {
			log.Println("Declining offer ", offer.Id.GetValue())
			declineStatus, err := driver.DeclineOffer(
				offer.Id,
				&mesos.Filters{RefuseSeconds: proto.Float64(1)})
			if err != nil {
				log.Printf("Error declining offer for mesos status %#v: %s", declineStatus, err)
			}
		}
	}
}
Example #17
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	glog.Infof("received %d offer(s)", len(offers))
	for _, offer := range offers {
		driver.DeclineOffer(offer.GetId(), refuseFor(10*time.Second))
	}
}
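refuseFor is not shown in this excerpt; a plausible reconstruction that turns a time.Duration into the Filters message the driver expects (import paths as in the sketch under Example #2; the real project may differ):

import (
	"time"

	"github.com/gogo/protobuf/proto"
	mesos "github.com/mesos/mesos-go/mesosproto"
)

// refuseFor converts a duration into a decline filter, so offers stay away
// for that long before being re-offered.
func refuseFor(d time.Duration) *mesos.Filters {
	return &mesos.Filters{RefuseSeconds: proto.Float64(d.Seconds())}
}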