// scheduleMatched launches the LRP and Task auctions that were matched to a slave's offers,
// issuing one LaunchTasks call per slave. Auctions in the unmatched bucket (empty slave ID)
// are recorded as failed placements.
func (s *DiegoScheduler) scheduleMatched(driver sched.SchedulerDriver, matches map[string]*OfferMatch) auctiontypes.AuctionResults {
	results := auctiontypes.AuctionResults{}

	for slaveId, match := range matches {
		if slaveId != "" {
			offers := match.Offers
			taskInfos := []*mesos.TaskInfo{}

			for _, lrpAuction := range match.LrpAuctions {
				taskInfo := s.createLrpTaskInfo(util.NewSlaveID(slaveId), &lrpAuction)
				taskInfos = append(taskInfos, taskInfo)
				results.SuccessfulLRPs = append(results.SuccessfulLRPs, lrpAuction)
				log.Infof("+scheduled lrp, lrp: %v/%v mem: %v, offers: mem: %v",
					lrpAuction.ProcessGuid, lrpAuction.Index, lrpAuction.MemoryMB, getOffersMem(offers))
			}

			for _, taskAuction := range match.TaskAuctions {
				taskInfo := s.createTaskTaskInfo(util.NewSlaveID(slaveId), &taskAuction)
				taskInfos = append(taskInfos, taskInfo)
				results.SuccessfulTasks = append(results.SuccessfulTasks, taskAuction)
				log.Infof("+scheduled task, task: %v mem: %v, offers: mem: %v",
					taskAuction.TaskGuid, taskAuction.MemoryMB, getOffersMem(offers))
			}

			// The offers are declined if there are no tasks to launch on them.
			driver.LaunchTasks(extractOfferIds(offers), taskInfos,
				&mesos.Filters{RefuseSeconds: proto.Float64(30)})
		} else {
			for _, lrpAuction := range match.LrpAuctions {
				results.FailedLRPs = append(results.FailedLRPs, lrpAuction)
				log.Warningf("+schedule lrp failed, lrp: %v/%v mem: %v, offers: mem: %v",
					lrpAuction.GetProcessGuid(), lrpAuction.Index, lrpAuction.MemoryMB, getOffersMem(match.Offers))
			}

			for _, taskAuction := range match.TaskAuctions {
				results.FailedTasks = append(results.FailedTasks, taskAuction)
				log.Warningf("+schedule task failed, task: %v mem: %v, offers: mem: %v",
					taskAuction.TaskGuid, taskAuction.MemoryMB, getOffersMem(match.Offers))
			}
		}
	}

	return results
}
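For reference, the offer bookkeeping that scheduleMatched leans on is small. The sketch below is a minimal reconstruction, assuming the legacy mesos-go protobuf bindings (github.com/mesos/mesos-go/mesosproto imported as mesos); the real extractOfferIds and getOffersMem helpers live elsewhere in the scheduler package and may differ in detail.

// Illustrative sketches only; the actual helpers ship with the scheduler package.

// extractOfferIds collects the IDs of the offers being consumed, which is what
// SchedulerDriver.LaunchTasks expects as its first argument.
func extractOfferIds(offers []*mesos.Offer) []*mesos.OfferID {
	offerIds := []*mesos.OfferID{}
	for _, offer := range offers {
		offerIds = append(offerIds, offer.GetId())
	}
	return offerIds
}

// getOffersMem sums the "mem" scalar resource across the offers, for logging.
func getOffersMem(offers []*mesos.Offer) float64 {
	mem := 0.0
	for _, offer := range offers {
		for _, resource := range offer.GetResources() {
			if resource.GetName() == "mem" {
				mem += resource.GetScalar().GetValue()
			}
		}
	}
	return mem
}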
/*
Schedule takes in a set of job requests (LRP start auctions and task starts) and assigns
the work to available cells according to the diego scoring algorithm. The scheduler is
single-threaded: it places jobs one at a time so that each calculation correctly reflects
the resources still available. It commits the work in batches at the end, for better
network performance. Schedule returns AuctionResults, indicating the success or failure
of each requested job.
*/
func (s *Scheduler) Schedule(auctionRequest auctiontypes.AuctionRequest) auctiontypes.AuctionResults {
	results := auctiontypes.AuctionResults{}

	// With no cells to place work on, fail everything with a cell-communication error.
	if len(s.zones) == 0 {
		results.FailedLRPs = auctionRequest.LRPs
		for i := range results.FailedLRPs {
			results.FailedLRPs[i].PlacementError = auctiontypes.ErrorCellCommunication.Error()
		}
		results.FailedTasks = auctionRequest.Tasks
		for i := range results.FailedTasks {
			results.FailedTasks[i].PlacementError = auctiontypes.ErrorCellCommunication.Error()
		}
		return s.markResults(results)
	}

	var successfulLRPs = map[string]*auctiontypes.LRPAuction{}
	var lrpStartAuctionLookup = map[string]*auctiontypes.LRPAuction{}
	var successfulTasks = map[string]*auctiontypes.TaskAuction{}
	var taskAuctionLookup = map[string]*auctiontypes.TaskAuction{}

	sort.Sort(SortableLRPAuctions(auctionRequest.LRPs))
	sort.Sort(SortableTaskAuctions(auctionRequest.Tasks))

	lrpsBeforeTasks, lrpsAfterTasks := splitLRPS(auctionRequest.LRPs)

	auctionLRP := func(lrpsToAuction []auctiontypes.LRPAuction) {
		for i := range lrpsToAuction {
			lrpAuction := &lrpsToAuction[i]
			lrpStartAuctionLookup[lrpAuction.Identifier()] = lrpAuction

			successfulStart, err := s.scheduleLRPAuction(lrpAuction)
			if err != nil {
				lrpAuction.PlacementError = err.Error()
				results.FailedLRPs = append(results.FailedLRPs, *lrpAuction)
			} else {
				successfulLRPs[successfulStart.Identifier()] = successfulStart
			}
		}
	}

	auctionLRP(lrpsBeforeTasks)

	for i := range auctionRequest.Tasks {
		taskAuction := &auctionRequest.Tasks[i]
		taskAuctionLookup[taskAuction.Identifier()] = taskAuction

		successfulTask, err := s.scheduleTaskAuction(taskAuction, s.startingContainerWeight)
		if err != nil {
			taskAuction.PlacementError = err.Error()
			results.FailedTasks = append(results.FailedTasks, *taskAuction)
		} else {
			successfulTasks[successfulTask.Identifier()] = successfulTask
		}
	}

	auctionLRP(lrpsAfterTasks)

	// Commit all cell work in one batch; anything a cell rejects is moved from the
	// successful sets back into the failed results.
	failedWorks := s.commitCells()
	for _, failedWork := range failedWorks {
		for _, failedStart := range failedWork.LRPs {
			identifier := failedStart.Identifier()
			delete(successfulLRPs, identifier)

			s.logger.Info("lrp-failed-to-be-placed", lager.Data{"lrp-guid": failedStart.Identifier()})
			results.FailedLRPs = append(results.FailedLRPs, *lrpStartAuctionLookup[identifier])
		}

		for _, failedTask := range failedWork.Tasks {
			identifier := failedTask.Identifier()
			delete(successfulTasks, identifier)

			s.logger.Info("task-failed-to-be-placed", lager.Data{"task-guid": failedTask.Identifier()})
			results.FailedTasks = append(results.FailedTasks, *taskAuctionLookup[identifier])
		}
	}

	for _, successfulStart := range successfulLRPs {
		s.logger.Info("lrp-added-to-cell", lager.Data{"lrp-guid": successfulStart.Identifier(), "cell-guid": successfulStart.Winner})
		results.SuccessfulLRPs = append(results.SuccessfulLRPs, *successfulStart)
	}

	for _, successfulTask := range successfulTasks {
		s.logger.Info("task-added-to-cell", lager.Data{"task-guid": successfulTask.Identifier(), "cell-guid": successfulTask.Winner})
		results.SuccessfulTasks = append(results.SuccessfulTasks, *successfulTask)
	}

	return s.markResults(results)
}
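A caller drives this once per auction batch. The snippet below is a rough usage sketch, not part of the auctioneer: the runAuctionBatch function and the way the request slices are populated are assumptions, and it only touches fields that appear above (the request's LRPs and Tasks, and the Winner and PlacementError fields on the results).

// Rough usage sketch; runAuctionBatch is hypothetical and assumes the Scheduler
// and the auction slices have already been constructed elsewhere.
func runAuctionBatch(s *Scheduler, lrps []auctiontypes.LRPAuction, tasks []auctiontypes.TaskAuction) {
	request := auctiontypes.AuctionRequest{LRPs: lrps, Tasks: tasks}

	results := s.Schedule(request)

	for _, lrp := range results.SuccessfulLRPs {
		fmt.Printf("placed %s on cell %s\n", lrp.Identifier(), lrp.Winner)
	}
	for _, lrp := range results.FailedLRPs {
		fmt.Printf("lrp %s failed: %s\n", lrp.Identifier(), lrp.PlacementError)
	}
	for _, task := range results.FailedTasks {
		fmt.Printf("task %s failed: %s\n", task.Identifier(), task.PlacementError)
	}
}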