func (frc *FrameworkRiakCluster) ApplyOffer(offerHelper *common.OfferHelper, sc *SchedulerCore) bool { stateDirty := false clusterNeedsReconciliation := false for _, riakNode := range frc.Nodes { if riakNode.NeedsToBeReconciled() { clusterNeedsReconciliation = true continue } // Try to lanch, compatibilityMode is true if riakNode.CanBeScheduled() && sc.compatibilityMode { log.Infof("Adding Riak node for scheduling (compatibilityMode): %+v", riakNode.CurrentID()) if riakNode.ApplyReservedOffer(offerHelper, sc) { stateDirty = true } continue } // Reserved node, if the persistenceID matches, go ahead and launch! if riakNode.CanBeScheduled() && riakNode.HasRequestedReservation() && offerHelper.HasPersistenceId(riakNode.PersistenceID()) { log.Infof("Adding Riak node for scheduling (HasRequestedReservation, persistenceId match): %+v", riakNode.CurrentID()) if riakNode.ApplyReservedOffer(offerHelper, sc) { stateDirty = true } else { clusterNeedsReconciliation = true } continue } // Reserved node, if the slaveID / hostname matches but the apply fails, we need to unreserve the node if riakNode.CanBeScheduled() && riakNode.HasRequestedReservation() && (riakNode.SlaveID.GetValue() == offerHelper.MesosOffer.SlaveId.GetValue() || riakNode.Hostname == offerHelper.MesosOffer.GetHostname()) { log.Infof("Adding Riak node for scheduling (HasRequestedReservation, slaveId/hostname match): %+v", riakNode.CurrentID()) if !riakNode.ApplyReservedOffer(offerHelper, sc) { log.Infof("Riak node has reservation, but slave no longer has it's reservation, unreserving node: %+v", riakNode.CurrentID()) riakNode.Unreserve() } stateDirty = true continue } // New node, needs reservation if riakNode.CanBeScheduled() && !riakNode.HasRequestedReservation() && !sc.compatibilityMode { log.Infof("Adding Riak node for scheduling (no reservations): %+v", riakNode.CurrentID()) if riakNode.ApplyUnreservedOffer(offerHelper) { stateDirty = true } } } if stateDirty { sc.schedulerState.Persist() } return 
clusterNeedsReconciliation }
func (frn *FrameworkRiakNode) ApplyUnreservedOffer(offerHelper *common.OfferHelper) bool { if !offerHelper.CanFitUnreserved(frn.Cpus+CPUS_PER_EXECUTOR, frn.Mem+MEM_PER_EXECUTOR, frn.Disk, frn.Ports) { return false } log.Infof("Found a new offer for a node. OfferID: %+v, NodeID: %+v", offerHelper.OfferIDStr, frn.CurrentID()) // Remove the ports and executor requirements from offerHelper, but don't reserve _ = offerHelper.ApplyUnreserved(CPUS_PER_EXECUTOR, MEM_PER_EXECUTOR, 0, frn.Ports) // Create reservation + volumes, add to offerHelper offerHelper.MakeReservation(frn.Cpus, frn.Mem, frn.Disk, 0, *frn.Principal, *frn.Role) offerHelper.MakeVolume(frn.Disk, *frn.Principal, *frn.Role, frn.PersistenceID(), frn.ContainerPath) // Update state frn.SlaveID = offerHelper.MesosOffer.SlaveId frn.Hostname = offerHelper.MesosOffer.GetHostname() frn.CurrentState = process_state.Reserved return true }
// ApplyReservedOffer builds and enqueues a Mesos TaskInfo for this node from
// the given offer. In compatibilityMode everything is taken from unreserved
// resources; otherwise the task uses the node's existing reservation/volume
// and only the executor overhead and ports come from unreserved resources.
// Returns false (offer untouched) when the offer cannot fit the node.
//
// NOTE(review): the exact accounting semantics of ApplyReserved/ApplyUnreserved
// (mutating the offerHelper's remaining resources) live in the common package —
// statement order here is significant, do not reorder the Apply* calls.
func (frn *FrameworkRiakNode) ApplyReservedOffer(offerHelper *common.OfferHelper, sc *SchedulerCore) bool {
	// taskAsk: resources for the Riak task; execAsk: resources for its executor.
	taskAsk := []*mesos.Resource{}
	execAsk := []*mesos.Resource{}
	if sc.compatibilityMode {
		// Everything (task + executor + ports) must fit unreserved.
		if !offerHelper.CanFitUnreserved(frn.Cpus+CPUS_PER_EXECUTOR, frn.Mem+MEM_PER_EXECUTOR, frn.Disk, frn.Ports) {
			return false
		}
		taskAsk = offerHelper.ApplyUnreserved(frn.Cpus, frn.Mem, frn.Disk, frn.Ports)
		execAsk = offerHelper.ApplyUnreserved(CPUS_PER_EXECUTOR, MEM_PER_EXECUTOR, 0, 0)
	} else {
		// Task resources come from the reservation; executor overhead and
		// ports come from the offer's unreserved pool. Both must fit.
		if !offerHelper.CanFitReserved(frn.Cpus, frn.Mem, frn.Disk, 0) ||
			!offerHelper.CanFitUnreserved(CPUS_PER_EXECUTOR, MEM_PER_EXECUTOR, 0, frn.Ports) {
			return false
		}
		taskAsk = offerHelper.ApplyReserved(frn.Cpus, frn.Mem, frn.Disk, 0, *frn.Principal, *frn.Role, frn.PersistenceID(), frn.ContainerPath)
		taskAsk = append(taskAsk, offerHelper.ApplyUnreserved(0, 0, 0, frn.Ports)...)
		execAsk = offerHelper.ApplyUnreserved(CPUS_PER_EXECUTOR, MEM_PER_EXECUTOR, 0, 0)
	}

	log.Infof("Found an offer for a launchable node. OfferID: %+v, NodeID: %+v", offerHelper.OfferIDStr, frn.CurrentID())

	// Pin the node to this slave and bump its generation for the new launch.
	frn.SlaveID = offerHelper.MesosOffer.SlaveId
	frn.Hostname = offerHelper.MesosOffer.GetHostname()
	frn.Generation = frn.Generation + 1
	frn.TaskStatus = nil
	frn.CurrentState = process_state.Starting

	taskId := frn.CreateTaskID()
	// Erlang-style node name, e.g. "id@host"; a dotted hostname is required,
	// so append a trailing "." when the hostname has no domain part.
	nodename := frn.CurrentID() + "@" + frn.Hostname
	if !strings.Contains(frn.Hostname, ".") {
		nodename = nodename + "."
	}

	// ports yields the port numbers granted in taskAsk; the three receives
	// below are evaluated in source order (HTTP, PB, Disterl).
	ports := common.PortIterator(taskAsk)
	taskData := common.TaskData{
		FullyQualifiedNodeName: nodename,
		Host:           frn.Hostname,
		Zookeepers:     sc.zookeepers,
		FrameworkName:  sc.frameworkName,
		URI:            sc.schedulerHTTPServer.GetURI(),
		ClusterName:    frn.ClusterName,
		UseSuperChroot: os.Getenv("USE_SUPER_CHROOT") != "false",
		HTTPPort:       <-ports,
		PBPort:         <-ports,
		DisterlPort:    <-ports,
	}
	frn.TaskData = taskData

	// Serialization failure is a programmer error: panic rather than launch
	// a task with no data.
	binTaskData, err := taskData.Serialize()
	if err != nil {
		log.Panic(err)
	}

	execName := fmt.Sprintf("%s Executor", frn.CurrentID())
	taskInfo := &mesos.TaskInfo{
		Name:    proto.String(frn.Name()),
		TaskId:  taskId,
		SlaveId: frn.SlaveID,
		Executor: &mesos.ExecutorInfo{
			ExecutorId: frn.CreateExecutorID(),
			Name:       proto.String(execName),
			Source:     proto.String(frn.FrameworkName),
			Command: &mesos.CommandInfo{
				Value: proto.String(ExecutorValue()),
				// Artifacts the slave fetches before starting the executor;
				// only cepmd is marked executable.
				Uris: []*mesos.CommandInfo_URI{
					&mesos.CommandInfo_URI{
						Value:      &(sc.schedulerHTTPServer.hostURI),
						Executable: proto.Bool(false),
					},
					&mesos.CommandInfo_URI{
						Value:      &(sc.schedulerHTTPServer.riakURI),
						Executable: proto.Bool(false),
					},
					&mesos.CommandInfo_URI{
						Value:      &(sc.schedulerHTTPServer.cepmdURI),
						Executable: proto.Bool(true),
					},
				},
				Shell:     proto.Bool(ExecutorShell()),
				Arguments: ExecutorArgs(frn.CurrentID()),
			},
			Resources: execAsk,
		},
		Resources: taskAsk,
		Data:      binTaskData,
	}

	// Queue the task; the scheduler launches everything in TasksToLaunch
	// when it answers the offer.
	offerHelper.TasksToLaunch = append(offerHelper.TasksToLaunch, taskInfo)
	return true
}