Example #1
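// apply deducts the requested cpus, mem, disk, and ports from the ResourceGroup
// and returns the matching Mesos resource asks, attaching reservation (and, when a
// persistence ID and container path are given for disk, volume) metadata whenever
// a principal and role are supplied.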
func (offerHelper *OfferHelper) apply(against *ResourceGroup, cpus float64, mem float64, disk float64, ports int,
	principal string, role string, persistenceID string, containerPath string) []*mesos.Resource {

	ask := []*mesos.Resource{}

	if cpus > 0 {
		against.Cpus = against.Cpus - cpus
		if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("cpus", cpus, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("cpus", cpus))
		}
	}

	if mem > 0 {
		against.Mem = against.Mem - mem
		if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("mem", mem, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("mem", mem))
		}
	}

	if disk > 0 {
		against.Disk = against.Disk - disk
		if principal != "" && role != "" && containerPath != "" && persistenceID != "" {
			ask = append(ask, util.NewVolumeResourceWithReservation(disk, containerPath, persistenceID, mesos.Volume_RW.Enum(), principal, role))
		} else if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("disk", disk, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("disk", disk))
		}
	}

	if ports > 0 {
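		// Take a random contiguous run of ports out of the group and leave the rest
		// behind; this assumes the group still holds at least `ports` ports.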
		sliceLoc := 0
		if len(against.Ports)-ports > 0 {
			sliceLoc = rand.Intn(len(against.Ports) - ports)
		}
		takingPorts := make([]int64, ports)
		copy(takingPorts, against.Ports[sliceLoc:(sliceLoc+ports)])
		leavingPorts := make([]int64, len(against.Ports)-ports)
		copy(leavingPorts, against.Ports[:sliceLoc])
		copy(leavingPorts[sliceLoc:], against.Ports[(sliceLoc+ports):])

		against.Ports = leavingPorts
		if principal != "" && role != "" {
			ask = append(ask, util.AddResourceReservation(util.NewRangesResource("ports", ArrayToRanges(takingPorts)), principal, role))
		} else {
			ask = append(ask, util.NewRangesResource("ports", ArrayToRanges(takingPorts)))
		}
	}

	return ask
}
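The ports branch above relies on an ArrayToRanges helper that the example calls but does not define. A minimal sketch, assuming it collapses a sorted []int64 of ports into mesos.Value_Range spans using util.NewValueRange (the project's actual implementation may differ):

func ArrayToRanges(ports []int64) []*mesos.Value_Range {
	// Collapse runs of consecutive port numbers into begin..end ranges.
	if len(ports) == 0 {
		return []*mesos.Value_Range{}
	}
	ranges := []*mesos.Value_Range{}
	begin, end := ports[0], ports[0]
	for _, port := range ports[1:] {
		if port == end+1 {
			end = port
			continue
		}
		ranges = append(ranges, util.NewValueRange(uint64(begin), uint64(end)))
		begin, end = port, port
	}
	return append(ranges, util.NewValueRange(uint64(begin), uint64(end)))
}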
Example #2
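// TestSchedulerDriverAcceptOffersWithError checks that AcceptOffers surfaces the
// transport error injected via OnDispatch while the driver itself keeps running.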
func (suite *SchedulerTestSuite) TestSchedulerDriverAcceptOffersWithError() {
	sched := mock_scheduler.New()
	sched.On("StatusUpdate").Return(nil)
	sched.On("Error").Return()

	msgr := mockedMessenger()
	driver := newTestDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, msgr))
	driver.OnDispatch(func(_ context.Context, _ *upid.UPID, _ proto.Message) error {
		return fmt.Errorf("Unable to send message")
	})

	go func() {
		driver.Run()
	}()
	<-driver.Started()
	driver.SetConnected(true) // simulated
	suite.True(driver.Running())

	// setup an offer
	offer := util.NewOffer(
		util.NewOfferID("test-offer-001"),
		suite.framework.Id,
		util.NewSlaveID("test-slave-001"),
		"test-slave(1)@localhost:5050",
	)

	pid, err := upid.Parse("test-slave(1)@localhost:5050")
	suite.NoError(err)
	driver.CacheOffer(offer, pid)

	// launch task
	task := util.NewTaskInfo(
		"simple-task",
		util.NewTaskID("simpe-task-1"),
		util.NewSlaveID("test-slave-001"),
		[]*mesos.Resource{util.NewScalarResourceWithReservation("mem", 400, "principal", "role")},
	)
	task.Command = util.NewCommandInfo("pwd")
	task.Executor = util.NewExecutorInfo(util.NewExecutorID("test-exec"), task.Command)
	tasks := []*mesos.TaskInfo{task}

	operations := []*mesos.Offer_Operation{util.NewLaunchOperation(tasks)}

	stat, err := driver.AcceptOffers(
		[]*mesos.OfferID{offer.Id},
		operations,
		&mesos.Filters{},
	)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
	suite.Error(err)
}
Example #3
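// ResourceOffers walks the scheduler's tasks for each offer: it reserves resources and
// creates a persistent volume for new tasks, launches tasks whose reservations have
// arrived, and destroys volumes and unreserves resources for finished tasks, then
// accepts the offer with the accumulated operations.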
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		operations := []*mesos.Offer_Operation{}
		resourcesToCreate := []*mesos.Resource{}
		resourcesToDestroy := []*mesos.Resource{}
		resourcesToReserve := []*mesos.Resource{}
		resourcesToUnreserve := []*mesos.Resource{}
		taskInfosToLaunch := []*mesos.TaskInfo{}
		totalUnreserved := 0

		unreservedCpus, unreservedMem, unreservedDisk := getUnreservedResources(offer.Resources)
		reservedCpus, reservedMem, reservedDisk := getReservedResources(offer.Resources)

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with unreserved cpus=", unreservedCpus, " mem=", unreservedMem, " disk=", unreservedDisk)
		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with reserved cpus=", reservedCpus, " mem=", reservedMem, " disk=", reservedDisk)

		for _, task := range sched.tasks {
			switch task.state {
			case InitState:
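				// New task: reserve its cpus/mem/disk and create its persistent volume.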
				if CPUS_PER_TASK <= unreservedCpus &&
					MEM_PER_TASK <= unreservedMem &&
					DISK_PER_TASK <= unreservedDisk {

					resourcesToReserve = append(resourcesToReserve, []*mesos.Resource{
						util.NewScalarResourceWithReservation("cpus", CPUS_PER_TASK, *mesosAuthPrincipal, *role),
						util.NewScalarResourceWithReservation("mem", MEM_PER_TASK, *mesosAuthPrincipal, *role),
						util.NewScalarResourceWithReservation("disk", DISK_PER_TASK, *mesosAuthPrincipal, *role),
					}...)
					resourcesToCreate = append(resourcesToCreate,
						util.NewVolumeResourceWithReservation(DISK_PER_TASK, task.containerPath, task.persistenceId, mesos.Volume_RW.Enum(), *mesosAuthPrincipal, *role))
					task.state = RequestedReservationState
					unreservedCpus = unreservedCpus - CPUS_PER_TASK
					unreservedMem = unreservedMem - MEM_PER_TASK
					unreservedDisk = unreservedDisk - DISK_PER_TASK
				}
			case RequestedReservationState:
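				// Reservation and volume are now visible in the offer: launch the task on them.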
				if CPUS_PER_TASK <= reservedCpus &&
					MEM_PER_TASK <= reservedMem &&
					DISK_PER_TASK <= reservedDisk &&
					resourcesHaveVolume(offer.Resources, task.persistenceId) {

					taskId := &mesos.TaskID{
						Value: proto.String(task.name),
					}

					taskInfo := &mesos.TaskInfo{
						Name:     proto.String("go-task-" + taskId.GetValue()),
						TaskId:   taskId,
						SlaveId:  offer.SlaveId,
						Executor: task.executor,
						Resources: []*mesos.Resource{
							util.NewScalarResourceWithReservation("cpus", CPUS_PER_TASK, *mesosAuthPrincipal, *role),
							util.NewScalarResourceWithReservation("mem", MEM_PER_TASK, *mesosAuthPrincipal, *role),
							util.NewVolumeResourceWithReservation(DISK_PER_TASK, task.containerPath, task.persistenceId, mesos.Volume_RW.Enum(), *mesosAuthPrincipal, *role),
						},
					}

					taskInfosToLaunch = append(taskInfosToLaunch, taskInfo)
					task.state = LaunchedState
					reservedCpus = reservedCpus - CPUS_PER_TASK
					reservedMem = reservedMem - MEM_PER_TASK
					reservedDisk = reservedDisk - DISK_PER_TASK

					log.Infof("Prepared task: %s with offer %s for launch\n", taskInfo.GetName(), offer.Id.GetValue())
				}
			case FinishedState:
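				// Task finished: destroy its persistent volume and release the reservation.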
				resourcesToDestroy = append(resourcesToDestroy,
					util.NewVolumeResourceWithReservation(DISK_PER_TASK, task.containerPath, task.persistenceId, mesos.Volume_RW.Enum(), *mesosAuthPrincipal, *role))
				resourcesToUnreserve = append(resourcesToUnreserve, []*mesos.Resource{
					util.NewScalarResourceWithReservation("cpus", CPUS_PER_TASK, *mesosAuthPrincipal, *role),
					util.NewScalarResourceWithReservation("mem", MEM_PER_TASK, *mesosAuthPrincipal, *role),
					util.NewScalarResourceWithReservation("disk", DISK_PER_TASK, *mesosAuthPrincipal, *role),
				}...)
				task.state = UnreservedState
			case UnreservedState:
				totalUnreserved = totalUnreserved + 1
			}
		}

		// Clean up reservations we no longer need
		if len(resourcesToReserve) == 0 && len(resourcesToCreate) == 0 && len(taskInfosToLaunch) == 0 {
			if reservedCpus >= 0.0 {
				resourcesToUnreserve = append(resourcesToUnreserve, util.NewScalarResourceWithReservation("cpus", reservedCpus, *mesosAuthPrincipal, *role))
			}
			if reservedMem >= 0.0 {
				resourcesToUnreserve = append(resourcesToUnreserve, util.NewScalarResourceWithReservation("mem", reservedCpus, *mesosAuthPrincipal, *role))
			}
			if reservedDisk >= 0.0 {
				filtered := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
					return res.GetName() == "disk" &&
						res.Reservation != nil &&
						res.Disk != nil
				})
				for _, volume := range filtered {
					resourcesToDestroy = append(resourcesToDestroy,
						util.NewVolumeResourceWithReservation(
							volume.GetScalar().GetValue(),
							volume.Disk.Volume.GetContainerPath(),
							volume.Disk.Persistence.GetId(),
							volume.Disk.Volume.Mode,
							*mesosAuthPrincipal,
							*role))
				}
				resourcesToUnreserve = append(resourcesToUnreserve, util.NewScalarResourceWithReservation("disk", reservedDisk, *mesosAuthPrincipal, *role))
			}
		}

		// Make a single operation per type
		if len(resourcesToReserve) > 0 {
			operations = append(operations, util.NewReserveOperation(resourcesToReserve))
		}
		if len(resourcesToCreate) > 0 {
			operations = append(operations, util.NewCreateOperation(resourcesToCreate))
		}
		if len(resourcesToUnreserve) > 0 {
			operations = append(operations, util.NewUnreserveOperation(resourcesToUnreserve))
		}
		if len(resourcesToDestroy) > 0 {
			operations = append(operations, util.NewDestroyOperation(resourcesToDestroy))
		}
		if len(taskInfosToLaunch) > 0 {
			operations = append(operations, util.NewLaunchOperation(taskInfosToLaunch))
		}

		log.Infoln("Accepting offers with ", len(operations), "operations for offer", offer.Id.GetValue())
		// Refuse re-offers of these resources for a few seconds, whether or not any operations were produced.
		refuseSeconds := 5.0
		driver.AcceptOffers([]*mesos.OfferID{offer.Id}, operations, &mesos.Filters{RefuseSeconds: proto.Float64(refuseSeconds)})

		if totalUnreserved >= len(sched.tasks) {
			log.Infoln("Total tasks completed and unreserved, stopping framework.")
			driver.Stop(false)
		}
	}
}
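The handlers above call getUnreservedResources and getReservedResources, which are not shown. A minimal sketch of the unreserved variant, assuming it simply sums the cpus/mem/disk scalars that carry no reservation metadata (util.FilterResources is the same helper used in the cleanup block); the reserved variant would filter on res.Reservation != nil instead:

func getUnreservedResources(resources []*mesos.Resource) (cpus, mem, disk float64) {
	// Keep only resources without reservation info, then total the scalars by name.
	unreserved := util.FilterResources(resources, func(res *mesos.Resource) bool {
		return res.Reservation == nil
	})
	for _, res := range unreserved {
		switch res.GetName() {
		case "cpus":
			cpus += res.GetScalar().GetValue()
		case "mem":
			mem += res.GetScalar().GetValue()
		case "disk":
			disk += res.GetScalar().GetValue()
		}
	}
	return cpus, mem, disk
}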