Example #1
func TestSchedCachePutSlavePid(t *testing.T) {
	cache := newSchedCache()

	// hypothetical slave PIDs: the test only needs distinct, parseable values
	pid01, err := upid.Parse("slave(1)@10.0.0.1:5050")
	assert.NoError(t, err)
	pid02, err := upid.Parse("slave(2)@10.0.0.2:5050")
	assert.NoError(t, err)
	pid03, err := upid.Parse("slave(3)@10.0.0.3:5050")
	assert.NoError(t, err)

	cache.putSlavePid(util.NewSlaveID("slave01"), pid01)
	cache.putSlavePid(util.NewSlaveID("slave02"), pid02)
	cache.putSlavePid(util.NewSlaveID("slave03"), pid03)

	assert.Equal(t, 3, len(cache.savedSlavePids))
	cachedSlavePid1, ok := cache.savedSlavePids["slave01"]
	assert.True(t, ok)
	cachedSlavePid2, ok := cache.savedSlavePids["slave02"]
	assert.True(t, ok)
	cachedSlavePid3, ok := cache.savedSlavePids["slave03"]
	assert.True(t, ok)

	assert.True(t, cachedSlavePid1.Equal(pid01))
	assert.True(t, cachedSlavePid2.Equal(pid02))
	assert.True(t, cachedSlavePid3.Equal(pid03))
}
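For context, a minimal sketch of the cache under test, assuming the layout implied by the assertions: a mutex-guarded map from stringified slave IDs to PIDs (only the slave-PID portion is shown; the field and method names mirror the test, everything else is an assumption).

type schedCache struct {
	lock           sync.RWMutex
	savedSlavePids map[string]*upid.UPID
}

func newSchedCache() *schedCache {
	return &schedCache{savedSlavePids: map[string]*upid.UPID{}}
}

// putSlavePid stores (or overwrites) the cached PID for the given slave.
func (cache *schedCache) putSlavePid(slaveID *mesos.SlaveID, pid *upid.UPID) {
	cache.lock.Lock()
	defer cache.lock.Unlock()
	cache.savedSlavePids[slaveID.GetValue()] = pid
}

// containsSlavePid reports whether a PID has been cached for the slave.
func (cache *schedCache) containsSlavePid(slaveID *mesos.SlaveID) bool {
	cache.lock.RLock()
	defer cache.lock.RUnlock()
	_, ok := cache.savedSlavePids[slaveID.GetValue()]
	return ok
}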
Example #2
//test adding of resource offers, should be added to offer registry and slaves
func TestResourceOffer_Add_Rescind(t *testing.T) {
	assert := assert.New(t)

	testFramework := &framework{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			DeclineOffer: func(offerId string) <-chan error {
				return proc.ErrorChan(nil)
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames: newSlaveRegistry(),
		sched:          mockScheduler(),
	}

	hostname := "h1"
	offerID1 := util.NewOfferID("test1")
	offer1 := &mesos.Offer{Id: offerID1, Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testFramework.ResourceOffers(nil, offers1)

	assert.Equal(1, getNumberOffers(testFramework.offers))

	//check slave hostname
	assert.Equal(1, len(testFramework.slaveHostNames.SlaveIDs()))

	//add another offer
	hostname2 := "h2"
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers2 := []*mesos.Offer{offer2}
	testFramework.ResourceOffers(nil, offers2)

	assert.Equal(2, getNumberOffers(testFramework.offers))

	//check slave hostnames
	assert.Equal(2, len(testFramework.slaveHostNames.SlaveIDs()))

	//next whether offers can be rescinded
	testFramework.OfferRescinded(nil, offerID1)
	assert.Equal(1, getNumberOffers(testFramework.offers))

	//next whether offers can be rescinded
	testFramework.OfferRescinded(nil, util.NewOfferID("test2"))
	//walk offers again and check it is removed from registry
	assert.Equal(0, getNumberOffers(testFramework.offers))

	//remove a non-existing ID
	testFramework.OfferRescinded(nil, util.NewOfferID("notExist"))
}
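Several of these tests call getNumberOffers, which is not part of the excerpt. A minimal sketch, assuming the offers.Registry exposes a Walk method that visits each live offer:

// getNumberOffers counts live offers by walking the registry.
func getNumberOffers(os offers.Registry) int {
	walked := 0
	os.Walk(func(offer offers.Perishable) (stop bool, err error) {
		walked++
		return false, nil
	})
	return walked
}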
Example #3
func (suite *SchedulerTestSuite) TestSchdulerDriverAcceptOffersWithError() {
	sched := mock_scheduler.New()
	sched.On("StatusUpdate").Return(nil)
	sched.On("Error").Return()

	msgr := mockedMessenger()
	driver := newTestDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, msgr))
	driver.OnDispatch(func(_ context.Context, _ *upid.UPID, _ proto.Message) error {
		return fmt.Errorf("Unable to send message")
	})

	go func() {
		driver.Run()
	}()
	<-driver.Started()
	driver.SetConnected(true) // simulated
	suite.True(driver.Running())

	// setup an offer
	offer := util.NewOffer(
		util.NewOfferID("test-offer-001"),
		suite.framework.Id,
		util.NewSlaveID("test-slave-001"),
		"test-slave(1)@localhost:5050",
	)

	pid, err := upid.Parse("test-slave(1)@localhost:5050")
	suite.NoError(err)
	driver.CacheOffer(offer, pid)

	// launch task
	task := util.NewTaskInfo(
		"simple-task",
		util.NewTaskID("simpe-task-1"),
		util.NewSlaveID("test-slave-001"),
		[]*mesos.Resource{util.NewScalarResourceWithReservation("mem", 400, "principal", "role")},
	)
	task.Command = util.NewCommandInfo("pwd")
	task.Executor = util.NewExecutorInfo(util.NewExecutorID("test-exec"), task.Command)
	tasks := []*mesos.TaskInfo{task}

	operations := []*mesos.Offer_Operation{util.NewLaunchOperation(tasks)}

	stat, err := driver.AcceptOffers(
		[]*mesos.OfferID{offer.Id},
		operations,
		&mesos.Filters{},
	)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
	suite.Error(err)
}
Example #4
func TestSchedCacheContainsSlavePid(t *testing.T) {
	cache := newSchedCache()

	// hypothetical slave PIDs: any two distinct, parseable values work here
	pid01, err := upid.Parse("slave(1)@10.0.0.1:5050")
	assert.NoError(t, err)
	pid02, err := upid.Parse("slave(2)@10.0.0.2:5050")
	assert.NoError(t, err)

	cache.putSlavePid(util.NewSlaveID("slave01"), pid01)
	cache.putSlavePid(util.NewSlaveID("slave02"), pid02)

	assert.True(t, cache.containsSlavePid(util.NewSlaveID("slave01")))
	assert.True(t, cache.containsSlavePid(util.NewSlaveID("slave02")))
	assert.False(t, cache.containsSlavePid(util.NewSlaveID("slave05")))
}
Example #5
func (suite *SchedulerTestSuite) TestSchdulerDriverLaunchTasks() {
	driver := newTestDriver(suite.T(), driverConfigMessenger(mock_scheduler.New(), suite.framework, suite.master, nil, mockedMessenger()))

	go func() {
		driver.Run()
	}()
	<-driver.Started()
	driver.SetConnected(true) // simulated
	suite.True(driver.Running())

	task := util.NewTaskInfo(
		"simple-task",
		util.NewTaskID("simpe-task-1"),
		util.NewSlaveID("slave-1"),
		[]*mesos.Resource{util.NewScalarResource("mem", 400)},
	)
	task.Command = util.NewCommandInfo("pwd")
	tasks := []*mesos.TaskInfo{task}

	stat, err := driver.LaunchTasks(
		[]*mesos.OfferID{{}},
		tasks,
		&mesos.Filters{},
	)
	suite.NoError(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
}
Example #6
func TestOffer(t *testing.T) {
	offer := util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0")

	if Offer(offer) != "slave0#30c49" {
		t.Errorf(`util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"), util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0") != "slave0#30c49"; actual %s`, Offer(offer))
	}

	offer.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]; actual %s", Offer(offer))
	}

	offer.Attributes = []*mesos.Attribute{&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00; actual %s", Offer(offer))
	}

	offer.Resources = nil
	if Offer(offer) != "slave0#30c49 rack:2.00" {
		t.Errorf("Expected slave0#30c49 rack:2.00; actual %s", Offer(offer))
	}
}
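The Offer formatter under test is not included in this excerpt, but the assertions pin its output down. A sketch consistent with them, assuming scalars print as name:%.2f, ranges as name:[begin..end], and the offer ID is abbreviated to its last five characters:

// Offer renders an offer as "hostname#id-suffix" followed by its scalar and
// range resources and its scalar attributes.
func Offer(offer *mesos.Offer) string {
	var buf bytes.Buffer
	buf.WriteString(offer.GetHostname())
	buf.WriteString("#")
	id := offer.GetId().GetValue()
	buf.WriteString(id[len(id)-5:])
	for _, res := range offer.GetResources() {
		switch {
		case res.GetScalar() != nil:
			fmt.Fprintf(&buf, " %s:%.2f", res.GetName(), res.GetScalar().GetValue())
		case res.GetRanges() != nil:
			for _, r := range res.GetRanges().GetRange() {
				fmt.Fprintf(&buf, " %s:[%d..%d]", res.GetName(), r.GetBegin(), r.GetEnd())
			}
		}
	}
	for _, attr := range offer.GetAttributes() {
		if attr.GetScalar() != nil {
			fmt.Fprintf(&buf, " %s:%.2f", attr.GetName(), attr.GetScalar().GetValue())
		}
	}
	return buf.String()
}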
Example #7
// executorRefs returns a slice of references to the executors known to this framework
func (k *framework) executorRefs() []executorRef {
	slaves := k.slaveHostNames.SlaveIDs()
	refs := make([]executorRef, 0, len(slaves))

	for _, slaveID := range slaves {
		hostname := k.slaveHostNames.HostName(slaveID)
		if hostname == "" {
			log.Warningf("hostname lookup for slaveID %q failed", slaveID)
			continue
		}

		node := k.lookupNode(hostname)
		if node == nil {
			log.Warningf("node lookup for slaveID %q failed", slaveID)
			continue
		}

		eid, ok := node.Annotations[meta.ExecutorIdKey]
		if !ok {
			log.Warningf("unable to find %q annotation for node %v", meta.ExecutorIdKey, node)
			continue
		}

		refs = append(refs, executorRef{
			executorID: mutil.NewExecutorID(eid),
			slaveID:    mutil.NewSlaveID(slaveID),
		})
	}

	return refs
}
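The executorRef type is not shown in this excerpt; judging from the composite literal above, it is presumably just the ID pair:

type executorRef struct {
	executorID *mesos.ExecutorID
	slaveID    *mesos.SlaveID
}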
Example #8
func (suite *SchedulerTestSuite) TestSchdulerDriverSendFrameworkMessage() {
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver, err := newTestSchedulerDriver(NewMockScheduler(), suite.framework, suite.master, nil)
	suite.NoError(err)
	driver.messenger = messenger
	suite.True(driver.Stopped())

	driver.Start()
	driver.setConnected(true) // simulated
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())

	stat, err := driver.SendFrameworkMessage(
		util.NewExecutorID("test-exec-001"),
		util.NewSlaveID("test-slave-001"),
		"Hello!",
	)
	suite.NoError(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
}
Example #9
func (suite *SchedulerTestSuite) TestSchdulerDriverRequestResources() {
	messenger := messenger.NewMockedMessenger()
	messenger.On("Start").Return(nil)
	messenger.On("UPID").Return(&upid.UPID{})
	messenger.On("Send").Return(nil)
	messenger.On("Stop").Return(nil)
	messenger.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), NewMockScheduler(), suite.framework, suite.master, nil)
	driver.messenger = messenger
	suite.True(driver.Stopped())

	driver.Start()
	driver.setConnected(true) // simulated
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())

	stat, err := driver.RequestResources(
		[]*mesos.Request{
			{
				SlaveId: util.NewSlaveID("test-slave-001"),
				Resources: []*mesos.Resource{
					util.NewScalarResource("test-res-001", 33.00),
				},
			},
		},
	)
	suite.NoError(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
}
Example #10
func createTestOffer(idSuffix string) *mesos.Offer {
	return util.NewOffer(
		util.NewOfferID("test-offer-"+idSuffix),
		util.NewFrameworkID("test-framework-"+idSuffix),
		util.NewSlaveID("test-slave-"+idSuffix),
		"localhost."+idSuffix,
	)
}
Example #11
//test adding of resource offers, should be added to offer registry and slaves
func TestResourceOffer_Add(t *testing.T) {
	assert := assert.New(t)

	registrator := &mockRegistrator{cache.NewStore(cache.MetaNamespaceKeyFunc)}
	testFramework := &framework{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			DeclineOffer: func(offerId string) <-chan error {
				return proc.ErrorChan(nil)
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames:  newSlaveRegistry(),
		nodeRegistrator: registrator,
		sched:           mockScheduler(),
	}

	hostname := "h1"
	offerID1 := util.NewOfferID("test1")
	offer1 := &mesos.Offer{Id: offerID1, Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testFramework.ResourceOffers(nil, offers1)
	assert.Equal(1, len(registrator.store.List()))

	assert.Equal(1, getNumberOffers(testFramework.offers))
	//check slave hostname
	assert.Equal(1, len(testFramework.slaveHostNames.SlaveIDs()))

	//add another offer
	hostname2 := "h2"
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers2 := []*mesos.Offer{offer2}
	testFramework.ResourceOffers(nil, offers2)

	//check it is stored in registry
	assert.Equal(2, getNumberOffers(testFramework.offers))

	//check slave hostnames
	assert.Equal(2, len(testFramework.slaveHostNames.SlaveIDs()))
}
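The mockRegistrator used above is not part of the excerpt. A minimal sketch, assuming the node-registrator interface receives a hostname plus labels and that registering simply adds a node object to the backing cache.Store (the api.Node type and the Run/Register signatures are assumptions of this sketch):

type mockRegistrator struct {
	store cache.Store
}

func (r *mockRegistrator) Run(terminate <-chan struct{}) error { return nil }

// Register records the node in the backing store so the test can observe it
// via store.List(); a real registrator would talk to the apiserver instead.
func (r *mockRegistrator) Register(hostName string, labels map[string]string) (bool, error) {
	return true, r.store.Add(&api.Node{
		ObjectMeta: api.ObjectMeta{Name: hostName, Labels: labels},
	})
}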
Example #12
func TestOffers(t *testing.T) {
	offer1 := util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0")
	offer1.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})}

	offer2 := util.NewOffer(util.NewOfferID("26d5b34c-ef81-638d-5ad5-32c743c9c033"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0037"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S0"), "master")
	offer2.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 2), util.NewScalarResource("mem", 1024), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(4000, 7000)})}
	offer2.Attributes = []*mesos.Attribute{&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}}

	offers := Offers([]*mesos.Offer{offer1, offer2})
	if len(strings.Split(offers, "\n")) != 2 {
		t.Errorf("Offers([]*mesos.Offer{offer1, offer2}) should contain two offers split by new line, actual: %s", offers)
	}
}
Example #13
func (s *DiegoScheduler) scheduleMatched(driver sched.SchedulerDriver, matches map[string]*OfferMatch) auctiontypes.AuctionResults {
	results := auctiontypes.AuctionResults{}

	for slaveId, match := range matches {
		if slaveId != "" {
			offers := match.Offers

			taskInfos := []*mesos.TaskInfo{}
			for _, lrpAuction := range match.LrpAuctions {
				taskInfo := s.createLrpTaskInfo(util.NewSlaveID(slaveId), &lrpAuction)
				taskInfos = append(taskInfos, taskInfo)
				results.SuccessfulLRPs = append(results.SuccessfulLRPs, lrpAuction)
				log.Infof("+scheduled lrp, lrp: %v/%v mem: %v, offers: mem: %v",
					lrpAuction.ProcessGuid, lrpAuction.Index, lrpAuction.MemoryMB, getOffersMem(offers))
			}
			for _, taskAuction := range match.TaskAuctions {
				taskInfo := s.createTaskTaskInfo(util.NewSlaveID(slaveId), &taskAuction)
				taskInfos = append(taskInfos, taskInfo)
				results.SuccessfulTasks = append(results.SuccessfulTasks, taskAuction)
				log.Infof("+scheduled task, task: %v mem: %v, offers: mem: %v",
					taskAuction.TaskGuid, taskAuction.MemoryMB, getOffersMem(offers))
			}

			driver.LaunchTasks(extractOfferIds(offers), taskInfos, // offers get declined if there are no tasks
				&mesos.Filters{RefuseSeconds: proto.Float64(30)})

		} else {
			for _, lrpAuction := range match.LrpAuctions {
				results.FailedLRPs = append(results.FailedLRPs, lrpAuction)
				log.Warningf("+schedule lrp failed, lrp: %v/%v mem: %v, offers: mem: %v",
					lrpAuction.GetProcessGuid(), lrpAuction.Index, lrpAuction.MemoryMB, getOffersMem(match.Offers))
			}
			for _, taskAuction := range match.TaskAuctions {
				results.FailedTasks = append(results.FailedTasks, taskAuction)
				log.Warningf("+schedule task failed, task: %v mem: %v, offers: mem: %v",
					taskAuction.TaskGuid, taskAuction.MemoryMB, getOffersMem(match.Offers))
			}
		}
	}

	return results
}
Example #14
func TestSchedCacheGetSlavePid(t *testing.T) {
	cache := newSchedCache()

	// hypothetical slave PIDs: the assertions below require two distinct values
	pid01, err := upid.Parse("slave(1)@10.0.0.1:5050")
	assert.NoError(t, err)
	pid02, err := upid.Parse("slave(2)@10.0.0.2:5050")
	assert.NoError(t, err)

	cache.putSlavePid(util.NewSlaveID("slave01"), pid01)
	cache.putSlavePid(util.NewSlaveID("slave02"), pid02)

	cachedSlavePid1 := cache.getSlavePid(util.NewSlaveID("slave01"))
	cachedSlavePid2 := cache.getSlavePid(util.NewSlaveID("slave02"))

	assert.NotNil(t, cachedSlavePid1)
	assert.NotNil(t, cachedSlavePid2)
	assert.True(t, pid01.Equal(cachedSlavePid1))
	assert.True(t, pid02.Equal(cachedSlavePid2))
	assert.False(t, pid01.Equal(cachedSlavePid2))
}
Example #15
//test adding of resource offers, should be added to offer registry and slaves
func TestResourceOffer_Add(t *testing.T) {
	assert := assert.New(t)

	testScheduler := &KubernetesScheduler{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			DeclineOffer: func(offerId string) <-chan error {
				return proc.ErrorChan(nil)
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaves: newSlaveStorage(),
	}

	hostname := "h1"
	offerID1 := util.NewOfferID("test1")
	offer1 := &mesos.Offer{Id: offerID1, Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testScheduler.ResourceOffers(nil, offers1)

	assert.Equal(1, getNumberOffers(testScheduler.offers))
	//check slave hostname
	assert.Equal(1, len(testScheduler.slaves.getSlaveIds()))

	//add another offer
	hostname2 := "h2"
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers2 := []*mesos.Offer{offer2}
	testScheduler.ResourceOffers(nil, offers2)

	//check it is stored in registry
	assert.Equal(2, getNumberOffers(testScheduler.offers))

	//check slave hostnames
	assert.Equal(2, len(testScheduler.slaves.getSlaveIds()))
}
Example #16
func TestExecutorDriverRunTaskEvent(t *testing.T) {
	setTestEnv(t)
	ch := make(chan bool, 2)
	// Mock Slave process to respond to registration event.
	server := testutil.NewMockSlaveHttpServer(t, func(rsp http.ResponseWriter, req *http.Request) {
		reqPath, err := url.QueryUnescape(req.URL.String())
		assert.NoError(t, err)
		log.Infoln("RCVD request", reqPath)
		rsp.WriteHeader(http.StatusAccepted)
	})

	defer server.Close()

	exec := newTestExecutor(t)
	exec.ch = ch
	exec.t = t

	// start
	driver := newIntegrationTestDriver(t, exec)
	stat, err := driver.Start()
	assert.NoError(t, err)
	assert.Equal(t, mesos.Status_DRIVER_RUNNING, stat)
	driver.setConnected(true)
	defer driver.Stop()

	// send runtask event to driver
	pbMsg := &mesos.RunTaskMessage{
		FrameworkId: util.NewFrameworkID(frameworkID),
		Framework: util.NewFrameworkInfo(
			"test", "test-framework-001", util.NewFrameworkID(frameworkID),
		),
		Pid: proto.String(server.PID.String()),
		Task: util.NewTaskInfo(
			"test-task",
			util.NewTaskID("test-task-001"),
			util.NewSlaveID(slaveID),
			[]*mesos.Resource{
				util.NewScalarResource("mem", 112),
				util.NewScalarResource("cpus", 2),
			},
		),
	}

	c := testutil.NewMockMesosClient(t, server.PID)
	c.SendMessage(driver.self, pbMsg)

	select {
	case <-ch:
	case <-time.After(time.Second * 2):
		log.Errorf("Tired of waiting...")
	}

}
Example #17
//test that when we lose the connection to the master we invalidate all cached offers
func TestDisconnect(t *testing.T) {
	assert := assert.New(t)

	testFramework := &framework{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames: newSlaveRegistry(),
		sched:          mockScheduler(),
	}

	hostname := "h1"
	offer1 := &mesos.Offer{Id: util.NewOfferID("test1"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testFramework.ResourceOffers(nil, offers1)
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers2 := []*mesos.Offer{offer2}
	testFramework.ResourceOffers(nil, offers2)

	//add another offer from different slaveID
	hostname2 := "h2"
	offer3 := &mesos.Offer{Id: util.NewOfferID("test3"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers3 := []*mesos.Offer{offer3}
	testFramework.ResourceOffers(nil, offers3)

	//disconnect
	testFramework.Disconnected(nil)

	//all offers should be removed
	assert.Equal(0, getNumberOffers(testFramework.offers))
	//slave hostnames should all still be present
	assert.Equal(2, len(testFramework.slaveHostNames.SlaveIDs()))
}
Example #18
func (suite *SchedulerIntegrationTestSuite) TestSchedulerDriverLostSlaveEvent() {
	ok := suite.configureServerWithRegisteredFramework()
	suite.True(ok, "failed to establish running test server and driver")

	// Send an event to this SchedulerDriver (via http) to test handlers.
	pbMsg := &mesos.LostSlaveMessage{
		SlaveId: util.NewSlaveID("test-slave-001"),
	}

	c := suite.newMockClient()
	c.SendMessage(suite.driver.UPID(), pbMsg)
	suite.sched.waitForCallback(0)
}
Example #19
// execute an explicit task reconciliation, as per http://mesos.apache.org/documentation/latest/reconciliation/
func (k *KubernetesScheduler) explicitlyReconcileTasks(driver bindings.SchedulerDriver, taskToSlave map[string]string, cancel <-chan struct{}) error {
	log.Info("explicit reconcile tasks")

	// tell mesos to send us the latest status updates for all the non-terminal tasks that we know about
	statusList := []*mesos.TaskStatus{}
	remaining := sets.KeySet(reflect.ValueOf(taskToSlave))
	for taskId, slaveId := range taskToSlave {
		if slaveId == "" {
			delete(taskToSlave, taskId)
			continue
		}
		statusList = append(statusList, &mesos.TaskStatus{
			TaskId:  mutil.NewTaskID(taskId),
			SlaveId: mutil.NewSlaveID(slaveId),
			State:   mesos.TaskState_TASK_RUNNING.Enum(), // req'd field, doesn't have to reflect reality
		})
	}

	select {
	case <-cancel:
		return reconciliationCancelledErr
	default:
		if _, err := driver.ReconcileTasks(statusList); err != nil {
			return err
		}
	}

	start := time.Now()
	first := true
	for backoff := 1 * time.Second; first || remaining.Len() > 0; backoff = backoff * 2 {
		first = false
		// nothing to do here other than wait for status updates..
		if backoff > k.schedcfg.ExplicitReconciliationMaxBackoff.Duration {
			backoff = k.schedcfg.ExplicitReconciliationMaxBackoff.Duration
		}
		select {
		case <-cancel:
			return reconciliationCancelledErr
		case <-time.After(backoff):
			for taskId := range remaining {
				if task, _ := k.taskRegistry.Get(taskId); task != nil && explicitTaskFilter(task) && task.UpdatedTime.Before(start) {
					// keep this task in remaining list
					continue
				}
				remaining.Delete(taskId)
			}
		}
	}
	return nil
}
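A hedged usage sketch for the function above: the wrapper name and the task/slave IDs below are hypothetical, but the shape of the call (a map of non-terminal task IDs to slave IDs plus a cancel channel) follows from the signature.

// reconcileOnce runs one explicit reconciliation pass and treats an error
// (including cancellation) as "retry later".
func (k *KubernetesScheduler) reconcileOnce(driver bindings.SchedulerDriver, cancel <-chan struct{}) {
	taskToSlave := map[string]string{
		"pod.abc123": "20150903-065451-84125888-5050-10715-S1", // hypothetical IDs
	}
	if err := k.explicitlyReconcileTasks(driver, taskToSlave, cancel); err != nil {
		log.Warningf("explicit reconciliation failed: %v", err)
	}
}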
Example #20
func TestFilterResources(t *testing.T) {
	rf := ResourceFilter{}
	o := util.NewOffer(util.NewOfferID("offerid"), util.NewFrameworkID("frameworkid"), util.NewSlaveID("slaveId"), "hostname")
	o.Resources = []*mesos.Resource{
		util.NewScalarResource("name", 1.0),
		util.NewScalarResource("ub0r-resource", 2.0),
		util.NewScalarResource("ub0r-resource", 3.0),
	}

	res := rf.FilterResources(o, "ub0r-resource")

	assert.Equal(t, 2, len(res))
	assert.Equal(t, "ub0r-resource", res[0].GetName())
}
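FilterResources itself is not shown; the test implies it selects the named subset of an offer's resources. A minimal sketch consistent with that behavior:

// FilterResources returns the offer's resources whose name matches exactly.
func (f ResourceFilter) FilterResources(offer *mesos.Offer, name string) []*mesos.Resource {
	result := []*mesos.Resource{}
	for _, resource := range offer.Resources {
		if resource.GetName() == name {
			result = append(result, resource)
		}
	}
	return result
}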
Example #21
func TestExecutorDriverReconnectEvent(t *testing.T) {
	setTestEnv(t)
	ch := make(chan bool, 2)
	// Mock Slave process to respond to registration event.
	server := testutil.NewMockSlaveHttpServer(t, func(rsp http.ResponseWriter, req *http.Request) {
		reqPath, err := url.QueryUnescape(req.URL.String())
		assert.NoError(t, err)
		log.Infoln("RCVD request", reqPath)

		// exec registration request
		if strings.Contains(reqPath, "RegisterExecutorMessage") {
			log.Infoln("Got Executor registration request")
		}

		if strings.Contains(reqPath, "ReregisterExecutorMessage") {
			log.Infoln("Got Executor Re-registration request")
			ch <- true
		}

		rsp.WriteHeader(http.StatusAccepted)
	})

	defer server.Close()

	exec := newTestExecutor(t)
	exec.t = t

	// start
	driver := newIntegrationTestDriver(t, exec)
	stat, err := driver.Start()
	assert.NoError(t, err)
	assert.Equal(t, mesos.Status_DRIVER_RUNNING, stat)
	driver.setConnected(true)
	defer driver.Stop()

	// send "reconnect" event to driver
	pbMsg := &mesos.ReconnectExecutorMessage{
		SlaveId: util.NewSlaveID(slaveID),
	}
	c := testutil.NewMockMesosClient(t, server.PID)
	c.SendMessage(driver.self, pbMsg)

	select {
	case <-ch:
	case <-time.After(time.Second * 2):
		log.Errorf("Tired of waiting...")
	}

}
Example #22
// Offering some cpus and memory and the 8000-9000 port range
func NewTestOffer(id string) *mesos.Offer {
	hostname := "some_hostname"
	cpus := util.NewScalarResource("cpus", 3.75)
	mem := util.NewScalarResource("mem", 940)
	var port8000 uint64 = 8000
	var port9000 uint64 = 9000
	ports8000to9000 := mesos.Value_Range{Begin: &port8000, End: &port9000}
	ports := util.NewRangesResource("ports", []*mesos.Value_Range{&ports8000to9000})
	return &mesos.Offer{
		Id:        util.NewOfferID(id),
		Hostname:  &hostname,
		SlaveId:   util.NewSlaveID(hostname),
		Resources: []*mesos.Resource{cpus, mem, ports},
	}
}
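A hypothetical usage sketch: tests that only need a well-formed offer can assert against the synthetic resources directly (the test name is an assumption).

func TestOfferHasPortRange(t *testing.T) {
	offer := NewTestOffer("offer-42")
	// resources are appended in cpus, mem, ports order above
	ports := offer.GetResources()[2].GetRanges().GetRange()
	assert.Equal(t, uint64(8000), ports[0].GetBegin())
	assert.Equal(t, uint64(9000), ports[0].GetEnd())
}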
Example #23
func (suite *SchedulerTestSuite) TestSchdulerDriverSendFrameworkMessage() {
	driver := newTestDriver(suite.T(), driverConfigMessenger(mock_scheduler.New(), suite.framework, suite.master, nil, mockedMessenger()))

	driver.Start()
	driver.SetConnected(true) // simulated
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())

	stat, err := driver.SendFrameworkMessage(
		util.NewExecutorID("test-exec-001"),
		util.NewSlaveID("test-slave-001"),
		"Hello!",
	)
	suite.NoError(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
}
Example #24
func (t *T) BuildTaskInfo() (*mesos.TaskInfo, error) {
	if t.Spec == nil {
		return nil, errors.New("no podtask.T.Spec given, cannot build task info")
	}

	info := &mesos.TaskInfo{
		Name:      proto.String(generateTaskName(&t.Pod)),
		TaskId:    mutil.NewTaskID(t.ID),
		Executor:  t.Spec.Executor,
		Data:      t.Spec.Data,
		Resources: t.Spec.Resources,
		SlaveId:   mutil.NewSlaveID(t.Spec.SlaveID),
	}

	return info, nil
}
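A hedged usage sketch (the helper name and error handling are assumptions): build the TaskInfo for a matched offer and hand it to the driver, mirroring the LaunchTasks call shape from the earlier driver examples.

func launch(t *T, driver bindings.SchedulerDriver, offer *mesos.Offer) error {
	info, err := t.BuildTaskInfo()
	if err != nil {
		return err
	}
	_, err = driver.LaunchTasks(
		[]*mesos.OfferID{offer.Id},
		[]*mesos.TaskInfo{info},
		&mesos.Filters{},
	)
	return err
}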
Example #25
func (suite *SchedulerIntegrationTestSuite) TestSchedulerDriverFrameworkMessageEvent() {
	ok := suite.configureServerWithRegisteredFramework()
	suite.True(ok, "failed to establish running test server and driver")

	// Send an event to this SchedulerDriver (via http) to test handlers.
	pbMsg := &mesos.ExecutorToFrameworkMessage{
		SlaveId:     util.NewSlaveID("test-slave-001"),
		FrameworkId: suite.registeredFrameworkId,
		ExecutorId:  util.NewExecutorID("test-executor-001"),
		Data:        []byte("test-data-999"),
	}

	c := suite.newMockClient()
	c.SendMessage(suite.driver.self, pbMsg)
	suite.sched.waitForCallback(0)
}
Example #26
func NewOffer(id string) *mesos.Offer {
	return &mesos.Offer{
		Id:          util.NewOfferID(id),
		FrameworkId: util.NewFrameworkID("test-etcd-framework"),
		SlaveId:     util.NewSlaveID("slave-" + id),
		Hostname:    proto.String("localhost"),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", 1),
			util.NewScalarResource("mem", 256),
			util.NewScalarResource("disk", 4096),
			util.NewRangesResource("ports", []*mesos.Value_Range{
				util.NewValueRange(uint64(0), uint64(65535)),
			}),
		},
	}
}
Example #27
//test that when a slave is lost we remove all offers
func TestSlave_Lost(t *testing.T) {
	assert := assert.New(t)

	testScheduler := &KubernetesScheduler{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames: slave.NewRegistry(),
	}

	hostname := "h1"
	offer1 := &mesos.Offer{Id: util.NewOfferID("test1"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testScheduler.ResourceOffers(nil, offers1)
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers2 := []*mesos.Offer{offer2}
	testScheduler.ResourceOffers(nil, offers2)

	//add another offer from different slaveID
	hostname2 := "h2"
	offer3 := &mesos.Offer{Id: util.NewOfferID("test3"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers3 := []*mesos.Offer{offer3}
	testScheduler.ResourceOffers(nil, offers3)

	//test precondition
	assert.Equal(3, getNumberOffers(testScheduler.offers))
	assert.Equal(2, len(testScheduler.slaveHostNames.SlaveIDs()))

	//remove first slave
	testScheduler.SlaveLost(nil, util.NewSlaveID(hostname))

	//offers should be removed
	assert.Equal(1, getNumberOffers(testScheduler.offers))
	//slave hostnames should all still be present
	assert.Equal(2, len(testScheduler.slaveHostNames.SlaveIDs()))

	//remove second slave
	testScheduler.SlaveLost(nil, util.NewSlaveID(hostname2))

	//offers should be removed
	assert.Equal(0, getNumberOffers(testScheduler.offers))
	//slave hostnames should all still be present
	assert.Equal(2, len(testScheduler.slaveHostNames.SlaveIDs()))

	//try to remove a non-existing slave
	testScheduler.SlaveLost(nil, util.NewSlaveID("notExist"))

}
Example #28
func (k *KubernetesScheduler) InstallDebugHandlers(mux *http.ServeMux) {
	wrappedHandler := func(uri string, h http.Handler) {
		mux.HandleFunc(uri, func(w http.ResponseWriter, r *http.Request) {
			ch := make(chan struct{})
			closer := runtime.Closer(ch)
			proc.OnError(k.asMaster().Do(func() {
				defer closer()
				h.ServeHTTP(w, r)
			}), func(err error) {
				defer closer()
				log.Warningf("failed HTTP request for %s: %v", uri, err)
				w.WriteHeader(http.StatusServiceUnavailable)
			}, k.terminate)
			select {
			case <-time.After(k.schedcfg.HttpHandlerTimeout.Duration):
				log.Warningf("timed out waiting for request to be processed")
				w.WriteHeader(http.StatusServiceUnavailable)
				return
			case <-ch: // noop
			}
		})
	}
	requestReconciliation := func(uri string, requestAction func()) {
		wrappedHandler(uri, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			requestAction()
			w.WriteHeader(http.StatusNoContent)
		}))
	}
	requestReconciliation("/debug/actions/requestExplicit", k.reconciler.RequestExplicit)
	requestReconciliation("/debug/actions/requestImplicit", k.reconciler.RequestImplicit)

	wrappedHandler("/debug/actions/kamikaze", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		slaves := k.slaves.getSlaveIds()
		for _, slaveId := range slaves {
			_, err := k.driver.SendFrameworkMessage(
				k.executor.ExecutorId,
				mutil.NewSlaveID(slaveId),
				messages.Kamikaze)
			if err != nil {
				log.Warningf("failed to send kamikaze message to slave %s: %v", slaveId, err)
			} else {
				io.WriteString(w, fmt.Sprintf("kamikaze slave %s\n", slaveId))
			}
		}
		io.WriteString(w, "OK")
	}))
}
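A hedged usage sketch (the helper name and listen address are assumptions): the handlers are installed into a caller-supplied mux, so serving them is the caller's job.

// serveDebug exposes the debug actions on a private endpoint.
func serveDebug(k *KubernetesScheduler) {
	mux := http.NewServeMux()
	k.InstallDebugHandlers(mux)
	go func() {
		if err := http.ListenAndServe("127.0.0.1:10251", mux); err != nil {
			log.Errorf("debug endpoint terminated: %v", err)
		}
	}()
}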
Example #29
func (t *T) BuildTaskInfo() *mesos.TaskInfo {
	info := &mesos.TaskInfo{
		Name:     proto.String(generateTaskName(&t.Pod)),
		TaskId:   mutil.NewTaskID(t.ID),
		SlaveId:  mutil.NewSlaveID(t.Spec.SlaveID),
		Executor: t.executor,
		Data:     t.Spec.Data,
		Resources: []*mesos.Resource{
			mutil.NewScalarResource("cpus", float64(t.Spec.CPU)),
			mutil.NewScalarResource("mem", float64(t.Spec.Memory)),
		},
	}
	if portsResource := rangeResource("ports", t.Spec.Ports); portsResource != nil {
		info.Resources = append(info.Resources, portsResource)
	}
	return info
}
Example #30
func TestExecutorDriverFrameworkToExecutorMessageEvent(t *testing.T) {
	setTestEnv(t)
	ch := make(chan bool, 2)
	// Mock Slave process to respond to registration event.
	server := testutil.NewMockSlaveHttpServer(t, func(rsp http.ResponseWriter, req *http.Request) {
		reqPath, err := url.QueryUnescape(req.URL.String())
		assert.NoError(t, err)
		log.Infoln("RCVD request", reqPath)
		rsp.WriteHeader(http.StatusAccepted)
	})

	defer server.Close()

	exec := newTestExecutor(t)
	exec.ch = ch
	exec.t = t

	// start
	driver := newIntegrationTestDriver(t, exec)
	stat, err := driver.Start()
	assert.NoError(t, err)
	assert.Equal(t, mesos.Status_DRIVER_RUNNING, stat)
	driver.setConnected(true)
	defer driver.Stop()

	// send runtask event to driver
	pbMsg := &mesos.FrameworkToExecutorMessage{
		SlaveId:     util.NewSlaveID(slaveID),
		ExecutorId:  util.NewExecutorID(executorID),
		FrameworkId: util.NewFrameworkID(frameworkID),
		Data:        []byte("Hello-Test"),
	}

	c := testutil.NewMockMesosClient(t, server.PID)
	c.SendMessage(driver.self, pbMsg)

	select {
	case <-ch:
	case <-time.After(time.Second * 1):
		log.Errorf("Tired of waiting...")
	}
}