Example 1
//test that when a slave is lost we remove all of its offers
func TestSlave_Lost(t *testing.T) {
	assert := assert.New(t)

	//create a scheduler with stub offer and slave registries
	testScheduler := &KubernetesScheduler{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames: slave.NewRegistry(),
	}

	hostname := "h1"
	offer1 := &mesos.Offer{Id: util.NewOfferID("test1"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testScheduler.ResourceOffers(nil, offers1)
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers2 := []*mesos.Offer{offer2}
	testScheduler.ResourceOffers(nil, offers2)

	//add another offer from a different slave ID
	hostname2 := "h2"
	offer3 := &mesos.Offer{Id: util.NewOfferID("test3"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers3 := []*mesos.Offer{offer3}
	testScheduler.ResourceOffers(nil, offers3)

	//test preconditions
	assert.Equal(3, getNumberOffers(testScheduler.offers))
	assert.Equal(2, len(testScheduler.slaveHostNames.SlaveIDs()))

	//remove first slave
	testScheduler.SlaveLost(nil, util.NewSlaveID(hostname))

	//offers should be removed
	assert.Equal(1, getNumberOffers(testScheduler.offers))
	//all slave hostnames should still be present
	assert.Equal(2, len(testScheduler.slaveHostNames.SlaveIDs()))

	//remove second slave
	testScheduler.SlaveLost(nil, util.NewSlaveID(hostname2))

	//offers should be removed
	assert.Equal(0, getNumberOffers(testScheduler.offers))
	//all slave hostnames should still be present
	assert.Equal(2, len(testScheduler.slaveHostNames.SlaveIDs()))

	//try to remove a non-existing slave; this should be a no-op
	testScheduler.SlaveLost(nil, util.NewSlaveID("notExist"))

}
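The getNumberOffers helper used by these assertions is not shown here. A minimal sketch, assuming offers.Registry exposes a Walk method that visits every live offer and stops when the walker returns true:

//getNumberOffers counts live offers by walking the registry (sketch; the
//walker signature is assumed to be func(offers.Perishable) (bool, error))
func getNumberOffers(os offers.Registry) int {
	walked := 0
	os.Walk(func(offer offers.Perishable) (bool, error) {
		walked++
		return false, nil
	})
	return walked
}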
Example 2
//test adding of resource offers; they should be added to the offer registry and the slave registry
func TestResourceOffer_Add_Rescind(t *testing.T) {
	assert := assert.New(t)

	testScheduler := &KubernetesScheduler{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			DeclineOffer: func(offerId string) <-chan error {
				return proc.ErrorChan(nil)
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames: slave.NewRegistry(),
	}

	hostname := "h1"
	offerID1 := util.NewOfferID("test1")
	offer1 := &mesos.Offer{Id: offerID1, Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testScheduler.ResourceOffers(nil, offers1)

	assert.Equal(1, getNumberOffers(testScheduler.offers))

	//check slave hostname
	assert.Equal(1, len(testScheduler.slaveHostNames.SlaveIDs()))

	//add another offer
	hostname2 := "h2"
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers2 := []*mesos.Offer{offer2}
	testScheduler.ResourceOffers(nil, offers2)

	assert.Equal(2, getNumberOffers(testScheduler.offers))

	//check slave hostnames
	assert.Equal(2, len(testScheduler.slaveHostNames.SlaveIDs()))

	//next, check whether offers can be rescinded
	testScheduler.OfferRescinded(nil, offerID1)
	assert.Equal(1, getNumberOffers(testScheduler.offers))

	//rescind the second offer as well
	testScheduler.OfferRescinded(nil, util.NewOfferID("test2"))
	//walk the offers again and check that it was removed from the registry
	assert.Equal(0, getNumberOffers(testScheduler.offers))

	//rescinding a non-existing offer ID should be a no-op
	testScheduler.OfferRescinded(nil, util.NewOfferID("notExist"))
}
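The DeclineOffer stub above returns proc.ErrorChan(nil). A plausible shape for that helper, sketched here rather than taken from the proc package, is a buffered channel that delivers one error value and is then closed:

//ErrorChan returns a closed, buffered channel carrying a single error value,
//so the receiver never blocks (sketch of the assumed proc.ErrorChan semantics)
func ErrorChan(err error) <-chan error {
	ch := make(chan error, 1)
	ch <- err
	close(ch)
	return ch
}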
Example 3
// New creates a new KubernetesScheduler
func New(config Config) *KubernetesScheduler {
	// k is declared before assignment so that the closures in the struct
	// literal below can capture it
	var k *KubernetesScheduler
	k = &KubernetesScheduler{
		schedcfg:          &config.Schedcfg,
		RWMutex:           new(sync.RWMutex),
		executor:          config.Executor,
		executorGroup:     uid.Parse(config.Executor.ExecutorId.GetValue()).Group(),
		PodScheduler:      config.Scheduler,
		client:            config.Client,
		etcdClient:        config.EtcdClient,
		failoverTimeout:   config.FailoverTimeout,
		reconcileInterval: config.ReconcileInterval,
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				// filter the offers: the executor IDs must not identify a kubelet-
				// executor with a group that doesn't match ours
				for _, eid := range o.GetExecutorIds() {
					execuid := uid.Parse(eid.GetValue())
					if execuid.Name() == execcfg.DefaultInfoID && execuid.Group() != k.executorGroup {
						return false
					}
				}
				return true
			},
			DeclineOffer: func(id string) <-chan error {
				errOnce := proc.NewErrorOnce(k.terminate)
				errOuter := k.asRegisteredMaster.Do(func() {
					var err error
					defer errOnce.Report(err)
					offerId := mutil.NewOfferID(id)
					filters := &mesos.Filters{}
					_, err = k.driver.DeclineOffer(offerId, filters)
				})
				return errOnce.Send(errOuter).Err()
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     config.Schedcfg.OfferLingerTTL.Duration,
			TTL:           config.Schedcfg.OfferTTL.Duration,
			ListenerDelay: config.Schedcfg.ListenerDelay.Duration,
		}),
		slaveHostNames:    slave.NewRegistry(),
		taskRegistry:      podtask.NewInMemoryRegistry(),
		reconcileCooldown: config.ReconcileCooldown,
		registration:      make(chan struct{}),
		asRegisteredMaster: proc.DoerFunc(func(proc.Action) <-chan error {
			return proc.ErrorChanf("cannot execute action with unregistered scheduler")
		}),
	}
	return k
}
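Until registration succeeds, asRegisteredMaster fails every action. The Doer/DoerFunc adapter it builds on can be illustrated with a self-contained sketch; the Action and Doer types below are simplified stand-ins for the proc package's versions:

package main

import "fmt"

type Action func()

//Doer runs an action and reports the outcome on an error channel
type Doer interface {
	Do(Action) <-chan error
}

//DoerFunc adapts a plain function to the Doer interface (simplified sketch
//of proc.DoerFunc)
type DoerFunc func(Action) <-chan error

func (f DoerFunc) Do(a Action) <-chan error { return f(a) }

func main() {
	//before registration, every action fails fast with a fixed error
	unregistered := DoerFunc(func(Action) <-chan error {
		ch := make(chan error, 1)
		ch <- fmt.Errorf("cannot execute action with unregistered scheduler")
		close(ch)
		return ch
	})
	fmt.Println(<-unregistered.Do(func() {}))
}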
Example 4
//test that we can handle different status updates; TODO: check state transitions
func TestStatus_Update(t *testing.T) {

	mockdriver := MockSchedulerDriver{}
	// setup expectations
	mockdriver.On("KillTask", util.NewTaskID("test-task-001")).Return(mesos.Status_DRIVER_RUNNING, nil)

	testScheduler := &KubernetesScheduler{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames: slave.NewRegistry(),
		driver:         &mockdriver,
		taskRegistry:   podtask.NewInMemoryRegistry(),
	}

	taskStatus_task_starting := util.NewTaskStatus(
		util.NewTaskID("test-task-001"),
		mesos.TaskState_TASK_STARTING,
	)
	testScheduler.StatusUpdate(testScheduler.driver, taskStatus_task_starting)

	taskStatus_task_running := util.NewTaskStatus(
		util.NewTaskID("test-task-001"),
		mesos.TaskState_TASK_RUNNING,
	)
	testScheduler.StatusUpdate(testScheduler.driver, taskStatus_task_running)

	taskStatus_task_failed := util.NewTaskStatus(
		util.NewTaskID("test-task-001"),
		mesos.TaskState_TASK_FAILED,
	)
	testScheduler.StatusUpdate(testScheduler.driver, taskStatus_task_failed)

	//assert that mock was invoked
	mockdriver.AssertExpectations(t)
}
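MockSchedulerDriver is defined elsewhere in the test package. A minimal sketch of the one method the expectation above needs, assuming the mock embeds testify's mock.Mock and matches the mesos-go driver's KillTask signature:

type MockSchedulerDriver struct {
	mock.Mock
}

//KillTask records the call and returns whatever was configured via
//On("KillTask", ...).Return(...); the real mock covers the whole
//SchedulerDriver interface (sketch)
func (m *MockSchedulerDriver) KillTask(taskId *mesos.TaskID) (mesos.Status, error) {
	args := m.Called(taskId)
	return args.Get(0).(mesos.Status), args.Error(1)
}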
Example 5
func TestInMemoryRegistry_Update(t *testing.T) {
	assert := assert.New(t)

	// create offers registry
	ttl := time.Second / 4
	config := offers.RegistryConfig{
		DeclineOffer: func(offerId string) <-chan error {
			return proc.ErrorChan(nil)
		},
		Compat: func(o *mesos.Offer) bool {
			return true
		},
		TTL:       ttl,
		LingerTTL: 2 * ttl,
	}
	storage := offers.CreateRegistry(config)

	// Add offer
	offerId := mesosutil.NewOfferID("foo")
	mesosOffer := &mesos.Offer{Id: offerId}
	storage.Add([]*mesos.Offer{mesosOffer})
	offer, ok := storage.Get(offerId.GetValue())
	assert.True(ok)

	// create registry
	registry := NewInMemoryRegistry()
	a, _ := fakePodTask("a")
	registry.Register(a.Clone(), nil) // register a clone because we mutate a below

	// state changes are ignored
	a.State = StateRunning
	err := registry.Update(a)
	assert.NoError(err)
	a_clone, _ := registry.Get(a.ID)
	assert.Equal(StatePending, a_clone.State)

	// offer is updated while pending
	a.Offer = offer
	err = registry.Update(a)
	assert.NoError(err)
	a_clone, _ = registry.Get(a.ID)
	assert.Equal(offer.Id(), a_clone.Offer.Id())

	// spec is updated while pending
	a.Spec = Spec{SlaveID: "slave-1"}
	err = registry.Update(a)
	assert.NoError(err)
	a_clone, _ = registry.Get(a.ID)
	assert.Equal("slave-1", a_clone.Spec.SlaveID)

	// flags are updated while pending
	a.Flags[Launched] = struct{}{}
	err = registry.Update(a)
	assert.NoError(err)
	a_clone, _ = registry.Get(a.ID)

	_, found_launched := a_clone.Flags[Launched]
	assert.True(found_launched)

	// flags are updated while running
	registry.UpdateStatus(fakeStatusUpdate(a.ID, mesos.TaskState_TASK_RUNNING))
	a.Flags[Bound] = struct{}{}
	err = registry.Update(a)
	assert.NoError(err)
	a_clone, _ = registry.Get(a.ID)

	_, found_launched = a_clone.Flags[Launched]
	assert.True(found_launched)
	_, found_bound := a_clone.Flags[Bound]
	assert.True(found_bound)

	// spec is ignored while running
	a.Spec = Spec{SlaveID: "slave-2"}
	err = registry.Update(a)
	assert.NoError(err)
	a_clone, _ = registry.Get(a.ID)
	assert.Equal("slave-1", a_clone.Spec.SlaveID)

	// error when finished
	registry.UpdateStatus(fakeStatusUpdate(a.ID, mesos.TaskState_TASK_FINISHED))
	err = registry.Update(a)
	assert.Error(err)

	// update unknown task
	unknown_task, _ := fakePodTask("unknown-task")
	err = registry.Update(unknown_task)
	assert.Error(err)

	// updating a nil task is a no-op and returns no error
	err = registry.Update(nil)
	assert.NoError(err)
}
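The fakePodTask and fakeStatusUpdate helpers are defined elsewhere in the package. A minimal sketch of fakeStatusUpdate, assuming UpdateStatus only needs the task ID and state from the status it receives:

//fakeStatusUpdate builds a TaskStatus carrying just a task ID and state
//(sketch; the real helper may populate more fields)
func fakeStatusUpdate(taskId string, state mesos.TaskState) *mesos.TaskStatus {
	return mesosutil.NewTaskStatus(mesosutil.NewTaskID(taskId), state)
}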