Example #1
//test that when a slave is lost we remove all its offers
func TestSlave_Lost(t *testing.T) {
	assert := assert.New(t)

	testFramework := &framework{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames: newSlaveRegistry(),
		sched:          mockScheduler(),
	}

	hostname := "h1"
	offer1 := &mesos.Offer{Id: util.NewOfferID("test1"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testFramework.ResourceOffers(nil, offers1)
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers2 := []*mesos.Offer{offer2}
	testFramework.ResourceOffers(nil, offers2)

	//add another offer from different slaveID
	hostname2 := "h2"
	offer3 := &mesos.Offer{Id: util.NewOfferID("test3"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers3 := []*mesos.Offer{offer3}
	testFramework.ResourceOffers(nil, offers3)

	//test precondition
	assert.Equal(3, getNumberOffers(testFramework.offers))
	assert.Equal(2, len(testFramework.slaveHostNames.SlaveIDs()))

	//remove first slave
	testFramework.SlaveLost(nil, util.NewSlaveID(hostname))

	//offers should be removed
	assert.Equal(1, getNumberOffers(testFramework.offers))
	//slave hostnames should still all be present
	assert.Equal(2, len(testFramework.slaveHostNames.SlaveIDs()))

	//remove second slave
	testFramework.SlaveLost(nil, util.NewSlaveID(hostname2))

	//offers should be removed
	assert.Equal(0, getNumberOffers(testFramework.offers))
	//slave hostnames should still all be present
	assert.Equal(2, len(testFramework.slaveHostNames.SlaveIDs()))

	//try to remove a non-existing slave
	testFramework.SlaveLost(nil, util.NewSlaveID("notExist"))
}
Example #2
//test that adding a resource offer adds it to the offer registry and slaves, and that offers can be rescinded
func TestResourceOffer_Add_Rescind(t *testing.T) {
	assert := assert.New(t)

	testFramework := &framework{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			DeclineOffer: func(offerId string) <-chan error {
				return proc.ErrorChan(nil)
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames: newSlaveRegistry(),
		sched:          mockScheduler(),
	}

	hostname := "h1"
	offerID1 := util.NewOfferID("test1")
	offer1 := &mesos.Offer{Id: offerID1, Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testFramework.ResourceOffers(nil, offers1)

	assert.Equal(1, getNumberOffers(testFramework.offers))

	//check slave hostname
	assert.Equal(1, len(testFramework.slaveHostNames.SlaveIDs()))

	//add another offer
	hostname2 := "h2"
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers2 := []*mesos.Offer{offer2}
	testFramework.ResourceOffers(nil, offers2)

	assert.Equal(2, getNumberOffers(testFramework.offers))

	//check slave hostnames
	assert.Equal(2, len(testFramework.slaveHostNames.SlaveIDs()))

	//next, check whether offers can be rescinded
	testFramework.OfferRescinded(nil, offerID1)
	assert.Equal(1, getNumberOffers(testFramework.offers))

	//rescind the second offer as well
	testFramework.OfferRescinded(nil, util.NewOfferID("test2"))
	//walk offers again and check it is removed from registry
	assert.Equal(0, getNumberOffers(testFramework.offers))

	//remove a non-existing ID
	testFramework.OfferRescinded(nil, util.NewOfferID("notExist"))
}
Example #3
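// TestSchedCacheContainsOffer checks that containsOffer finds offers stored via putOffer and reports false for unknown IDs.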
func TestSchedCacheContainsOffer(t *testing.T) {
	cache := newSchedCache()
	offer01 := createTestOffer("01")
	pid01, err := upid.Parse("slave01@127.0.0.1:5050")
	assert.NoError(t, err)
	offer02 := createTestOffer("02")
	pid02, err := upid.Parse("slave02@127.0.0.1:5050")
	assert.NoError(t, err)

	cache.putOffer(offer01, pid01)
	cache.putOffer(offer02, pid02)

	assert.True(t, cache.containsOffer(util.NewOfferID("test-offer-01")))
	assert.True(t, cache.containsOffer(util.NewOfferID("test-offer-02")))
	assert.False(t, cache.containsOffer(util.NewOfferID("test-offer-05")))
}
Example #4
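// TestOffer verifies the string rendering of an offer: hostname plus the ID's trailing characters, then any resources and attributes.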
func TestOffer(t *testing.T) {
	offer := util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0")

	if Offer(offer) != "slave0#30c49" {
		t.Errorf(`util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"), util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0") != "slave0#30c49"; actual %s`, Offer(offer))
	}

	offer.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000]; actual %s", Offer(offer))
	}

	offer.Attributes = []*mesos.Attribute{&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}}
	if Offer(offer) != "slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00" {
		t.Errorf("Expected slave0#30c49 cpus:4.00 mem:512.00 ports:[31000..32000] rack:2.00; actual %s", Offer(offer))
	}

	offer.Resources = nil
	if Offer(offer) != "slave0#30c49 rack:2.00" {
		t.Errorf("Expected slave0#30c49 rack:2.00; actual %s", Offer(offer))
	}
}
Example #5
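// TestAddOffer checks that addOffer stores offers by ID and that re-adding an existing ID does not create a duplicate.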
func TestAddOffer(t *testing.T) {
	s := newSlave("SID", nil)

	assert.Empty(t, s.offers)
	assert.True(t, s.empty())

	s.addOffer(mesosutil.NewOffer(mesosutil.NewOfferID("ID1"), nil, nil, "hostname1"))
	s.addOffer(mesosutil.NewOffer(mesosutil.NewOfferID("ID2"), nil, nil, "hostname1"))
	assert.Equal(t, len(s.offers), 2)
	assert.Equal(t, len(s.getOffers()), 2)
	assert.False(t, s.empty())

	s.addOffer(mesosutil.NewOffer(mesosutil.NewOfferID("ID1"), nil, nil, "hostname1"))
	assert.Equal(t, len(s.offers), 2)
	assert.Equal(t, len(s.getOffers()), 2)
}
Example #6
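// TestRescind runs table-driven cases verifying how many offers remain cached after rescinding present, absent, and over-capacity offers.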
func TestRescind(t *testing.T) {
	for i, tt := range []struct {
		offers   []string
		rescinds []string
		want     int
	}{
		{[]string{"a"}, []string{"a"}, 0},
		{[]string{"a"}, []string{"b"}, 1},
		{[]string{}, []string{"a"}, 0},
		// with 7 valid offers (2 more than cap), and invalidation of one
		// of the non-rejected ones as well as one of the accepted ones,
		// we should have 4 offers cached.
		{[]string{"a", "b", "c", "d", "e", "f", "g"}, []string{"a", "g"}, 4},
	} {
		oc := New(5, false)
		for _, o := range tt.offers {
			oc.Push(newOffer(o, o))
		}
		for _, r := range tt.rescinds {
			oc.Rescind(util.NewOfferID(r))
		}
		if got := oc.Len(); got != tt.want {
			t.Errorf("test #%d: got: %d, want: %d", i, got, tt.want)
		}
	}
}
Example #7
File: agent_test.go Project: rkazak/swarm
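// TestRemoveOffer checks that removeOffer deletes a cached offer by ID and returns false once the offer is gone.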
func TestRemoveOffer(t *testing.T) {
	s := newAgent("SID", nil)

	assert.Empty(t, s.offers)

	s.addOffer(mesosutil.NewOffer(mesosutil.NewOfferID("ID1"), nil, nil, "hostname1"))
	s.addOffer(mesosutil.NewOffer(mesosutil.NewOfferID("ID2"), nil, nil, "hostname1"))
	assert.Equal(t, len(s.offers), 2)
	assert.Equal(t, len(s.getOffers()), 2)

	assert.True(t, s.removeOffer("ID1"))
	assert.Equal(t, len(s.offers), 1)
	assert.Equal(t, len(s.getOffers()), 1)

	assert.False(t, s.removeOffer("ID1"))
}
Example #8
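// Test_gc pushes far more offers than the cache capacity (5) while rescinding stale ones, and expects the cache to hold at capacity.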
func Test_gc(t *testing.T) {
	oc := New(5, false)
	for i := 0; i < 5000; i++ {
		// strconv.Itoa is needed here: string(i) would yield the rune for code point i, not its decimal form
		oc.Rescind(util.NewOfferID(strconv.Itoa(i - 50)))
		oc.Push(newOffer(strconv.Itoa(i), strconv.Itoa(i)))
	}
	assert.Equal(t, 5, oc.Len())
}
Example #9
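// createTestOffer builds a minimal offer whose offer, framework, and slave IDs all share the given suffix.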
func createTestOffer(idSuffix string) *mesos.Offer {
	return util.NewOffer(
		util.NewOfferID("test-offer-"+idSuffix),
		util.NewFrameworkID("test-framework-"+idSuffix),
		util.NewSlaveID("test-slave-"+idSuffix),
		"localhost."+idSuffix,
	)
}
Example #10
//test that adding a resource offer adds it to the offer registry and slaves
func TestResourceOffer_Add(t *testing.T) {
	assert := assert.New(t)

	registrator := &mockRegistrator{cache.NewStore(cache.MetaNamespaceKeyFunc)}
	testFramework := &framework{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			DeclineOffer: func(offerId string) <-chan error {
				return proc.ErrorChan(nil)
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames:  newSlaveRegistry(),
		nodeRegistrator: registrator,
		sched:           mockScheduler(),
	}

	hostname := "h1"
	offerID1 := util.NewOfferID("test1")
	offer1 := &mesos.Offer{Id: offerID1, Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testFramework.ResourceOffers(nil, offers1)
	assert.Equal(1, len(registrator.store.List()))

	assert.Equal(1, getNumberOffers(testFramework.offers))
	//check slave hostname
	assert.Equal(1, len(testFramework.slaveHostNames.SlaveIDs()))

	//add another offer
	hostname2 := "h2"
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers2 := []*mesos.Offer{offer2}
	testFramework.ResourceOffers(nil, offers2)

	//check it is stored in registry
	assert.Equal(2, getNumberOffers(testFramework.offers))

	//check slave hostnames
	assert.Equal(2, len(testFramework.slaveHostNames.SlaveIDs()))
}
Example #11
// New creates a new KubernetesScheduler
func New(config Config) *KubernetesScheduler {
	var k *KubernetesScheduler
	k = &KubernetesScheduler{
		schedcfg:          &config.Schedcfg,
		RWMutex:           new(sync.RWMutex),
		executor:          config.Executor,
		executorGroup:     uid.Parse(config.Executor.ExecutorId.GetValue()).Group(),
		PodScheduler:      config.Scheduler,
		client:            config.Client,
		etcdClient:        config.EtcdClient,
		failoverTimeout:   config.FailoverTimeout,
		reconcileInterval: config.ReconcileInterval,
		nodeRegistrator:   node.NewRegistrator(config.Client, config.LookupNode),
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				// the node must be registered and have up-to-date labels
				n := config.LookupNode(o.GetHostname())
				if n == nil || !node.IsUpToDate(n, node.SlaveAttributesToLabels(o.GetAttributes())) {
					return false
				}

				// the executor IDs must not identify a kubelet-executor with a group that doesn't match ours
				for _, eid := range o.GetExecutorIds() {
					execuid := uid.Parse(eid.GetValue())
					if execuid.Name() == execcfg.DefaultInfoID && execuid.Group() != k.executorGroup {
						return false
					}
				}

				return true
			},
			DeclineOffer: func(id string) <-chan error {
				errOnce := proc.NewErrorOnce(k.terminate)
				errOuter := k.asRegisteredMaster.Do(func() {
					var err error
					defer errOnce.Report(err)
					offerId := mutil.NewOfferID(id)
					filters := &mesos.Filters{}
					_, err = k.driver.DeclineOffer(offerId, filters)
				})
				return errOnce.Send(errOuter).Err()
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     config.Schedcfg.OfferLingerTTL.Duration,
			TTL:           config.Schedcfg.OfferTTL.Duration,
			ListenerDelay: config.Schedcfg.ListenerDelay.Duration,
		}),
		slaveHostNames:    slave.NewRegistry(),
		taskRegistry:      podtask.NewInMemoryRegistry(),
		reconcileCooldown: config.ReconcileCooldown,
		registration:      make(chan struct{}),
		asRegisteredMaster: proc.DoerFunc(func(proc.Action) <-chan error {
			return proc.ErrorChanf("cannot execute action with unregistered scheduler")
		}),
	}
	return k
}
Example #12
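// TestOffers verifies that rendering a slice of two offers produces two newline-separated lines.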
func TestOffers(t *testing.T) {
	offer1 := util.NewOffer(util.NewOfferID("487c73d8-9951-f23c-34bd-8085bfd30c49"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0053"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S1"), "slave0")
	offer1.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 4), util.NewScalarResource("mem", 512), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})}

	offer2 := util.NewOffer(util.NewOfferID("26d5b34c-ef81-638d-5ad5-32c743c9c033"), util.NewFrameworkID("20150903-065451-84125888-5050-10715-0037"),
		util.NewSlaveID("20150903-065451-84125888-5050-10715-S0"), "master")
	offer2.Resources = []*mesos.Resource{util.NewScalarResource("cpus", 2), util.NewScalarResource("mem", 1024), util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(4000, 7000)})}
	offer2.Attributes = []*mesos.Attribute{&mesos.Attribute{
		Name:   proto.String("rack"),
		Type:   mesos.Value_SCALAR.Enum(),
		Scalar: &mesos.Value_Scalar{Value: proto.Float64(2)},
	}}

	offers := Offers([]*mesos.Offer{offer1, offer2})
	if len(strings.Split(offers, "\n")) != 2 {
		t.Errorf("Offers([]*mesos.Offer{offer1, offer2}) should contain two offers split by new line, actual: %s", offers)
	}
}
Example #13
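// TestSchedCacheGetOffer checks that getOffer returns exactly the offer stored under each ID.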
func TestSchedCacheGetOffer(t *testing.T) {
	cache := newSchedCache()
	offer01 := createTestOffer("01")
	pid01, err := upid.Parse("slave01@127.0.0.1:5050")
	assert.NoError(t, err)
	offer02 := createTestOffer("02")
	pid02, err := upid.Parse("slave02@127.0.0.1:5050")
	assert.NoError(t, err)

	cache.putOffer(offer01, pid01)
	cache.putOffer(offer02, pid02)

	cachedOffer01 := cache.getOffer(util.NewOfferID("test-offer-01")).offer
	cachedOffer02 := cache.getOffer(util.NewOfferID("test-offer-02")).offer
	assert.NotEqual(t, offer01, cachedOffer02)
	assert.Equal(t, offer01, cachedOffer01)
	assert.Equal(t, offer02, cachedOffer02)
}
Example #14
//test that adding a resource offer adds it to the offer registry and slaves
func TestResourceOffer_Add(t *testing.T) {
	assert := assert.New(t)

	testScheduler := &KubernetesScheduler{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			DeclineOffer: func(offerId string) <-chan error {
				return proc.ErrorChan(nil)
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaves: newSlaveStorage(),
	}

	hostname := "h1"
	offerID1 := util.NewOfferID("test1")
	offer1 := &mesos.Offer{Id: offerID1, Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testScheduler.ResourceOffers(nil, offers1)

	assert.Equal(1, getNumberOffers(testScheduler.offers))
	//check slave hostname
	assert.Equal(1, len(testScheduler.slaves.getSlaveIds()))

	//add another offer
	hostname2 := "h2"
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers2 := []*mesos.Offer{offer2}
	testScheduler.ResourceOffers(nil, offers2)

	//check it is stored in registry
	assert.Equal(2, getNumberOffers(testScheduler.offers))

	//check slave hostnames
	assert.Equal(2, len(testScheduler.slaves.getSlaveIds()))
}
Example #15
//test that when we lose the connection to the master we invalidate all cached offers
func TestDisconnect(t *testing.T) {
	assert := assert.New(t)

	testScheduler := &KubernetesScheduler{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames: slave.NewRegistry(),
	}

	hostname := "h1"
	offer1 := &mesos.Offer{Id: util.NewOfferID("test1"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers1 := []*mesos.Offer{offer1}
	testScheduler.ResourceOffers(nil, offers1)
	offer2 := &mesos.Offer{Id: util.NewOfferID("test2"), Hostname: &hostname, SlaveId: util.NewSlaveID(hostname)}
	offers2 := []*mesos.Offer{offer2}
	testScheduler.ResourceOffers(nil, offers2)

	//add another offer from different slaveID
	hostname2 := "h2"
	offer3 := &mesos.Offer{Id: util.NewOfferID("test3"), Hostname: &hostname2, SlaveId: util.NewSlaveID(hostname2)}
	offers3 := []*mesos.Offer{offer3}
	testScheduler.ResourceOffers(nil, offers3)

	//disconnect
	testScheduler.Disconnected(nil)

	//all offers should be removed
	assert.Equal(0, getNumberOffers(testScheduler.offers))
	//slave hostnames should still all be present
	assert.Equal(2, len(testScheduler.slaveHostNames.SlaveIDs()))
}
Example #16
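// TestSchedulerDriverRescindOfferEvent sends a rescind message to the driver over HTTP and waits for the scheduler callback.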
func (suite *SchedulerIntegrationTestSuite) TestSchedulerDriverRescindOfferEvent() {
	ok := suite.configureServerWithRegisteredFramework()
	suite.True(ok, "failed to establish running test server and driver")

	// Send an event to this SchedulerDriver (via http) to test handlers.
	pbMsg := &mesos.RescindResourceOfferMessage{
		OfferId: util.NewOfferID("test-offer-001"),
	}

	c := suite.newMockClient()
	c.SendMessage(suite.driver.UPID(), pbMsg)
	suite.sched.waitForCallback(0)
}
Example #17
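// TestSchedulerDriverAcceptOffersWithError forces message dispatch to fail and expects AcceptOffers to return an error while the driver keeps running.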
func (suite *SchedulerTestSuite) TestSchedulerDriverAcceptOffersWithError() {
	sched := mock_scheduler.New()
	sched.On("StatusUpdate").Return(nil)
	sched.On("Error").Return()

	msgr := mockedMessenger()
	driver := newTestDriver(suite.T(), driverConfigMessenger(sched, suite.framework, suite.master, nil, msgr))
	driver.OnDispatch(func(_ context.Context, _ *upid.UPID, _ proto.Message) error {
		return fmt.Errorf("Unable to send message")
	})

	go func() {
		driver.Run()
	}()
	<-driver.Started()
	driver.SetConnected(true) // simulated
	suite.True(driver.Running())

	// setup an offer
	offer := util.NewOffer(
		util.NewOfferID("test-offer-001"),
		suite.framework.Id,
		util.NewSlaveID("test-slave-001"),
		"test-slave(1)@localhost:5050",
	)

	pid, err := upid.Parse("test-slave(1)@localhost:5050")
	suite.NoError(err)
	driver.CacheOffer(offer, pid)

	// launch task
	task := util.NewTaskInfo(
		"simple-task",
		util.NewTaskID("simple-task-1"),
		util.NewSlaveID("test-slave-001"),
		[]*mesos.Resource{util.NewScalarResourceWithReservation("mem", 400, "principal", "role")},
	)
	task.Command = util.NewCommandInfo("pwd")
	task.Executor = util.NewExecutorInfo(util.NewExecutorID("test-exec"), task.Command)
	tasks := []*mesos.TaskInfo{task}

	operations := []*mesos.Offer_Operation{util.NewLaunchOperation(tasks)}

	stat, err := driver.AcceptOffers(
		[]*mesos.OfferID{offer.Id},
		operations,
		&mesos.Filters{},
	)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
	suite.Error(err)
}
Example #18
// New creates a new QingYuanScheduler
func New(config Config) *QingYuanScheduler {
	var k *QingYuanScheduler
	k = &QingYuanScheduler{
		schedcfg:          &config.Schedcfg,
		RWMutex:           new(sync.RWMutex),
		executor:          config.Executor,
		executorGroup:     uid.Parse(config.Executor.ExecutorId.GetValue()).Group(),
		scheduleFunc:      config.ScheduleFunc,
		client:            config.Client,
		etcdClient:        config.EtcdClient,
		failoverTimeout:   config.FailoverTimeout,
		reconcileInterval: config.ReconcileInterval,
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				// filter the offers: the executor IDs must not identify a qinglet-
				// executor with a group that doesn't match ours
				for _, eid := range o.GetExecutorIds() {
					execuid := uid.Parse(eid.GetValue())
					if execuid.Name() == execcfg.DefaultInfoID && execuid.Group() != k.executorGroup {
						return false
					}
				}
				return true
			},
			DeclineOffer: func(id string) <-chan error {
				errOnce := proc.NewErrorOnce(k.terminate)
				errOuter := k.asRegisteredMaster.Do(func() {
					var err error
					defer errOnce.Report(err)
					offerId := mutil.NewOfferID(id)
					filters := &mesos.Filters{}
					_, err = k.driver.DeclineOffer(offerId, filters)
				})
				return errOnce.Send(errOuter).Err()
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     config.Schedcfg.OfferLingerTTL.Duration,
			TTL:           config.Schedcfg.OfferTTL.Duration,
			ListenerDelay: config.Schedcfg.ListenerDelay.Duration,
		}),
		slaves:            newSlaveStorage(),
		taskRegistry:      podtask.NewInMemoryRegistry(),
		reconcileCooldown: config.ReconcileCooldown,
		registration:      make(chan struct{}),
		asRegisteredMaster: proc.DoerFunc(func(proc.Action) <-chan error {
			return proc.ErrorChanf("cannot execute action with unregistered scheduler")
		}),
	}
	return k
}
Example #19
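// TestFilterResources checks that FilterResources keeps only the resources carrying the requested name.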
func TestFilterResources(t *testing.T) {
	rf := ResourceFilter{}
	o := util.NewOffer(util.NewOfferID("offerid"), util.NewFrameworkID("frameworkid"), util.NewSlaveID("slaveId"), "hostname")
	o.Resources = []*mesos.Resource{
		util.NewScalarResource("name", 1.0),
		util.NewScalarResource("ub0r-resource", 2.0),
		util.NewScalarResource("ub0r-resource", 3.0),
	}

	res := rf.FilterResources(o, "ub0r-resource")

	assert.Equal(t, 2, len(res))
	assert.Equal(t, "ub0r-resource", res[0].GetName())
}
Example #20
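// TestListen verifies that a listener is notified (its channel is closed) only for offers matching its hostname filter.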
func TestListen(t *testing.T) {
	ttl := time.Second / 4
	config := RegistryConfig{
		DeclineOffer: func(offerId string) <-chan error {
			return proc.ErrorChan(nil)
		},
		Compat: func(o *mesos.Offer) bool {
			return true
		},
		TTL:           ttl,
		ListenerDelay: ttl / 2,
	}
	storage := CreateRegistry(config)

	done := make(chan struct{})
	storage.Init(done)

	// Create two listeners with a hostname filter
	hostname1 := "hostname1"
	hostname2 := "hostname2"
	listener1 := storage.Listen("listener1", func(offer *mesos.Offer) bool {
		return offer.GetHostname() == hostname1
	})
	listener2 := storage.Listen("listener2", func(offer *mesos.Offer) bool {
		return offer.GetHostname() == hostname2
	})

	// Add hostname1 offer
	id := util.NewOfferID("foo")
	o := &mesos.Offer{Id: id, Hostname: &hostname1}
	storage.Add([]*mesos.Offer{o})

	// listener1 is notified by closing channel
	select {
	case _, more := <-listener1:
		if more {
			t.Error("listener1 is not closed")
		}
	}

	// listener2 is not notified within ttl
	select {
	case <-listener2:
		t.Error("listener2 is notified")
	case <-time.After(ttl):
	}

	close(done)
} // TestListen
Example #21
// Offering some cpus and memory and the 8000-9000 port range
func NewTestOffer(id string) *mesos.Offer {
	hostname := "some_hostname"
	cpus := util.NewScalarResource("cpus", 3.75)
	mem := util.NewScalarResource("mem", 940)
	var port8000 uint64 = 8000
	var port9000 uint64 = 9000
	ports8000to9000 := mesos.Value_Range{Begin: &port8000, End: &port9000}
	ports := util.NewRangesResource("ports", []*mesos.Value_Range{&ports8000to9000})
	return &mesos.Offer{
		Id:        util.NewOfferID(id),
		Hostname:  &hostname,
		SlaveId:   util.NewSlaveID(hostname),
		Resources: []*mesos.Resource{cpus, mem, ports},
	}
}
Example #22
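// NewOffer builds a test offer for the given ID with a fixed framework, slave, hostname, and resource set.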
func NewOffer(id string) *mesos.Offer {
	return &mesos.Offer{
		Id:          util.NewOfferID(id),
		FrameworkId: util.NewFrameworkID("test-etcd-framework"),
		SlaveId:     util.NewSlaveID("slave-" + id),
		Hostname:    proto.String("localhost"),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", 1),
			util.NewScalarResource("mem", 256),
			util.NewScalarResource("disk", 4096),
			util.NewRangesResource("ports", []*mesos.Value_Range{
				util.NewValueRange(uint64(0), uint64(65535)),
			}),
		},
	}
}
Example #23
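// TestBlockingPop verifies that BlockingPop yields exactly the cached, non-rescinded offers and then blocks.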
func TestBlockingPop(t *testing.T) {
	for i, tt := range []struct {
		offers   []string
		rescinds []string
		want     int
	}{
		// with one valid offer, we should pop once
		{[]string{"a"}, []string{"b"}, 1},
		// with 7 valid offers (2 more than cap), and invalidation of one
		// of the non-rejected ones as well as one of the accepted ones,
		// we should be able to pop 4 offers
		{[]string{"a", "b", "c", "d", "e", "f", "g"}, []string{"a", "g"}, 4},
	} {
		oc := New(5, false)
		for _, o := range tt.offers {
			oc.Push(newOffer(o, o))
		}
		for _, r := range tt.rescinds {
			oc.Rescind(util.NewOfferID(r))
		}

		got := func() int {
			n := 0
			for oc.Len() > 0 {
				c := make(chan struct{})
				go func() {
					oc.BlockingPop()
					c <- struct{}{}
				}()
				select {
				case <-c:
					n += 1
				case <-time.After(time.Second):
					return n
				}
			}
			return n
		}()

		if got != tt.want {
			t.Errorf("test #%d: got: %d, want: %d", i, got, tt.want)
		}
	}
}
Example #24
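// TestSchedulerDriverResourceOffersEvent delivers a resource-offers message to the driver over HTTP and waits for the scheduler callback.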
func (suite *SchedulerIntegrationTestSuite) TestSchedulerDriverResourceOffersEvent() {
	ok := suite.configureServerWithRegisteredFramework()
	suite.True(ok, "failed to establish running test server and driver")

	// Send an event to this SchedulerDriver (via http) to test handlers.
	offer := util.NewOffer(
		util.NewOfferID("test-offer-001"),
		suite.registeredFrameworkId,
		util.NewSlaveID("test-slave-001"),
		"test-localhost",
	)
	pbMsg := &mesos.ResourceOffersMessage{
		Offers: []*mesos.Offer{offer},
		Pids:   []string{"test-offer-001@test-slave-001:5051"},
	}

	c := suite.newMockClient()
	c.SendMessage(suite.driver.UPID(), pbMsg)
	suite.sched.waitForCallback(0)
}
Example #25
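// TestInMemoryRegistry_Update covers which task fields (state, offer, spec, flags) may be updated in each task lifecycle state.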
func TestInMemoryRegistry_Update(t *testing.T) {
	assert := assert.New(t)

	// create offers registry
	ttl := time.Second / 4
	config := offers.RegistryConfig{
		DeclineOffer: func(offerId string) <-chan error {
			return proc.ErrorChan(nil)
		},
		Compat: func(o *mesos.Offer) bool {
			return true
		},
		TTL:       ttl,
		LingerTTL: 2 * ttl,
	}
	storage := offers.CreateRegistry(config)

	// Add offer
	offerId := mesosutil.NewOfferID("foo")
	mesosOffer := &mesos.Offer{Id: offerId}
	storage.Add([]*mesos.Offer{mesosOffer})
	offer, ok := storage.Get(offerId.GetValue())
	assert.True(ok)

	// create registry
	registry := NewInMemoryRegistry()
	a, _ := fakePodTask("a")
	registry.Register(a.Clone(), nil) // clone a here because we mutate it below

	// state changes are ignored
	a.State = StateRunning
	err := registry.Update(a)
	assert.NoError(err)
	a_clone, _ := registry.Get(a.ID)
	assert.Equal(StatePending, a_clone.State)

	// offer is updated while pending
	a.Offer = offer
	err = registry.Update(a)
	assert.NoError(err)
	a_clone, _ = registry.Get(a.ID)
	assert.Equal(offer.Id(), a_clone.Offer.Id())

	// spec is updated while pending
	a.Spec = Spec{SlaveID: "slave-1"}
	err = registry.Update(a)
	assert.NoError(err)
	a_clone, _ = registry.Get(a.ID)
	assert.Equal("slave-1", a_clone.Spec.SlaveID)

	// flags are updated while pending
	a.Flags[Launched] = struct{}{}
	err = registry.Update(a)
	assert.NoError(err)
	a_clone, _ = registry.Get(a.ID)

	_, found_launched := a_clone.Flags[Launched]
	assert.True(found_launched)

	// flags are updated while running
	registry.UpdateStatus(fakeStatusUpdate(a.ID, mesos.TaskState_TASK_RUNNING))
	a.Flags[Bound] = struct{}{}
	err = registry.Update(a)
	assert.NoError(err)
	a_clone, _ = registry.Get(a.ID)

	_, found_launched = a_clone.Flags[Launched]
	assert.True(found_launched)
	_, found_bound := a_clone.Flags[Bound]
	assert.True(found_bound)

	// spec is ignored while running
	a.Spec = Spec{SlaveID: "slave-2"}
	err = registry.Update(a)
	assert.NoError(err)
	a_clone, _ = registry.Get(a.ID)
	assert.Equal("slave-1", a_clone.Spec.SlaveID)

	// error when finished
	registry.UpdateStatus(fakeStatusUpdate(a.ID, mesos.TaskState_TASK_FINISHED))
	err = registry.Update(a)
	assert.Error(err)

	// update unknown task
	unknown_task, _ := fakePodTask("unknown-task")
	err = registry.Update(unknown_task)
	assert.Error(err)

	// update nil task
	err = registry.Update(nil)
	assert.Nil(err)
}
Example #26
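// newOffer builds a bare offer carrying only offer and slave IDs.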
func newOffer(offer, slave string) *mesos.Offer {
	return &mesos.Offer{
		Id:      util.NewOfferID(offer),
		SlaveId: util.NewSlaveID(slave),
	}
}
Example #27
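// TestWalk checks that Walk visits every stored offer, honors the walker's accept decision, and propagates walker errors.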
func TestWalk(t *testing.T) {
	t.Parallel()
	config := RegistryConfig{
		DeclineOffer: func(offerId string) <-chan error {
			return proc.ErrorChan(nil)
		},
		TTL:           0 * time.Second,
		LingerTTL:     0 * time.Second,
		ListenerDelay: 0 * time.Second,
	}
	storage := CreateRegistry(config)
	acceptedOfferId := ""
	walked := 0
	walker1 := func(p Perishable) (bool, error) {
		walked++
		if p.Acquire() {
			acceptedOfferId = p.Details().Id.GetValue()
			return true, nil
		}
		return false, nil
	}
	// sanity check
	err := storage.Walk(walker1)
	if err != nil {
		t.Fatalf("received impossible error %v", err)
	}
	if walked != 0 {
		t.Fatal("walked empty storage")
	}
	if acceptedOfferId != "" {
		t.Fatal("somehow found an offer when registry was empty")
	}
	impl, ok := storage.(*offerStorage)
	if !ok {
		t.Fatal("unexpected offer storage impl")
	}
	// single offer
	ttl := 2 * time.Second
	now := time.Now()
	o := &liveOffer{&mesos.Offer{Id: util.NewOfferID("foo")}, now.Add(ttl), 0}

	impl.offers.Add(o)
	err = storage.Walk(walker1)
	if err != nil {
		t.Fatalf("received impossible error %v", err)
	}
	if walked != 1 {
		t.Fatalf("walk count %d", walked)
	}
	if acceptedOfferId != "foo" {
		t.Fatalf("found offer %v", acceptedOfferId)
	}

	acceptedOfferId = ""
	err = storage.Walk(walker1)
	if err != nil {
		t.Fatalf("received impossible error %v", err)
	}
	if walked != 2 {
		t.Fatalf("walk count %d", walked)
	}
	if acceptedOfferId != "" {
		t.Fatalf("found offer %v", acceptedOfferId)
	}

	walker2 := func(p Perishable) (bool, error) {
		walked++
		return true, nil
	}
	err = storage.Walk(walker2)
	if err != nil {
		t.Fatalf("received impossible error %v", err)
	}
	if walked != 3 {
		t.Fatalf("walk count %d", walked)
	}
	if acceptedOfferId != "" {
		t.Fatalf("found offer %v", acceptedOfferId)
	}

	walker3 := func(p Perishable) (bool, error) {
		walked++
		return true, errors.New("baz")
	}
	err = storage.Walk(walker3)
	if err == nil {
		t.Fatal("expected error")
	}
	if walked != 4 {
		t.Fatalf("walk count %d", walked)
	}
}
Example #28
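// TestOfferStorage exercises the registry end to end: add, get, delete, expiry and lingering, declining, and invalidation.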
func TestOfferStorage(t *testing.T) {
	ttl := time.Second / 4
	var declinedNum int32
	getDeclinedNum := func() int32 { return atomic.LoadInt32(&declinedNum) }
	config := RegistryConfig{
		DeclineOffer: func(offerId string) <-chan error {
			atomic.AddInt32(&declinedNum, 1)
			return proc.ErrorChan(nil)
		},
		Compat: func(o *mesos.Offer) bool {
			return o.Hostname == nil || *o.Hostname != "incompatiblehost"
		},
		TTL:       ttl,
		LingerTTL: 2 * ttl,
	}
	storage := CreateRegistry(config)

	done := make(chan struct{})
	storage.Init(done)

	// Add offer
	id := util.NewOfferID("foo")
	o := &mesos.Offer{Id: id}
	storage.Add([]*mesos.Offer{o})

	// Added offer should be in the storage
	if obj, ok := storage.Get(id.GetValue()); obj == nil || !ok {
		t.Error("offer not added")
	}
	if obj, _ := storage.Get(id.GetValue()); obj.Details() != o {
		t.Error("added offer differs from returned offer")
	}

	// Not-added offer is not in storage
	if obj, ok := storage.Get("bar"); obj != nil || ok {
		t.Error("offer bar should not exist in storage")
	}

	// Deleted offer lingers in storage, is acquired and declined
	offer, _ := storage.Get(id.GetValue())
	declinedNumBefore := getDeclinedNum()
	storage.Delete(id.GetValue(), "deleted for test")
	if obj, _ := storage.Get(id.GetValue()); obj == nil {
		t.Error("deleted offer is not lingering")
	}
	if obj, _ := storage.Get(id.GetValue()); !obj.HasExpired() {
		t.Error("deleted offer is no expired")
	}
	if ok := offer.Acquire(); ok {
		t.Error("deleted offer can be acquired")
	}
	if getDeclinedNum() <= declinedNumBefore {
		t.Error("deleted offer was not declined")
	}

	// Acquired offer is only declined after 2*ttl
	id = util.NewOfferID("foo2")
	o = &mesos.Offer{Id: id}
	storage.Add([]*mesos.Offer{o})
	offer, _ = storage.Get(id.GetValue())
	declinedNumBefore = getDeclinedNum()
	offer.Acquire()
	storage.Delete(id.GetValue(), "deleted for test")
	if getDeclinedNum() > declinedNumBefore {
		t.Error("acquired offer is declined")
	}

	offer.Release()
	time.Sleep(3 * ttl)
	if getDeclinedNum() <= declinedNumBefore {
		t.Error("released offer is not declined after 2*ttl")
	}

	// Added offer should be expired after ttl, but lingering
	id = util.NewOfferID("foo3")
	o = &mesos.Offer{Id: id}
	storage.Add([]*mesos.Offer{o})

	time.Sleep(2 * ttl)
	obj, ok := storage.Get(id.GetValue())
	if obj == nil || !ok {
		t.Error("offer not lingering after ttl")
	}
	if !obj.HasExpired() {
		t.Error("offer is not expired after ttl")
	}

	// Should be deleted when waiting longer than LingerTTL
	time.Sleep(2 * ttl)
	if obj, ok := storage.Get(id.GetValue()); obj != nil || ok {
		t.Error("offer not deleted after LingerTTL")
	}

	// Incompatible offer is declined
	id = util.NewOfferID("foo4")
	incompatibleHostname := "incompatiblehost"
	o = &mesos.Offer{Id: id, Hostname: &incompatibleHostname}
	declinedNumBefore = getDeclinedNum()
	storage.Add([]*mesos.Offer{o})
	if obj, ok := storage.Get(id.GetValue()); obj != nil || ok {
		t.Error("incompatible offer not rejected")
	}
	if getDeclinedNum() <= declinedNumBefore {
		t.Error("incompatible offer is not declined")
	}

	// Invalidated offers are not declined, but expired
	id = util.NewOfferID("foo5")
	o = &mesos.Offer{Id: id}
	storage.Add([]*mesos.Offer{o})
	offer, _ = storage.Get(id.GetValue())
	declinedNumBefore = getDeclinedNum()
	storage.Invalidate(id.GetValue())
	if obj, _ := storage.Get(id.GetValue()); !obj.HasExpired() {
		t.Error("invalidated offer is not expired")
	}
	if getDeclinedNum() > declinedNumBefore {
		t.Error("invalidated offer is declined")
	}
	if ok := offer.Acquire(); ok {
		t.Error("invalidated offer can be acquired")
	}

	// Invalidate "" will invalidate all offers
	id = util.NewOfferID("foo6")
	o = &mesos.Offer{Id: id}
	storage.Add([]*mesos.Offer{o})
	id2 := util.NewOfferID("foo7")
	o2 := &mesos.Offer{Id: id2}
	storage.Add([]*mesos.Offer{o2})
	storage.Invalidate("")
	if obj, _ := storage.Get(id.GetValue()); !obj.HasExpired() {
		t.Error("invalidated offer is not expired")
	}
	if obj2, _ := storage.Get(id2.GetValue()); !obj2.HasExpired() {
		t.Error("invalidated offer is not expired")
	}

	// InvalidateForSlave invalidates all offers for that slave, and only those
	id = util.NewOfferID("foo8")
	slaveId := util.NewSlaveID("test-slave")
	o = &mesos.Offer{Id: id, SlaveId: slaveId}
	storage.Add([]*mesos.Offer{o})
	id2 = util.NewOfferID("foo9")
	o2 = &mesos.Offer{Id: id2}
	storage.Add([]*mesos.Offer{o2})
	storage.InvalidateForSlave(slaveId.GetValue())
	if obj, _ := storage.Get(id.GetValue()); !obj.HasExpired() {
		t.Error("invalidated offer for test-slave is not expired")
	}
	if obj2, _ := storage.Get(id2.GetValue()); obj2.HasExpired() {
		t.Error("invalidated offer another slave is expired")
	}

	close(done)
} // TestOfferStorage
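
For orientation, the snippet below condenses the offer-registry lifecycle that the tests above exercise into one sketch. It is assembled only from calls visible in these examples (offers.CreateRegistry, Init, Add, Get, Acquire, Release, Invalidate); sketchOfferLifecycle and useOffer are hypothetical names and the TTL values are arbitrary, so treat it as illustrative rather than canonical usage.

// sketchOfferLifecycle shows, under the assumptions above, how the registry
// pieces fit together; useOffer is a hypothetical callback.
func sketchOfferLifecycle(incoming []*mesos.Offer, useOffer func(*mesos.Offer)) {
	storage := offers.CreateRegistry(offers.RegistryConfig{
		Compat:       func(o *mesos.Offer) bool { return true },                   // accept every offer
		DeclineOffer: func(id string) <-chan error { return proc.ErrorChan(nil) }, // no-op decline, as in the tests
		TTL:          time.Second,
		LingerTTL:    2 * time.Second,
	})

	done := make(chan struct{})
	storage.Init(done) // start TTL/linger bookkeeping

	storage.Add(incoming) // register incoming offers
	for _, o := range incoming {
		if p, ok := storage.Get(o.Id.GetValue()); ok && p.Acquire() {
			useOffer(p.Details()) // claimed for scheduling; Release() would return it
		}
	}

	storage.Invalidate("") // e.g. on disconnect: expire every cached offer
	close(done)            // stop the registry
}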
Example #29
// New creates a new Framework
func New(config Config) Framework {
	var k *framework
	k = &framework{
		schedulerConfig:   &config.SchedulerConfig,
		RWMutex:           new(sync.RWMutex),
		client:            config.Client,
		failoverTimeout:   config.FailoverTimeout,
		reconcileInterval: config.ReconcileInterval,
		nodeRegistrator:   node.NewRegistrator(config.Client, config.LookupNode),
		executorId:        config.ExecutorId,
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				// the node must be registered and have up-to-date labels
				n := config.LookupNode(o.GetHostname())
				if n == nil || !node.IsUpToDate(n, node.SlaveAttributesToLabels(o.GetAttributes())) {
					return false
				}

				eids := len(o.GetExecutorIds())
				switch {
				case eids > 1:
					// at most one executor id expected. More than one means that
					// the given node is seriously in trouble.
					return false

				case eids == 1:
					// the executor id must match, otherwise the running executor
					// is incompatible with the current scheduler configuration.
					if eid := o.GetExecutorIds()[0]; eid.GetValue() != config.ExecutorId.GetValue() {
						return false
					}
				}

				return true
			},
			DeclineOffer: func(id string) <-chan error {
				errOnce := proc.NewErrorOnce(k.terminate)
				errOuter := k.asRegisteredMaster.Do(func() {
					var err error
					defer errOnce.Report(err)
					offerId := mutil.NewOfferID(id)
					filters := &mesos.Filters{}
					_, err = k.driver.DeclineOffer(offerId, filters)
				})
				return errOnce.Send(errOuter).Err()
			},
			// remember expired offers so that we can tell if a previously scheduled offer relies on one
			LingerTTL:     config.SchedulerConfig.OfferLingerTTL.Duration,
			TTL:           config.SchedulerConfig.OfferTTL.Duration,
			ListenerDelay: config.SchedulerConfig.ListenerDelay.Duration,
		}),
		slaveHostNames:    newSlaveRegistry(),
		reconcileCooldown: config.ReconcileCooldown,
		registration:      make(chan struct{}),
		asRegisteredMaster: proc.DoerFunc(func(proc.Action) <-chan error {
			return proc.ErrorChanf("cannot execute action with unregistered scheduler")
		}),
		storeFrameworkId: config.StoreFrameworkId,
		lookupNode:       config.LookupNode,
	}
	return k
}
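
// TestSchedulerDriverLaunchTasksWithError swaps in a messenger whose Send fails and expects LaunchTasks to return an error while the driver keeps running.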
func (suite *SchedulerTestSuite) TestSchedulerDriverLaunchTasksWithError() {
	sched := NewMockScheduler()
	sched.On("StatusUpdate").Return(nil)
	sched.On("Error").Return()

	msgr := messenger.NewMockedMessenger()
	msgr.On("Start").Return(nil)
	msgr.On("Send").Return(nil)
	msgr.On("UPID").Return(&upid.UPID{})
	msgr.On("Stop").Return(nil)
	msgr.On("Route").Return(nil)

	driver := newTestSchedulerDriver(suite.T(), sched, suite.framework, suite.master, nil)
	driver.messenger = msgr
	suite.True(driver.Stopped())

	go func() {
		driver.Run()
	}()
	time.Sleep(time.Millisecond * 1)
	driver.setConnected(true) // simulated
	suite.False(driver.Stopped())
	suite.Equal(mesos.Status_DRIVER_RUNNING, driver.Status())

	// to trigger error
	msgr2 := messenger.NewMockedMessenger()
	msgr2.On("Start").Return(nil)
	msgr2.On("UPID").Return(&upid.UPID{})
	msgr2.On("Send").Return(fmt.Errorf("Unable to send message"))
	msgr2.On("Stop").Return(nil)
	msgr.On("Route").Return(nil)
	driver.messenger = msgr2

	// setup an offer
	offer := util.NewOffer(
		util.NewOfferID("test-offer-001"),
		suite.framework.Id,
		util.NewSlaveID("test-slave-001"),
		"test-slave(1)@localhost:5050",
	)

	pid, err := upid.Parse("test-slave(1)@localhost:5050")
	suite.NoError(err)
	driver.cache.putOffer(offer, pid)

	// launch task
	task := util.NewTaskInfo(
		"simple-task",
		util.NewTaskID("simple-task-1"),
		util.NewSlaveID("test-slave-001"),
		[]*mesos.Resource{util.NewScalarResource("mem", 400)},
	)
	task.Command = util.NewCommandInfo("pwd")
	task.Executor = util.NewExecutorInfo(util.NewExecutorID("test-exec"), task.Command)
	tasks := []*mesos.TaskInfo{task}

	stat, err := driver.LaunchTasks(
		[]*mesos.OfferID{offer.Id},
		tasks,
		&mesos.Filters{},
	)
	suite.Error(err)
	suite.Equal(mesos.Status_DRIVER_RUNNING, stat)
}