// New creates a new KubernetesScheduler.
func New(config Config) *KubernetesScheduler {
	// k is declared before the composite literal so that the closures
	// below (Compat, DeclineOffer) can capture it.
	var k *KubernetesScheduler
	k = &KubernetesScheduler{
		schedcfg:          &config.Schedcfg,
		RWMutex:           new(sync.RWMutex),
		executor:          config.Executor,
		executorGroup:     uid.Parse(config.Executor.ExecutorId.GetValue()).Group(),
		PodScheduler:      config.Scheduler,
		client:            config.Client,
		etcdClient:        config.EtcdClient,
		failoverTimeout:   config.FailoverTimeout,
		reconcileInterval: config.ReconcileInterval,
		nodeRegistrator:   node.NewRegistrator(config.Client, config.LookupNode),
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				// the node must be registered and have up-to-date labels
				n := config.LookupNode(o.GetHostname())
				if n == nil || !node.IsUpToDate(n, node.SlaveAttributesToLabels(o.GetAttributes())) {
					return false
				}
				// the executor IDs must not identify a kubelet-executor with a group
				// that doesn't match ours
				for _, eid := range o.GetExecutorIds() {
					execuid := uid.Parse(eid.GetValue())
					if execuid.Name() == execcfg.DefaultInfoID && execuid.Group() != k.executorGroup {
						return false
					}
				}
				return true
			},
			DeclineOffer: func(id string) <-chan error {
				errOnce := proc.NewErrorOnce(k.terminate)
				errOuter := k.asRegisteredMaster.Do(func() {
					var err error
					// wrap Report in a closure so err is evaluated when the
					// function returns, not (as always-nil) at defer time
					defer func() { errOnce.Report(err) }()
					offerId := mutil.NewOfferID(id)
					filters := &mesos.Filters{}
					_, err = k.driver.DeclineOffer(offerId, filters)
				})
				return errOnce.Send(errOuter).Err()
			},
			// remember expired offers so that we can tell if a previously scheduled pod relies on one
			LingerTTL:     config.Schedcfg.OfferLingerTTL.Duration,
			TTL:           config.Schedcfg.OfferTTL.Duration,
			ListenerDelay: config.Schedcfg.ListenerDelay.Duration,
		}),
		slaveHostNames:    slave.NewRegistry(),
		taskRegistry:      podtask.NewInMemoryRegistry(),
		reconcileCooldown: config.ReconcileCooldown,
		registration:      make(chan struct{}),
		asRegisteredMaster: proc.DoerFunc(func(proc.Action) <-chan error {
			return proc.ErrorChanf("cannot execute action with unregistered scheduler")
		}),
	}
	return k
}
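// The executor-group check inside Compat above is the subtle part of the
// predicate: an offer is rejected only when it advertises a kubelet-executor
// whose uid group differs from ours. A minimal sketch of that filtering logic
// in isolation; the helper name compatibleExecutorGroup is hypothetical and
// assumes the uid/execcfg packages behave as they are used above.
func compatibleExecutorGroup(o *mesos.Offer, group string) bool {
	for _, eid := range o.GetExecutorIds() {
		execuid := uid.Parse(eid.GetValue())
		// reject offers that carry a kubelet-executor from another group
		if execuid.Name() == execcfg.DefaultInfoID && execuid.Group() != group {
			return false
		}
	}
	return true
}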
func New(
	c *config.Config,
	fw framework.Framework,
	ps podschedulers.PodScheduler,
	client *client.Client,
	recorder record.EventRecorder,
	terminate <-chan struct{},
	mux *http.ServeMux,
	lw *cache.ListWatch,
) scheduler.Scheduler {
	core := &sched{
		framework:    fw,
		taskRegistry: podtask.NewInMemoryRegistry(),
	}

	// Watch and queue pods that need scheduling.
	podUpdatesBypass := make(chan queue.Entry, c.UpdatesBacklog)
	podUpdates := &podStoreAdapter{queue.NewHistorical(podUpdatesBypass)}
	reflector := cache.NewReflector(lw, &api.Pod{}, podUpdates, 0)

	q := queuer.New(queue.NewDelayFIFO(), podUpdates)

	algorithm := algorithm.New(core, podUpdates, ps)

	podDeleter := deleter.New(core, q)

	core.podReconciler = podreconciler.New(core, client, q, podDeleter)

	bo := backoff.New(c.InitialPodBackoff.Duration, c.MaxPodBackoff.Duration)
	newBC := func(podKey string) queue.BreakChan {
		return queue.BreakChan(core.Offers().Listen(podKey, func(offer *mesos.Offer) bool {
			core.Lock()
			defer core.Unlock()
			switch task, state := core.Tasks().ForPod(podKey); state {
			case podtask.StatePending:
				// Assess fitness of pod with the current offer. The scheduler normally
				// "backs off" when it can't find an offer that matches up with a pod.
				// The backoff period for a pod can terminate sooner if an offer becomes
				// available that matches up.
				return !task.Has(podtask.Launched) && ps.FitPredicate()(task, offer, nil)
			default:
				// no point in continuing to check for matching offers
				return true
			}
		}))
	}
	errorHandler := errorhandler.New(core, bo, q, newBC)

	binder := binder.New(core)

	startLatch := make(chan struct{})
	runtime.On(startLatch, func() {
		reflector.Run() // TODO(jdef) should listen for termination
		podDeleter.Run(podUpdatesBypass, terminate)
		q.Run(terminate)

		q.InstallDebugHandlers(mux)
		podtask.InstallDebugHandlers(core.Tasks(), mux)
	})

	core.controller = controller.New(client, algorithm, recorder, q.Yield, errorHandler.Error, binder, startLatch)
	return core
}
func TestDeleteOne_Running(t *testing.T) {
	assert := assert.New(t)
	obj := &types.MockScheduler{}
	reg := podtask.NewInMemoryRegistry()
	obj.On("Tasks").Return(reg)

	pod := &queuer.Pod{Pod: &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			UID:       "foo0",
			Namespace: api.NamespaceDefault,
		}}}

	task, err := podtask.New(
		api.NewDefaultContext(),
		podtask.Config{
			ID:               "bar",
			Prototype:        &mesosproto.ExecutorInfo{},
			HostPortStrategy: hostport.StrategyWildcard,
		},
		pod.Pod,
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	task, err = reg.Register(task)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	task.Set(podtask.Launched)
	err = reg.Update(task)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// preconditions
	q := queue.NewDelayFIFO()
	qr := queuer.New(q, nil)
	q.Add(pod, queue.ReplaceExisting)
	assert.Equal(1, len(q.List()))
	_, found := q.Get("default/foo")
	assert.True(found)

	obj.On("KillTask", task.ID).Return(nil)

	// exec & post conditions
	d := New(obj, qr)
	err = d.DeleteOne(pod)
	assert.Nil(err)
	_, found = q.Get("foo0")
	assert.False(found)
	assert.Equal(0, len(q.List()))
	obj.AssertExpectations(t)
}
func TestDeleteOne_Running(t *testing.T) {
	assert := assert.New(t)
	obj := &MockScheduler{}
	reg := podtask.NewInMemoryRegistry()
	obj.On("tasks").Return(reg)

	pod := &Pod{Pod: &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			UID:       "foo0",
			Namespace: api.NamespaceDefault,
		}}}

	task, err := podtask.New(api.NewDefaultContext(), "bar", *pod.Pod, &mesos.ExecutorInfo{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	task, err = reg.Register(task)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	task.Set(podtask.Launched)
	err = reg.Update(task)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// preconditions
	qr := newQueuer(nil)
	qr.podQueue.Add(pod, queue.ReplaceExisting)
	assert.Equal(1, len(qr.podQueue.List()))
	_, found := qr.podQueue.Get("default/foo")
	assert.True(found)

	obj.On("killTask", task.ID).Return(nil)

	// exec & post conditions
	d := &deleter{
		api: obj,
		qr:  qr,
	}
	err = d.deleteOne(pod)
	assert.Nil(err)
	_, found = qr.podQueue.Get("foo0")
	assert.False(found)
	assert.Equal(0, len(qr.podQueue.List()))
	obj.AssertExpectations(t)
}
func TestDeleteOne_PendingPod(t *testing.T) {
	assert := assert.New(t)
	obj := &types.MockScheduler{}
	reg := podtask.NewInMemoryRegistry()
	obj.On("Tasks").Return(reg)

	pod := &queuer.Pod{Pod: &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			UID:       "foo0",
			Namespace: api.NamespaceDefault,
		}}}

	task, err := podtask.New(
		api.NewDefaultContext(),
		"bar",
		pod.Pod,
		&mesosproto.ExecutorInfo{},
		nil,
		nil,
	)
	if err != nil {
		t.Fatalf("failed to create task: %v", err)
	}

	_, err = reg.Register(task)
	if err != nil {
		t.Fatalf("failed to register task: %v", err)
	}

	// preconditions
	q := queue.NewDelayFIFO()
	qr := queuer.New(q, nil)
	q.Add(pod, queue.ReplaceExisting)
	assert.Equal(1, len(q.List()))
	_, found := q.Get("default/foo")
	assert.True(found)

	// exec & post conditions
	d := New(obj, qr)
	err = d.DeleteOne(pod)
	assert.Nil(err)
	_, found = q.Get("foo0")
	assert.False(found)
	assert.Equal(0, len(q.List()))
	obj.AssertExpectations(t)
}
func TestDeleteOne_NonexistentPod(t *testing.T) {
	assert := assert.New(t)
	obj := &types.MockScheduler{}
	reg := podtask.NewInMemoryRegistry()
	obj.On("Tasks").Return(reg)

	q := queue.NewDelayFIFO()
	qr := queuer.New(q, nil)
	assert.Equal(0, len(q.List()))

	d := New(obj, qr)
	pod := &queuer.Pod{Pod: &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: api.NamespaceDefault,
		}}}
	err := d.DeleteOne(pod)
	assert.Equal(err, errors.NoSuchPodErr)
	obj.AssertExpectations(t)
}
// Test that we can handle different status updates. TODO: check state transitions.
func TestStatus_Update(t *testing.T) {
	mockdriver := MockSchedulerDriver{}
	// setup expectations
	mockdriver.On("KillTask", util.NewTaskID("test-task-001")).Return(mesos.Status_DRIVER_RUNNING, nil)

	testScheduler := &KubernetesScheduler{
		offers: offers.CreateRegistry(offers.RegistryConfig{
			Compat: func(o *mesos.Offer) bool {
				return true
			},
			// remember expired offers so that we can tell if a previously scheduled pod relies on one
			LingerTTL:     schedcfg.DefaultOfferLingerTTL,
			TTL:           schedcfg.DefaultOfferTTL,
			ListenerDelay: schedcfg.DefaultListenerDelay,
		}),
		slaveHostNames: slave.NewRegistry(),
		driver:         &mockdriver,
		taskRegistry:   podtask.NewInMemoryRegistry(),
	}

	taskStatus_task_starting := util.NewTaskStatus(
		util.NewTaskID("test-task-001"),
		mesos.TaskState_TASK_RUNNING,
	)
	testScheduler.StatusUpdate(testScheduler.driver, taskStatus_task_starting)

	taskStatus_task_running := util.NewTaskStatus(
		util.NewTaskID("test-task-001"),
		mesos.TaskState_TASK_RUNNING,
	)
	testScheduler.StatusUpdate(testScheduler.driver, taskStatus_task_running)

	taskStatus_task_failed := util.NewTaskStatus(
		util.NewTaskID("test-task-001"),
		mesos.TaskState_TASK_FAILED,
	)
	testScheduler.StatusUpdate(testScheduler.driver, taskStatus_task_failed)

	// assert that the mock was invoked
	mockdriver.AssertExpectations(t)
}
func TestDeleteOne_NonexistentPod(t *testing.T) {
	assert := assert.New(t)
	obj := &MockScheduler{}
	reg := podtask.NewInMemoryRegistry()
	obj.On("tasks").Return(reg)

	qr := newQueuer(nil)
	assert.Equal(0, len(qr.podQueue.List()))

	d := &deleter{
		api: obj,
		qr:  qr,
	}
	pod := &Pod{Pod: &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: api.NamespaceDefault,
		}}}
	err := d.deleteOne(pod)
	assert.Equal(err, noSuchPodErr)
	obj.AssertExpectations(t)
}
// mockScheduler returns a scheduler.Scheduler mock whose Tasks() call is
// backed by a fresh in-memory task registry. The local variable is named
// mock to avoid shadowing the function name.
func mockScheduler() scheduler.Scheduler {
	mock := &scheduler.MockScheduler{}
	reg := podtask.NewInMemoryRegistry()
	mock.On("Tasks").Return(reg)
	return mock
}
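// A minimal usage sketch for the helper above; the test name is hypothetical
// and it relies only on the packages already used by these tests.
func TestMockScheduler_Tasks(t *testing.T) {
	s := mockScheduler()
	// Tasks() should return the in-memory registry wired up by mockScheduler.
	if s.Tasks() == nil {
		t.Fatal("expected a non-nil task registry")
	}
}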