func TestProc_manyEndings(t *testing.T) { p := New() const COUNT = 20 var wg sync.WaitGroup wg.Add(COUNT) for i := 0; i < COUNT; i++ { runtime.On(p.End(), wg.Done) } fatalAfter(t, runtime.After(wg.Wait), 5*time.Second, "timed out waiting for loose End()s") fatalAfter(t, p.Done(), 5*time.Second, "timed out waiting for process death") }
// NewPluginConfig assembles the scheduler plugin configuration: a reflector
// feeding pod updates into a historical queue, plus the deleter, queuer and
// error-handler that all share the same scheduler API facade. None of the
// background machinery runs until the returned config's `starting` latch
// (startLatch) is closed by the caller.
func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *http.ServeMux, podsWatcher *cache.ListWatch) *PluginConfig {

	// Watch and queue pods that need scheduling.
	updates := make(chan queue.Entry, k.schedcfg.UpdatesBacklog)
	podUpdates := &podStoreAdapter{queue.NewHistorical(updates)}
	reflector := cache.NewReflector(podsWatcher, &api.Pod{}, podUpdates, 0)

	// lock that guards critical sections that involve transferring pods from
	// the store (cache) to the scheduling queue; its purpose is to maintain
	// an ordering (vs interleaving) of operations that's easier to reason about.
	kapi := &k8smScheduler{internal: k}
	q := newQueuer(podUpdates)
	podDeleter := &deleter{
		api: kapi,
		qr:  q,
	}
	eh := &errorHandler{
		api:     kapi,
		backoff: backoff.New(k.schedcfg.InitialPodBackoff.Duration, k.schedcfg.MaxPodBackoff.Duration),
		qr:      q,
	}
	startLatch := make(chan struct{})
	eventBroadcaster := record.NewBroadcaster()
	// Deferred startup: event recording, the reflector, the deleter and the
	// queuer all begin only once startLatch closes.
	runtime.On(startLatch, func() {
		eventBroadcaster.StartRecordingToSink(k.client.Events(""))
		reflector.Run() // TODO(jdef) should listen for termination
		podDeleter.Run(updates, terminate)
		q.Run(terminate)

		q.installDebugHandlers(mux)
		podtask.InstallDebugHandlers(k.taskRegistry, mux)
	})
	return &PluginConfig{
		Config: &plugin.Config{
			MinionLister: nil,
			Algorithm: &kubeScheduler{
				api:                      kapi,
				podUpdates:               podUpdates,
				defaultContainerCPULimit: k.defaultContainerCPULimit,
				defaultContainerMemLimit: k.defaultContainerMemLimit,
			},
			Binder:   &binder{api: kapi},
			NextPod:  q.yield,
			Error:    eh.handleSchedulingError,
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		},
		api:      kapi,
		client:   k.client,
		qr:       q,
		deleter:  podDeleter,
		starting: startLatch,
	}
}
func New(sched bindings.Scheduler) *SchedulerProcess { p := &SchedulerProcess{ Process: proc.New(), Scheduler: sched, stage: initStage, elected: make(chan struct{}), failover: make(chan struct{}), standby: make(chan struct{}), fin: make(chan struct{}), } runtime.On(p.Running(), p.begin) return p }
// watch the scheduler process for failover signals and properly handle such. may never return.
// Blocks until the first error (or nil) is produced by either the process's
// terminal event or an OS failover signal, whichever fires first.
func (s *SchedulerServer) awaitFailover(schedulerProcess schedulerProcessInterface, handler func() error) error {

	// we only want to return the first error (if any), everyone else can block forever
	errCh := make(chan error, 1)
	doFailover := func() error {
		// we really don't expect handler to return, if it does something went seriously wrong
		err := handler()
		if err != nil {
			defer schedulerProcess.End()
			err = fmt.Errorf("failover failed, scheduler will terminate: %v", err)
		}
		return err
	}

	// guard for failover signal processing, first signal processor wins
	failoverLatch := &runtime.Latch{}
	runtime.On(schedulerProcess.Terminal(), func() {
		if !failoverLatch.Acquire() {
			log.V(1).Infof("scheduler process ending, already failing over")
			// deliberate: losers of the latch race park forever; only the
			// winner is allowed to send on errCh
			select {}
		}
		var err error
		defer func() { errCh <- err }()
		select {
		case <-schedulerProcess.Failover():
			err = doFailover()
		default:
			// terminal without a failover signal: HA mode treats this as an
			// error; otherwise it's a normal shutdown
			if s.HA {
				err = fmt.Errorf("ha scheduler exiting instead of failing over")
			} else {
				log.Infof("exiting scheduler")
			}
		}
	})
	runtime.OnOSSignal(makeFailoverSigChan(), func(_ os.Signal) {
		if !failoverLatch.Acquire() {
			log.V(1).Infof("scheduler process signalled, already failing over")
			// same parking behavior as above: only the latch winner reports
			select {}
		}
		errCh <- doFailover()
	})
	return <-errCh
}
// bootstrap validates flags, builds the apiserver and etcd clients, constructs
// the mesos pod scheduler and its HA process wrapper, and returns a driver
// factory that performs the remaining (deferred) initialization — framework ID
// fetch and mesos driver construction — on first use. Any misconfiguration is
// fatal via log.Fatalf.
func (s *SchedulerServer) bootstrap(hks hyperkube.Interface, sc *schedcfg.Config) (*ha.SchedulerProcess, ha.DriverFactory, tools.EtcdGetSet, *uid.UID) {

	s.FrameworkName = strings.TrimSpace(s.FrameworkName)
	if s.FrameworkName == "" {
		log.Fatalf("framework-name must be a non-empty string")
	}
	s.FrameworkWebURI = strings.TrimSpace(s.FrameworkWebURI)

	metrics.Register()
	runtime.Register()
	s.mux.Handle("/metrics", prometheus.Handler())

	// exactly one of --etcd-servers / --etcd-config must be supplied
	if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) {
		log.Fatalf("specify either --etcd-servers or --etcd-config")
	}

	if len(s.APIServerList) < 1 {
		log.Fatal("No api servers specified.")
	}

	client, err := s.createAPIServerClient()
	if err != nil {
		log.Fatalf("Unable to make apiserver client: %v", err)
	}
	s.client = client

	// clamp the cooldown to its floor; the warning reports the value in effect
	if s.ReconcileCooldown < defaultReconcileCooldown {
		s.ReconcileCooldown = defaultReconcileCooldown
		log.Warningf("user-specified reconcile cooldown too small, defaulting to %v", s.ReconcileCooldown)
	}

	executor, eid, err := s.prepareExecutorInfo(hks)
	if err != nil {
		log.Fatalf("misconfigured executor: %v", err)
	}

	// TODO(jdef): remove the dependency on etcd as soon as
	// (1) the generic config store is available for the FrameworkId storage
	// (2) the generic master election is provided by the apiserver
	// Compare docs/proposals/high-availability.md
	etcdClient, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList)
	if err != nil {
		log.Fatalf("misconfigured etcd: %v", err)
	}

	mesosPodScheduler := scheduler.New(scheduler.Config{
		Schedcfg:          *sc,
		Executor:          executor,
		ScheduleFunc:      scheduler.FCFSScheduleFunc,
		Client:            client,
		EtcdClient:        etcdClient,
		FailoverTimeout:   s.FailoverTimeout,
		ReconcileInterval: s.ReconcileInterval,
		ReconcileCooldown: s.ReconcileCooldown,
	})

	masterUri := s.MesosMaster
	info, cred, err := s.buildFrameworkInfo()
	if err != nil {
		log.Fatalf("Misconfigured mesos framework: %v", err)
	}

	schedulerProcess := ha.New(mesosPodScheduler)
	dconfig := &bindings.DriverConfig{
		Scheduler:        schedulerProcess,
		Framework:        info,
		Master:           masterUri,
		Credential:       cred,
		BindingAddress:   net.IP(s.Address),
		BindingPort:      uint16(s.DriverPort),
		HostnameOverride: s.HostnameOverride,
		WithAuthContext: func(ctx context.Context) context.Context {
			ctx = auth.WithLoginProvider(ctx, s.MesosAuthProvider)
			ctx = sasl.WithBindingAddress(ctx, net.IP(s.Address))
			return ctx
		},
	}

	// the plugin and the service writer both start only after framework registration
	kpl := scheduler.NewPlugin(mesosPodScheduler.NewDefaultPluginConfig(schedulerProcess.Terminal(), s.mux))
	runtime.On(mesosPodScheduler.Registration(), func() { kpl.Run(schedulerProcess.Terminal()) })
	runtime.On(mesosPodScheduler.Registration(), s.newServiceWriter(schedulerProcess.Terminal()))

	driverFactory := ha.DriverFactory(func() (drv bindings.SchedulerDriver, err error) {
		log.V(1).Infoln("performing deferred initialization")
		if err = mesosPodScheduler.Init(schedulerProcess.Master(), kpl, s.mux); err != nil {
			return nil, fmt.Errorf("failed to initialize pod scheduler: %v", err)
		}
		log.V(1).Infoln("deferred init complete")
		// defer obtaining framework ID to prevent multiple schedulers
		// from overwriting each other's framework IDs
		dconfig.Framework.Id, err = s.fetchFrameworkID(etcdClient)
		if err != nil {
			return nil, fmt.Errorf("failed to fetch framework ID from etcd: %v", err)
		}
		log.V(1).Infoln("constructing mesos scheduler driver")
		drv, err = bindings.NewMesosSchedulerDriver(*dconfig)
		if err != nil {
			return nil, fmt.Errorf("failed to construct scheduler driver: %v", err)
		}
		log.V(1).Infoln("constructed mesos scheduler driver:", drv)
		s.setDriver(drv)
		return drv, nil
	})

	return schedulerProcess, driverFactory, etcdClient, eid
}