func (s *offerStorage) Init(done <-chan struct{}) {
	// zero delay, reap offers as soon as they expire
	go runtime.Until(s.ageOffers, 0, done)

	// cached offer ids for the purposes of listener notification
	idCache := &stringsCache{
		refill: func() sets.String {
			result := sets.NewString()
			for _, v := range s.offers.List() {
				if offer, ok := v.(Perishable); ok {
					result.Insert(offer.Id())
				}
			}
			return result
		},
		ttl: offerIdCacheTTL,
	}
	go runtime.Until(func() { s.notifyListeners(idCache.Strings) }, notifyListenersDelay, done)
}
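// All of the loops in this section are driven by runtime.Until. As a point of
// reference, here is a minimal sketch of its assumed semantics (modeled on
// Kubernetes' util.Until, not necessarily this project's implementation):
// call f repeatedly, sleeping for period between invocations, until the done
// channel closes. With a zero period, as for ageOffers above, f is re-run as
// soon as it returns.
func untilSketch(f func(), period time.Duration, done <-chan struct{}) {
	for {
		select {
		case <-done:
			return
		default:
		}
		f()
		select {
		case <-done:
			return
		case <-time.After(period): // a zero period fires immediately
		}
	}
}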
// Notify runs Elect() on m, and calls Start()/Stop() on s when the
// elected master starts/stops matching 'id'. Never returns, unless
// abort is closed.
func Notify(m MasterElector, path, id string, s Service, abort <-chan struct{}) {
	n := &notifier{id: Master(id), service: s}
	n.changed = make(chan struct{})
	finished := runtime.After(func() {
		runtime.Until(func() {
			for {
				w := m.Elect(path, id)
			eventLoop:
				for {
					select {
					case <-abort:
						return
					case event, open := <-w.ResultChan():
						if !open {
							// the watch ended; restart the election to
							// obtain a fresh watch
							break eventLoop
						}
						if event.Type != watch.Modified {
							continue
						}
						electedMaster, ok := event.Object.(Master)
						if !ok {
							glog.Errorf("Unexpected object from election channel: %v", event.Object)
							continue // skip the malformed event
						}
						n.lock.Lock()
						n.desired = electedMaster
						n.lock.Unlock()

						// notify serviceLoop, but don't block. If a change
						// is queued already it will see the new n.desired.
						select {
						case n.changed <- struct{}{}:
						default:
						}
					}
				}
			}
		}, 0, abort)
	})
	runtime.Until(func() { n.serviceLoop(finished) }, 0, abort)
}
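// Notify coordinates its two loops through runtime.After. A minimal sketch of
// the semantics assumed here (reconstructed from usage, not necessarily the
// project's implementation): run f in its own goroutine and return a channel
// that is closed once f returns, so callers can select on its completion.
func afterSketch(f func()) <-chan struct{} {
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		f()
	}()
	return ch
}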
// perform one-time initialization actions upon the first registration event received from Mesos.
func (k *KubernetesScheduler) onInitialRegistration(driver bindings.SchedulerDriver) {
	defer close(k.registration)

	if k.failoverTimeout > 0 {
		refreshInterval := k.schedcfg.FrameworkIdRefreshInterval.Duration
		if k.failoverTimeout < k.schedcfg.FrameworkIdRefreshInterval.Duration.Seconds() {
			refreshInterval = time.Duration(math.Max(1, k.failoverTimeout/2)) * time.Second
		}
		go runtime.Until(k.storeFrameworkId, refreshInterval, k.terminate)
	}

	r1 := k.makeTaskRegistryReconciler()
	r2 := k.makePodRegistryReconciler()
	k.reconciler = newReconciler(k.asRegisteredMaster, k.makeCompositeReconciler(r1, r2),
		k.reconcileCooldown, k.schedcfg.ExplicitReconciliationAbortTimeout.Duration, k.terminate)
	go k.reconciler.Run(driver)

	if k.reconcileInterval > 0 {
		ri := time.Duration(k.reconcileInterval) * time.Second
		time.AfterFunc(k.schedcfg.InitialImplicitReconciliationDelay.Duration, func() {
			runtime.Until(k.reconciler.RequestImplicit, ri, k.terminate)
		})
		log.Infof("will perform implicit task reconciliation at interval: %v after %v", ri,
			k.schedcfg.InitialImplicitReconciliationDelay.Duration)
	}
}
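// The refresh-interval arithmetic above, extracted into a pure function for
// illustration (a hypothetical helper, not part of the source). For example,
// with failoverTimeout = 5s and a configured interval of 10s, it returns 2s
// (math.Max(1, 2.5), truncated by the Duration conversion), so the framework
// id is re-stored well before the failover timeout can expire it.
func frameworkIdRefreshInterval(failoverTimeout float64, configured time.Duration) time.Duration {
	if failoverTimeout < configured.Seconds() {
		return time.Duration(math.Max(1, failoverTimeout/2)) * time.Second
	}
	return configured
}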
// currently monitors for "pod deleted" events, upon which deleteOne()
// is invoked.
func (k *deleter) Run(updates <-chan queue.Entry, done <-chan struct{}) {
	go runtime.Until(func() {
		for {
			entry := <-updates
			pod := entry.Value().(*Pod)
			if entry.Is(queue.DELETE_EVENT) {
				if err := k.deleteOne(pod); err != nil {
					log.Error(err)
				}
			} else if !entry.Is(queue.POP_EVENT) {
				k.qr.updatesAvailable()
			}
		}
	}, 1*time.Second, done)
}
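// The deleter loop relies on a narrow slice of the queue.Entry surface,
// reconstructed here from usage alone (the real interface and its event-type
// constants may well differ):
type eventType int

const (
	deleteEvent eventType = iota // stands in for queue.DELETE_EVENT
	popEvent                     // stands in for queue.POP_EVENT
)

type entrySketch interface {
	Value() interface{}  // the queued object; always a *Pod in the loop above
	Is(e eventType) bool // whether this entry was produced by the given event
}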
// spawns a go-routine to watch for unscheduled pods and queue them up
// for scheduling. returns immediately.
func (q *queuer) Run(done <-chan struct{}) {
	go runtime.Until(func() {
		log.Info("Watching for newly created pods")
		q.lock.Lock()
		defer q.lock.Unlock()
		for {
			// limit blocking here for short intervals so that scheduling
			// may proceed even if there have been no recent pod changes
			p := q.podUpdates.Await(enqueuePopTimeout)
			if p == nil {
				signalled := runtime.After(q.deltaCond.Wait)
				// we've yielded the lock
				select {
				case <-time.After(enqueueWaitTimeout):
					q.deltaCond.Broadcast() // abort Wait()
					<-signalled             // wait for lock re-acquisition
					log.V(4).Infoln("timed out waiting for a pod update")
				case <-signalled:
					// we've acquired the lock and there may be
					// changes for us to process now
				}
				continue
			}

			pod := p.(*Pod)
			if recoverAssignedSlave(pod.Pod) != "" {
				log.V(3).Infof("dequeuing assigned pod for scheduling: %v", pod.Pod.Name)
				q.dequeue(pod.GetUID())
			} else if pod.InGracefulTermination() {
				// pods which are pre-scheduled (i.e. NodeName is set) may be gracefully deleted,
				// even though they are not running yet.
				log.V(3).Infof("dequeuing gracefully deleted pre-scheduled pod for scheduling: %v", pod.Pod.Name)
				q.dequeue(pod.GetUID())
			} else {
				// use ReplaceExisting because we are always pushing the latest state
				now := time.Now()
				pod.deadline = &now
				if q.podQueue.Offer(pod, queue.ReplaceExisting) {
					q.unscheduledCond.Broadcast()
					log.V(3).Infof("queued pod for scheduling: %v", pod.Pod.Name)
				} else {
					log.Warningf("failed to queue pod for scheduling: %v", pod.Pod.Name)
				}
			}
		}
	}, 1*time.Second, done)
}
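// The p == nil branch above is a reusable pattern: waiting on a sync.Cond
// with a timeout. Cond.Wait has no deadline, so the Wait runs in a separate
// goroutine via runtime.After and is raced against a timer; on timeout,
// Broadcast aborts the Wait and the signalled channel is drained so that the
// lock is held again before the caller proceeds. Distilled into an
// illustrative helper (not in the source; the caller must hold cond.L, which
// Wait releases while blocked):
func waitWithTimeout(cond *sync.Cond, timeout time.Duration) (signalled bool) {
	done := runtime.After(cond.Wait) // closed once Wait returns, lock reacquired
	select {
	case <-time.After(timeout):
		cond.Broadcast() // abort the Wait
		<-done           // wait for lock re-acquisition
		return false
	case <-done:
		return true
	}
}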
func (s *SchedulerServer) Run(hks hyperkube.Interface, _ []string) error {
	// get scheduler low-level config
	sc := schedcfg.CreateDefaultConfig()
	if s.SchedulerConfigFileName != "" {
		f, err := os.Open(s.SchedulerConfigFileName)
		if err != nil {
			log.Fatalf("Cannot open scheduler config file: %v", err)
		}
		err = sc.Read(bufio.NewReader(f))
		if err != nil {
			log.Fatalf("Invalid scheduler config file: %v", err)
		}
	}

	schedulerProcess, driverFactory, etcdClient, eid := s.bootstrap(hks, sc)

	if s.EnableProfiling {
		profile.InstallHandler(s.mux)
	}
	// restart the HTTP interface if it ever exits, pausing HttpBindInterval
	// between attempts, until the scheduler process terminates
	go runtime.Until(func() {
		log.V(1).Info("Starting HTTP interface")
		log.Error(http.ListenAndServe(net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)), s.mux))
	}, sc.HttpBindInterval.Duration, schedulerProcess.Terminal())

	if s.HA {
		validation := ha.ValidationFunc(validateLeadershipTransition)
		srv := ha.NewCandidate(schedulerProcess, driverFactory, validation)
		path := fmt.Sprintf(meta.DefaultElectionFormat, s.FrameworkName)
		sid := uid.New(eid.Group(), "").String()
		log.Infof("registering for election at %v with id %v", path, sid)
		go election.Notify(election.NewEtcdMasterElector(etcdClient), path, sid, srv, nil)
	} else {
		log.Infoln("self-electing in non-HA mode")
		schedulerProcess.Elect(driverFactory)
	}

	return s.awaitFailover(schedulerProcess, func() error { return s.failover(s.getDriver(), hks) })
}
func (s *schedulingPlugin) Run(done <-chan struct{}) {
	defer close(s.starting)
	go runtime.Until(s.scheduleOne, pluginRecoveryDelay, done)
}