func TestReflectorForWatchCache(t *testing.T) {
	store := newWatchCache(5)

	{
		_, version := store.ListWithVersion()
		if version != 0 {
			t.Errorf("unexpected resource version: %d", version)
		}
	}

	lw := &testLW{
		WatchFunc: func(rv string) (watch.Interface, error) {
			fw := watch.NewFake()
			go fw.Stop()
			return fw, nil
		},
		ListFunc: func() (runtime.Object, error) {
			return &api.PodList{ListMeta: unversioned.ListMeta{ResourceVersion: "10"}}, nil
		},
	}
	r := cache.NewReflector(lw, &api.Pod{}, store, 0)
	r.ListAndWatch(util.NeverStop)

	{
		_, version := store.ListWithVersion()
		if version != 10 {
			t.Errorf("unexpected resource version: %d", version)
		}
	}
}
// newSourceApiserverFromLW creates a config source that watches and pulls from the apiserver.
func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) {
	send := func(objs []interface{}) {
		var pods []*api.Pod
		for _, o := range objs {
			pods = append(pods, o.(*api.Pod))
		}
		updates <- kubelet.PodUpdate{Pods: pods, Op: kubelet.SET, Source: kubelet.ApiserverSource}
	}
	cache.NewReflector(lw, &api.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run()
}
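// exampleApiserverPodSource is a hypothetical wiring sketch, not part of the
// original source: it shows one way newSourceApiserverFromLW could be fed a
// ListWatch (built with cache.NewListWatchFromClient, as in the ServiceController
// below) and how a consumer might drain the updates channel. The field selector
// and the consumer loop are assumptions for illustration only.
func exampleApiserverPodSource(kubeClient *client.Client) {
	lw := cache.NewListWatchFromClient(kubeClient, "pods", api.NamespaceAll, fields.Everything())
	updates := make(chan interface{})
	newSourceApiserverFromLW(lw, updates) // returns immediately; the reflector runs in the background

	for u := range updates {
		// Every update carries a full SET snapshot of the pods known to the apiserver source.
		pods := u.(kubelet.PodUpdate).Pods
		glog.V(4).Infof("apiserver source delivered %d pods", len(pods))
	}
}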
func newEndpointsSourceApiFromLW(endpointsLW cache.ListerWatcher, period time.Duration, endpointsChan chan<- EndpointsUpdate) {
	endpointsPush := func(objs []interface{}) {
		var endpoints []api.Endpoints
		for _, o := range objs {
			endpoints = append(endpoints, *(o.(*api.Endpoints)))
		}
		endpointsChan <- EndpointsUpdate{Op: SET, Endpoints: endpoints}
	}

	endpointQueue := cache.NewUndeltaStore(endpointsPush, cache.MetaNamespaceKeyFunc)
	cache.NewReflector(endpointsLW, &api.Endpoints{}, endpointQueue, period).Run()
}
func newServicesSourceApiFromLW(servicesLW cache.ListerWatcher, period time.Duration, servicesChan chan<- ServiceUpdate) {
	servicesPush := func(objs []interface{}) {
		var services []api.Service
		for _, o := range objs {
			services = append(services, *(o.(*api.Service)))
		}
		servicesChan <- ServiceUpdate{Op: SET, Services: services}
	}

	serviceQueue := cache.NewUndeltaStore(servicesPush, cache.MetaNamespaceKeyFunc)
	cache.NewReflector(servicesLW, &api.Service{}, serviceQueue, period).Run()
}
func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *http.ServeMux, podsWatcher *cache.ListWatch) *PluginConfig {
	// Watch and queue pods that need scheduling.
	updates := make(chan queue.Entry, k.schedcfg.UpdatesBacklog)
	podUpdates := &podStoreAdapter{queue.NewHistorical(updates)}
	reflector := cache.NewReflector(podsWatcher, &api.Pod{}, podUpdates, 0)

	// lock that guards critical sections that involve transferring pods from
	// the store (cache) to the scheduling queue; its purpose is to maintain
	// an ordering (vs. interleaving) of operations that's easier to reason about.
	kapi := &k8smScheduler{internal: k}
	q := newQueuer(podUpdates)
	podDeleter := &deleter{
		api: kapi,
		qr:  q,
	}
	eh := &errorHandler{
		api:     kapi,
		backoff: backoff.New(k.schedcfg.InitialPodBackoff.Duration, k.schedcfg.MaxPodBackoff.Duration),
		qr:      q,
	}
	startLatch := make(chan struct{})
	eventBroadcaster := record.NewBroadcaster()
	runtime.On(startLatch, func() {
		eventBroadcaster.StartRecordingToSink(k.client.Events(""))
		reflector.Run() // TODO(jdef) should listen for termination
		podDeleter.Run(updates, terminate)
		q.Run(terminate)

		q.installDebugHandlers(mux)
		podtask.InstallDebugHandlers(k.taskRegistry, mux)
	})
	return &PluginConfig{
		Config: &plugin.Config{
			NodeLister: nil,
			Algorithm: &kubeScheduler{
				api:        kapi,
				podUpdates: podUpdates,
			},
			Binder:   &binder{api: kapi},
			NextPod:  q.yield,
			Error:    eh.handleSchedulingError,
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		},
		api:      kapi,
		client:   k.client,
		qr:       q,
		deleter:  podDeleter,
		starting: startLatch,
	}
}
// Run starts a background goroutine that watches for changes to services that
// have (or had) externalLoadBalancers=true and ensures that they have external
// load balancers created and deleted appropriately.
// serviceSyncPeriod controls how often we check the cluster's services to
// ensure that the correct external load balancers exist.
// nodeSyncPeriod controls how often we check the cluster's nodes to determine
// if external load balancers need to be updated to point to a new set of nodes.
//
// It's an error to call Run() more than once for a given ServiceController
// object.
func (s *ServiceController) Run(serviceSyncPeriod, nodeSyncPeriod time.Duration) error {
	if err := s.init(); err != nil {
		return err
	}

	// We have to make this check because the ListWatch that we use in
	// WatchServices requires Client functions that aren't in the interface
	// for some reason.
	if _, ok := s.kubeClient.(*client.Client); !ok {
		return fmt.Errorf("ServiceController only works with real Client objects, but was passed something else satisfying the client Interface.")
	}

	// Get the currently existing set of services and then all future creates
	// and updates of services.
	// A delta compressor is needed for the DeltaFIFO queue because we only ever
	// care about the most recent state.
	serviceQueue := cache.NewDeltaFIFO(
		cache.MetaNamespaceKeyFunc,
		cache.DeltaCompressorFunc(func(d cache.Deltas) cache.Deltas {
			if len(d) == 0 {
				return d
			}
			return cache.Deltas{*d.Newest()}
		}),
		s.cache,
	)
	lw := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "services", api.NamespaceAll, fields.Everything())
	cache.NewReflector(lw, &api.Service{}, serviceQueue, serviceSyncPeriod).Run()
	for i := 0; i < workerGoroutines; i++ {
		go s.watchServices(serviceQueue)
	}

	nodeLW := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "nodes", api.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &api.Node{}, s.nodeLister.Store, 0).Run()
	go s.nodeSyncLoop(nodeSyncPeriod)
	return nil
}
// Run begins processing items, and will continue until a value is sent down stopCh.
// It's an error to call Run more than once.
// Run blocks; call via go.
func (c *Controller) Run(stopCh <-chan struct{}) {
	defer util.HandleCrash()
	r := cache.NewReflector(
		c.config.ListerWatcher,
		c.config.ObjectType,
		c.config.Queue,
		c.config.FullResyncPeriod,
	)

	c.reflectorMutex.Lock()
	c.reflector = r
	c.reflectorMutex.Unlock()

	r.RunUntil(stopCh)

	util.Until(c.processLoop, time.Second, stopCh)
}
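// exampleControllerRun is an illustrative sketch (assumed, not from the source):
// because Run blocks until stopCh is closed, callers start it on its own
// goroutine. It presumes the Controller was built from a fully populated Config
// (the processing callback driven by processLoop is not shown in this snippet).
func exampleControllerRun(controller *Controller) {
	stop := make(chan struct{})
	go controller.Run(stop) // Run blocks; launch it via go, as the doc comment above requires

	// ... later, close stop to shut down both the reflector and the processing loop.
	close(stop)
}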
// NewProvision creates a new namespace provision admission control handler
func NewProvision(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		0,
	)
	reflector.Run()
	return createProvision(c, store)
}
// WatchPod watches the named pod and returns a function that blocks until the
// next update of the pod is available. The stopChannel is used to close the
// reflector backing the watch. The caller is responsible for deferring a close
// on the channel to stop the reflector.
func (c *realRecyclerClient) WatchPod(name, namespace, resourceVersion string, stopChannel chan struct{}) func() *api.Pod {
	fieldSelector, _ := fields.ParseSelector("metadata.name=" + name)

	podLW := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return c.client.Pods(namespace).List(labels.Everything(), fieldSelector)
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return c.client.Pods(namespace).Watch(labels.Everything(), fieldSelector, resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &api.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)

	return func() *api.Pod {
		obj := queue.Pop()
		return obj.(*api.Pod)
	}
}
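// exampleWatchRecyclerPod is a hypothetical caller sketch, not the actual call
// site: it pops pod updates from the getter returned by WatchPod until the pod
// reaches a terminal phase, then relies on the deferred close of stopChannel to
// stop the reflector. The terminal-phase check is an assumption for illustration.
func exampleWatchRecyclerPod(c *realRecyclerClient, pod *api.Pod) {
	stopChannel := make(chan struct{})
	defer close(stopChannel) // the caller must close the channel to stop the reflector

	nextPod := c.WatchPod(pod.Name, pod.Namespace, pod.ResourceVersion, stopChannel)
	for {
		watchedPod := nextPod() // blocks until the FIFO yields a newer copy of the pod
		if watchedPod.Status.Phase == api.PodSucceeded || watchedPod.Status.Phase == api.PodFailed {
			return // terminal phase reached; the deferred close shuts the reflector down
		}
	}
}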
// NewExists creates a new namespace exists admission control handler
func NewExists(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		5*time.Minute,
	)
	reflector.Run()
	return &exists{
		client:  c,
		store:   store,
		Handler: admission.NewHandler(admission.Create, admission.Update, admission.Delete),
	}
}
// NewCacher creates a new Cacher responsible for serving WATCH and LIST requests
// from its internal cache and updating its cache in the background based on the
// given configuration.
func NewCacher(config CacherConfig) *Cacher {
	watchCache := newWatchCache(config.CacheCapacity)
	listerWatcher := newCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)

	cacher := &Cacher{
		usable:     sync.RWMutex{},
		storage:    config.Storage,
		watchCache: watchCache,
		reflector:  cache.NewReflector(listerWatcher, config.Type, watchCache, 0),
		watcherIdx: 0,
		watchers:   make(map[int]*cacheWatcher),
		versioner:  config.Versioner,
		keyFunc:    config.KeyFunc,
	}
	cacher.usable.Lock() // See the startCaching method for an explanation of why this lock is taken here.
	watchCache.SetOnReplace(func() {
		cacher.usable.Unlock()
	})
	watchCache.SetOnEvent(cacher.processEvent)

	stopCh := config.StopChannel
	go util.Until(func() { cacher.startCaching(stopCh) }, 0, stopCh)
	return cacher
}
// CreateFromKeys creates a scheduler from a set of registered fit predicate keys and priority keys.
func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String) (*scheduler.Config, error) {
	glog.V(2).Infof("creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys)
	pluginArgs := PluginFactoryArgs{
		PodLister:        f.PodLister,
		ServiceLister:    f.ServiceLister,
		ControllerLister: f.ControllerLister,
		// All fit predicates only need to consider schedulable nodes.
		NodeLister: f.NodeLister.NodeCondition(api.NodeReady, api.ConditionTrue),
		NodeInfo:   f.NodeLister,
	}
	predicateFuncs, err := getFitPredicateFunctions(predicateKeys, pluginArgs)
	if err != nil {
		return nil, err
	}

	priorityConfigs, err := getPriorityFunctionConfigs(priorityKeys, pluginArgs)
	if err != nil {
		return nil, err
	}

	// Watch and queue pods that need scheduling.
	cache.NewReflector(f.createUnassignedPodLW(), &api.Pod{}, f.PodQueue, 0).RunUntil(f.StopEverything)

	// Begin populating scheduled pods.
	go f.scheduledPodPopulator.Run(f.StopEverything)

	// Watch nodes.
	// Nodes may be listed frequently, so provide a local up-to-date cache.
	cache.NewReflector(f.createNodeLW(), &api.Node{}, f.NodeLister.Store, 0).RunUntil(f.StopEverything)

	// Watch and cache all service objects. Scheduler needs to find all pods
	// created by the same services or ReplicationControllers, so that it can spread them correctly.
	// Cache this locally.
	cache.NewReflector(f.createServiceLW(), &api.Service{}, f.ServiceLister.Store, 0).RunUntil(f.StopEverything)

	// Watch and cache all ReplicationController objects. Scheduler needs to find all pods
	// created by the same services or ReplicationControllers, so that it can spread them correctly.
	// Cache this locally.
	cache.NewReflector(f.createControllerLW(), &api.ReplicationController{}, f.ControllerLister.Store, 0).RunUntil(f.StopEverything)

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	algo := scheduler.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r)

	podBackoff := podBackoff{
		perPodBackoff:   map[types.NamespacedName]*backoffEntry{},
		clock:           realClock{},
		defaultDuration: 1 * time.Second,
		maxDuration:     60 * time.Second,
	}

	return &scheduler.Config{
		Modeler: f.modeler,
		// The scheduler only needs to consider schedulable nodes.
		NodeLister: f.NodeLister.NodeCondition(api.NodeReady, api.ConditionTrue),
		Algorithm:  algo,
		Binder:     &binder{f.Client},
		NextPod: func() *api.Pod {
			pod := f.PodQueue.Pop().(*api.Pod)
			glog.V(2).Infof("About to try and schedule pod %v", pod.Name)
			return pod
		},
		Error:               f.makeDefaultErrorFunc(&podBackoff, f.PodQueue),
		BindPodsRateLimiter: f.BindPodsRateLimiter,
		StopEverything:      f.StopEverything,
	}, nil
}