func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	jm := &JobController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
		},
		expectations: controller.NewControllerExpectations(),
		queue:        workqueue.New(),
		recorder:     eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
	}

	// Watch for addition/update/deletion of jobs.
	jm.jobStore.Store, jm.jobController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return jm.kubeClient.Batch().Jobs(api.NamespaceAll).Watch(options)
			},
		},
		&batch.Job{},
		// TODO: Can we have a much longer period here?
		replicationcontroller.FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: jm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				// Finished jobs never need another sync, so skip them on update.
				if job := cur.(*batch.Job); !isJobFinished(job) {
					jm.enqueueController(job)
				}
			},
			DeleteFunc: jm.enqueueController,
		},
	)

	// Track pod changes through the shared pod informer so expectations can be observed.
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    jm.addPod,
		UpdateFunc: jm.updatePod,
		DeleteFunc: jm.deletePod,
	})
	jm.podStore.Indexer = podInformer.GetIndexer()
	jm.podStoreSynced = podInformer.HasSynced

	jm.updateHandler = jm.updateJobStatus
	jm.syncHandler = jm.syncJob
	return jm
}
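// The constructor above only wires up informers and handlers; nothing syncs until the
// shared pod informer and the controller's workers are started. The sketch below is a
// hedged illustration of that wiring, not the actual controller-manager code: it assumes
// it lives in the same package, that SharedIndexInformer exposes Run(stopCh) and
// JobController exposes Run(workers int, stopCh <-chan struct{}) as elsewhere in this
// code base, and the function name and worker count are illustrative only.
func startJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, stopCh <-chan struct{}) {
	jm := NewJobController(podInformer, kubeClient)
	// The shared pod informer must be running for jm.podStoreSynced to ever report true.
	go podInformer.Run(stopCh)
	// Start the sync workers; 5 is an arbitrary illustrative value.
	go jm.Run(5, stopCh)
	<-stopCh
}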
func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	dsc := &DaemonSetsController{
		kubeClient:    kubeClient,
		eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemonset-controller"}),
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "daemon-set"}),
		},
		burstReplicas: BurstReplicas,
		expectations:  controller.NewControllerExpectations(),
		queue:         workqueue.New(),
	}

	// Manage addition/update of daemon sets.
	dsc.dsStore.Store, dsc.dsController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.DaemonSet{},
		// TODO: Can we have a much longer period here?
		FullDaemonSetResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				ds := obj.(*extensions.DaemonSet)
				glog.V(4).Infof("Adding daemon set %s", ds.Name)
				dsc.enqueueDaemonSet(ds)
			},
			UpdateFunc: func(old, cur interface{}) {
				oldDS := old.(*extensions.DaemonSet)
				curDS := cur.(*extensions.DaemonSet)

				// We should invalidate the whole lookup cache if a DaemonSet's selector has been updated.
				//
				// Imagine that you have two DaemonSets:
				// * old DS1
				// * new DS2
				// You also have a pod that is attached to DS2 (because it doesn't match DS1's selector).
				// Now imagine that you change DS1's selector so that it now matches that pod. In that
				// case we must invalidate the whole cache so that the pod can be adopted by DS1.
				//
				// This makes the lookup cache less helpful, but selector updates are rare,
				// so it's not a big problem.
				if !reflect.DeepEqual(oldDS.Spec.Selector, curDS.Spec.Selector) {
					dsc.lookupCache.InvalidateAll()
				}

				glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
				dsc.enqueueDaemonSet(curDS)
			},
			DeleteFunc: dsc.deleteDaemonset,
		},
	)

	// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
	// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    dsc.addPod,
		UpdateFunc: dsc.updatePod,
		DeleteFunc: dsc.deletePod,
	})
	dsc.podStore.Indexer = podInformer.GetIndexer()
	dsc.podController = podInformer.GetController()
	dsc.podStoreSynced = podInformer.HasSynced

	// Watch for new nodes or updates to nodes - daemon pods are launched on new nodes,
	// and possibly when labels on nodes change.
	dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dsc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dsc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&api.Node{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc:    dsc.addNode,
			UpdateFunc: dsc.updateNode,
		},
	)

	dsc.syncHandler = dsc.syncDaemonSet
	dsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return dsc
}
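// As with the job controller, the following is a hedged usage sketch rather than the
// actual controller-manager wiring. It assumes the shared pod informer and clientset are
// built elsewhere, that DaemonSetsController exposes Run(workers int, stopCh <-chan struct{})
// like other controllers in this code base, and that controller.ResyncPeriodFunc is a
// func() time.Duration. The function name, resync period, worker count, and lookup cache
// size are illustrative assumptions only.
func startDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, stopCh <-chan struct{}) {
	// Arbitrary illustrative resync period; real deployments typically derive this from flags.
	resync := func() time.Duration { return 30 * time.Second }
	dsc := NewDaemonSetsController(podInformer, kubeClient, resync, 1024 /* lookup cache size, arbitrary */)
	// The shared pod informer must be running for dsc.podStoreSynced to report true.
	go podInformer.Run(stopCh)
	// 2 workers is an arbitrary illustrative value.
	go dsc.Run(2, stopCh)
	<-stopCh
}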
// NewReplicaSetController creates a new ReplicaSetController.
func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	rsc := &ReplicaSetController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replicaset-controller"}),
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:         workqueue.New(),
	}

	// Watch for addition/update/deletion of replica sets.
	rsc.rsStore.Store, rsc.rsController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.ReplicaSet{},
		// TODO: Can we have a much longer period here?
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rsc.enqueueReplicaSet,
			UpdateFunc: func(old, cur interface{}) {
				oldRS := old.(*extensions.ReplicaSet)
				curRS := cur.(*extensions.ReplicaSet)

				// We should invalidate the whole lookup cache if an RS's selector has been updated.
				//
				// Imagine that you have two RSs:
				// * old RS1
				// * new RS2
				// You also have a pod that is attached to RS2 (because it doesn't match RS1's selector).
				// Now imagine that you change RS1's selector so that it now matches that pod. In that
				// case we must invalidate the whole cache so that the pod can be adopted by RS1.
				//
				// This makes the lookup cache less helpful, but selector updates are rare,
				// so it's not a big problem.
				if !reflect.DeepEqual(oldRS.Spec.Selector, curRS.Spec.Selector) {
					rsc.lookupCache.InvalidateAll()
				}

				// You might imagine that we only really need to enqueue the replica set when
				// Spec changes, but it is safer to sync any time this function is triggered.
				// That way a full informer resync can requeue replica sets that don't yet have
				// pods but whose last attempts at creating a pod have failed (since we don't
				// block on pod creation) instead of leaving those replica sets stalled
				// indefinitely. Enqueueing every time does result in some spurious syncs
				// (like when Status.Replicas is updated and the watch notification from it
				// retriggers this function), but in general extra resyncs shouldn't be that
				// bad: ReplicaSets that haven't met expectations yet won't sync, and all the
				// listing is done using local stores.
				if oldRS.Status.Replicas != curRS.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for ReplicaSet: %v, %d->%d", curRS.Name, oldRS.Status.Replicas, curRS.Status.Replicas)
				}
				rsc.enqueueReplicaSet(cur)
			},
			// This will enter the sync loop and no-op, because the replica set has been deleted from the store.
			// Note that deleting a replica set immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the replica set.
			DeleteFunc: rsc.enqueueReplicaSet,
		},
	)

	rsc.podStore.Indexer, rsc.podController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rsc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rsc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc: rsc.addPod,
			// This invokes the ReplicaSet controller for every pod change, e.g. host assignment.
			// Though this might seem like overkill, the most frequent pod update is status, and
			// the associated ReplicaSet will only list from local storage, so it should be OK.
			UpdateFunc: rsc.updatePod,
			DeleteFunc: rsc.deletePod,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	rsc.syncHandler = rsc.syncReplicaSet
	rsc.podStoreSynced = rsc.podController.HasSynced
	rsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rsc
}
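// Hedged usage sketch for the constructor above: unlike the job and daemon set
// controllers, this ReplicaSetController builds its own pod informer internally (via
// NewIndexerInformer), so it only needs the clientset. It assumes ReplicaSetController
// exposes Run(workers int, stopCh <-chan struct{}) and that Run starts the internal
// informers; the function name, resync period, burst, lookup cache size, and worker
// count below are all illustrative assumptions, not values from the source.
func startReplicaSetController(kubeClient clientset.Interface, stopCh <-chan struct{}) {
	// Arbitrary illustrative resync period; real deployments typically derive this from flags.
	resync := func() time.Duration { return 30 * time.Second }
	rsc := NewReplicaSetController(kubeClient, resync, 500 /* burst replicas, arbitrary */, 4096 /* lookup cache size, arbitrary */)
	// 5 workers is an arbitrary illustrative value; Run is assumed to start the
	// rsController and podController informers before processing the queue.
	go rsc.Run(5, stopCh)
	<-stopCh
}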