// NewDeploymentConfigController creates a new DeploymentConfigController.
func NewDeploymentConfigController(dcInformer, rcInformer, podInformer framework.SharedIndexInformer, oc osclient.Interface, kc kclient.Interface, codec runtime.Codec) *DeploymentConfigController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(kc.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deploymentconfig-controller"})

	c := &DeploymentConfigController{
		dn: oc,
		rn: kc,

		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		recorder: recorder,
		codec:    codec,
	}

	c.dcStore.Indexer = dcInformer.GetIndexer()
	dcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addDeploymentConfig,
		UpdateFunc: c.updateDeploymentConfig,
		DeleteFunc: c.deleteDeploymentConfig,
	})
	c.rcStore.Indexer = rcInformer.GetIndexer()
	rcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addReplicationController,
		UpdateFunc: c.updateReplicationController,
		DeleteFunc: c.deleteReplicationController,
	})
	c.podStore.Indexer = podInformer.GetIndexer()

	c.dcStoreSynced = dcInformer.HasSynced
	c.rcStoreSynced = rcInformer.HasSynced
	c.podStoreSynced = podInformer.HasSynced

	return c
}
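Every constructor in this collection wires informer event handlers into a rate-limited work queue; the consuming half is a worker loop built on the queue's Get/Done/Forget/AddRateLimited methods. A minimal, self-contained sketch of that loop follows. The `controller` type here is hypothetical; its `syncHandler` and `maxRetries` fields mirror what several constructors below assign, but the names are illustrative rather than taken from any one of them.

package example

import (
	"k8s.io/kubernetes/pkg/util/workqueue"
)

// controller is a hypothetical stand-in for the controllers in this file:
// it owns a rate-limited queue, a sync function, and a retry budget.
type controller struct {
	queue       workqueue.RateLimitingInterface
	syncHandler func(key string) error
	maxRetries  int
}

// worker drains the queue until ShutDown is called. Get blocks while the
// queue is empty, and every Get must be paired with a Done.
func (c *controller) worker() {
	for {
		key, quit := c.queue.Get()
		if quit {
			return
		}
		func() {
			defer c.queue.Done(key)
			if err := c.syncHandler(key.(string)); err == nil {
				// Success: drop the key's failure history from the rate limiter.
				c.queue.Forget(key)
			} else if c.queue.NumRequeues(key) < c.maxRetries {
				// Failure: requeue with the limiter's backoff.
				c.queue.AddRateLimited(key)
			} else {
				// Out of retries: give up on this key until the next event.
				c.queue.Forget(key)
			}
		}()
	}
}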
// NewClusterQuotaMappingController builds a mapping between namespaces and clusterresourcequotas
func NewClusterQuotaMappingController(namespaceInformer shared.NamespaceInformer, quotaInformer shared.ClusterResourceQuotaInformer) *ClusterQuotaMappingController {
	c := &ClusterQuotaMappingController{
		namespaceQueue:     workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		quotaQueue:         workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		clusterQuotaMapper: NewClusterQuotaMapper(),
	}

	namespaceInformer.Informer().AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addNamespace,
		UpdateFunc: c.updateNamespace,
		DeleteFunc: c.deleteNamespace,
	})
	c.namespaceLister = namespaceInformer.Lister()
	c.namespacesSynced = namespaceInformer.Informer().HasSynced

	quotaInformer.Informer().AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addQuota,
		UpdateFunc: c.updateQuota,
		DeleteFunc: c.deleteQuota,
	})
	c.quotaLister = quotaInformer.Lister()
	c.quotasSynced = quotaInformer.Informer().HasSynced

	return c
}
// NewClusterQuotaMappingController builds a mapping between namespaces and clusterresourcequotas
func NewClusterQuotaMappingController(namespaceInformer shared.NamespaceInformer, quotaInformer shared.ClusterResourceQuotaInformer) *ClusterQuotaMappingController {
	c := &ClusterQuotaMappingController{
		namespaceQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		quotaQueue:     workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		clusterQuotaMapper: &clusterQuotaMapper{
			requiredQuotaToSelector:    map[string]*unversioned.LabelSelector{},
			requiredNamespaceToLabels:  map[string]map[string]string{},
			completedQuotaToSelector:   map[string]*unversioned.LabelSelector{},
			completedNamespaceToLabels: map[string]map[string]string{},

			quotaToNamespaces: map[string]sets.String{},
			namespaceToQuota:  map[string]sets.String{},
		},
	}

	namespaceInformer.Informer().AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addNamespace,
		UpdateFunc: c.updateNamespace,
		DeleteFunc: c.deleteNamespace,
	})
	c.namespaceLister = namespaceInformer.Lister()
	c.namespacesSynced = namespaceInformer.Informer().HasSynced

	quotaInformer.Informer().AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addQuota,
		UpdateFunc: c.updateQuota,
		DeleteFunc: c.deleteQuota,
	})
	c.quotaLister = quotaInformer.Lister()
	c.quotasSynced = quotaInformer.Informer().HasSynced

	return c
}
// NewTokensController returns a new *TokensController.
func NewTokensController(cl clientset.Interface, options TokensControllerOptions) *TokensController {
	maxRetries := options.MaxRetries
	if maxRetries == 0 {
		maxRetries = 10
	}

	e := &TokensController{
		client: cl,
		token:  options.TokenGenerator,
		rootCA: options.RootCA,

		syncServiceAccountQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		syncSecretQueue:         workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		maxRetries: maxRetries,
	}

	e.serviceAccounts, e.serviceAccountController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.queueServiceAccountSync,
			UpdateFunc: e.queueServiceAccountUpdateSync,
			DeleteFunc: e.queueServiceAccountSync,
		},
	)

	tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
	e.secrets, e.secretController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = tokenSelector
				return e.client.Core().Secrets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = tokenSelector
				return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
			},
		},
		&api.Secret{},
		options.SecretResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.queueSecretSync,
			UpdateFunc: e.queueSecretUpdateSync,
			DeleteFunc: e.queueSecretSync,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	return e
}
// NewTaskQueue creates a new task queue with the given sync function.
// The sync function is called for every element inserted into the queue.
func NewTaskQueue(syncFn func(string) error) *taskQueue {
	return &taskQueue{
		queue:      workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		sync:       syncFn,
		workerDone: make(chan struct{}),
	}
}
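The snippet above shows only the constructor. As a hedged illustration of how such a queue is typically driven, the methods below are hypothetical companions, not the actual source: `enqueue` feeds keys in, and `worker` applies `sync` to each key, requeueing with backoff on error and signaling `workerDone` on shutdown.

// Hypothetical companion methods for the taskQueue above; illustrative only.

// enqueue adds a key to be synced.
func (t *taskQueue) enqueue(key string) {
	t.queue.Add(key)
}

// worker runs until the queue is shut down, invoking sync on each key.
func (t *taskQueue) worker() {
	for {
		key, quit := t.queue.Get()
		if quit {
			close(t.workerDone)
			return
		}
		if err := t.sync(key.(string)); err != nil {
			// Retry the key with the rate limiter's backoff.
			t.queue.AddRateLimited(key)
		} else {
			t.queue.Forget(key)
		}
		t.queue.Done(key)
	}
}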
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(rcInformer, podInformer framework.SharedIndexInformer, kc kclient.Interface, sa, image string, env []kapi.EnvVar, codec runtime.Codec) *DeploymentController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(kc.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployments-controller"})

	c := &DeploymentController{
		rn: kc,
		pn: kc,

		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		serviceAccount: sa,
		deployerImage:  image,
		environment:    env,
		recorder:       recorder,
		codec:          codec,
	}

	c.rcStore.Indexer = rcInformer.GetIndexer()
	rcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addReplicationController,
		UpdateFunc: c.updateReplicationController,
	})
	c.podStore.Indexer = podInformer.GetIndexer()
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		UpdateFunc: c.updatePod,
		DeleteFunc: c.deletePod,
	})

	c.rcStoreSynced = rcInformer.HasSynced
	c.podStoreSynced = podInformer.HasSynced

	return c
}
// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl kclientset.Interface, options DockercfgControllerOptions) *DockercfgController {
	e := &DockercfgController{
		client:               cl,
		queue:                workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		dockerURLsIntialized: options.DockerURLsIntialized,
	}

	var serviceAccountCache cache.Store
	serviceAccountCache, e.serviceAccountController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		options.Resync,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				serviceAccount := obj.(*api.ServiceAccount)
				glog.V(5).Infof("Adding service account %s", serviceAccount.Name)
				e.enqueueServiceAccount(serviceAccount)
			},
			UpdateFunc: func(old, cur interface{}) {
				serviceAccount := cur.(*api.ServiceAccount)
				glog.V(5).Infof("Updating service account %s", serviceAccount.Name)
				// Resync on service object relist.
				e.enqueueServiceAccount(serviceAccount)
			},
		},
	)
	e.serviceAccountCache = NewEtcdMutationCache(serviceAccountCache)

	tokenSecretSelector := fields.OneTermEqualSelector(api.SecretTypeField, string(api.SecretTypeServiceAccountToken))
	e.secretCache, e.secretController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = tokenSecretSelector
				return e.client.Core().Secrets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = tokenSecretSelector
				return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
			},
		},
		&api.Secret{},
		options.Resync,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(cur interface{}) { e.handleTokenSecretUpdate(nil, cur) },
			UpdateFunc: func(old, cur interface{}) { e.handleTokenSecretUpdate(old, cur) },
			DeleteFunc: e.handleTokenSecretDelete,
		},
	)

	e.syncHandler = e.syncServiceAccount

	return e
}
func NewBucketingWorkQueue() BucketingWorkQueue {
	return &workQueueBucket{
		queue:      workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		work:       map[interface{}][]interface{}{},
		dirtyWork:  map[interface{}][]interface{}{},
		inProgress: map[interface{}]bool{},
	}
}
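The three maps hint at the design: entries are bucketed under a coarse key, `work` holds pending data per bucket, and `dirtyWork` stages additions that arrive while a bucket is `inProgress`. A hedged sketch of an add method under those assumptions follows; the real `BucketingWorkQueue` interface is not shown in this snippet, and the `lock` field is an assumption for illustration.

// Hypothetical add method for workQueueBucket, inferred from its fields.
func (q *workQueueBucket) addWithData(key interface{}, data interface{}) {
	q.lock.Lock() // assumes a mutex field guarding the maps
	defer q.lock.Unlock()

	if q.inProgress[key] {
		// The bucket is being worked on: stage the data so it is picked up
		// on the next round rather than mutating work mid-flight.
		q.dirtyWork[key] = append(q.dirtyWork[key], data)
	} else {
		q.work[key] = append(q.work[key], data)
	}
	q.queue.Add(key)
}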
// NewPetSetController creates a new petset controller.
func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "petset"})
	pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}

	psc := &PetSetController{
		kubeClient:       kubeClient,
		blockingPetStore: newUnHealthyPetTracker(pc),
		newSyncer: func(blockingPet *pcb) *petSyncer {
			return &petSyncer{pc, blockingPet}
		},
		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
	}

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		// lookup the petset and enqueue
		AddFunc: psc.addPod,
		// lookup current and old petset if labels changed
		UpdateFunc: psc.updatePod,
		// lookup petset accounting for deletion tombstones
		DeleteFunc: psc.deletePod,
	})
	psc.podStore.Indexer = podInformer.GetIndexer()
	psc.podController = podInformer.GetController()

	psc.psStore.Store, psc.psController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return psc.kubeClient.Apps().PetSets(api.NamespaceAll).Watch(options)
			},
		},
		&apps.PetSet{},
		petSetResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: psc.enqueuePetSet,
			UpdateFunc: func(old, cur interface{}) {
				oldPS := old.(*apps.PetSet)
				curPS := cur.(*apps.PetSet)
				if oldPS.Status.Replicas != curPS.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for PetSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
				}
				psc.enqueuePetSet(cur)
			},
			DeleteFunc: psc.enqueuePetSet,
		},
	)
	// TODO: Watch volumes
	psc.podStoreSynced = psc.podController.HasSynced
	psc.syncHandler = psc.Sync
	return psc
}
// newReplicationManager configures a replication manager with the specified event recorder
func newReplicationManager(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventRecorder,
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:         workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		garbageCollectorEnabled: garbageCollectorEnabled,
	}

	rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
			},
		},
		&api.ReplicationController{},
		// TODO: Can we have much longer period here?
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    rm.enqueueController,
			UpdateFunc: rm.updateRC,
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc: rm.addPod,
		// This invokes the rc for every pod change, e.g. host assignment. Though this might seem like overkill,
		// the most frequent pod update is status, and the associated rc will only list from local storage, so
		// it should be ok.
		UpdateFunc: rm.updatePod,
		DeleteFunc: rm.deletePod,
	})
	rm.podStore.Indexer = podInformer.GetIndexer()
	rm.podController = podInformer.GetController()

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rm
}
// NewIngressIPController creates a new IngressIPController.
// TODO this should accept a shared informer
func NewIngressIPController(kc kclientset.Interface, ipNet *net.IPNet, resyncInterval time.Duration) *IngressIPController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(&kcoreclient.EventSinkImpl{Interface: kc.Core().Events("")})
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "ingressip-controller"})

	ic := &IngressIPController{
		client:     kc.Core(),
		queue:      workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		maxRetries: 10,
		recorder:   recorder,
	}

	ic.cache, ic.controller = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return ic.client.Services(kapi.NamespaceAll).List(options)
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				return ic.client.Services(kapi.NamespaceAll).Watch(options)
			},
		},
		&kapi.Service{},
		resyncInterval,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				service := obj.(*kapi.Service)
				glog.V(5).Infof("Adding service %s/%s", service.Namespace, service.Name)
				ic.enqueueChange(obj, nil)
			},
			UpdateFunc: func(old, cur interface{}) {
				service := cur.(*kapi.Service)
				glog.V(5).Infof("Updating service %s/%s", service.Namespace, service.Name)
				ic.enqueueChange(cur, old)
			},
			DeleteFunc: func(obj interface{}) {
				service := obj.(*kapi.Service)
				glog.V(5).Infof("Deleting service %s/%s", service.Namespace, service.Name)
				ic.enqueueChange(nil, obj)
			},
		},
	)

	ic.changeHandler = ic.processChange
	ic.persistenceHandler = persistService

	ic.ipAllocator = ipallocator.NewAllocatorCIDRRange(ipNet, func(max int, rangeSpec string) allocator.Interface {
		return allocator.NewAllocationMap(max, rangeSpec)
	})

	ic.allocationMap = make(map[string]string)
	ic.requeuedAllocations = sets.NewString()

	return ic
}
func NewUnidlingController(scaleNS kextclient.ScalesGetter, endptsNS kclient.EndpointsGetter, evtNS kclient.EventsGetter, dcNamespacer deployclient.DeploymentConfigsGetter, rcNamespacer kclient.ReplicationControllersGetter, resyncPeriod time.Duration) *UnidlingController {
	fieldSet := fields.Set{}
	fieldSet["reason"] = unidlingapi.NeedPodsReason
	fieldSelector := fieldSet.AsSelector()

	unidlingController := &UnidlingController{
		scaleNamespacer:     scaleNS,
		endpointsNamespacer: endptsNS,
		queue:               workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		lastFiredCache: &lastFiredCache{
			items: make(map[types.NamespacedName]time.Time),
		},

		dcNamespacer: dcNamespacer,
		rcNamespacer: rcNamespacer,
	}

	_, controller := framework.NewInformer(
		&cache.ListWatch{
			// No need to list -- we only care about new events
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return &kapi.EventList{}, nil
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				options.FieldSelector = fieldSelector
				return evtNS.Events(kapi.NamespaceAll).Watch(options)
			},
		},
		&kapi.Event{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				event := obj.(*kapi.Event)
				unidlingController.enqueueEvent(event)
			},
			UpdateFunc: func(oldObj interface{}, newObj interface{}) {
				// retrigger on new last-seen times
				event := newObj.(*kapi.Event)
				unidlingController.enqueueEvent(event)
			},
			DeleteFunc: func(obj interface{}) {
				// this is just to clean up our cache of the last seen times
				event := obj.(*kapi.Event)
				unidlingController.clearEventFromCache(event)
			},
		},
	)
	unidlingController.controller = controller

	return unidlingController
}
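A hedged sketch of the `lastFiredCache` referenced above: it tracks the most recent firing time per service so stale or duplicate NeedPods events can be dropped. Only the `items` map appears in the constructor; the embedded mutex and the methods here are assumptions for illustration.

// Illustrative reconstruction of lastFiredCache; not the actual source.
type lastFiredCache struct {
	sync.Mutex // assumed: guards items against concurrent handlers
	items      map[types.NamespacedName]time.Time
}

// Get returns the last recorded firing time for a service (zero if none).
func (c *lastFiredCache) Get(info types.NamespacedName) time.Time {
	c.Lock()
	defer c.Unlock()
	return c.items[info]
}

// Clear removes a service's entry, e.g. when its event is deleted.
func (c *lastFiredCache) Clear(info types.NamespacedName) {
	c.Lock()
	defer c.Unlock()
	delete(c.items, info)
}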
// NewServiceServingCertUpdateController creates a new ServiceServingCertUpdateController.
// TODO this should accept a shared informer
func NewServiceServingCertUpdateController(serviceClient kcoreclient.ServicesGetter, secretClient kcoreclient.SecretsGetter, ca *crypto.CA, dnsSuffix string, resyncInterval time.Duration) *ServiceServingCertUpdateController {
	sc := &ServiceServingCertUpdateController{
		secretClient: secretClient,
		queue:        workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		ca:           ca,
		dnsSuffix:    dnsSuffix,
		// TODO base the expiry time on a percentage of the time for the lifespan of the cert
		minTimeLeftForCert: 1 * time.Hour,
	}

	sc.serviceCache, sc.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return serviceClient.Services(kapi.NamespaceAll).List(options)
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				return serviceClient.Services(kapi.NamespaceAll).Watch(options)
			},
		},
		&kapi.Service{},
		resyncInterval,
		framework.ResourceEventHandlerFuncs{},
	)
	sc.serviceHasSynced = sc.serviceController.HasSynced

	sc.secretCache, sc.secretController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return sc.secretClient.Secrets(kapi.NamespaceAll).List(options)
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				return sc.secretClient.Secrets(kapi.NamespaceAll).Watch(options)
			},
		},
		&kapi.Secret{},
		resyncInterval,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    sc.addSecret,
			UpdateFunc: sc.updateSecret,
		},
	)
	sc.secretHasSynced = sc.secretController.HasSynced

	sc.syncHandler = sc.syncSecret

	return sc
}
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(
	kubeClient clientset.Interface,
	clientPool dynamic.ClientPool,
	groupVersionResources []unversioned.GroupVersionResource,
	resyncPeriod time.Duration,
	finalizerToken api.FinalizerName) *NamespaceController {

	// create the controller so we can inject the enqueue function
	namespaceController := &NamespaceController{
		kubeClient: kubeClient,
		clientPool: clientPool,
		queue:      workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		groupVersionResources: groupVersionResources,
		opCache:               operationNotSupportedCache{},
		finalizerToken:        finalizerToken,
	}

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	// configure the backing store/controller
	store, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().Namespaces().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().Namespaces().Watch(options)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				namespaceController.enqueueNamespace(namespace)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				namespaceController.enqueueNamespace(namespace)
			},
		},
	)

	namespaceController.store = store
	namespaceController.controller = controller
	return namespaceController
}
func TestWorkRequeuesWhenFull(t *testing.T) {
	tests := []struct {
		testName        string
		requeuedChange  bool
		requeuedService bool
		requeued        bool
	}{
		{
			testName: "Previously requeued change should be requeued",
			requeued: true,
		},
		{
			testName:        "The only pending allocation should be requeued",
			requeuedChange:  true,
			requeuedService: true,
			requeued:        true,
		},
		{
			testName:        "Already requeued allocation should not be requeued",
			requeuedService: true,
			requeued:        false,
		},
	}
	for _, test := range tests {
		c := newController(t, nil)
		c.changeHandler = func(change *serviceChange) error {
			return ipallocator.ErrFull
		}
		// Use a queue with no delay to avoid timing issues
		c.queue = workqueue.NewRateLimitingQueue(workqueue.NewMaxOfRateLimiter())

		change := &serviceChange{
			key:                "foo",
			requeuedAllocation: test.requeuedChange,
		}
		if test.requeuedService {
			c.requeuedAllocations.Insert(change.key)
		}
		c.queue.Add(change)

		c.work()

		requeued := (c.queue.Len() == 1)
		if test.requeued != requeued {
			t.Errorf("Expected requeued == %v, got %v", test.requeued, requeued)
		}
	}
}
// NewServiceServingCertController creates a new ServiceServingCertController.
// TODO this should accept a shared informer
func NewServiceServingCertController(serviceClient kclient.ServicesNamespacer, secretClient kclient.SecretsNamespacer, ca *crypto.CA, dnsSuffix string, resyncInterval time.Duration) *ServiceServingCertController {
	sc := &ServiceServingCertController{
		serviceClient: serviceClient,
		secretClient:  secretClient,

		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		maxRetries: 10,
		ca:         ca,
		dnsSuffix:  dnsSuffix,
	}

	sc.serviceCache, sc.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return sc.serviceClient.Services(kapi.NamespaceAll).List(options)
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				return sc.serviceClient.Services(kapi.NamespaceAll).Watch(options)
			},
		},
		&kapi.Service{},
		resyncInterval,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				service := obj.(*kapi.Service)
				glog.V(4).Infof("Adding service %s", service.Name)
				sc.enqueueService(obj)
			},
			UpdateFunc: func(old, cur interface{}) {
				service := cur.(*kapi.Service)
				glog.V(4).Infof("Updating service %s", service.Name)
				// Resync on service object relist.
				sc.enqueueService(cur)
			},
		},
	)

	sc.syncHandler = sc.syncService

	return sc
}
// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl client.Interface, options DockercfgControllerOptions) *DockercfgController {
	e := &DockercfgController{
		client:               cl,
		queue:                workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		dockerURLsIntialized: options.DockerURLsIntialized,
	}

	var serviceAccountCache cache.Store
	serviceAccountCache, e.serviceAccountController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				serviceAccount := obj.(*api.ServiceAccount)
				glog.V(5).Infof("Adding service account %s", serviceAccount.Name)
				e.enqueueServiceAccount(serviceAccount)
			},
			UpdateFunc: func(old, cur interface{}) {
				serviceAccount := cur.(*api.ServiceAccount)
				glog.V(5).Infof("Updating service account %s", serviceAccount.Name)
				// Resync on service object relist.
				e.enqueueServiceAccount(serviceAccount)
			},
		},
	)
	e.serviceAccountCache = NewEtcdMutationCache(serviceAccountCache)

	e.syncHandler = e.syncServiceAccount

	return e
}
// NewImageChangeController returns a new ImageChangeController.
func NewImageChangeController(dcInformer, streamInformer framework.SharedIndexInformer, oc osclient.Interface) *ImageChangeController {
	c := &ImageChangeController{
		dn: oc,

		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
	}

	c.streamLister.Indexer = streamInformer.GetIndexer()
	streamInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addImageStream,
		UpdateFunc: c.updateImageStream,
	})
	c.streamStoreSynced = streamInformer.HasSynced

	c.dcLister.Indexer = dcInformer.GetIndexer()
	dcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addDeploymentConfig,
		UpdateFunc: c.updateDeploymentConfig,
	})
	c.dcStoreSynced = dcInformer.HasSynced

	return c
}
// NewDeploymentTriggerController returns a new DeploymentTriggerController.
func NewDeploymentTriggerController(dcInformer, streamInformer framework.SharedIndexInformer, oc osclient.Interface, codec runtime.Codec) *DeploymentTriggerController {
	c := &DeploymentTriggerController{
		dn: oc,

		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		codec: codec,
	}

	c.dcStore.Indexer = dcInformer.GetIndexer()
	dcInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addDeploymentConfig,
		UpdateFunc: c.updateDeploymentConfig,
	})
	c.dcStoreSynced = dcInformer.HasSynced

	streamInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    c.addImageStream,
		UpdateFunc: c.updateImageStream,
	})

	return c
}
// newReplicationManagerInternal configures a replication manager with the specified event recorder
func newReplicationManagerInternal(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicationManager {
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventRecorder,
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:         workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
	}

	rm.rcStore.Indexer, rm.rcController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
			},
		},
		&api.ReplicationController{},
		// TODO: Can we have much longer period here?
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				oldRC := old.(*api.ReplicationController)
				curRC := cur.(*api.ReplicationController)

				// We should invalidate the whole lookup cache if an RC's selector has been updated.
				//
				// Imagine that you have two RCs:
				// * old RC1
				// * new RC2
				// You also have a pod that is attached to RC2 (because it doesn't match the RC1 selector).
				// Now imagine that you are changing the RC1 selector so that it matches that pod. In such
				// a case we must invalidate the whole cache so that the pod can be adopted by RC1.
				//
				// This makes the lookup cache less helpful, but selector updates do not happen often,
				// so it's not a big problem.
				if !reflect.DeepEqual(oldRC.Spec.Selector, curRC.Spec.Selector) {
					rm.lookupCache.InvalidateAll()
				}

				// You might imagine that we only really need to enqueue the
				// controller when Spec changes, but it is safer to sync any
				// time this function is triggered. That way a full informer
				// resync can requeue any controllers that don't yet have pods
				// but whose last attempts at creating a pod have failed (since
				// we don't block on creation of pods) instead of those
				// controllers stalling indefinitely. Enqueueing every time
				// does result in some spurious syncs (like when Status.Replicas
				// is updated and the watch notification from it retriggers
				// this function), but in general extra resyncs shouldn't be
				// that bad as rcs that haven't met expectations yet won't
				// sync, and all the listing is done using local stores.
				if oldRC.Status.Replicas != curRC.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas)
				}
				rm.enqueueController(cur)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc: rm.addPod,
		// This invokes the rc for every pod change, e.g. host assignment. Though this might seem like overkill,
		// the most frequent pod update is status, and the associated rc will only list from local storage, so
		// it should be ok.
		UpdateFunc: rm.updatePod,
		DeleteFunc: rm.deletePod,
	})
	rm.podStore.Indexer = podInformer.GetIndexer()
	rm.podController = podInformer.GetController()

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rm
}
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(client clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *DeploymentController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: client.Core().Events("")})

	if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().GetRESTClient().GetRateLimiter())
	}
	dc := &DeploymentController{
		client:        client,
		eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "deployment-controller"}),
		queue:         workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
	}

	dc.dStore.Store, dc.dController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dc.client.Extensions().Deployments(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dc.client.Extensions().Deployments(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.Deployment{},
		FullDeploymentResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    dc.addDeploymentNotification,
			UpdateFunc: dc.updateDeploymentNotification,
			// This will enter the sync loop and no-op, because the deployment has been deleted from the store.
			DeleteFunc: dc.deleteDeploymentNotification,
		},
	)

	dc.rsStore.Store, dc.rsController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dc.client.Extensions().ReplicaSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dc.client.Extensions().ReplicaSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.ReplicaSet{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc:    dc.addReplicaSet,
			UpdateFunc: dc.updateReplicaSet,
			DeleteFunc: dc.deleteReplicaSet,
		},
	)

	dc.podStore.Indexer, dc.podController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dc.client.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dc.client.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc:    dc.addPod,
			UpdateFunc: dc.updatePod,
			DeleteFunc: dc.deletePod,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	dc.syncHandler = dc.syncDeployment
	dc.dStoreSynced = dc.dController.HasSynced
	dc.rsStoreSynced = dc.rsController.HasSynced
	dc.podStoreSynced = dc.podController.HasSynced
	return dc
}
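A hedged sketch of the Run side that such a constructor pairs with: start the three informer controllers, hold off until their caches have synced, then fan out workers until the stop channel closes. `dc.worker` is a hypothetical queue-draining method (see the worker sketch near the top of this collection); the other fields are the ones the constructor above sets.

// Sketch only: assumes a worker method that drains dc.queue.
func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
	defer dc.queue.ShutDown()

	go dc.dController.Run(stopCh)
	go dc.rsController.Run(stopCh)
	go dc.podController.Run(stopCh)

	// Don't process work until the local caches reflect server state.
	for !dc.dStoreSynced() || !dc.rsStoreSynced() || !dc.podStoreSynced() {
		select {
		case <-stopCh:
			return
		case <-time.After(100 * time.Millisecond):
		}
	}

	for i := 0; i < workers; i++ {
		go wait.Until(dc.worker, time.Second, stopCh)
	}
	<-stopCh
}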
func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController {
	// build the resource quota controller
	rq := &ResourceQuotaController{
		kubeClient:               options.KubeClient,
		queue:                    workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		resyncPeriod:             options.ResyncPeriod,
		registry:                 options.Registry,
		replenishmentControllers: []framework.ControllerInterface{},
	}
	if options.KubeClient != nil && options.KubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", options.KubeClient.Core().GetRESTClient().GetRateLimiter())
	}
	// set the synchronization handler
	rq.syncHandler = rq.syncResourceQuotaFromKey

	// build the controller that observes quota
	rq.rqIndexer, rq.rqController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).Watch(options)
			},
		},
		&api.ResourceQuota{},
		rq.resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc: rq.enqueueResourceQuota,
			UpdateFunc: func(old, cur interface{}) {
				// We are only interested in observing updates to quota.spec to drive updates to quota.status.
				// We ignore all updates to quota.Status because they are all driven by this controller.
				// IMPORTANT:
				// We do not use this function to queue up a full quota recalculation. To do so would require
				// us to enqueue all quota.Status updates, and since quota.Status updates involve additional queries
				// that cannot be backed by a cache and result in a full query of a namespace's content, we do not
				// want to pay the price on spurious status updates. As a result, we have a separate routine that is
				// responsible for enqueueing all resource quotas when doing a full resync (enqueueAll).
				oldResourceQuota := old.(*api.ResourceQuota)
				curResourceQuota := cur.(*api.ResourceQuota)
				if quota.Equals(curResourceQuota.Spec.Hard, oldResourceQuota.Spec.Hard) {
					return
				}
				rq.enqueueResourceQuota(curResourceQuota)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rq.enqueueResourceQuota,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	for _, groupKindToReplenish := range options.GroupKindsToReplenish {
		controllerOptions := &ReplenishmentControllerOptions{
			GroupKind:         groupKindToReplenish,
			ResyncPeriod:      options.ReplenishmentResyncPeriod,
			ReplenishmentFunc: rq.replenishQuota,
		}
		replenishmentController, err := options.ControllerFactory.NewController(controllerOptions)
		if err != nil {
			glog.Warningf("quota controller unable to replenish %s due to %v, changes only accounted during full resync", groupKindToReplenish, err)
		} else {
			rq.replenishmentControllers = append(rq.replenishmentControllers, replenishmentController)
		}
	}
	return rq
}
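The UpdateFunc comment above refers to a separate full-resync routine, enqueueAll. A hedged sketch of what such a routine could look like, using only the `rqIndexer` and `queue` fields the constructor sets (`ListKeys` is part of the cache.Store interface):

// enqueueAll re-queues every known quota; an illustrative sketch of the
// full-resync routine referenced in the UpdateFunc comment above.
func (rq *ResourceQuotaController) enqueueAll() {
	for _, key := range rq.rqIndexer.ListKeys() {
		rq.queue.Add(key)
	}
}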
// NewDockerRegistryServiceController returns a new *DockerRegistryServiceController.
func NewDockerRegistryServiceController(cl client.Interface, options DockerRegistryServiceControllerOptions) *DockerRegistryServiceController {
	e := &DockerRegistryServiceController{
		client:                cl,
		dockercfgController:   options.DockercfgController,
		registryLocationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		secretsToUpdate:       workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		serviceName:           options.RegistryServiceName,
		serviceNamespace:      options.RegistryNamespace,
		dockerURLsIntialized:  options.DockerURLsIntialized,
	}

	e.serviceCache, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
				opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
				return e.client.Services(options.RegistryNamespace).List(opts)
			},
			WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
				opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
				return e.client.Services(options.RegistryNamespace).Watch(opts)
			},
		},
		&kapi.Service{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				e.enqueueRegistryLocationQueue()
			},
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueRegistryLocationQueue()
			},
			DeleteFunc: func(obj interface{}) {
				e.enqueueRegistryLocationQueue()
			},
		},
	)
	e.servicesSynced = e.serviceController.HasSynced
	e.syncRegistryLocationHandler = e.syncRegistryLocationChange

	dockercfgOptions := kapi.ListOptions{FieldSelector: fields.SelectorFromSet(map[string]string{kapi.SecretTypeField: string(kapi.SecretTypeDockercfg)})}
	e.secretCache, e.secretController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
				return e.client.Secrets(kapi.NamespaceAll).List(dockercfgOptions)
			},
			WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
				return e.client.Secrets(kapi.NamespaceAll).Watch(dockercfgOptions)
			},
		},
		&kapi.Secret{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{},
	)
	e.secretsSynced = e.secretController.HasSynced
	e.syncSecretHandler = e.syncSecretUpdate

	return e
}