Example #1
0
func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}
	rc := &RouteController{
		routes:      routes,
		kubeClient:  kubeClient,
		clusterName: clusterName,
		clusterCIDR: clusterCIDR,
	}

	rc.nodeStore.Store, rc.nodeController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return rc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return rc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&v1.Node{},
		controller.NoResyncPeriodFunc(),
		cache.ResourceEventHandlerFuncs{},
	)

	return rc
}
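The constructors in these examples all share the same list/watch wiring: hand-written ListFunc/WatchFunc closures passed to cache.NewInformer (or NewIndexerInformer) together with an object type, a resync period, and event handlers. As a supplementary illustration rather than one of the numbered examples, the standalone sketch below reproduces the node informer from Example #1 against current client-go; the package paths, the kubeconfig location, and the use of cache.NewListWatchFromClient in place of hand-written closures are assumptions of this sketch.

// Supplementary sketch (assumptions noted above), not part of the numbered examples.
package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a reachable cluster via the default kubeconfig location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Equivalent of the hand-written ListFunc/WatchFunc pair in the constructors above.
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())

	// Same shape as Example #1: object type, resync period, event handlers.
	store, controller := cache.NewInformer(lw, &v1.Node{}, 0, cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { fmt.Println("node added:", obj.(*v1.Node).Name) },
		DeleteFunc: func(obj interface{}) { fmt.Println("node deleted") },
	})

	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Wait for the initial list to land in the store before reading from it.
	if !cache.WaitForCacheSync(stop, controller.HasSynced) {
		panic("timed out waiting for the node cache to sync")
	}
	fmt.Println("nodes in cache:", len(store.List()))
	time.Sleep(time.Minute)
}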
Example #2
0
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(saInformer informers.ServiceAccountInformer, nsInformer informers.NamespaceInformer, cl clientset.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client:                  cl,
		serviceAccountsToEnsure: options.ServiceAccounts,
		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"),
	}
	if cl != nil && cl.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().RESTClient().GetRateLimiter())
	}

	saInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		DeleteFunc: e.serviceAccountDeleted,
	})
	nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    e.namespaceAdded,
		UpdateFunc: e.namespaceUpdated,
	})

	e.saSynced = saInformer.Informer().HasSynced
	e.saLister = saInformer.Lister()
	e.nsSynced = nsInformer.Informer().HasSynced
	e.nsLister = nsInformer.Lister()

	e.syncHandler = e.syncNamespace

	return e
}
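Several of these constructors (this one, and the replication, deployment, and job controllers later on) build a named rate-limiting work queue and point a syncHandler at it, but the loop that drains the queue falls outside the snippets. The sketch below shows the typical worker loop for such a queue; it is illustrative only, and the string keys, the syncHandler signature, and the maxRetries value are assumptions.

// Illustrative worker loop for a rate-limiting work queue (assumptions noted above).
package worker

import "k8s.io/client-go/util/workqueue"

const maxRetries = 5

func runWorker(queue workqueue.RateLimitingInterface, syncHandler func(key string) error) {
	for {
		item, quit := queue.Get()
		if quit {
			return
		}
		key := item.(string) // controllers usually enqueue namespace/name keys

		if err := syncHandler(key); err == nil {
			// Success: drop any rate-limit history for this key.
			queue.Forget(key)
		} else if queue.NumRequeues(key) < maxRetries {
			// Transient failure: requeue with the backoff supplied by the rate limiter.
			queue.AddRateLimited(key)
		} else {
			// Too many failures: give up on this key until it is enqueued again.
			queue.Forget(key)
		}
		queue.Done(key)
	}
}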
Example #3
0
func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}
	gcc := &GCController{
		kubeClient: kubeClient,
		threshold:  threshold,
		deletePod: func(namespace, name string) error {
			return kubeClient.Core().Pods(namespace).Delete(name, api.NewDeleteOptions(0))
		},
	}

	terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))

	gcc.podStore.Indexer, gcc.podStoreSyncer = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
		// We don't need to build an index for podStore here actually, but build one for consistency.
		// It will ensure that if people start making use of the podStore in more specific ways,
		// they'll get the benefits they expect. It will also reserve the name for future refactorings.
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
	return gcc
}
Example #4
0
func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInformer, terminatedPodThreshold int) *PodGCController {
	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}
	gcc := &PodGCController{
		kubeClient:             kubeClient,
		terminatedPodThreshold: terminatedPodThreshold,
		deletePod: func(namespace, name string) error {
			glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name)
			return kubeClient.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0))
		},
	}

	gcc.podStore.Indexer = podInformer.GetIndexer()
	gcc.podController = podInformer.GetController()

	gcc.nodeStore.Store, gcc.nodeController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return gcc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return gcc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&v1.Node{},
		controller.NoResyncPeriodFunc(),
		cache.ResourceEventHandlerFuncs{},
	)

	return gcc
}
Example #5
0
// NewPersistentVolumeRecycler creates a new PersistentVolumeRecycler
func NewPersistentVolumeRecycler(kubeClient clientset.Interface, syncPeriod time.Duration, maximumRetry int, plugins []volume.VolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeRecycler, error) {
	recyclerClient := NewRecyclerClient(kubeClient)
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("pv_recycler_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}
	recycler := &PersistentVolumeRecycler{
		client:          recyclerClient,
		kubeClient:      kubeClient,
		cloud:           cloud,
		maximumRetry:    maximumRetry,
		syncPeriod:      syncPeriod,
		releasedVolumes: make(map[string]releasedVolumeStatus),
	}

	if err := recycler.pluginMgr.InitPlugins(plugins, recycler); err != nil {
		return nil, fmt.Errorf("Could not initialize volume plugins for PVClaimBinder: %+v", err)
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().PersistentVolumes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().PersistentVolumes().Watch(options)
			},
		},
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				pv, ok := obj.(*api.PersistentVolume)
				if !ok {
					glog.Errorf("Error casting object to PersistentVolume: %v", obj)
					return
				}
				recycler.reclaimVolume(pv)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				pv, ok := newObj.(*api.PersistentVolume)
				if !ok {
					glog.Errorf("Error casting object to PersistentVolume: %v", newObj)
					return
				}
				recycler.reclaimVolume(pv)
			},
			DeleteFunc: func(obj interface{}) {
				pv, ok := obj.(*api.PersistentVolume)
				if !ok {
					glog.Errorf("Error casting object to PersistentVolume: %v", obj)
					return
				}
				recycler.removeReleasedVolume(pv)
			},
		},
	)

	recycler.volumeController = volumeController
	return recycler, nil
}
Example #6
0
// newReplicationManager configures a replication manager with the specified event recorder
func newReplicationManager(eventRecorder record.EventRecorder, podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventRecorder,
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "replicationmanager"),
		garbageCollectorEnabled: garbageCollectorEnabled,
	}

	rm.rcStore.Indexer, rm.rcController = cache.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
			},
		},
		&api.ReplicationController{},
		// TODO: Can we have much longer period here?
		FullControllerResyncPeriod,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    rm.enqueueController,
			UpdateFunc: rm.updateRC,
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: rm.addPod,
		// This invokes the rc for every pod change, e.g. host assignment. Though this might seem like overkill,
		// the most frequent pod update is status, and the associated rc will only list from local storage, so
		// it should be ok.
		UpdateFunc: rm.updatePod,
		DeleteFunc: rm.deletePod,
	})
	rm.podStore.Indexer = podInformer.GetIndexer()
	rm.podController = podInformer.GetController()

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rm
}
Example #7
0
func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface) *JobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	jm := &JobController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
		},
		expectations: controller.NewControllerExpectations(),
		queue:        workqueue.New(),
		recorder:     eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
	}

	jm.jobStore.Store, jm.jobController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return jm.kubeClient.Batch().Jobs(api.NamespaceAll).Watch(options)
			},
		},
		&batch.Job{},
		// TODO: Can we have much longer period here?
		replicationcontroller.FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: jm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				if job := cur.(*batch.Job); !IsJobFinished(job) {
					jm.enqueueController(job)
				}
			},
			DeleteFunc: jm.enqueueController,
		},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    jm.addPod,
		UpdateFunc: jm.updatePod,
		DeleteFunc: jm.deletePod,
	})
	jm.podStore.Indexer = podInformer.GetIndexer()
	jm.podStoreSynced = podInformer.HasSynced

	jm.updateHandler = jm.updateJobStatus
	jm.syncHandler = jm.syncJob
	return jm
}
Example #8
0
// newReplicaSetController configures a replica set controller with the specified event recorder
func newReplicaSetController(eventRecorder record.EventRecorder, podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicaSetController {
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	rsc := &ReplicaSetController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventRecorder,
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:         workqueue.New(),
		garbageCollectorEnabled: garbageCollectorEnabled,
	}

	rsc.rsStore.Store, rsc.rsController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.ReplicaSet{},
		// TODO: Can we have much longer period here?
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    rsc.enqueueReplicaSet,
			UpdateFunc: rsc.updateRS,
			// This will enter the sync loop and no-op, because the replica set has been deleted from the store.
			// Note that deleting a replica set immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the replica set.
			DeleteFunc: rsc.enqueueReplicaSet,
		},
	)

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc: rsc.addPod,
		// This invokes the ReplicaSet for every pod change, e.g. host assignment. Though this might seem like
		// overkill, the most frequent pod update is status, and the associated ReplicaSet will only list from
		// local storage, so it should be ok.
		UpdateFunc: rsc.updatePod,
		DeleteFunc: rsc.deletePod,
	})
	rsc.podStore.Indexer = podInformer.GetIndexer()
	rsc.podController = podInformer.GetController()

	rsc.syncHandler = rsc.syncReplicaSet
	rsc.podStoreSynced = rsc.podController.HasSynced
	rsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rsc
}
Example #9
0
// NewTokensController returns a new *TokensController.
func NewTokensController(cl clientset.Interface, options TokensControllerOptions) *TokensController {
	e := &TokensController{
		client: cl,
		token:  options.TokenGenerator,
		rootCA: options.RootCA,
	}
	if cl != nil && cl.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().GetRESTClient().GetRateLimiter())
	}
	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAccountAdded,
			UpdateFunc: e.serviceAccountUpdated,
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
	e.secrets, e.secretController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = tokenSelector
				return e.client.Core().Secrets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = tokenSelector
				return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
			},
		},
		&api.Secret{},
		options.SecretResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.secretAdded,
			UpdateFunc: e.secretUpdated,
			DeleteFunc: e.secretDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.serviceAccountsSynced = e.serviceAccountController.HasSynced
	e.secretsSynced = e.secretController.HasSynced

	return e
}
Example #10
0
func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}
	return &RouteController{
		routes:      routes,
		kubeClient:  kubeClient,
		clusterName: clusterName,
		clusterCIDR: clusterCIDR,
	}
}
Example #11
0
func (r *RegisteredRateLimiter) registerIfNotPresent(gv unversioned.GroupVersion, client *dynamic.Client, prefix string) {
	once, found := r.rateLimiters[gv]
	if !found {
		return
	}
	once.Do(func() {
		if rateLimiter := client.GetRateLimiter(); rateLimiter != nil {
			group := strings.Replace(gv.Group, ".", ":", -1)
			metrics.RegisterMetricAndTrackRateLimiterUsage(fmt.Sprintf("%s_%s_%s", prefix, group, gv.Version), rateLimiter)
		}
	})
}
Example #12
0
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient clientset.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("pv_claim_binder_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}
	volumeIndex := NewPersistentVolumeOrderedIndex()
	binderClient := NewBinderClient(kubeClient)
	binder := &PersistentVolumeClaimBinder{
		volumeIndex: volumeIndex,
		client:      binderClient,
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().PersistentVolumes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().PersistentVolumes().Watch(options)
			},
		},
		&api.PersistentVolume{},
		// TODO: Can we have much longer period here?
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addVolume,
			UpdateFunc: binder.updateVolume,
			DeleteFunc: binder.deleteVolume,
		},
	)
	_, claimController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
			},
		},
		&api.PersistentVolumeClaim{},
		// TODO: Can we have much longer period here?
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addClaim,
			UpdateFunc: binder.updateClaim,
			DeleteFunc: binder.deleteClaim,
		},
	)

	binder.claimController = claimController
	binder.volumeController = volumeController

	return binder
}
Example #13
0
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(cl clientset.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client:                  cl,
		serviceAccountsToEnsure: options.ServiceAccounts,
	}
	if cl != nil && cl.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().GetRESTClient().GetRateLimiter())
	}
	accountSelector := fields.Everything()
	if len(options.ServiceAccounts) == 1 {
		// If we're maintaining a single account, we can scope the accounts we watch to just that name
		accountSelector = fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ServiceAccounts[0].Name})
	}
	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = accountSelector
				return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = accountSelector
				return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.namespaces, e.namespaceController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().Namespaces().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().Namespaces().Watch(options)
			},
		},
		&api.Namespace{},
		options.NamespaceResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.namespaceAdded,
			UpdateFunc: e.namespaceUpdated,
		},
		cache.Indexers{"name": nameIndexFunc},
	)

	return e
}
Example #14
0
// New returns a new service controller to keep cloud provider service resources
// (like load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) (*ServiceController, error) {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	recorder := broadcaster.NewRecorder(v1.EventSource{Component: "service-controller"})

	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}

	s := &ServiceController{
		cloud:            cloud,
		knownHosts:       []string{},
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		nodeLister: cache.StoreToNodeLister{
			Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
		},
		workingQueue: workqueue.NewDelayingQueue(),
	}
	s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
				return s.kubeClient.Core().Services(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return s.kubeClient.Core().Services(v1.NamespaceAll).Watch(options)
			},
		},
		&v1.Service{},
		serviceSyncPeriod,
		cache.ResourceEventHandlerFuncs{
			AddFunc: s.enqueueService,
			UpdateFunc: func(old, cur interface{}) {
				oldSvc, ok1 := old.(*v1.Service)
				curSvc, ok2 := cur.(*v1.Service)
				if ok1 && ok2 && s.needsUpdate(oldSvc, curSvc) {
					s.enqueueService(cur)
				}
			},
			DeleteFunc: s.enqueueService,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
	if err := s.init(); err != nil {
		return nil, err
	}
	return s, nil
}
Example #15
0
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(
	kubeClient clientset.Interface,
	clientPool dynamic.ClientPool,
	groupVersionResources []unversioned.GroupVersionResource,
	resyncPeriod time.Duration,
	finalizerToken api.FinalizerName) *NamespaceController {
	// create the controller so we can inject the enqueue function
	namespaceController := &NamespaceController{
		kubeClient: kubeClient,
		clientPool: clientPool,
		queue:      workqueue.New(),
		groupVersionResources: groupVersionResources,
		opCache:               operationNotSupportedCache{},
		finalizerToken:        finalizerToken,
	}

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	// configure the backing store/controller
	store, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().Namespaces().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().Namespaces().Watch(options)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				namespaceController.enqueueNamespace(namespace)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				namespaceController.enqueueNamespace(namespace)
			},
		},
	)

	namespaceController.store = store
	namespaceController.controller = controller
	return namespaceController
}
Example #16
0
func (r *RegisteredRateLimiter) registerIfNotPresent(gv unversioned.GroupVersion, client *dynamic.Client, prefix string) {
	r.lock.RLock()
	_, ok := r.rateLimiters[gv]
	r.lock.RUnlock()
	if ok {
		return
	}
	r.lock.Lock()
	defer r.lock.Unlock()
	if _, ok := r.rateLimiters[gv]; !ok {
		if rateLimiter := client.GetRateLimiter(); rateLimiter != nil {
			group := strings.Replace(gv.Group, ".", ":", -1)
			metrics.RegisterMetricAndTrackRateLimiterUsage(fmt.Sprintf("%s_%s_%s", prefix, group, gv.Version), rateLimiter)
		}
		r.rateLimiters[gv] = struct{}{}
	}
}
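Examples #11 and #16 show two ways of guaranteeing that the rate-limiter metric for a group/version is registered at most once: #11 keys a map of pre-created sync.Once values, while #16 re-checks a plain map under an RWMutex after taking the write lock. The stdlib-only sketch below isolates that second, double-checked pattern; the Registry type and RegisterOnce name are invented for the illustration.

// Stdlib-only illustration of the double-checked registration pattern (names invented).
package registry

import "sync"

type Registry struct {
	lock sync.RWMutex
	seen map[string]struct{}
}

func NewRegistry() *Registry {
	return &Registry{seen: make(map[string]struct{})}
}

// RegisterOnce runs register exactly once per distinct key, even when called
// concurrently from many goroutines.
func (r *Registry) RegisterOnce(key string, register func()) {
	r.lock.RLock()
	_, ok := r.seen[key]
	r.lock.RUnlock()
	if ok {
		return // fast path: already registered
	}

	r.lock.Lock()
	defer r.lock.Unlock()
	if _, ok := r.seen[key]; !ok { // re-check: another goroutine may have registered first
		register()
		r.seen[key] = struct{}{}
	}
}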
Example #17
0
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(dInformer informers.DeploymentInformer, rsInformer informers.ReplicaSetInformer, podInformer informers.PodInformer, client clientset.Interface) *DeploymentController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.Core().Events("")})

	if client != nil && client.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter())
	}
	dc := &DeploymentController{
		client:        client,
		eventRecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "deployment-controller"}),
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
		progressQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "progress-check"),
	}
	dc.rsControl = controller.RealRSControl{
		KubeClient: client,
		Recorder:   dc.eventRecorder,
	}

	dInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    dc.addDeployment,
		UpdateFunc: dc.updateDeployment,
		// This will enter the sync loop and no-op, because the deployment has been deleted from the store.
		DeleteFunc: dc.deleteDeployment,
	})
	rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    dc.addReplicaSet,
		UpdateFunc: dc.updateReplicaSet,
		DeleteFunc: dc.deleteReplicaSet,
	})
	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		DeleteFunc: dc.deletePod,
	})

	dc.syncHandler = dc.syncDeployment
	dc.enqueueDeployment = dc.enqueue

	dc.dLister = dInformer.Lister()
	dc.rsLister = rsInformer.Lister()
	dc.podLister = podInformer.Lister()
	dc.dListerSynced = dInformer.Informer().HasSynced
	dc.rsListerSynced = rsInformer.Informer().HasSynced
	dc.podListerSynced = podInformer.Informer().HasSynced
	return dc
}
Example #18
0
func NewJobController(podInformer cache.SharedIndexInformer, jobInformer informers.JobInformer, kubeClient clientset.Interface) *JobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}

	jm := &JobController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(v1.EventSource{Component: "job-controller"}),
		},
		expectations: controller.NewControllerExpectations(),
		queue:        workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "job"),
		recorder:     eventBroadcaster.NewRecorder(v1.EventSource{Component: "job-controller"}),
	}

	jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: jm.enqueueController,
		UpdateFunc: func(old, cur interface{}) {
			if job := cur.(*batch.Job); !IsJobFinished(job) {
				jm.enqueueController(job)
			}
		},
		DeleteFunc: jm.enqueueController,
	})
	jm.jobLister = jobInformer.Lister()
	jm.jobStoreSynced = jobInformer.Informer().HasSynced

	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    jm.addPod,
		UpdateFunc: jm.updatePod,
		DeleteFunc: jm.deletePod,
	})
	jm.podStore.Indexer = podInformer.GetIndexer()
	jm.podStoreSynced = podInformer.HasSynced

	jm.updateHandler = jm.updateJobStatus
	jm.syncHandler = jm.syncJob
	return jm
}
Example #19
0
// NewReplicationManager configures a replication manager with the specified event recorder
func NewReplicationManager(podInformer, rcInformer cache.SharedIndexInformer, kubeClient clientset.Interface, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replication_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(v1.EventSource{Component: "replication-controller"}),
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "replicationmanager"),
		garbageCollectorEnabled: garbageCollectorEnabled,
	}

	rcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    rm.enqueueController,
		UpdateFunc: rm.updateRC,
		// This will enter the sync loop and no-op, because the controller has been deleted from the store.
		// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
		// way of achieving this is by performing a `stop` operation on the controller.
		DeleteFunc: rm.enqueueController,
	})
	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: rm.addPod,
		// This invokes the rc for every pod change, e.g. host assignment. Though this might seem like overkill,
		// the most frequent pod update is status, and the associated rc will only list from local storage, so
		// it should be ok.
		UpdateFunc: rm.updatePod,
		DeleteFunc: rm.deletePod,
	})

	rm.syncHandler = rm.syncReplicationController
	rm.rcLister.Indexer = rcInformer.GetIndexer()
	rm.podLister.Indexer = podInformer.GetIndexer()
	rm.podListerSynced = podInformer.HasSynced
	rm.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rm
}
Example #20
0
func NewScheduledJobController(kubeClient *client.Client) *ScheduledJobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	if kubeClient != nil && kubeClient.GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("scheduledjob_controller", kubeClient.GetRateLimiter())
	}

	jm := &ScheduledJobController{
		kubeClient: kubeClient,
		jobControl: realJobControl{KubeClient: kubeClient},
		sjControl:  &realSJControl{KubeClient: kubeClient},
		recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduled-job-controller"}),
	}

	return jm
}
Example #21
0
// NewEndpointController returns a new *EndpointController.
func NewEndpointController(podInformer cache.SharedIndexInformer, client clientset.Interface) *EndpointController {
	if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter())
	}
	e := &EndpointController{
		client: client,
		queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "endpoint"),
	}

	e.serviceStore.Indexer, e.serviceController = cache.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().Services(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().Services(api.NamespaceAll).Watch(options)
			},
		},
		&api.Service{},
		// TODO: Can we have much longer period here?
		FullServiceResyncPeriod,
		cache.ResourceEventHandlerFuncs{
			AddFunc: e.enqueueService,
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueService(cur)
			},
			DeleteFunc: e.enqueueService,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    e.addPod,
		UpdateFunc: e.updatePod,
		DeleteFunc: e.deletePod,
	})
	e.podStore.Indexer = podInformer.GetIndexer()
	e.podController = podInformer.GetController()
	e.podStoreSynced = podInformer.HasSynced

	return e
}
Example #22
0
func NewCronJobController(kubeClient clientset.Interface) *CronJobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}

	jm := &CronJobController{
		kubeClient: kubeClient,
		jobControl: realJobControl{KubeClient: kubeClient},
		sjControl:  &realSJControl{KubeClient: kubeClient},
		podControl: &realPodControl{KubeClient: kubeClient},
		recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "cronjob-controller"}),
	}

	return jm
}
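The event wiring at the top of this constructor (NewBroadcaster, StartLogging, StartRecordingToSink, NewRecorder) recurs throughout these examples. As a hedged aside, the helper below shows the equivalent wiring against current client-go, where NewRecorder additionally takes a runtime scheme; the package name and helper name are invented for the illustration.

// Illustrative helper (names invented) wiring an event recorder with current client-go.
package events

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
)

// NewComponentRecorder sends events both to the log and to the API server, and
// returns a recorder stamped with the given component name.
func NewComponentRecorder(client kubernetes.Interface, component string) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(klog.Infof)
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")})
	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: component})
}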
Example #23
0
// New returns a new service controller to keep cloud provider service resources
// (like load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		nodeLister: cache.StoreToNodeLister{
			Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
		},
	}
}
Example #24
0
// NewNodeController returns a new node controller to sync instances from cloudprovider.
// This method returns an error if it is unable to initialize the CIDR bitmap with
// podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes
// currently, this should be handled as a fatal error.
func NewNodeController(
	podInformer informers.PodInformer,
	nodeInformer informers.NodeInformer,
	daemonSetInformer informers.DaemonSetInformer,
	cloud cloudprovider.Interface,
	kubeClient clientset.Interface,
	podEvictionTimeout time.Duration,
	evictionLimiterQPS float32,
	secondaryEvictionLimiterQPS float32,
	largeClusterThreshold int32,
	unhealthyZoneThreshold float32,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterCIDR *net.IPNet,
	serviceCIDR *net.IPNet,
	nodeCIDRMaskSize int,
	allocateNodeCIDRs bool) (*NodeController, error) {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "controllermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.V(0).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	} else {
		glog.V(0).Infof("No api server defined - no events will be sent to API server.")
	}

	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.Core().RESTClient().GetRateLimiter())
	}

	if allocateNodeCIDRs {
		if clusterCIDR == nil {
			glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
		}
		mask := clusterCIDR.Mask
		if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
			glog.Fatal("NodeController: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
		}
	}

	nc := &NodeController{
		cloud:                       cloud,
		knownNodeSet:                make(map[string]*v1.Node),
		kubeClient:                  kubeClient,
		recorder:                    recorder,
		podEvictionTimeout:          podEvictionTimeout,
		maximumGracePeriod:          5 * time.Minute,
		zonePodEvictor:              make(map[string]*RateLimitedTimedQueue),
		nodeStatusMap:               make(map[string]nodeStatusData),
		nodeMonitorGracePeriod:      nodeMonitorGracePeriod,
		nodeMonitorPeriod:           nodeMonitorPeriod,
		nodeStartupGracePeriod:      nodeStartupGracePeriod,
		lookupIP:                    net.LookupIP,
		now:                         metav1.Now,
		clusterCIDR:                 clusterCIDR,
		serviceCIDR:                 serviceCIDR,
		allocateNodeCIDRs:           allocateNodeCIDRs,
		forcefullyDeletePod:         func(p *v1.Pod) error { return forcefullyDeletePod(kubeClient, p) },
		nodeExistsInCloudProvider:   func(nodeName types.NodeName) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
		evictionLimiterQPS:          evictionLimiterQPS,
		secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS,
		largeClusterThreshold:       largeClusterThreshold,
		unhealthyZoneThreshold:      unhealthyZoneThreshold,
		zoneStates:                  make(map[string]zoneState),
		podInformer:                 podInformer,
		nodeInformer:                nodeInformer,
		daemonSetInformer:           daemonSetInformer,
	}
	nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc
	nc.enterFullDisruptionFunc = nc.HealthyQPSFunc
	nc.computeZoneStateFunc = nc.ComputeZoneState

	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    nc.maybeDeleteTerminatingPod,
		UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) },
	})
	nc.podStore = *podInformer.Lister()

	nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{}
	if nc.allocateNodeCIDRs {
		var nodeList *v1.NodeList
		var err error
		// We must poll because apiserver might not be up. This error causes
		// controller manager to restart.
		if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) {
			nodeList, err = kubeClient.Core().Nodes().List(v1.ListOptions{
				FieldSelector: fields.Everything().String(),
				LabelSelector: labels.Everything().String(),
			})
			if err != nil {
				glog.Errorf("Failed to list all nodes: %v", err)
				return false, nil
			}
			return true, nil
		}); pollErr != nil {
			return nil, fmt.Errorf("Failed to list all nodes in %v, cannot proceed without updating CIDR map", apiserverStartupGracePeriod)
		}
		nc.cidrAllocator, err = NewCIDRRangeAllocator(kubeClient, clusterCIDR, serviceCIDR, nodeCIDRMaskSize, nodeList)
		if err != nil {
			return nil, err
		}

		nodeEventHandlerFuncs = cache.ResourceEventHandlerFuncs{
			AddFunc: func(originalObj interface{}) {
				obj, err := api.Scheme.DeepCopy(originalObj)
				if err != nil {
					utilruntime.HandleError(err)
					return
				}
				node := obj.(*v1.Node)

				if err := nc.cidrAllocator.AllocateOrOccupyCIDR(node); err != nil {
					utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
				}
			},
			UpdateFunc: func(_, obj interface{}) {
				node := obj.(*v1.Node)
				// If the PodCIDR is not empty we either:
				// - already processed a Node that already had a CIDR after NC restarted
				//   (cidr is marked as used),
				// - already processed a Node successfully and allocated a CIDR for it
				//   (cidr is marked as used),
				// - already processed a Node but saw a "timeout" response, and the
				//   request eventually got through; in this case we haven't released
				//   the allocated CIDR (cidr is still marked as used).
				// There's a possible error here:
				// - NC sees a new Node and assigns a CIDR X to it,
				// - Update Node call fails with a timeout,
				// - Node is updated by some other component, NC sees an update and
				//   assigns CIDR Y to the Node,
				// - Both CIDR X and CIDR Y are marked as used in the local cache,
				//   even though Node sees only CIDR Y
				// The problem here is that in the in-memory cache we see CIDR X as marked,
				// which prevents it from being assigned to any new node. The cluster
				// state is correct.
				// Restart of NC fixes the issue.
				if node.Spec.PodCIDR == "" {
					nodeCopy, err := api.Scheme.Copy(node)
					if err != nil {
						utilruntime.HandleError(err)
						return
					}

					if err := nc.cidrAllocator.AllocateOrOccupyCIDR(nodeCopy.(*v1.Node)); err != nil {
						utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
					}
				}
			},
			DeleteFunc: func(originalObj interface{}) {
				obj, err := api.Scheme.DeepCopy(originalObj)
				if err != nil {
					utilruntime.HandleError(err)
					return
				}

				node, isNode := obj.(*v1.Node)
				// We can get DeletedFinalStateUnknown instead of *v1.Node here and we need to handle that correctly. #34692
				if !isNode {
					deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
					if !ok {
						glog.Errorf("Received unexpected object: %v", obj)
						return
					}
					node, ok = deletedState.Obj.(*v1.Node)
					if !ok {
						glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
						return
					}
				}
				if err := nc.cidrAllocator.ReleaseCIDR(node); err != nil {
					glog.Errorf("Error releasing CIDR: %v", err)
				}
			},
		}
	}

	nodeInformer.Informer().AddEventHandler(nodeEventHandlerFuncs)
	nc.nodeStore = *nodeInformer.Lister()

	nc.daemonSetStore = *daemonSetInformer.Lister()

	return nc, nil
}
Example #25
0
func (r *replenishmentControllerFactory) NewController(options *ReplenishmentControllerOptions) (result cache.ControllerInterface, err error) {
	if r.kubeClient != nil && r.kubeClient.Core().RESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replenishment_controller", r.kubeClient.Core().RESTClient().GetRateLimiter())
	}

	switch options.GroupKind {
	case api.Kind("Pod"):
		if r.sharedInformerFactory != nil {
			result, err = controllerFor(api.Resource("pods"), r.sharedInformerFactory, cache.ResourceEventHandlerFuncs{
				UpdateFunc: PodReplenishmentUpdateFunc(options),
				DeleteFunc: ObjectReplenishmentDeleteFunc(options),
			})
			break
		}
		result = informers.NewPodInformer(r.kubeClient, options.ResyncPeriod())
	case api.Kind("Service"):
		// TODO move to informer when defined
		_, result = cache.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
					return r.kubeClient.Core().Services(v1.NamespaceAll).List(options)
				},
				WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
					return r.kubeClient.Core().Services(v1.NamespaceAll).Watch(options)
				},
			},
			&v1.Service{},
			options.ResyncPeriod(),
			cache.ResourceEventHandlerFuncs{
				UpdateFunc: ServiceReplenishmentUpdateFunc(options),
				DeleteFunc: ObjectReplenishmentDeleteFunc(options),
			},
		)
	case api.Kind("ReplicationController"):
		// TODO move to informer when defined
		_, result = cache.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
					return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
				},
				WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
					return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
				},
			},
			&v1.ReplicationController{},
			options.ResyncPeriod(),
			cache.ResourceEventHandlerFuncs{
				DeleteFunc: ObjectReplenishmentDeleteFunc(options),
			},
		)
	case api.Kind("PersistentVolumeClaim"):
		if r.sharedInformerFactory != nil {
			result, err = controllerFor(api.Resource("persistentvolumeclaims"), r.sharedInformerFactory, cache.ResourceEventHandlerFuncs{
				DeleteFunc: ObjectReplenishmentDeleteFunc(options),
			})
			break
		}
		// TODO (derekwaynecarr) remove me when we can require a sharedInformerFactory in all code paths...
		_, result = cache.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
					return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options)
				},
				WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
					return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
				},
			},
			&v1.PersistentVolumeClaim{},
			options.ResyncPeriod(),
			cache.ResourceEventHandlerFuncs{
				DeleteFunc: ObjectReplenishmentDeleteFunc(options),
			},
		)
	case api.Kind("Secret"):
		// TODO move to informer when defined
		_, result = cache.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
					return r.kubeClient.Core().Secrets(v1.NamespaceAll).List(options)
				},
				WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
					return r.kubeClient.Core().Secrets(v1.NamespaceAll).Watch(options)
				},
			},
			&v1.Secret{},
			options.ResyncPeriod(),
			cache.ResourceEventHandlerFuncs{
				DeleteFunc: ObjectReplenishmentDeleteFunc(options),
			},
		)
	case api.Kind("ConfigMap"):
		// TODO move to informer when defined
		_, result = cache.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
					return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).List(options)
				},
				WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
					return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).Watch(options)
				},
			},
			&v1.ConfigMap{},
			options.ResyncPeriod(),
			cache.ResourceEventHandlerFuncs{
				DeleteFunc: ObjectReplenishmentDeleteFunc(options),
			},
		)
	default:
		return nil, NewUnhandledGroupKindError(options.GroupKind)
	}
	return result, err
}
Example #26
0
func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, lookupCacheSize int) *DaemonSetsController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}
	dsc := &DaemonSetsController{
		kubeClient:    kubeClient,
		eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemonset-controller"}),
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "daemon-set"}),
		},
		burstReplicas: BurstReplicas,
		expectations:  controller.NewControllerExpectations(),
		queue:         workqueue.New(),
	}
	// Manage addition/update of daemon sets.
	dsc.dsStore.Store, dsc.dsController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dsc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.DaemonSet{},
		// TODO: Can we have much longer period here?
		FullDaemonSetResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				ds := obj.(*extensions.DaemonSet)
				glog.V(4).Infof("Adding daemon set %s", ds.Name)
				dsc.enqueueDaemonSet(ds)
			},
			UpdateFunc: func(old, cur interface{}) {
				oldDS := old.(*extensions.DaemonSet)
				curDS := cur.(*extensions.DaemonSet)
				// We should invalidate the whole lookup cache if a DS's selector has been updated.
				//
				// Imagine that you have two DaemonSets:
				// * old DS1
				// * new DS2
				// You also have a pod that is attached to DS2 (because it doesn't match DS1's selector).
				// Now imagine that you are changing DS1's selector so that it now matches that pod;
				// in such a case we must invalidate the whole cache so that the pod can be adopted by DS1.
				//
				// This makes the lookup cache less helpful, but selector update does not happen often,
				// so it's not a big problem
				if !reflect.DeepEqual(oldDS.Spec.Selector, curDS.Spec.Selector) {
					dsc.lookupCache.InvalidateAll()
				}

				glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
				dsc.enqueueDaemonSet(curDS)
			},
			DeleteFunc: dsc.deleteDaemonset,
		},
	)

	// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
	// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    dsc.addPod,
		UpdateFunc: dsc.updatePod,
		DeleteFunc: dsc.deletePod,
	})
	dsc.podStore.Indexer = podInformer.GetIndexer()
	dsc.podController = podInformer.GetController()
	dsc.podStoreSynced = podInformer.HasSynced

	// Watch for new nodes or updates to nodes - daemon pods are launched on new nodes, and possibly when labels on nodes change.
	dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dsc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dsc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&api.Node{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc:    dsc.addNode,
			UpdateFunc: dsc.updateNode,
		},
	)
	dsc.syncHandler = dsc.syncDaemonSet
	dsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return dsc
}
Example #27
0
func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *ResourceQuotaController {
	// build the resource quota controller
	rq := &ResourceQuotaController{
		kubeClient:               options.KubeClient,
		queue:                    workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		resyncPeriod:             options.ResyncPeriod,
		registry:                 options.Registry,
		replenishmentControllers: []framework.ControllerInterface{},
	}
	if options.KubeClient != nil && options.KubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", options.KubeClient.Core().GetRESTClient().GetRateLimiter())
	}
	// set the synchronization handler
	rq.syncHandler = rq.syncResourceQuotaFromKey

	// build the controller that observes quota
	rq.rqIndexer, rq.rqController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).Watch(options)
			},
		},
		&api.ResourceQuota{},
		rq.resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc: rq.enqueueResourceQuota,
			UpdateFunc: func(old, cur interface{}) {
				// We are only interested in observing updates to quota.spec to drive updates to quota.status.
				// We ignore all updates to quota.Status because they are all driven by this controller.
				// IMPORTANT:
				// We do not use this function to queue up a full quota recalculation.  To do so would require
				// us to enqueue all quota.Status updates, and since quota.Status updates involve additional queries
				// that cannot be backed by a cache and result in a full query of a namespace's content, we do not
				// want to pay the price on spurious status updates.  As a result, we have a separate routine that is
				// responsible for enqueuing all resource quotas when doing a full resync (enqueueAll).
				oldResourceQuota := old.(*api.ResourceQuota)
				curResourceQuota := cur.(*api.ResourceQuota)
				if quota.Equals(curResourceQuota.Spec.Hard, oldResourceQuota.Spec.Hard) {
					return
				}
				rq.enqueueResourceQuota(curResourceQuota)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rq.enqueueResourceQuota,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	for _, groupKindToReplenish := range options.GroupKindsToReplenish {
		controllerOptions := &ReplenishmentControllerOptions{
			GroupKind:         groupKindToReplenish,
			ResyncPeriod:      options.ReplenishmentResyncPeriod,
			ReplenishmentFunc: rq.replenishQuota,
		}
		replenishmentController, err := options.ControllerFactory.NewController(controllerOptions)
		if err != nil {
			glog.Warningf("quota controller unable to replenish %s due to %v, changes only accounted during full resync", groupKindToReplenish, err)
		} else {
			rq.replenishmentControllers = append(rq.replenishmentControllers, replenishmentController)
		}
	}
	return rq
}
Example #28
0
// NewTokensController returns a new *TokensController.
func NewTokensController(cl clientset.Interface, options TokensControllerOptions) *TokensController {
	maxRetries := options.MaxRetries
	if maxRetries == 0 {
		maxRetries = 10
	}

	e := &TokensController{
		client: cl,
		token:  options.TokenGenerator,
		rootCA: options.RootCA,

		syncServiceAccountQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount_tokens_service"),
		syncSecretQueue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount_tokens_secret"),

		maxRetries: maxRetries,
	}
	if cl != nil && cl.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.Core().GetRESTClient().GetRateLimiter())
	}

	e.serviceAccounts, e.serviceAccountController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    e.queueServiceAccountSync,
			UpdateFunc: e.queueServiceAccountUpdateSync,
			DeleteFunc: e.queueServiceAccountSync,
		},
	)

	tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
	e.secrets, e.secretController = cache.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = tokenSelector
				return e.client.Core().Secrets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = tokenSelector
				return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
			},
		},
		&api.Secret{},
		options.SecretResync,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    e.queueSecretSync,
			UpdateFunc: e.queueSecretUpdateSync,
			DeleteFunc: e.queueSecretSync,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	return e
}
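The constructor above treats a zero MaxRetries as "use the default of 10" and requeues failed syncs up to that limit. Below is a minimal standalone sketch of that defaulting-plus-bounded-retry pattern; the types and helper names are hypothetical and do not come from client-go.
package main

import "fmt"

type tokensControllerOptions struct {
	MaxRetries int // 0 means "use the default"
}

// effectiveMaxRetries applies the same zero-value defaulting the constructor uses.
func effectiveMaxRetries(o tokensControllerOptions) int {
	if o.MaxRetries == 0 {
		return 10
	}
	return o.MaxRetries
}

// processWithRetries keeps retrying work(key) until it succeeds or the retry
// budget is exhausted, mirroring how a controller requeues failed syncs.
func processWithRetries(key string, maxRetries int, work func(string) error) error {
	var err error
	for attempt := 0; attempt < maxRetries; attempt++ {
		if err = work(key); err == nil {
			return nil
		}
	}
	return fmt.Errorf("giving up on %q after %d attempts: %v", key, maxRetries, err)
}

func main() {
	opts := tokensControllerOptions{} // MaxRetries left at zero
	fmt.Println("max retries:", effectiveMaxRetries(opts))

	failures := 0
	err := processWithRetries("default/my-sa", effectiveMaxRetries(opts), func(string) error {
		failures++
		if failures < 3 {
			return fmt.Errorf("transient error")
		}
		return nil
	})
	fmt.Println("result:", err, "after", failures, "attempts")
}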
Example #29
0
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	kubeClient clientset.Interface,
	podEvictionTimeout time.Duration,
	deletionEvictionLimiter flowcontrol.RateLimiter,
	terminationEvictionLimiter flowcontrol.RateLimiter,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterCIDR *net.IPNet,
	serviceCIDR *net.IPNet,
	nodeCIDRMaskSize int,
	allocateNodeCIDRs bool) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.V(0).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	} else {
		glog.V(0).Infof("No api server defined - no events will be sent to API server.")
	}

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("node_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	if allocateNodeCIDRs {
		if clusterCIDR == nil {
			glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
		}
		mask := clusterCIDR.Mask
		if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
			glog.Fatal("NodeController: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
		}
	}
	evictorLock := sync.Mutex{}

	nc := &NodeController{
		cloud:                     cloud,
		knownNodeSet:              make(sets.String),
		kubeClient:                kubeClient,
		recorder:                  recorder,
		podEvictionTimeout:        podEvictionTimeout,
		maximumGracePeriod:        5 * time.Minute,
		evictorLock:               &evictorLock,
		podEvictor:                NewRateLimitedTimedQueue(deletionEvictionLimiter),
		terminationEvictor:        NewRateLimitedTimedQueue(terminationEvictionLimiter),
		nodeStatusMap:             make(map[string]nodeStatusData),
		nodeMonitorGracePeriod:    nodeMonitorGracePeriod,
		nodeMonitorPeriod:         nodeMonitorPeriod,
		nodeStartupGracePeriod:    nodeStartupGracePeriod,
		lookupIP:                  net.LookupIP,
		now:                       unversioned.Now,
		clusterCIDR:               clusterCIDR,
		serviceCIDR:               serviceCIDR,
		allocateNodeCIDRs:         allocateNodeCIDRs,
		forcefullyDeletePod:       func(p *api.Pod) error { return forcefullyDeletePod(kubeClient, p) },
		nodeExistsInCloudProvider: func(nodeName string) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
		nodeCIDRUpdateChannel:     make(chan nodeAndCIDR, cidrUpdateQueueSize),
	}

	nc.podStore.Indexer, nc.podController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return nc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return nc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		controller.NoResyncPeriodFunc(),
		framework.ResourceEventHandlerFuncs{
			AddFunc:    nc.maybeDeleteTerminatingPod,
			UpdateFunc: func(_, obj interface{}) { nc.maybeDeleteTerminatingPod(obj) },
		},
		// We don't actually need to build an index for podStore here, but we build one for consistency.
		// It will ensure that if people start making use of the podStore in more specific ways,
		// they'll get the benefits they expect. It will also reserve the name for future refactorings.
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	nodeEventHandlerFuncs := framework.ResourceEventHandlerFuncs{}
	if nc.allocateNodeCIDRs {
		nodeEventHandlerFuncs = framework.ResourceEventHandlerFuncs{
			AddFunc:    nc.allocateOrOccupyCIDR,
			DeleteFunc: nc.recycleCIDR,
		}
	}

	nc.nodeStore.Store, nc.nodeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return nc.kubeClient.Core().Nodes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return nc.kubeClient.Core().Nodes().Watch(options)
			},
		},
		&api.Node{},
		controller.NoResyncPeriodFunc(),
		nodeEventHandlerFuncs,
	)

	nc.daemonSetStore.Store, nc.daemonSetController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return nc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return nc.kubeClient.Extensions().DaemonSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.DaemonSet{},
		controller.NoResyncPeriodFunc(),
		framework.ResourceEventHandlerFuncs{},
	)

	if allocateNodeCIDRs {
		nc.cidrAllocator = NewCIDRRangeAllocator(clusterCIDR, nodeCIDRMaskSize)
	}

	return nc
}
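The allocateNodeCIDRs branch above insists that the cluster CIDR's mask be no longer than the per-node mask, since each node gets a subnet carved out of the cluster range. Below is a standalone sketch of that check along with the resulting node-subnet count, 2^(nodeCIDRMaskSize - clusterMaskSize); the CIDR values are illustrative only.
package main

import (
	"fmt"
	"net"
)

// nodeSubnetCount validates the cluster CIDR against the per-node mask size and
// returns how many node subnets the cluster range can be split into.
func nodeSubnetCount(clusterCIDR string, nodeCIDRMaskSize int) (int, error) {
	_, ipnet, err := net.ParseCIDR(clusterCIDR)
	if err != nil {
		return 0, err
	}
	clusterMaskSize, _ := ipnet.Mask.Size()
	if clusterMaskSize > nodeCIDRMaskSize {
		return 0, fmt.Errorf("cluster mask /%d is longer than node mask /%d", clusterMaskSize, nodeCIDRMaskSize)
	}
	return 1 << uint(nodeCIDRMaskSize-clusterMaskSize), nil
}

func main() {
	// e.g. a /16 cluster CIDR split into /24 node CIDRs yields 256 node subnets.
	n, err := nodeSubnetCount("10.244.0.0/16", 24)
	fmt.Println(n, err) // 256 <nil>

	// An invalid configuration, analogous to the glog.Fatal branch in the constructor.
	_, err = nodeSubnetCount("10.244.0.0/26", 24)
	fmt.Println(err)
}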
Example #30
0
// NewReplicaSetController creates a new ReplicaSetController.
func NewReplicaSetController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("replicaset_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	rsc := &ReplicaSetController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replicaset-controller"}),
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:         workqueue.New(),
	}

	rsc.rsStore.Store, rsc.rsController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rsc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.ReplicaSet{},
		// TODO: Can we have much longer period here?
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rsc.enqueueReplicaSet,
			UpdateFunc: func(old, cur interface{}) {
				oldRS := old.(*extensions.ReplicaSet)
				curRS := cur.(*extensions.ReplicaSet)

				// We should invalidate the whole lookup cache if a RS's selector has been updated.
				//
				// Imagine that you have two RSs:
				// * old RS1
				// * new RS2
				// You also have a pod that is attached to RS2 (because it doesn't match RS1's selector).
				// Now imagine that you change RS1's selector so that it now matches that pod. In such a
				// case we must invalidate the whole cache so that the pod can be adopted by RS1.
				//
				// This makes the lookup cache less helpful, but selector updates don't happen often,
				// so it's not a big problem.
				if !reflect.DeepEqual(oldRS.Spec.Selector, curRS.Spec.Selector) {
					rsc.lookupCache.InvalidateAll()
				}

				// You might imagine that we only really need to enqueue the
				// replica set when Spec changes, but it is safer to sync any
				// time this function is triggered. That way a full informer
				// resync can requeue any replica sets that don't yet have pods
				// but whose last attempts at creating a pod have failed (since
				// we don't block on creation of pods), instead of leaving those
				// replica sets stalled indefinitely. Enqueueing every time
				// does result in some spurious syncs (like when Status.Replicas
				// is updated and the watch notification from it retriggers
				// this function), but in general extra resyncs shouldn't be
				// that bad, as ReplicaSets that haven't met expectations yet won't
				// sync, and all the listing is done using local stores.
				if oldRS.Status.Replicas != curRS.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for ReplicaSet: %v, %d->%d", curRS.Name, oldRS.Status.Replicas, curRS.Status.Replicas)
				}
				rsc.enqueueReplicaSet(cur)
			},
			// This will enter the sync loop and no-op, because the replica set has been deleted from the store.
			// Note that deleting a replica set immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the replica set.
			DeleteFunc: rsc.enqueueReplicaSet,
		},
	)

	rsc.podStore.Indexer, rsc.podController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return rsc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return rsc.kubeClient.Core().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc: rsc.addPod,
			// This invokes the ReplicaSet sync for every pod change, e.g. host assignment. Though this might seem like
			// overkill, the most frequent pod update is status, and the associated ReplicaSet will only list from
			// local storage, so it should be ok.
			UpdateFunc: rsc.updatePod,
			DeleteFunc: rsc.deletePod,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	rsc.syncHandler = rsc.syncReplicaSet
	rsc.podStoreSynced = rsc.podController.HasSynced
	rsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
	return rsc
}
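The UpdateFunc above invalidates the whole lookup cache whenever a ReplicaSet's selector changes, because a pod cached against one owner may now match another. Below is a minimal standalone sketch of that pattern, with a hypothetical matchingCache type standing in for controller.NewMatchingCache; it is not the controller package's implementation.
package main

import (
	"fmt"
	"reflect"
	"sync"
)

// matchingCache is a stand-in for a pod-to-owner lookup cache.
type matchingCache struct {
	mu      sync.Mutex
	entries map[string]string // pod key -> owner key
}

func newMatchingCache() *matchingCache { return &matchingCache{entries: map[string]string{}} }

// InvalidateAll drops every cached match.
func (c *matchingCache) InvalidateAll() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries = map[string]string{}
}

func (c *matchingCache) Len() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return len(c.entries)
}

// onOwnerUpdate drops the whole cache only when the label selector changed;
// status-only updates leave the cache intact.
func onOwnerUpdate(oldSelector, curSelector map[string]string, cache *matchingCache) {
	if !reflect.DeepEqual(oldSelector, curSelector) {
		cache.InvalidateAll()
	}
}

func main() {
	cache := newMatchingCache()
	cache.entries["default/pod-1"] = "default/rs-2"

	onOwnerUpdate(map[string]string{"app": "web"}, map[string]string{"app": "web"}, cache)
	fmt.Println("after no-op update:", cache.Len()) // 1

	onOwnerUpdate(map[string]string{"app": "web"}, map[string]string{"app": "api"}, cache)
	fmt.Println("after selector change:", cache.Len()) // 0
}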