Example #1
func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	gcc := &GCController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "pod-garbage-collector"}),
			KubeClient: kubeClient,
		},
		threshold: threshold,
	}

	terminatedSelector := compileTerminatedPodSelector()

	gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return gcc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), terminatedSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return gcc.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), terminatedSelector, rv)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
	)
	return gcc
}
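The *framework.Controller stored in gcc.podStoreSyncer does nothing until its Run method is called. A minimal wiring sketch, assuming the pre-1.3 import paths used throughout these examples (the runPodGC helper and the threshold value are hypothetical):

func runPodGC(kubeClient client.Interface, stop <-chan struct{}) {
	gcc := New(kubeClient, controller.StaticResyncPeriodFunc(30*time.Second), 100)
	// Run blocks until stop is closed, so it gets its own goroutine;
	// it starts the reflector, fills gcc.podStore, and dispatches events.
	go gcc.podStoreSyncer.Run(stop)
}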
// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl client.Interface, options DockercfgControllerOptions) *DockercfgController {
	e := &DockercfgController{
		client:               cl,
		queue:                workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		dockerURLsIntialized: options.DockerURLsIntialized,
	}

	var serviceAccountCache cache.Store
	serviceAccountCache, e.serviceAccountController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				serviceAccount := obj.(*api.ServiceAccount)
				glog.V(5).Infof("Adding service account %s", serviceAccount.Name)
				e.enqueueServiceAccount(serviceAccount)
			},
			UpdateFunc: func(old, cur interface{}) {
				serviceAccount := cur.(*api.ServiceAccount)
				glog.V(5).Infof("Updating service account %s", serviceAccount.Name)
				// Resync on service object relist.
				e.enqueueServiceAccount(serviceAccount)
			},
		},
	)
	e.serviceAccountCache = NewEtcdMutationCache(serviceAccountCache)

	tokenSecretSelector := fields.OneTermEqualSelector(api.SecretTypeField, string(api.SecretTypeServiceAccountToken))
	e.secretCache, e.secretController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = tokenSecretSelector
				return e.client.Secrets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = tokenSecretSelector
				return e.client.Secrets(api.NamespaceAll).Watch(options)
			},
		},
		&api.Secret{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    func(cur interface{}) { e.handleTokenSecretUpdate(nil, cur) },
			UpdateFunc: func(old, cur interface{}) { e.handleTokenSecretUpdate(old, cur) },
			DeleteFunc: e.handleTokenSecretDelete,
		},
	)

	e.syncHandler = e.syncServiceAccount

	return e
}
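The rate-limiting queue built above is meant to be drained by a worker loop following the Get/Done/Forget/AddRateLimited protocol. A hedged sketch; the worker name and the key-typed syncHandler signature are assumptions, not part of the excerpt:

// Hypothetical worker: process one queued key at a time, retrying with
// backoff on failure and clearing the rate limiter's memory on success.
func (e *DockercfgController) worker() {
	for {
		key, quit := e.queue.Get()
		if quit {
			return
		}
		func() {
			defer e.queue.Done(key) // always mark the item as processed
			if err := e.syncHandler(key.(string)); err != nil {
				e.queue.AddRateLimited(key)
				return
			}
			e.queue.Forget(key)
		}()
	}
}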
Example #3
func NewJobController(kubeClient client.Interface) *JobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	jm := &JobController{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "job"}),
		},
		expectations: controller.NewControllerExpectations(),
		queue:        workqueue.New(),
	}

	jm.jobStore.Store, jm.jobController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&experimental.Job{},
		replicationcontroller.FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: jm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				if job := cur.(*experimental.Job); !isJobFinished(job) {
					jm.enqueueController(job)
				}
			},
			DeleteFunc: jm.enqueueController,
		},
	)

	jm.podStore.Store, jm.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return jm.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return jm.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Pod{},
		replicationcontroller.PodRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    jm.addPod,
			UpdateFunc: jm.updatePod,
			DeleteFunc: jm.deletePod,
		},
	)

	jm.updateHandler = jm.updateJobStatus
	jm.syncHandler = jm.syncJob
	jm.podStoreSynced = jm.podController.HasSynced
	return jm
}
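jm.podStoreSynced (wired to jm.podController.HasSynced above) lets the sync path refuse to act before the pod informer has completed its initial list. A hedged sketch of that guard; the requeue delay and the wrapper name are illustrative:

// Hypothetical guard: requeue and retry until the pod cache has synced,
// so jobs are never reconciled against an empty pod store.
func (jm *JobController) syncIfReady(key string) error {
	if !jm.podStoreSynced() {
		time.Sleep(100 * time.Millisecond)
		jm.queue.Add(key)
		return nil
	}
	return jm.syncHandler(key)
}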
Example #4
// newIPVSController creates a new controller from the given config.
func newIPVSController(kubeClient *unversioned.Client, namespace string, useUnicast bool, password string) *ipvsControllerController {
	ipvsc := ipvsControllerController{
		client:            kubeClient,
		queue:             workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(reloadQPS, int(reloadQPS)),
		reloadLock:        &sync.Mutex{},
	}

	clusterNodes := getClusterNodesIP(kubeClient)

	nodeInfo, err := getNodeInfo(clusterNodes)
	if err != nil {
		glog.Fatalf("Error getting local IP from nodes in the cluster: %v", err)
	}

	neighbors := getNodeNeighbors(nodeInfo, clusterNodes)

	ipvsc.keepalived = &keepalived{
		iface:      nodeInfo.iface,
		ip:         nodeInfo.ip,
		netmask:    nodeInfo.netmask,
		nodes:      clusterNodes,
		neighbors:  neighbors,
		priority:   getNodePriority(nodeInfo.ip, clusterNodes),
		useUnicast: useUnicast,
		password:   password,
	}

	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}

		ipvsc.queue.Add(key)
	}

	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	ipvsc.svcLister.Store, ipvsc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	ipvsc.epLister.Store, ipvsc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &ipvsc
}
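The keyFunc called by enqueue is not part of this excerpt. In informer-based controllers of this era it is almost always the tombstone-aware helper below, which produces "namespace/name" keys and unwraps cache.DeletedFinalStateUnknown objects from deletes; assuming that convention:

// Assumed package-level definition backing the enqueue closure above.
var keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc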
Example #5
// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(cfg *loadBalancerConfig, kubeClient *unversioned.Client, namespace string) *loadBalancerController {

	lbc := loadBalancerController{
		cfg:    cfg,
		client: kubeClient,
		queue:  workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(
			reloadQPS, int(reloadQPS)),
		targetService:   *targetService,
		forwardServices: *forwardServices,
		httpPort:        *httpPort,
		tcpServices:     map[string]int{},
	}

	for _, service := range strings.Split(*tcpServices, ",") {
		portSplit := strings.Split(service, ":")
		if len(portSplit) != 2 {
			glog.Errorf("Ignoring misconfigured TCP service %v", service)
			continue
		}
		if port, err := strconv.Atoi(portSplit[1]); err != nil {
			glog.Errorf("Ignoring misconfigured TCP service %v: %v", service, err)
			continue
		} else {
			lbc.tcpServices[portSplit[0]] = port
		}
	}
	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}
		lbc.queue.Add(key)
	}
	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	lbc.epLister.Store, lbc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &lbc
}
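For reference, the *tcpServices flag parsed above expects comma-separated name:port pairs. A standalone, runnable sketch of the same loop with a hypothetical flag value, showing one malformed entry being skipped:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	tcpServices := "mysql:3306,redis,ssh:22" // "redis" lacks a port
	out := map[string]int{}
	for _, service := range strings.Split(tcpServices, ",") {
		portSplit := strings.Split(service, ":")
		if len(portSplit) != 2 {
			continue // the controller logs this via glog.Errorf
		}
		if port, err := strconv.Atoi(portSplit[1]); err == nil {
			out[portSplit[0]] = port
		}
	}
	fmt.Println(out) // map[mysql:3306 ssh:22]
}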
// NewPersistentVolumeProvisionerController creates a new PersistentVolumeProvisionerController
func NewPersistentVolumeProvisionerController(client controllerClient, syncPeriod time.Duration, clusterName string, plugins []volume.VolumePlugin, provisioner volume.ProvisionableVolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeProvisionerController, error) {
	controller := &PersistentVolumeProvisionerController{
		client:      client,
		cloud:       cloud,
		provisioner: provisioner,
		clusterName: clusterName,
	}

	if err := controller.pluginMgr.InitPlugins(plugins, controller); err != nil {
		return nil, fmt.Errorf("Could not initialize volume plugins for PersistentVolumeProvisionerController: %+v", err)
	}

	glog.V(5).Infof("Initializing provisioner: %s", controller.provisioner.Name())
	controller.provisioner.Init(controller)

	controller.volumeStore, controller.volumeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.ListPersistentVolumes(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.WatchPersistentVolumes(options)
			},
		},
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    controller.handleAddVolume,
			UpdateFunc: controller.handleUpdateVolume,
			// delete handler not needed in this controller.
			// volume deletion is handled by the recycler controller
		},
	)
	controller.claimStore, controller.claimController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.ListPersistentVolumeClaims(api.NamespaceAll, options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.WatchPersistentVolumeClaims(api.NamespaceAll, options)
			},
		},
		&api.PersistentVolumeClaim{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    controller.handleAddClaim,
			UpdateFunc: controller.handleUpdateClaim,
			// delete handler not needed.
			// normal recycling applies when a claim is deleted.
			// recycling is handled by the binding controller.
		},
	)

	return controller, nil
}
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
	volumeIndex := NewPersistentVolumeOrderedIndex()
	binderClient := NewBinderClient(kubeClient)
	binder := &PersistentVolumeClaimBinder{
		volumeIndex: volumeIndex,
		client:      binderClient,
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumes().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				options := api.ListOptions{ResourceVersion: resourceVersion}
				return kubeClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), options)
			},
		},
		&api.PersistentVolume{},
		// TODO: Can we have much longer period here?
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addVolume,
			UpdateFunc: binder.updateVolume,
			DeleteFunc: binder.deleteVolume,
		},
	)
	_, claimController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				options := api.ListOptions{ResourceVersion: resourceVersion}
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), options)
			},
		},
		&api.PersistentVolumeClaim{},
		// TODO: Can we have much longer period here?
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addClaim,
			UpdateFunc: binder.updateClaim,
			// no DeleteFunc needed.  a claim requires no clean-up.
			// syncVolume handles the missing claim
		},
	)

	binder.claimController = claimController
	binder.volumeController = volumeController

	return binder
}
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient clientset.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("pv_claim_binder_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}
	volumeIndex := NewPersistentVolumeOrderedIndex()
	binderClient := NewBinderClient(kubeClient)
	binder := &PersistentVolumeClaimBinder{
		volumeIndex: volumeIndex,
		client:      binderClient,
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().PersistentVolumes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().PersistentVolumes().Watch(options)
			},
		},
		&api.PersistentVolume{},
		// TODO: Can we have much longer period here?
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addVolume,
			UpdateFunc: binder.updateVolume,
			DeleteFunc: binder.deleteVolume,
		},
	)
	_, claimController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
			},
		},
		&api.PersistentVolumeClaim{},
		// TODO: Can we have much longer period here?
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addClaim,
			UpdateFunc: binder.updateClaim,
			DeleteFunc: binder.deleteClaim,
		},
	)

	binder.claimController = claimController
	binder.volumeController = volumeController

	return binder
}
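Both binder variants above return with their two *framework.Controllers unstarted. A hedged sketch of the Run shape that drives them; the method body is illustrative:

// Hypothetical: start both informer controllers on a shared stop channel.
func (binder *PersistentVolumeClaimBinder) run(stopCh <-chan struct{}) {
	go binder.volumeController.Run(stopCh)
	go binder.claimController.Run(stopCh)
}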
Example #9
// newLoadBalancerController creates a controller for nginx loadbalancer
func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration, defaultSvc,
	namespace, nxgConfigMapName, tcpConfigMapName, udpConfigMapName string, lbRuntimeInfo *lbInfo) (*loadBalancerController, error) {
	lbc := loadBalancerController{
		client:       kubeClient,
		stopCh:       make(chan struct{}),
		lbInfo:       lbRuntimeInfo,
		nginx:        nginx.NewManager(kubeClient),
		nxgConfigMap: nxgConfigMapName,
		tcpConfigMap: tcpConfigMapName,
		udpConfigMap: udpConfigMapName,
		defaultSvc:   defaultSvc,
	}

	lbc.syncQueue = NewTaskQueue(lbc.sync)

	eventHandler := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			lbc.syncQueue.enqueue(obj)
		},
		DeleteFunc: func(obj interface{}) {
			lbc.syncQueue.enqueue(obj)
		},
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				lbc.syncQueue.enqueue(cur)
			}
		},
	}

	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  ingressListFunc(lbc.client, namespace),
			WatchFunc: ingressWatchFunc(lbc.client, namespace),
		},
		&extensions.Ingress{}, resyncPeriod, eventHandler)

	lbc.endpLister.Store, lbc.endpController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  endpointsListFunc(lbc.client, namespace),
			WatchFunc: endpointsWatchFunc(lbc.client, namespace),
		},
		&api.Endpoints{}, resyncPeriod, eventHandler)

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  serviceListFunc(lbc.client, namespace),
			WatchFunc: serviceWatchFunc(lbc.client, namespace),
		},
		&api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})

	return &lbc, nil
}
Example #10
// newIPVSController creates a new controller from the given config.
func newIPVSController(kubeClient *unversioned.Client, namespace string, useUnicast bool, configMapName string) *ipvsControllerController {
	ipvsc := ipvsControllerController{
		client:            kubeClient,
		reloadRateLimiter: util.NewTokenBucketRateLimiter(reloadQPS, int(reloadQPS)),
		ruCfg:             []vip{},
		configMapName:     configMapName,
	}

	clusterNodes := getClusterNodesIP(kubeClient)

	nodeInfo, err := getNodeInfo(clusterNodes)
	if err != nil {
		glog.Fatalf("Error getting local IP from nodes in the cluster: %v", err)
	}

	neighbors := getNodeNeighbors(nodeInfo, clusterNodes)

	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, utiliptables.ProtocolIpv4)

	ipvsc.keepalived = &keepalived{
		iface:      nodeInfo.iface,
		ip:         nodeInfo.ip,
		netmask:    nodeInfo.netmask,
		nodes:      clusterNodes,
		neighbors:  neighbors,
		priority:   getNodePriority(nodeInfo.ip, clusterNodes),
		useUnicast: useUnicast,
		ipt:        iptInterface,
	}

	err = ipvsc.keepalived.loadTemplate()
	if err != nil {
		glog.Fatalf("Error loading keepalived template: %v", err)
	}

	eventHandlers := framework.ResourceEventHandlerFuncs{}

	ipvsc.svcLister.Store, ipvsc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	ipvsc.epLister.Store, ipvsc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			ipvsc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &ipvsc
}
Example #11
// Initializes the factory.
func NewConfigFactory(client *client.Client, schedulerName string) *ConfigFactory {
	stopEverything := make(chan struct{})
	schedulerCache := schedulercache.New(30*time.Second, stopEverything)

	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister:       &cache.StoreToNodeLister{},
		PVLister:         &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		PVCLister:        &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:    &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ReplicaSetLister: &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		schedulerCache:   schedulerCache,
		StopEverything:   stopEverything,
		SchedulerName:    schedulerName,
	}

	c.PodLister = schedulerCache

	// On add/delete to the scheduled pods, remove from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plug in functions that
	// they may need to call.
	c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(
		c.createAssignedNonTerminatedPodLW(),
		&api.Pod{},
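		// A resync period of 0 disables periodic resyncs: this cache is
		// maintained purely by watch events.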
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    c.addPodToCache,
			UpdateFunc: c.updatePodInCache,
			DeleteFunc: c.deletePodFromCache,
		},
	)

	c.NodeLister.Store, c.nodePopulator = framework.NewInformer(
		c.createNodeLW(),
		&api.Node{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    c.addNodeToCache,
			UpdateFunc: c.updateNodeInCache,
			DeleteFunc: c.deleteNodeFromCache,
		},
	)

	return c
}
// NewEndpointController returns a new *EndpointController.
func NewEndpointController(client *client.Client, resyncPeriod controller.ResyncPeriodFunc) *EndpointController {
	e := &EndpointController{
		client: client,
		queue:  workqueue.New(),
	}

	e.serviceStore.Store, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Services(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				options := api.ListOptions{ResourceVersion: rv}
				return e.client.Services(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), options)
			},
		},
		&api.Service{},
		// TODO: Can we have much longer period here?
		FullServiceResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: e.enqueueService,
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueService(cur)
			},
			DeleteFunc: e.enqueueService,
		},
	)

	e.podStore.Store, e.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				options := api.ListOptions{ResourceVersion: rv}
				return e.client.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.addPod,
			UpdateFunc: e.updatePod,
			DeleteFunc: e.deletePod,
		},
	)

	return e
}
// NewServiceServingCertUpdateController creates a new ServiceServingCertUpdateController.
// TODO this should accept a shared informer
func NewServiceServingCertUpdateController(serviceClient kcoreclient.ServicesGetter, secretClient kcoreclient.SecretsGetter, ca *crypto.CA, dnsSuffix string, resyncInterval time.Duration) *ServiceServingCertUpdateController {
	sc := &ServiceServingCertUpdateController{
		secretClient: secretClient,

		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		ca:        ca,
		dnsSuffix: dnsSuffix,
		// TODO base the expiry time on a percentage of the time for the lifespan of the cert
		minTimeLeftForCert: 1 * time.Hour,
	}

	sc.serviceCache, sc.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return serviceClient.Services(kapi.NamespaceAll).List(options)
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				return serviceClient.Services(kapi.NamespaceAll).Watch(options)
			},
		},
		&kapi.Service{},
		resyncInterval,
		framework.ResourceEventHandlerFuncs{},
	)
	sc.serviceHasSynced = sc.serviceController.HasSynced

	sc.secretCache, sc.secretController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return sc.secretClient.Secrets(kapi.NamespaceAll).List(options)
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				return sc.secretClient.Secrets(kapi.NamespaceAll).Watch(options)
			},
		},
		&kapi.Secret{},
		resyncInterval,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    sc.addSecret,
			UpdateFunc: sc.updateSecret,
		},
	)
	sc.secretHasSynced = sc.secretController.HasSynced

	sc.syncHandler = sc.syncSecret

	return sc
}
Example #14
// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(c *client.Client, namespace string,
	domain string, nodes []string) *loadBalancerController {
	mgr := &haproxy.HAProxyManager{
		Exec:       exec.New(),
		ConfigFile: "haproxy.cfg",
		DomainName: domain,
	}

	lbc := loadBalancerController{
		client:            c,
		queue:             workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(reloadQPS, int(reloadQPS)),
		haproxy:           mgr,
		domain:            domain,
		clusterNodes:      nodes,
	}

	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}
		lbc.queue.Add(key)
	}

	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	lbc.epLister.Store, lbc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &lbc
}
// NewPersistentVolumeRecycler creates a new PersistentVolumeRecycler
func NewPersistentVolumeRecycler(kubeClient clientset.Interface, syncPeriod time.Duration, maximumRetry int, plugins []volume.VolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeRecycler, error) {
	recyclerClient := NewRecyclerClient(kubeClient)
	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("pv_recycler_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}
	recycler := &PersistentVolumeRecycler{
		client:          recyclerClient,
		kubeClient:      kubeClient,
		cloud:           cloud,
		maximumRetry:    maximumRetry,
		syncPeriod:      syncPeriod,
		releasedVolumes: make(map[string]releasedVolumeStatus),
	}

	if err := recycler.pluginMgr.InitPlugins(plugins, recycler); err != nil {
		return nil, fmt.Errorf("Could not initialize volume plugins for PVClaimBinder: %+v", err)
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().PersistentVolumes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().PersistentVolumes().Watch(options)
			},
		},
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				pv, ok := obj.(*api.PersistentVolume)
				if !ok {
					glog.Errorf("Error casting object to PersistentVolume: %v", obj)
					return
				}
				recycler.reclaimVolume(pv)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				pv, ok := newObj.(*api.PersistentVolume)
				if !ok {
					glog.Errorf("Error casting object to PersistentVolume: %v", newObj)
					return
				}
				recycler.reclaimVolume(pv)
			},
			DeleteFunc: func(obj interface{}) {
				pv, ok := obj.(*api.PersistentVolume)
				if !ok {
					glog.Errorf("Error casting object to PersistentVolume: %v", obj)
					return
				}
				recycler.removeReleasedVolume(pv)
			},
		},
	)

	recycler.volumeController = volumeController
	return recycler, nil
}
func (r *replenishmentControllerFactory) NewController(options *kresourcequota.ReplenishmentControllerOptions) (*framework.Controller, error) {
	var result *framework.Controller
	switch options.GroupKind {
	case imageapi.Kind("ImageStream"):
		_, result = framework.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options api.ListOptions) (runtime.Object, error) {
					return r.osClient.ImageStreams(api.NamespaceAll).List(options)
				},
				WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
					return r.osClient.ImageStreams(api.NamespaceAll).Watch(options)
				},
			},
			&imageapi.ImageStream{},
			options.ResyncPeriod(),
			framework.ResourceEventHandlerFuncs{
				UpdateFunc: ImageStreamReplenishmentUpdateFunc(options),
				DeleteFunc: kresourcequota.ObjectReplenishmentDeleteFunc(options),
			},
		)
	default:
		return nil, fmt.Errorf("no replenishment controller available for %s", options.GroupKind)
	}
	return result, nil
}
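A hedged usage sketch for the factory above. GroupKind and ResyncPeriod appear in the excerpt; ReplenishmentFunc and the surrounding variables are assumptions based on the upstream ReplenishmentControllerOptions type:

// Hypothetical call site: build and run an ImageStream replenishment controller.
ctrl, err := factory.NewController(&kresourcequota.ReplenishmentControllerOptions{
	GroupKind:         imageapi.Kind("ImageStream"),
	ResyncPeriod:      controller.StaticResyncPeriodFunc(10 * time.Minute),
	ReplenishmentFunc: replenishQuota, // assumed callback that recalculates quota usage
})
if err != nil {
	glog.Fatalf("replenishment controller: %v", err)
}
go ctrl.Run(stopCh)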
Example #17
File: pods.go Project: naxhh/heapster
func newPodsApi(client *kclient.Client) podsApi {
	// Extend the selector to include specific nodes to monitor
	// or provide an API to update the nodes to monitor.
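	// The selector "spec.nodeName!=" (an empty right-hand side) matches
	// pods whose nodeName is non-empty, i.e. pods already scheduled to a node.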
	selector, err := kSelector.ParseSelector("spec.nodeName!=")
	if err != nil {
		panic(err)
	}

	lw := kcache.NewListWatchFromClient(client, "pods", kapi.NamespaceAll, selector)
	podLister := &kcache.StoreToPodLister{Store: kcache.NewStore(kcache.MetaNamespaceKeyFunc)}
	// Watch and cache all running pods.
	reflector := kcache.NewReflector(lw, &kapi.Pod{}, podLister.Store, 0)
	stopChan := make(chan struct{})
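	// RunUntil keeps the pod cache synced in the background and stops
	// the reflector when stopChan is closed.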
	reflector.RunUntil(stopChan)
	nStore, nController := kframework.NewInformer(
		createNamespaceLW(client),
		&kapi.Namespace{},
		resyncPeriod,
		kframework.ResourceEventHandlerFuncs{})
	go nController.Run(util.NeverStop)

	podsApi := &realPodsApi{
		client:         client,
		podLister:      podLister,
		stopChan:       stopChan,
		reflector:      reflector,
		namespaceStore: nStore,
	}

	return podsApi
}
Example #18
func newPodObserver(podLW cache.ListerWatcher, taskUpdateTx taskUpdateTx, terminate <-chan struct{}) *podObserver {
	// watch pods from the given pod ListWatch
	if podLW == nil {
		// fail early to make debugging easier
		panic("cannot create executor with nil PodLW")
	}

	p := &podObserver{
		terminate:    terminate,
		taskUpdateTx: taskUpdateTx,
	}
	_, p.podController = framework.NewInformer(podLW, &api.Pod{}, podRelistPeriod, &framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			pod := obj.(*api.Pod)
			log.V(4).Infof("pod %s/%s created on apiserver", pod.Namespace, pod.Name)
			p.handleChangedApiserverPod(pod)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			pod := newObj.(*api.Pod)
			log.V(4).Infof("pod %s/%s updated on apiserver", pod.Namespace, pod.Name)
			p.handleChangedApiserverPod(pod)
		},
		DeleteFunc: func(obj interface{}) {
			pod := obj.(*api.Pod)
			log.V(4).Infof("pod %s/%s deleted on apiserver", pod.Namespace, pod.Name)
		},
	})
	return p
}
Example #19
// NewDockercfgDeletedController returns a new *DockercfgDeletedController.
func NewDockercfgDeletedController(cl client.Interface, options DockercfgDeletedControllerOptions) *DockercfgDeletedController {
	e := &DockercfgDeletedController{
		client: cl,
	}

	dockercfgSelector := fields.OneTermEqualSelector(client.SecretType, string(api.SecretTypeDockercfg))
	_, e.secretController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				opts := api.ListOptions{FieldSelector: dockercfgSelector}
				return e.client.Secrets(api.NamespaceAll).List(opts)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				opts := api.ListOptions{FieldSelector: dockercfgSelector, ResourceVersion: options.ResourceVersion}
				return e.client.Secrets(api.NamespaceAll).Watch(opts)
			},
		},
		&api.Secret{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: e.secretDeleted,
		},
	)

	return e
}
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceController {
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				err := syncNamespace(kubeClient, *namespace)
				if err != nil {
					glog.Error(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				err := syncNamespace(kubeClient, *namespace)
				if err != nil {
					glog.Error(err)
				}
			},
		},
	)

	return &NamespaceController{
		controller: controller,
	}
}
Example #21
func NewImportController(isNamespacer client.ImageStreamsNamespacer, ismNamespacer client.ImageStreamMappingsNamespacer, parallelImports int, resyncInterval time.Duration) *ImportController {
	c := &ImportController{
		streams:  isNamespacer,
		mappings: ismNamespacer,

		numParallelImports: parallelImports,
		work:               make(chan *api.ImageStream, 20*parallelImports),
		workingSet:         sets.String{},
	}

	_, c.imageStreamController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.streams.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.streams.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.ImageStream{},
		resyncInterval,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    c.imageStreamAdded,
			UpdateFunc: c.imageStreamUpdated,
		},
	)

	return c
}
Example #22
func New(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	gcc := &GCController{
		kubeClient: kubeClient,
		threshold:  threshold,
		deletePod: func(namespace, name string) error {
			return kubeClient.Legacy().Pods(namespace).Delete(name, api.NewDeleteOptions(0))
		},
	}

	terminatedSelector := fields.ParseSelectorOrDie("status.phase!=" + string(api.PodPending) + ",status.phase!=" + string(api.PodRunning) + ",status.phase!=" + string(api.PodUnknown))

	gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = terminatedSelector
				return gcc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
	)
	return gcc
}
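The terminatedSelector above evaluates to the field selector string shown below; it limits the informer to pods already in a terminal phase. A standalone sketch, assuming the pre-1.3 fields package path:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/fields"
)

func main() {
	sel := fields.ParseSelectorOrDie(
		"status.phase!=Pending,status.phase!=Running,status.phase!=Unknown")
	// Prints the canonical form sent to the API server as a field selector.
	fmt.Println(sel.String())
}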
Example #23
// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl client.Interface, options DockercfgControllerOptions) *DockercfgController {
	e := &DockercfgController{
		client: cl,
	}

	_, e.serviceAccountController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAccountAdded,
			UpdateFunc: e.serviceAccountUpdated,
		},
	)

	e.dockerURL = options.DefaultDockerURL

	return e
}
Example #24
// NewclusterController returns a new cluster controller
func NewclusterController(federationClient federationclientset.Interface, clusterMonitorPeriod time.Duration) *ClusterController {
	cc := &ClusterController{
		knownClusterSet:         make(sets.String),
		federationClient:        federationClient,
		clusterMonitorPeriod:    clusterMonitorPeriod,
		clusterClusterStatusMap: make(map[string]federation_v1alpha1.ClusterStatus),
		clusterKubeClientMap:    make(map[string]ClusterClient),
	}
	cc.clusterStore.Store, cc.clusterController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return cc.federationClient.Federation().Clusters().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return cc.federationClient.Federation().Clusters().Watch(options)
			},
		},
		&federation_v1alpha1.Cluster{},
		controller.NoResyncPeriodFunc(),
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: cc.delFromClusterSet,
			AddFunc:    cc.addToClusterSet,
		},
	)
	return cc
}
Example #25
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, experimentalMode bool, resyncPeriod time.Duration) *NamespaceController {
	var controller *framework.Controller
	_, controller = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				if err := syncNamespace(kubeClient, experimentalMode, *namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							// Estimate is the aggregate total of TerminationGracePeriodSeconds, which defaults to 30s
							// for pods.  However, most processes will terminate faster - within a few seconds, probably
							// with a peak within 5-10s.  So this division is a heuristic that avoids waiting the full
							// duration when in many cases things complete more quickly. The extra second added is to
							// ensure we never wait 0 seconds.
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				if err := syncNamespace(kubeClient, experimentalMode, *namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
		},
	)

	return &NamespaceController{
		controller: controller,
	}
}
Example #26
// NewDockerRegistryServiceController returns a new *DockerRegistryServiceController.
func NewDockerRegistryServiceController(cl client.Interface, options DockerRegistryServiceControllerOptions) *DockerRegistryServiceController {
	e := &DockerRegistryServiceController{
		client: cl,
	}

	_, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(opts api.ListOptions) (runtime.Object, error) {
				return e.client.Services(options.RegistryNamespace).List(opts)
			},
			WatchFunc: func(opts api.ListOptions) (watch.Interface, error) {
				return e.client.Services(options.RegistryNamespace).Watch(opts)
			},
		},
		&api.Service{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAdded,
			UpdateFunc: e.serviceUpdated,
			DeleteFunc: e.serviceDeleted,
		},
	)
	e.registryServiceName = options.RegistryServiceName
	e.dockercfgController = options.DockercfgController
	e.defaultDockerURL = options.DefaultDockerURL

	return e
}
Example #27
func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	gcc := &GCController{
		kubeClient: kubeClient,
		threshold:  threshold,
		deletePod: func(namespace, name string) error {
			return kubeClient.Pods(namespace).Delete(name, api.NewDeleteOptions(0))
		},
	}

	terminatedSelector := compileTerminatedPodSelector()

	gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				options := unversioned.ListOptions{FieldSelector: unversioned.FieldSelector{terminatedSelector}}
				return gcc.kubeClient.Pods(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options unversioned.ListOptions) (watch.Interface, error) {
				options.FieldSelector.Selector = terminatedSelector
				return gcc.kubeClient.Pods(api.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
	)
	return gcc
}
Example #28
// NewTokensController returns a new *TokensController.
func NewTokensController(cl clientset.Interface, options TokensControllerOptions) *TokensController {
	maxRetries := options.MaxRetries
	if maxRetries == 0 {
		maxRetries = 10
	}

	e := &TokensController{
		client: cl,
		token:  options.TokenGenerator,
		rootCA: options.RootCA,

		syncServiceAccountQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		syncSecretQueue:         workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),

		maxRetries: maxRetries,
	}

	e.serviceAccounts, e.serviceAccountController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.queueServiceAccountSync,
			UpdateFunc: e.queueServiceAccountUpdateSync,
			DeleteFunc: e.queueServiceAccountSync,
		},
	)

	tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
	e.secrets, e.secretController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = tokenSelector
				return e.client.Core().Secrets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = tokenSelector
				return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
			},
		},
		&api.Secret{},
		options.SecretResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.queueSecretSync,
			UpdateFunc: e.queueSecretUpdateSync,
			DeleteFunc: e.queueSecretSync,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	return e
}
Example #29
func monitorFor(p *Propagator, clientPool dynamic.ClientPool, resource unversioned.GroupVersionResource) (monitor, error) {
	// TODO: consider store in one storage.
	glog.V(6).Infof("create storage for resource %s", resource)
	var monitor monitor
	client, err := clientPool.ClientForGroupVersion(resource.GroupVersion())
	if err != nil {
		return monitor, err
	}
	monitor.store, monitor.controller = framework.NewInformer(
		// TODO: make special List and Watch function that removes fields other
		// than ObjectMeta.
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				// APIResource.Kind is not used by the dynamic client, so
				// leave it empty. We want to list this resource in all
				// namespaces if it's namespace scoped, so leave
				// APIResource.Namespaced as false is all right.
				apiResource := unversioned.APIResource{Name: resource.Resource}
				return client.Resource(&apiResource, api.NamespaceAll).List(&options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				// APIResource.Kind is not used by the dynamic client, so
				// leave it empty. We want to list this resource in all
				// namespaces if it's namespace scoped, so leaving
				// APIResource.Namespaced as false is all right.
				apiResource := unversioned.APIResource{Name: resource.Resource}
				return client.Resource(&apiResource, api.NamespaceAll).Watch(&options)
			},
		},
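		// nil as the expected type disables the reflector's usual type
		// check; the dynamic client returns unstructured objects rather
		// than one concrete struct type.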
		nil,
		ResourceResyncTime,
		framework.ResourceEventHandlerFuncs{
			// add the event to the propagator's eventQueue.
			AddFunc: func(obj interface{}) {
				event := event{
					eventType: addEvent,
					obj:       obj,
				}
				p.eventQueue.Add(event)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				event := event{updateEvent, newObj, oldObj}
				p.eventQueue.Add(event)
			},
			DeleteFunc: func(obj interface{}) {
				// delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it
				if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok {
					obj = deletedFinalStateUnknown.Obj
				}
				event := event{
					eventType: deleteEvent,
					obj:       obj,
				}
				p.eventQueue.Add(event)
			},
		},
	)
	return monitor, nil
}
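A hedged sketch of the consuming side of the eventQueue filled above; the dispatch loop shape follows the workqueue Get/Done protocol, while the case bodies stand in for the Propagator's real graph bookkeeping:

// Hypothetical dispatch loop run by the Propagator.
func (p *Propagator) processEvents() {
	for {
		item, quit := p.eventQueue.Get()
		if quit {
			return
		}
		ev := item.(event)
		switch ev.eventType {
		case addEvent, updateEvent:
			// insert or update ev.obj's node in the dependency graph
		case deleteEvent:
			// remove the node and enqueue its dependents for GC
		}
		p.eventQueue.Done(item)
	}
}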
Example #30
// NewDockerRegistryServiceController returns a new *DockerRegistryServiceController.
func NewDockerRegistryServiceController(cl client.Interface, options DockerRegistryServiceControllerOptions) *DockerRegistryServiceController {
	e := &DockerRegistryServiceController{
		client:                cl,
		dockercfgController:   options.DockercfgController,
		registryLocationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		secretsToUpdate:       workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		serviceName:           options.RegistryServiceName,
		serviceNamespace:      options.RegistryNamespace,
		dockerURLsIntialized:  options.DockerURLsIntialized,
	}

	e.serviceCache, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
				opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
				return e.client.Services(options.RegistryNamespace).List(opts)
			},
			WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
				opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
				return e.client.Services(options.RegistryNamespace).Watch(opts)
			},
		},
		&kapi.Service{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				e.enqueueRegistryLocationQueue()
			},
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueRegistryLocationQueue()
			},
			DeleteFunc: func(obj interface{}) {
				e.enqueueRegistryLocationQueue()
			},
		},
	)
	e.servicesSynced = e.serviceController.HasSynced
	e.syncRegistryLocationHandler = e.syncRegistryLocationChange

	dockercfgOptions := kapi.ListOptions{FieldSelector: fields.SelectorFromSet(map[string]string{kapi.SecretTypeField: string(kapi.SecretTypeDockercfg)})}
	e.secretCache, e.secretController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
				return e.client.Secrets(kapi.NamespaceAll).List(dockercfgOptions)
			},
			WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
				return e.client.Secrets(kapi.NamespaceAll).Watch(dockercfgOptions)
			},
		},
		&kapi.Secret{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{},
	)
	e.secretsSynced = e.secretController.HasSynced
	e.syncSecretHandler = e.syncSecretUpdate

	return e
}
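A hedged sketch of a Run method for the controller above: start both informers, wait for their caches, then let workers drain the queues. Only the field names come from the excerpt; the loop shape is illustrative:

func (e *DockerRegistryServiceController) run(stopCh <-chan struct{}) {
	go e.serviceController.Run(stopCh)
	go e.secretController.Run(stopCh)
	// Hold off handlers until both caches finish their initial list.
	for !e.servicesSynced() || !e.secretsSynced() {
		select {
		case <-stopCh:
			return
		case <-time.After(100 * time.Millisecond):
		}
	}
	// Workers draining registryLocationQueue and secretsToUpdate would start
	// here, following the same Get/Done workqueue protocol as other examples.
	<-stopCh
}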