func newWatchCache(capacity int) *watchCache {
	wc := &watchCache{
		capacity:        capacity,
		cache:           make([]watchCacheElement, capacity),
		startIndex:      0,
		endIndex:        0,
		store:           cache.NewStore(cache.MetaNamespaceKeyFunc),
		resourceVersion: 0,
		clock:           util.RealClock{},
	}
	wc.cond = sync.NewCond(wc.RLocker())
	return wc
}
// NewEndpointsStore creates an undelta store that expands updates to the store into
// EndpointsUpdate events on the channel. If no store is passed, a default store will
// be initialized. Allows reuse of a cache store across multiple components.
func NewEndpointsStore(store cache.Store, ch chan<- EndpointsUpdate) cache.Store {
	fn := func(objs []interface{}) {
		var endpoints []api.Endpoints
		for _, o := range objs {
			endpoints = append(endpoints, *(o.(*api.Endpoints)))
		}
		ch <- EndpointsUpdate{Op: SET, Endpoints: endpoints}
	}
	if store == nil {
		store = cache.NewStore(cache.MetaNamespaceKeyFunc)
	}
	return &cache.UndeltaStore{
		Store:    store,
		PushFunc: fn,
	}
}
// NewServiceStore creates an undelta store that expands updates to the store into
// ServiceUpdate events on the channel. If no store is passed, a default store will
// be initialized. Allows reuse of a cache store across multiple components.
func NewServiceStore(store cache.Store, ch chan<- ServiceUpdate) cache.Store {
	fn := func(objs []interface{}) {
		var services []api.Service
		for _, o := range objs {
			services = append(services, *(o.(*api.Service)))
		}
		ch <- ServiceUpdate{Op: SET, Services: services}
	}
	if store == nil {
		store = cache.NewStore(cache.MetaNamespaceKeyFunc)
	}
	return &cache.UndeltaStore{
		Store:    store,
		PushFunc: fn,
	}
}
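// NewEndpointsStore and NewServiceStore share one pattern: cache.UndeltaStore
// pushes the complete current contents of the store to PushFunc on every
// change, so the channel always carries a full snapshot (Op: SET) rather than
// a delta. Below is a minimal consumer sketch, not code from the excerpts:
// handleServices is a hypothetical callback, and the reflector that would
// normally populate the returned store is omitted.
func exampleServiceUpdateConsumer() {
	serviceUpdates := make(chan ServiceUpdate)
	store := NewServiceStore(nil, serviceUpdates)

	go func() {
		for update := range serviceUpdates {
			// Each update replaces, rather than amends, the previous view of services.
			handleServices(update.Services)
		}
	}()

	_ = store // typically handed to a cache.Reflector to keep it in sync with the API server
}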
// New returns a new service controller to keep cloud provider service resources
// (like load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	if kubeClient != nil && kubeClient.Core().GetRESTClient().GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().GetRESTClient().GetRateLimiter())
	}

	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		nodeLister: cache.StoreToNodeLister{
			Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
		},
	}
}
func NewKubeDNS(client clientset.Interface, domain string, federations map[string]string) (*KubeDNS, error) {
	// Verify that federation names do not contain dots ('.').
	// We cannot allow dots since we use them as the separator for path segments
	// (svcname.nsname.fedname.svc.domain).
	for key := range federations {
		if strings.ContainsAny(key, ".") {
			return nil, fmt.Errorf("invalid federation name: %s, cannot have '.'", key)
		}
	}
	kd := &KubeDNS{
		kubeClient:          client,
		domain:              domain,
		cache:               NewTreeCache(),
		cacheLock:           sync.RWMutex{},
		nodesStore:          kcache.NewStore(kcache.MetaNamespaceKeyFunc),
		reverseRecordMap:    make(map[string]*skymsg.Service),
		clusterIPServiceMap: make(map[string]*kapi.Service),
		domainPath:          reverseArray(strings.Split(strings.TrimRight(domain, "."), ".")),
		federations:         federations,
	}
	kd.setEndpointsStore()
	kd.setServicesStore()
	return kd, nil
}
func TestCheckPod(t *testing.T) {
	tcs := []struct {
		pod   api.Pod
		prune bool
	}{
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
				Spec:       api.PodSpec{NodeName: "new"},
			},
			prune: false,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
				Spec:       api.PodSpec{NodeName: "old"},
			},
			prune: false,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
				Spec:       api.PodSpec{NodeName: ""},
			},
			prune: false,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: nil},
				Spec:       api.PodSpec{NodeName: "nonexistent"},
			},
			prune: false,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: "new"},
			},
			prune: false,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: "old"},
			},
			prune: true,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: "older"},
			},
			prune: true,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: "oldest"},
			},
			prune: true,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: ""},
			},
			prune: true,
		},
		{
			pod: api.Pod{
				ObjectMeta: api.ObjectMeta{DeletionTimestamp: &unversioned.Time{}},
				Spec:       api.PodSpec{NodeName: "nonexistent"},
			},
			prune: true,
		},
	}

	nc := NewNodeController(nil, nil, 0, nil, nil, 0, 0, 0, nil, nil, 0, false)
	nc.nodeStore.Store = cache.NewStore(cache.MetaNamespaceKeyFunc)
	nc.nodeStore.Store.Add(&api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "new",
		},
		Status: api.NodeStatus{
			NodeInfo: api.NodeSystemInfo{
				KubeletVersion: "v1.1.0",
			},
		},
	})
	nc.nodeStore.Store.Add(&api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "old",
		},
		Status: api.NodeStatus{
			NodeInfo: api.NodeSystemInfo{
				KubeletVersion: "v1.0.0",
			},
		},
	})
	nc.nodeStore.Store.Add(&api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "older",
		},
		Status: api.NodeStatus{
			NodeInfo: api.NodeSystemInfo{
				KubeletVersion: "v0.21.4",
			},
		},
	})
	nc.nodeStore.Store.Add(&api.Node{
		ObjectMeta: api.ObjectMeta{
			Name: "oldest",
		},
		Status: api.NodeStatus{
			NodeInfo: api.NodeSystemInfo{
				KubeletVersion: "v0.19.3",
			},
		},
	})

	for i, tc := range tcs {
		var deleteCalls int
		nc.forcefullyDeletePod = func(_ *api.Pod) error {
			deleteCalls++
			return nil
		}

		nc.maybeDeleteTerminatingPod(&tc.pod)

		if tc.prune && deleteCalls != 1 {
			t.Errorf("[%v] expected number of delete calls to be 1 but got %v", i, deleteCalls)
		}
		if !tc.prune && deleteCalls != 0 {
			t.Errorf("[%v] expected number of delete calls to be 0 but got %v", i, deleteCalls)
		}
	}
}
// NewPersistentVolumeController creates a new PersistentVolumeController
func NewPersistentVolumeController(
	kubeClient clientset.Interface,
	syncPeriod time.Duration,
	provisioner vol.ProvisionableVolumePlugin,
	recyclers []vol.VolumePlugin,
	cloud cloudprovider.Interface,
	clusterName string,
	volumeSource, claimSource cache.ListerWatcher,
	eventRecorder record.EventRecorder,
	enableDynamicProvisioning bool,
) *PersistentVolumeController {

	if eventRecorder == nil {
		broadcaster := record.NewBroadcaster()
		broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
		eventRecorder = broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"})
	}

	controller := &PersistentVolumeController{
		volumes:                       newPersistentVolumeOrderedIndex(),
		claims:                        cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc),
		kubeClient:                    kubeClient,
		eventRecorder:                 eventRecorder,
		runningOperations:             goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),
		cloud:                         cloud,
		provisioner:                   provisioner,
		enableDynamicProvisioning:     enableDynamicProvisioning,
		clusterName:                   clusterName,
		createProvisionedPVRetryCount: createProvisionedPVRetryCount,
		createProvisionedPVInterval:   createProvisionedPVInterval,
	}

	controller.recyclePluginMgr.InitPlugins(recyclers, controller)
	if controller.provisioner != nil {
		if err := controller.provisioner.Init(controller); err != nil {
			glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err)
		}
	}

	if volumeSource == nil {
		volumeSource = &cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().PersistentVolumes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().PersistentVolumes().Watch(options)
			},
		}
	}
	controller.volumeSource = volumeSource

	if claimSource == nil {
		claimSource = &cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
			},
		}
	}
	controller.claimSource = claimSource

	_, controller.volumeController = framework.NewIndexerInformer(
		volumeSource,
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    controller.addVolume,
			UpdateFunc: controller.updateVolume,
			DeleteFunc: controller.deleteVolume,
		},
		cache.Indexers{"accessmodes": accessModesIndexFunc},
	)
	_, controller.claimController = framework.NewInformer(
		claimSource,
		&api.PersistentVolumeClaim{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    controller.addClaim,
			UpdateFunc: controller.updateClaim,
			DeleteFunc: controller.deleteClaim,
		},
	)

	return controller
}
// NewUIDTrackingControllerExpectations returns a wrapper around
// ControllerExpectations that is aware of deleteKeys.
func NewUIDTrackingControllerExpectations(ce ControllerExpectationsInterface) *UIDTrackingControllerExpectations {
	return &UIDTrackingControllerExpectations{ControllerExpectationsInterface: ce, uidStore: cache.NewStore(UIDSetKeyFunc)}
}
// NewControllerExpectations returns a store for ControllerExpectations.
func NewControllerExpectations() *ControllerExpectations {
	return &ControllerExpectations{cache.NewStore(ExpKeyFunc)}
}
// newUnHealthyPetTracker tracks unhealthy pets that block progress of petsets.
func newUnHealthyPetTracker(pc petClient) *unhealthyPetTracker {
	return &unhealthyPetTracker{pc: pc, store: cache.NewStore(pcbKeyFunc)}
}
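// Every constructor in this section calls cache.NewStore with a cache.KeyFunc.
// Most pass cache.MetaNamespaceKeyFunc, which keys API objects by
// "namespace/name"; the expectation and pet trackers above (ExpKeyFunc,
// UIDSetKeyFunc, pcbKeyFunc) instead supply their own key functions so they
// can index bookkeeping objects that are not API resources. Below is a
// minimal, hypothetical sketch of that pattern: trackedItem,
// trackedItemKeyFunc, and newTrackedItemStore are illustrative names, not
// part of the code above, and the fmt and client cache packages are assumed
// to be imported as in the excerpts.
type trackedItem struct {
	key string // e.g. "namespace/controller-name"
}

func trackedItemKeyFunc(obj interface{}) (string, error) {
	item, ok := obj.(*trackedItem)
	if !ok {
		return "", fmt.Errorf("expected *trackedItem, got %T", obj)
	}
	return item.key, nil
}

func newTrackedItemStore() cache.Store {
	// cache.NewStore returns a thread-safe store indexed by whatever the KeyFunc returns.
	store := cache.NewStore(trackedItemKeyFunc)
	store.Add(&trackedItem{key: "default/frontend"})
	if item, exists, err := store.GetByKey("default/frontend"); err == nil && exists {
		_ = item.(*trackedItem)
	}
	return store
}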