// Create begins caching routes and endpoints from the master and returns a new
// RouterController that feeds both event streams to the given plugin.
func (factory *RouterControllerFactory) Create(plugin router.Plugin) *controller.RouterController {
	routeEventQueue := oscache.NewEventQueue(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&routeLW{factory.OSClient}, &routeapi.Route{}, routeEventQueue, 2*time.Minute).Run()

	endpointsEventQueue := oscache.NewEventQueue(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&endpointsLW{factory.KClient}, &kapi.Endpoints{}, endpointsEventQueue, 2*time.Minute).Run()

	return &controller.RouterController{
		Plugin: plugin,
		NextEndpoints: func() (watch.EventType, *kapi.Endpoints, error) {
			eventType, obj, err := endpointsEventQueue.Pop()
			if err != nil {
				return watch.Error, nil, err
			}
			return eventType, obj.(*kapi.Endpoints), nil
		},
		NextRoute: func() (watch.EventType, *routeapi.Route, error) {
			eventType, obj, err := routeEventQueue.Pop()
			if err != nil {
				return watch.Error, nil, err
			}
			return eventType, obj.(*routeapi.Route), nil
		},
	}
}
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).Run()

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Stop:                    factory.Stop,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(buildcontroller.ImageChangeControllerFatalError); isFatal {
					return false
				}
				return retries.Count < maxRetries
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
// Run starts a background goroutine that watches for changes to services that
// have (or had) externalLoadBalancers=true and ensures that they have external
// load balancers created and deleted appropriately.
// nodeSyncPeriod controls how often we check the cluster's nodes to determine
// if external load balancers need to be updated to point to a new set.
func (s *ServiceController) Run(nodeSyncPeriod time.Duration) error {
	if err := s.init(); err != nil {
		return err
	}

	// We have to make this check because the ListWatch that we use in
	// WatchServices requires Client functions that aren't in the interface
	// for some reason.
	if _, ok := s.kubeClient.(*client.Client); !ok {
		return fmt.Errorf("ServiceController only works with real Client objects, but was passed something else satisfying the client Interface.")
	}

	// Get the currently existing set of services and then all future creates
	// and updates of services.
	// No delta compressor is needed for the DeltaFIFO queue because we only ever
	// care about the most recent state.
	serviceQueue := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.cache)
	lw := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "services", api.NamespaceAll, fields.Everything())
	cache.NewReflector(lw, &api.Service{}, serviceQueue, 0).Run()
	for i := 0; i < workerGoroutines; i++ {
		go s.watchServices(serviceQueue)
	}

	nodeLister := &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}
	nodeLW := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "nodes", api.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &api.Node{}, nodeLister.Store, 0).Run()
	go s.nodeSyncLoop(nodeLister, nodeSyncPeriod)
	return nil
}
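// startServiceController is a minimal usage sketch, not part of the original source;
// the helper name and the five-minute node sync period are assumptions chosen for
// illustration. It shows that Run only kicks off the background goroutines and
// returns immediately, so the caller simply checks the error and carries on.
func startServiceController(s *ServiceController) error {
	if err := s.Run(5 * time.Minute); err != nil {
		return fmt.Errorf("failed to start service controller: %v", err)
	}
	return nil
}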
// Create constructs a BuildPodController
func (factory *BuildPodControllerFactory) Create() controller.RunnableController {
	factory.buildStore = cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, factory.buildStore, 2*time.Minute).Run()

	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&podLW{client: factory.KubeClient}, &kapi.Pod{}, queue, 2*time.Minute).Run()

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildPodController := &buildcontroller.BuildPodController{
		BuildStore:   factory.buildStore,
		BuildUpdater: factory.BuildUpdater,
		PodManager:   client,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				return retries.Count < maxRetries
			},
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			pod := obj.(*kapi.Pod)
			return buildPodController.HandlePod(pod)
		},
	}
}
// Creates a scheduler from a set of registered fit predicate keys and priority keys.
func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSet) (*scheduler.Config, error) {
	glog.V(2).Infof("creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys)
	pluginArgs := PluginFactoryArgs{
		PodLister:     f.PodLister,
		ServiceLister: f.ServiceLister,
		NodeLister:    f.NodeLister,
		NodeInfo:      f.NodeLister,
	}
	predicateFuncs, err := getFitPredicateFunctions(predicateKeys, pluginArgs)
	if err != nil {
		return nil, err
	}

	priorityConfigs, err := getPriorityFunctionConfigs(priorityKeys, pluginArgs)
	if err != nil {
		return nil, err
	}

	// Watch and queue pods that need scheduling.
	cache.NewReflector(f.createUnassignedPodLW(), &api.Pod{}, f.PodQueue, 0).RunUntil(f.StopEverything)

	// Begin populating scheduled pods.
	go f.scheduledPodPopulator.Run(f.StopEverything)

	// Watch minions.
	// Minions may be listed frequently, so provide a local up-to-date cache.
	cache.NewReflector(f.createMinionLW(), &api.Node{}, f.NodeLister.Store, 0).RunUntil(f.StopEverything)

	// Watch and cache all service objects. Scheduler needs to find all pods
	// created by the same service, so that it can spread them correctly.
	// Cache this locally.
	cache.NewReflector(f.createServiceLW(), &api.Service{}, f.ServiceLister.Store, 0).RunUntil(f.StopEverything)

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	algo := algorithm.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r)

	podBackoff := podBackoff{
		perPodBackoff:   map[string]*backoffEntry{},
		clock:           realClock{},
		defaultDuration: 1 * time.Second,
		maxDuration:     60 * time.Second,
	}

	return &scheduler.Config{
		Modeler:      f.modeler,
		MinionLister: f.NodeLister,
		Algorithm:    algo,
		Binder:       &binder{f.Client},
		NextPod: func() *api.Pod {
			pod := f.PodQueue.Pop().(*api.Pod)
			glog.V(2).Infof("About to try and schedule pod %v", pod.Name)
			return pod
		},
		Error:          f.makeDefaultErrorFunc(&podBackoff, f.PodQueue),
		StopEverything: f.StopEverything,
	}, nil
}
// Create creates a scheduler and all support functions.
func (factory *ConfigFactory) Create() *scheduler.Config {
	// Watch and queue pods that need scheduling.
	podQueue := cache.NewFIFO()
	cache.NewReflector(factory.createUnassignedPodLW(), &api.Pod{}, podQueue).Run()

	// Watch and cache all running pods. Scheduler needs to find all pods
	// so it knows where it's safe to place a pod. Cache this locally.
	podCache := cache.NewStore()
	cache.NewReflector(factory.createAssignedPodLW(), &api.Pod{}, podCache).Run()

	// Watch minions.
	// Minions may be listed frequently, so provide a local up-to-date cache.
	minionCache := cache.NewStore()
	if false {
		// Disable this code until minions support watches.
		cache.NewReflector(factory.createMinionLW(), &api.Minion{}, minionCache).Run()
	} else {
		cache.NewPoller(factory.pollMinions, 10*time.Second, minionCache).Run()
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	minionLister := &storeToMinionLister{minionCache}

	algo := algorithm.NewGenericScheduler(
		[]algorithm.FitPredicate{
			// Fit is defined based on the absence of port conflicts.
			algorithm.PodFitsPorts,
			// Fit is determined by resource availability
			algorithm.NewResourceFitPredicate(minionLister),
			// Fit is determined by non-conflicting disk volumes
			algorithm.NoDiskConflict,
			// Fit is determined by node selector query
			algorithm.NewSelectorMatchPredicate(minionLister),
		},
		// Prioritize nodes by least requested utilization.
		algorithm.LeastRequestedPriority,
		&storeToPodLister{podCache}, r)

	podBackoff := podBackoff{
		perPodBackoff: map[string]*backoffEntry{},
		clock:         realClock{},
	}

	return &scheduler.Config{
		MinionLister: minionLister,
		Algorithm:    algo,
		Binder:       &binder{factory.Client},
		NextPod: func() *api.Pod {
			pod := podQueue.Pop().(*api.Pod)
			glog.V(2).Infof("About to try and schedule pod %v\n"+
				"\tknown minions: %v\n"+
				"\tknown scheduled pods: %v\n",
				pod.Name, minionCache.ContainedIDs(), podCache.ContainedIDs())
			return pod
		},
		Error: factory.makeDefaultErrorFunc(&podBackoff, podQueue),
	}
}
// CreateFromKeys creates a scheduler from a set of registered fit predicate keys and priority keys.
func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSet) (*scheduler.Config, error) {
	glog.V(2).Infof("creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys)
	predicateFuncs, err := getFitPredicateFunctions(predicateKeys)
	if err != nil {
		return nil, err
	}

	priorityConfigs, err := getPriorityFunctionConfigs(priorityKeys)
	if err != nil {
		return nil, err
	}

	// Watch and queue pods that need scheduling.
	cache.NewReflector(f.createUnassignedPodLW(), &api.Pod{}, f.PodQueue).Run()

	// Watch and cache all running pods. Scheduler needs to find all pods
	// so it knows where it's safe to place a pod. Cache this locally.
	cache.NewReflector(f.createAssignedPodLW(), &api.Pod{}, f.PodLister.Store).Run()

	// Watch minions.
	// Minions may be listed frequently, so provide a local up-to-date cache.
	if false {
		// Disable this code until minions support watches. Note when this code is enabled,
		// we need to make sure minion ListWatcher has proper FieldSelector.
		cache.NewReflector(f.createMinionLW(), &api.Node{}, f.MinionLister.Store).Run()
	} else {
		cache.NewPoller(f.pollMinions, 10*time.Second, f.MinionLister.Store).Run()
	}

	// Watch and cache all service objects. Scheduler needs to find all pods
	// created by the same service, so that it can spread them correctly.
	// Cache this locally.
	cache.NewReflector(f.createServiceLW(), &api.Service{}, f.ServiceLister.Store).Run()

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	algo := algorithm.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r)

	podBackoff := podBackoff{
		perPodBackoff:   map[string]*backoffEntry{},
		clock:           realClock{},
		defaultDuration: 1 * time.Second,
		maxDuration:     60 * time.Second,
	}

	return &scheduler.Config{
		MinionLister: f.MinionLister,
		Algorithm:    algo,
		Binder:       &binder{f.Client},
		NextPod: func() *api.Pod {
			pod := f.PodQueue.Pop().(*api.Pod)
			glog.V(2).Infof("About to try and schedule pod %v", pod.Name)
			return pod
		},
		Error: f.makeDefaultErrorFunc(&podBackoff, f.PodQueue),
	}, nil
}
// NewFirstContainerReady returns a FirstContainerReady check configured with the
// given timeout and polling interval.
func NewFirstContainerReady(kclient kclient.Interface, timeout time.Duration, interval time.Duration) *FirstContainerReady {
	return &FirstContainerReady{
		timeout:  timeout,
		interval: interval,
		podsForDeployment: func(deployment *kapi.ReplicationController) (*kapi.PodList, error) {
			selector := labels.Set(deployment.Spec.Selector).AsSelector()
			return kclient.Pods(deployment.Namespace).List(selector, fields.Everything())
		},
		getPodStore: func(namespace, name string) (cache.Store, chan struct{}) {
			sel, _ := fields.ParseSelector("metadata.name=" + name)
			store := cache.NewStore(cache.MetaNamespaceKeyFunc)
			lw := &deployutil.ListWatcherImpl{
				ListFunc: func() (runtime.Object, error) {
					return kclient.Pods(namespace).List(labels.Everything(), sel)
				},
				WatchFunc: func(resourceVersion string) (watch.Interface, error) {
					return kclient.Pods(namespace).Watch(labels.Everything(), sel, resourceVersion)
				},
			}
			stop := make(chan struct{})
			cache.NewReflector(lw, &kapi.Pod{}, store, 10*time.Second).RunUntil(stop)
			return store, stop
		},
	}
}
// RunProjectCache populates and starts the global project cache, which mirrors
// namespaces from the master. Subsequent calls are no-ops once the cache exists.
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
	if pcache != nil {
		return
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.Namespace{},
		store,
		0,
	)
	reflector.Run()
	pcache = &ProjectCache{
		Client:              c,
		Store:               store,
		DefaultNodeSelector: defaultNodeSelector,
	}
}
// WatchSubnets converts HostSubnet watch events from the master into
// osdnapi.SubnetEvents delivered on receiver.
func (oi *OsdnRegistryInterface) WatchSubnets(receiver chan *osdnapi.SubnetEvent, stop chan bool) error {
	subnetEventQueue := oscache.NewEventQueue(cache.MetaNamespaceKeyFunc)
	listWatch := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return oi.oClient.HostSubnets().List()
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return oi.oClient.HostSubnets().Watch(resourceVersion)
		},
	}
	cache.NewReflector(listWatch, &api.HostSubnet{}, subnetEventQueue, 4*time.Minute).Run()

	for {
		eventType, obj, err := subnetEventQueue.Pop()
		if err != nil {
			return err
		}
		switch eventType {
		case watch.Added, watch.Modified:
			// create SubnetEvent
			hs := obj.(*api.HostSubnet)
			receiver <- &osdnapi.SubnetEvent{Type: osdnapi.Added, Minion: hs.Host, Sub: osdnapi.Subnet{Minion: hs.HostIP, Sub: hs.Subnet}}
		case watch.Deleted:
			// TODO: There is a chance that a Delete event will not get triggered.
			// Need to use a periodic sync loop that lists and compares.
			hs := obj.(*api.HostSubnet)
			receiver <- &osdnapi.SubnetEvent{Type: osdnapi.Deleted, Minion: hs.Host, Sub: osdnapi.Subnet{Minion: hs.HostIP, Sub: hs.Subnet}}
		}
	}
	return nil
}
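// watchAndLogSubnets is a minimal usage sketch, not part of the original source;
// the helper name and the logging are assumptions. Because WatchSubnets blocks in
// its event loop, it is called directly while a separate goroutine drains the
// receiver channel.
func watchAndLogSubnets(oi *OsdnRegistryInterface) error {
	receiver := make(chan *osdnapi.SubnetEvent)
	stop := make(chan bool)
	go func() {
		for ev := range receiver {
			glog.V(4).Infof("subnet event %v: minion %s, subnet %s", ev.Type, ev.Minion, ev.Sub.Sub)
		}
	}()
	return oi.WatchSubnets(receiver, stop)
}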
// Create constructs a BuildController
func (factory *BuildControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildController := &buildcontroller.BuildController{
		BuildUpdater:      factory.BuildUpdater,
		ImageStreamClient: client,
		PodManager:        client,
		BuildStrategy: &typeBasedFactoryStrategy{
			DockerBuildStrategy: factory.DockerBuildStrategy,
			SourceBuildStrategy: factory.SourceBuildStrategy,
			CustomBuildStrategy: factory.CustomBuildStrategy,
		},
		Recorder:         eventBroadcaster.NewRecorder(kapi.EventSource{Component: "build-controller"}),
		OpenshiftEnabled: factory.OpenshiftEnabled,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			limitedLogAndRetry(factory.BuildUpdater, 30*time.Minute),
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			build := obj.(*buildapi.Build)
			return buildController.HandleBuild(build)
		},
	}
}
// WatchMinions converts node watch events from the master into osdnapi.MinionEvents
// delivered on receiver.
func (oi *OsdnRegistryInterface) WatchMinions(receiver chan *osdnapi.MinionEvent, stop chan bool) error {
	minionEventQueue := oscache.NewEventQueue(cache.MetaNamespaceKeyFunc)
	listWatch := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return oi.kClient.Nodes().List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return oi.kClient.Nodes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	cache.NewReflector(listWatch, &kapi.Node{}, minionEventQueue, 4*time.Minute).Run()

	for {
		eventType, obj, err := minionEventQueue.Pop()
		if err != nil {
			return err
		}
		switch eventType {
		case watch.Added:
			// we should ignore the modified event because status updates cause unnecessary noise
			// the only time we would care about modified would be if the minion changes its IP address
			// and hence all nodes need to update their vtep entries for the respective subnet
			// create minionEvent
			node := obj.(*kapi.Node)
			receiver <- &osdnapi.MinionEvent{Type: osdnapi.Added, Minion: node.ObjectMeta.Name}
		case watch.Deleted:
			// TODO: There is a chance that a Delete event will not get triggered.
			// Need to use a periodic sync loop that lists and compares.
			node := obj.(*kapi.Node)
			receiver <- &osdnapi.MinionEvent{Type: osdnapi.Deleted, Minion: node.ObjectMeta.Name}
		}
	}
	return nil
}
// newPodsApi returns a podsApi backed by a reflector-populated pod cache and a
// namespace informer.
func newPodsApi(client *kclient.Client) podsApi {
	// Extend the selector to include specific nodes to monitor
	// or provide an API to update the nodes to monitor.
	selector, err := kSelector.ParseSelector("spec.nodeName!=")
	if err != nil {
		panic(err)
	}

	lw := kcache.NewListWatchFromClient(client, "pods", kapi.NamespaceAll, selector)
	podLister := &kcache.StoreToPodLister{Store: kcache.NewStore(kcache.MetaNamespaceKeyFunc)}
	// Watch and cache all running pods.
	reflector := kcache.NewReflector(lw, &kapi.Pod{}, podLister.Store, 0)
	stopChan := make(chan struct{})
	reflector.RunUntil(stopChan)
	nStore, nController := kframework.NewInformer(
		createNamespaceLW(client),
		&kapi.Namespace{},
		resyncPeriod,
		kframework.ResourceEventHandlerFuncs{})
	go nController.Run(util.NeverStop)

	podsApi := &realPodsApi{
		client:         client,
		podLister:      podLister,
		stopChan:       stopChan,
		reflector:      reflector,
		namespaceStore: nStore,
	}

	return podsApi
}
// NewReadOnlyClusterPolicyCache returns a read-only cache of cluster policies that
// is kept up to date by a reflector against the watching registry.
func NewReadOnlyClusterPolicyCache(registry clusterpolicyregistry.WatchingRegistry) readOnlyClusterPolicyCache {
	ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)

	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})

	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return registry.ListClusterPolicies(ctx, labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return registry.WatchClusterPolicies(ctx, labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&authorizationapi.ClusterPolicy{},
		indexer,
		2*time.Minute,
	)

	return readOnlyClusterPolicyCache{
		registry:  registry,
		indexer:   indexer,
		reflector: *reflector,
		keyFunc:   cache.MetaNamespaceKeyFunc,
	}
}
// CreateDeleteController constructs a BuildPodDeleteController
func (factory *BuildPodControllerFactory) CreateDeleteController() controller.RunnableController {
	client := ControllerClient{factory.KubeClient, factory.OSClient}
	queue := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, nil)
	cache.NewReflector(&buildPodDeleteLW{client, queue}, &kapi.Pod{}, queue, 5*time.Minute).Run()

	buildPodDeleteController := &buildcontroller.BuildPodDeleteController{
		BuildStore:   factory.buildStore,
		BuildUpdater: factory.BuildUpdater,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			controller.RetryNever,
			kutil.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			deltas := obj.(cache.Deltas)
			for _, delta := range deltas {
				if delta.Type == cache.Deleted {
					return buildPodDeleteController.HandleBuildPodDeletion(delta.Object.(*kapi.Pod))
				}
			}
			return nil
		},
	}
}
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() controller.RunnableController {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run()

	c := &ImportController{
		client:   dockerregistry.NewClient(),
		streams:  f.Client,
		mappings: f.Client,
	}

	return &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				util.HandleError(err)
				return retries.Count < 5
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			r := obj.(*api.ImageStream)
			return c.Next(r)
		},
	}
}
// NewMainKubelet creates a new Kubelet for use in main
func NewMainKubelet(
	hostname string,
	dockerClient dockertools.DockerInterface,
	etcdClient tools.EtcdClient,
	kubeClient *client.Client,
	rootDirectory string,
	networkContainerImage string,
	resyncInterval time.Duration,
	pullQPS float32,
	pullBurst int,
	minimumGCAge time.Duration,
	maxContainerCount int,
	sourceReady SourceReadyFn,
	clusterDomain string,
	clusterDNS net.IP,
	masterServiceNamespace string) (*Kubelet, error) {
	if rootDirectory == "" {
		return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
	}
	if resyncInterval <= 0 {
		return nil, fmt.Errorf("invalid sync frequency %d", resyncInterval)
	}
	if minimumGCAge <= 0 {
		return nil, fmt.Errorf("invalid minimum GC age %d", minimumGCAge)
	}

	serviceStore := cache.NewStore()
	cache.NewReflector(&cache.ListWatch{kubeClient, labels.Everything(), "services", api.NamespaceAll}, &api.Service{}, serviceStore).Run()
	serviceLister := &cache.StoreToServiceLister{serviceStore}

	klet := &Kubelet{
		hostname:               hostname,
		dockerClient:           dockerClient,
		etcdClient:             etcdClient,
		rootDirectory:          rootDirectory,
		resyncInterval:         resyncInterval,
		networkContainerImage:  networkContainerImage,
		podWorkers:             newPodWorkers(),
		dockerIDToRef:          map[dockertools.DockerID]*api.ObjectReference{},
		runner:                 dockertools.NewDockerContainerCommandRunner(dockerClient),
		httpClient:             &http.Client{},
		pullQPS:                pullQPS,
		pullBurst:              pullBurst,
		minimumGCAge:           minimumGCAge,
		maxContainerCount:      maxContainerCount,
		sourceReady:            sourceReady,
		clusterDomain:          clusterDomain,
		clusterDNS:             clusterDNS,
		serviceLister:          serviceLister,
		masterServiceNamespace: masterServiceNamespace,
	}

	if err := klet.setupDataDirs(); err != nil {
		return nil, err
	}

	return klet, nil
}
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	changeController := &DeploymentConfigChangeController{
		changeStrategy: &changeStrategyImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Generate(name)
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Update(config)
			},
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return changeController.Handle(config)
		},
	}
}
// newSourceApiserverFromLW creates a config source that watches and pulls from the apiserver.
func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) {
	send := func(objs []interface{}) {
		var pods []*api.Pod
		for _, o := range objs {
			pods = append(pods, o.(*api.Pod))
		}
		updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.ApiserverSource}
	}
	cache.NewReflector(lw, &api.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run()
}
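// newHypotheticalPodSource is a usage sketch, not part of the original source; the
// helper name and the unfiltered field selector are assumptions. It wires a pod
// ListWatch (built with cache.NewListWatchFromClient, as elsewhere in this code)
// into the apiserver source, which then pushes complete kubelet.PodUpdate
// snapshots onto the updates channel.
func newHypotheticalPodSource(kubeClient *client.Client, updates chan<- interface{}) {
	lw := cache.NewListWatchFromClient(kubeClient, "pods", api.NamespaceAll, fields.Everything())
	newSourceApiserverFromLW(lw, updates)
}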
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).Run()

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Stop:                    factory.Stop,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				imageStream := obj.(*imageapi.ImageStream)
				if _, isFatal := err.(buildcontroller.ImageChangeControllerFatalError); isFatal {
					glog.V(3).Infof("Will not retry fatal error for ImageStream update event %s/%s: %v", imageStream.Namespace, imageStream.Name, err)
					kutil.HandleError(err)
					return false
				}
				if retries.Count >= maxRetries {
					glog.V(3).Infof("Giving up retrying ImageStream update event %s/%s: %v", imageStream.Namespace, imageStream.Name, err)
					kutil.HandleError(err)
					return false
				}
				glog.V(4).Infof("Retrying ImageStream update event %s/%s: %v", imageStream.Namespace, imageStream.Name, err)
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
// Run begins processing items, and will continue until a value is sent down stopCh.
// It's an error to call Run more than once.
// Run blocks; call via go.
func (c *Controller) Run(stopCh <-chan struct{}) {
	defer util.HandleCrash()
	cache.NewReflector(
		c.config.ListerWatcher,
		c.config.ObjectType,
		c.config.Queue,
		c.config.FullResyncPeriod,
	).RunUntil(stopCh)

	util.Until(c.processLoop, time.Second, stopCh)
}
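// runControllerUntilStopped is a minimal usage sketch, not part of the original
// source; the helper name is an assumption. Since Run blocks, it is launched on
// its own goroutine and stopped later by closing the returned channel.
func runControllerUntilStopped(c *Controller) chan struct{} {
	stop := make(chan struct{})
	go c.Run(stop)
	return stop
}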
// NewPluginConfig wires the pod reflector, queuer, deleter, and error handler into
// a scheduler plugin configuration, deferring their start until startLatch fires.
func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *http.ServeMux, podsWatcher *cache.ListWatch) *PluginConfig {
	// Watch and queue pods that need scheduling.
	updates := make(chan queue.Entry, k.schedcfg.UpdatesBacklog)
	podUpdates := &podStoreAdapter{queue.NewHistorical(updates)}
	reflector := cache.NewReflector(podsWatcher, &api.Pod{}, podUpdates, 0)

	// lock that guards critical sections that involve transferring pods from
	// the store (cache) to the scheduling queue; its purpose is to maintain
	// an ordering (vs interleaving) of operations that's easier to reason about.
	kapi := &k8smScheduler{internal: k}
	q := newQueuer(podUpdates)
	podDeleter := &deleter{
		api: kapi,
		qr:  q,
	}
	eh := &errorHandler{
		api:     kapi,
		backoff: backoff.New(k.schedcfg.InitialPodBackoff.Duration, k.schedcfg.MaxPodBackoff.Duration),
		qr:      q,
	}
	startLatch := make(chan struct{})
	eventBroadcaster := record.NewBroadcaster()
	runtime.On(startLatch, func() {
		eventBroadcaster.StartRecordingToSink(k.client.Events(""))
		reflector.Run() // TODO(jdef) should listen for termination
		podDeleter.Run(updates, terminate)
		q.Run(terminate)

		q.installDebugHandlers(mux)
		podtask.InstallDebugHandlers(k.taskRegistry, mux)
	})
	return &PluginConfig{
		Config: &plugin.Config{
			MinionLister: nil,
			Algorithm: &kubeScheduler{
				api:                      kapi,
				podUpdates:               podUpdates,
				defaultContainerCPULimit: k.defaultContainerCPULimit,
				defaultContainerMemLimit: k.defaultContainerMemLimit,
			},
			Binder:   &binder{api: kapi},
			NextPod:  q.yield,
			Error:    eh.handleSchedulingError,
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		},
		api:      kapi,
		client:   k.client,
		qr:       q,
		deleter:  podDeleter,
		starting: startLatch,
	}
}
// Create creates a scheduler and all support functions.
func (factory *ConfigFactory) Create() *scheduler.Config {
	// Watch and queue pods that need scheduling.
	podQueue := cache.NewFIFO()
	cache.NewReflector(factory.createUnassignedPodLW(), &api.Pod{}, podQueue).Run()

	// Watch and cache all running pods. Scheduler needs to find all pods
	// so it knows where it's safe to place a pod. Cache this locally.
	podCache := cache.NewStore()
	cache.NewReflector(factory.createAssignedPodLW(), &api.Pod{}, podCache).Run()

	// Watch minions.
	// Minions may be listed frequently, so provide a local up-to-date cache.
	minionCache := cache.NewStore()
	if false {
		// Disable this code until minions support watches.
		cache.NewReflector(factory.createMinionLW(), &api.Minion{}, minionCache).Run()
	} else {
		cache.NewPoller(factory.pollMinions, 10*time.Second, minionCache).Run()
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	algo := algorithm.NewRandomFitScheduler(
		&storeToPodLister{podCache}, r)

	return &scheduler.Config{
		MinionLister: &storeToMinionLister{minionCache},
		Algorithm:    algo,
		Binder:       &binder{factory.Client},
		NextPod: func() *api.Pod {
			pod := podQueue.Pop().(*api.Pod)
			// TODO: Remove or reduce verbosity by sep 6th, 2014. Leave until then to
			// make it easy to find scheduling problems.
			glog.Infof("About to try and schedule pod %v\n"+
				"\tknown minions: %v\n"+
				"\tknown scheduled pods: %v\n",
				pod.ID, minionCache.Contains(), podCache.Contains())
			return pod
		},
		Error: factory.makeDefaultErrorFunc(podQueue),
	}
}
// newServicesSourceApiFromLW watches services via the given ListerWatcher and pushes
// complete snapshots onto servicesChan.
func newServicesSourceApiFromLW(servicesLW cache.ListerWatcher, period time.Duration, servicesChan chan<- ServiceUpdate) {
	servicesPush := func(objs []interface{}) {
		var services []api.Service
		for _, o := range objs {
			services = append(services, *(o.(*api.Service)))
		}
		servicesChan <- ServiceUpdate{Op: SET, Services: services}
	}

	serviceQueue := cache.NewUndeltaStore(servicesPush, cache.MetaNamespaceKeyFunc)
	cache.NewReflector(servicesLW, &api.Service{}, serviceQueue, period).Run()
}
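// consumeServiceUpdates is a minimal usage sketch, not part of the original source;
// the helper name and the logging are assumptions. Each ServiceUpdate delivered on
// the channel is a full SET snapshot, so the consumer can simply replace its view.
func consumeServiceUpdates(servicesChan <-chan ServiceUpdate) {
	for update := range servicesChan {
		glog.V(4).Infof("received %d services (op %v)", len(update.Services), update.Op)
	}
}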
// newEndpointsSourceApiFromLW watches endpoints via the given ListerWatcher and pushes
// complete snapshots onto endpointsChan.
func newEndpointsSourceApiFromLW(endpointsLW cache.ListerWatcher, period time.Duration, endpointsChan chan<- EndpointsUpdate) {
	endpointsPush := func(objs []interface{}) {
		var endpoints []api.Endpoints
		for _, o := range objs {
			endpoints = append(endpoints, *(o.(*api.Endpoints)))
		}
		endpointsChan <- EndpointsUpdate{Op: SET, Endpoints: endpoints}
	}

	endpointQueue := cache.NewUndeltaStore(endpointsPush, cache.MetaNamespaceKeyFunc)
	cache.NewReflector(endpointsLW, &api.Endpoints{}, endpointQueue, period).Run()
}
// newSourceApiserverFromLW creates a config source that watches and pulls from the apiserver.
func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) {
	send := func(objs []interface{}) {
		var pods []api.Pod
		for _, o := range objs {
			pod := o.(*api.Pod)
			// Make a dummy self link so that references to this pod will work.
			pod.SelfLink = "/api/v1beta1/pods/" + pod.Name
			pods = append(pods, *pod)
		}
		updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.ApiserverSource}
	}
	cache.NewReflector(lw, &api.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run()
}
// NewCachedServiceAccessor returns a service accessor that can answer queries about services.
// It uses a backing cache to make PortalIP lookups efficient.
func NewCachedServiceAccessor(client *client.Client, stopCh <-chan struct{}) ServiceAccessor {
	lw := cache.NewListWatchFromClient(client, "services", api.NamespaceAll, fields.Everything())
	store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, map[string]cache.IndexFunc{
		"portalIP":  indexServiceByPortalIP, // for reverse lookups
		"namespace": cache.MetaNamespaceIndexFunc,
	})

	reflector := cache.NewReflector(lw, &api.Service{}, store, 2*time.Minute)
	if stopCh != nil {
		reflector.RunUntil(stopCh)
	} else {
		reflector.Run()
	}

	return &cachedServiceAccessor{
		reflector: reflector,
		store:     store,
	}
}
// NewProvision creates a new namespace provision admission control handler
func NewProvision(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		0,
	)
	reflector.Run()

	return createProvision(c, store)
}
// newSourceApiserverFromLW creates a config source that watches and pulls from the apiserver.
func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) {
	send := func(objs []interface{}) {
		var bpods []api.BoundPod
		for _, o := range objs {
			pod := o.(*api.Pod)
			bpod := api.BoundPod{}
			if err := api.Scheme.Convert(pod, &bpod); err != nil {
				glog.Errorf("Unable to interpret Pod from apiserver as a BoundPod: %v: %+v", err, pod)
				continue
			}
			// Make a dummy self link so that references to this bound pod will work.
			bpod.SelfLink = "/api/v1beta1/boundPods/" + bpod.Name
			bpods = append(bpods, bpod)
		}
		updates <- kubelet.PodUpdate{bpods, kubelet.SET, kubelet.ApiserverSource}
	}
	cache.NewReflector(lw, &api.Pod{}, cache.NewUndeltaStore(send)).Run()
}
// NewKubeNodes returns a NodesApi backed by a reflector-populated cache of the
// cluster's minions.
func NewKubeNodes(client *client.Client) (NodesApi, error) {
	if client == nil {
		return nil, fmt.Errorf("client is nil")
	}

	lw := cache.NewListWatchFromClient(client, "minions", api.NamespaceAll, fields.Everything())
	nodeLister := &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
	reflector := cache.NewReflector(lw, &api.Node{}, nodeLister.Store, 0)
	stopChan := make(chan struct{})
	reflector.RunUntil(stopChan)

	return &kubeNodes{
		client:     client,
		nodeLister: nodeLister,
		reflector:  reflector,
		stopChan:   stopChan,
		nodeErrors: make(map[string]int),
	}, nil
}