// NewSourceAPI creates a config source that watches for changes to services and endpoints.
func NewSourceAPI(c *client.Client, period time.Duration, servicesChan chan<- ServiceUpdate, endpointsChan chan<- EndpointsUpdate) {
	servicesLW := cache.NewListWatchFromClient(c, "services", api.NamespaceAll, fields.Everything())
	endpointsLW := cache.NewListWatchFromClient(c, "endpoints", api.NamespaceAll, fields.Everything())
	newServicesSourceApiFromLW(servicesLW, period, servicesChan)
	newEndpointsSourceApiFromLW(endpointsLW, period, endpointsChan)
}
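// exampleNewSourceAPIUsage is an illustrative sketch, not part of the original
// source, showing how NewSourceAPI might be wired into consumer channels. The
// 30-second relist period and the unbuffered channels are assumptions chosen
// for the example.
func exampleNewSourceAPIUsage(c *client.Client) {
	servicesChan := make(chan ServiceUpdate)
	endpointsChan := make(chan EndpointsUpdate)
	NewSourceAPI(c, 30*time.Second, servicesChan, endpointsChan)
	for {
		select {
		case update := <-servicesChan:
			glog.V(4).Infof("service update: %+v", update)
		case update := <-endpointsChan:
			glog.V(4).Infof("endpoints update: %+v", update)
		}
	}
}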
// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(cfg *loadBalancerConfig, kubeClient *unversioned.Client, namespace string) *loadBalancerController {
	lbc := loadBalancerController{
		cfg:    cfg,
		client: kubeClient,
		queue:  workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(
			reloadQPS, int(reloadQPS)),
		targetService:   *targetService,
		forwardServices: *forwardServices,
		httpPort:        *httpPort,
		tcpServices:     map[string]int{},
	}

	for _, service := range strings.Split(*tcpServices, ",") {
		portSplit := strings.Split(service, ":")
		if len(portSplit) != 2 {
			glog.Errorf("Ignoring misconfigured TCP service %v", service)
			continue
		}
		if port, err := strconv.Atoi(portSplit[1]); err != nil {
			glog.Errorf("Ignoring misconfigured TCP service %v: %v", service, err)
			continue
		} else {
			lbc.tcpServices[portSplit[0]] = port
		}
	}

	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}
		lbc.queue.Add(key)
	}

	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	lbc.epLister.Store, lbc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &lbc
}
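// parseTCPServicesExample is an illustrative sketch, not part of the original
// source, that isolates the --tcp-services parsing logic above. The flag value
// is a comma-separated list of name:port pairs, e.g. "mysql:3306,redis:6379";
// entries without exactly one colon or with a non-numeric port are skipped,
// mirroring the error handling in newLoadBalancerController.
func parseTCPServicesExample(spec string) map[string]int {
	services := map[string]int{}
	for _, service := range strings.Split(spec, ",") {
		portSplit := strings.Split(service, ":")
		if len(portSplit) != 2 {
			continue // malformed: not exactly one colon
		}
		port, err := strconv.Atoi(portSplit[1])
		if err != nil {
			continue // malformed: non-numeric port
		}
		services[portSplit[0]] = port
	}
	return services
}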
// Run starts a background goroutine that watches for changes to services that
// have (or had) externalLoadBalancers=true and ensures that they have external
// load balancers created and deleted appropriately.
// serviceSyncPeriod controls how often we check the cluster's services to
// ensure that the correct external load balancers exist.
// nodeSyncPeriod controls how often we check the cluster's nodes to determine
// if external load balancers need to be updated to point to a new set.
//
// It's an error to call Run() more than once for a given ServiceController
// object.
func (s *ServiceController) Run(serviceSyncPeriod, nodeSyncPeriod time.Duration) error {
	if err := s.init(); err != nil {
		return err
	}

	// We have to make this check because the ListWatch that we use in
	// WatchServices requires Client functions that aren't in the interface
	// for some reason.
	if _, ok := s.kubeClient.(*client.Client); !ok {
		return fmt.Errorf("ServiceController only works with real Client objects, but was passed something else satisfying the client Interface.")
	}

	// Get the currently existing set of services and then all future creates
	// and updates of services.
	// A delta compressor is needed for the DeltaFIFO queue because we only ever
	// care about the most recent state.
	serviceQueue := cache.NewDeltaFIFO(
		cache.MetaNamespaceKeyFunc,
		cache.DeltaCompressorFunc(func(d cache.Deltas) cache.Deltas {
			if len(d) == 0 {
				return d
			}
			return cache.Deltas{*d.Newest()}
		}),
		s.cache,
	)
	lw := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "services", api.NamespaceAll, fields.Everything())
	cache.NewReflector(lw, &api.Service{}, serviceQueue, serviceSyncPeriod).Run()
	for i := 0; i < workerGoroutines; i++ {
		go s.watchServices(serviceQueue)
	}

	nodeLW := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "nodes", api.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &api.Node{}, s.nodeLister.Store, 0).Run()
	go s.nodeSyncLoop(nodeSyncPeriod)
	return nil
}
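// The DeltaCompressorFunc above collapses each batch of queued deltas for an
// object down to the single newest one, since this controller only cares about
// an object's most recent state. A minimal sketch of the effect, not part of
// the original source (svcV1/svcV2 are hypothetical service snapshots;
// cache.Deltas is ordered oldest first, so Newest() returns the last entry):
//
//	d := cache.Deltas{
//		{Type: cache.Added, Object: svcV1},
//		{Type: cache.Updated, Object: svcV2},
//	}
//	compressed := cache.Deltas{*d.Newest()}
//	// compressed == cache.Deltas{{Type: cache.Updated, Object: svcV2}}
//
// This keeps the DeltaFIFO from growing unboundedly when a service churns
// faster than the workers drain the queue.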
// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
func NewSourceApiserver(c *client.Client, nodeName string, updates chan<- interface{}) {
	lw := cache.NewListWatchFromClient(c, "pods", api.NamespaceAll, fields.OneTermEqualSelector(client.PodHost, nodeName))
	newSourceApiserverFromLW(lw, updates)
}
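// exampleKubeletSource is an illustrative sketch, not part of the original
// source, showing NewSourceApiserver feeding a kubelet-style update channel.
// The OneTermEqualSelector on client.PodHost restricts the list/watch
// server-side to pods bound to this node, so the consumer never sees pods
// scheduled elsewhere.
func exampleKubeletSource(c *client.Client, nodeName string) {
	updates := make(chan interface{})
	NewSourceApiserver(c, nodeName, updates)
	for u := range updates {
		glog.V(4).Infof("pod update from apiserver: %+v", u)
	}
}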
// Returns a cache.ListWatch that gets all changes to pods.
func createEndpointsPodLW(kubeClient *kclient.Client) *kcache.ListWatch {
	return kcache.NewListWatchFromClient(kubeClient, "pods", kapi.NamespaceAll, kSelector.Everything())
}
// Returns a cache.ListWatch that gets all changes to services.
func createServiceLW(kubeClient *kclient.Client) *kcache.ListWatch {
	return kcache.NewListWatchFromClient(kubeClient, "services", kapi.NamespaceAll, kSelector.Everything())
}
// createAllPodsLW returns a listWatch that finds all pods.
func createAllPodsLW(cl *client.Client) *cache.ListWatch {
	return cache.NewListWatchFromClient(cl, "pods", api.NamespaceAll, parseSelectorOrDie(""))
}
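// parseSelectorOrDie is referenced above (and in the ConfigFactory helpers
// below) but not shown here. A plausible implementation, sketched as an
// assumption from the fields package API rather than copied from the source:
// it parses the selector string and panics on error, which is acceptable
// because every call site passes a compile-time constant.
func parseSelectorOrDie(s string) fields.Selector {
	selector, err := fields.ParseSelector(s)
	if err != nil {
		panic(err)
	}
	return selector
}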
func (ks *KubeletExecutorServer) createAndInitKubelet(
	kc *app.KubeletConfig,
	hks hyperkube.Interface,
	clientConfig *client.Config,
	shutdownCloser io.Closer,
) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
	// TODO(k8s): block until all sources have delivered at least one update to the channel, or break the sync loop
	// up into "per source" synchronizations

	// TODO(k8s): KubeletConfig.KubeClient should be a client interface, but client interface misses certain methods
	// used by kubelet. Since NewMainKubelet expects a client interface, we need to make sure we are not passing
	// a nil pointer to it when what we really want is a nil interface.
	var kubeClient client.Interface
	if kc.KubeClient == nil {
		kubeClient = nil
	} else {
		kubeClient = kc.KubeClient
	}

	gcPolicy := kubelet.ContainerGCPolicy{
		MinAge:             kc.MinimumGCAge,
		MaxPerPodContainer: kc.MaxPerPodContainerCount,
		MaxContainers:      kc.MaxContainerCount,
	}

	pc := kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kc.Recorder)
	updates := pc.Channel(MESOS_CFG_SOURCE)

	klet, err := kubelet.NewMainKubelet(
		kc.Hostname,
		kc.NodeName,
		kc.DockerClient,
		kubeClient,
		kc.RootDirectory,
		kc.PodInfraContainerImage,
		kc.SyncFrequency,
		float32(kc.RegistryPullQPS),
		kc.RegistryBurst,
		kc.EventRecordQPS,
		kc.EventBurst,
		gcPolicy,
		pc.SeenAllSources,
		kc.RegisterNode,
		kc.StandaloneMode,
		kc.ClusterDomain,
		net.IP(kc.ClusterDNS),
		kc.MasterServiceNamespace,
		kc.VolumePlugins,
		kc.NetworkPlugins,
		kc.NetworkPluginName,
		kc.StreamingConnectionIdleTimeout,
		kc.Recorder,
		kc.CAdvisorInterface,
		kc.ImageGCPolicy,
		kc.DiskSpacePolicy,
		kc.Cloud,
		kc.NodeStatusUpdateFrequency,
		kc.ResourceContainer,
		kc.OSInterface,
		kc.CgroupRoot,
		kc.ContainerRuntime,
		kc.RktPath,
		kc.RktStage1Image,
		kc.Mounter,
		kc.Writer,
		kc.DockerDaemonContainer,
		kc.SystemContainer,
		kc.ConfigureCBR0,
		kc.PodCIDR,
		kc.MaxPods,
		kc.DockerExecHandler,
		kc.ResolverConfig,
		kc.CPUCFSQuota,
		&api.NodeDaemonEndpoints{
			KubeletEndpoint: api.DaemonEndpoint{Port: int(kc.Port)},
		},
		true, // Serialize Image pulls
	)
	if err != nil {
		return nil, nil, err
	}

	//TODO(jdef) either configure Watch here with something useful, or else
	// get rid of it from executor.Config
	kubeletFinished := make(chan struct{})
	staticPodsConfigPath := filepath.Join(kc.RootDirectory, "static-pods")
	exec := executor.New(executor.Config{
		Kubelet:         klet,
		Updates:         updates,
		SourceName:      MESOS_CFG_SOURCE,
		APIClient:       kc.KubeClient,
		Docker:          kc.DockerClient,
		SuicideTimeout:  ks.SuicideTimeout,
		KubeletFinished: kubeletFinished,
		ShutdownAlert: func() {
			if shutdownCloser != nil {
				if e := shutdownCloser.Close(); e != nil {
					log.Warningf("failed to signal shutdown to external watcher: %v", e)
				}
			}
		},
		ExitFunc: os.Exit,
		PodStatusFunc: func(_ executor.KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return klet.GetRuntime().GetPodStatus(pod)
		},
		StaticPodsConfigPath: staticPodsConfigPath,
		PodLW: cache.NewListWatchFromClient(kc.KubeClient, "pods", api.NamespaceAll,
			fields.OneTermEqualSelector(client.PodHost, kc.NodeName)),
	})

	go exec.InitializeStaticPodsSource(func() {
		// Create file source only when we are called back. Otherwise, it is never marked unseen.
		fileSourceUpdates := pc.Channel(kubelet.FileSource)
		kconfig.NewSourceFile(staticPodsConfigPath, kc.Hostname, kc.FileCheckFrequency, fileSourceUpdates)
	})

	k := &kubeletExecutor{
		Kubelet:         klet,
		address:         ks.Address,
		dockerClient:    kc.DockerClient,
		hks:             hks,
		kubeletFinished: kubeletFinished,
		executorDone:    exec.Done(),
		clientConfig:    clientConfig,
	}

	dconfig := bindings.DriverConfig{
		Executor:         exec,
		HostnameOverride: ks.HostnameOverride,
		BindingAddress:   ks.Address,
	}
	if driver, err := bindings.NewMesosExecutorDriver(dconfig); err != nil {
		log.Fatalf("failed to create executor driver: %v", err)
	} else {
		k.driver = driver
	}

	log.V(2).Infof("Initialize executor driver...")

	k.BirthCry()
	exec.Init(k.driver)

	k.StartGarbageCollection()

	return k, pc, nil
}
// Returns a cache.ListWatch that gets all changes to controllers.
func (factory *ConfigFactory) createControllerLW() *cache.ListWatch {
	return cache.NewListWatchFromClient(factory.Client, "replicationControllers", api.NamespaceAll, parseSelectorOrDie(""))
}
// Returns a cache.ListWatch that gets all changes to services.
func (factory *ConfigFactory) createServiceLW() *cache.ListWatch {
	return cache.NewListWatchFromClient(factory.Client, "services", api.NamespaceAll, parseSelectorOrDie(""))
}
// createNodeLW returns a cache.ListWatch that gets all changes to nodes.
func (factory *ConfigFactory) createNodeLW() *cache.ListWatch {
	// TODO: Filter out nodes that don't have a NodeReady condition.
	fields := fields.Set{client.NodeUnschedulable: "false"}.AsSelector()
	return cache.NewListWatchFromClient(factory.Client, "nodes", api.NamespaceAll, fields)
}
// Returns a cache.ListWatch that finds all pods that are
// already scheduled.
// TODO: return a ListerWatcher interface instead?
func (factory *ConfigFactory) createAssignedPodLW() *cache.ListWatch {
	return cache.NewListWatchFromClient(factory.Client, "pods", api.NamespaceAll,
		parseSelectorOrDie(client.PodHost+"!="))
}
// Returns a cache.ListWatch that finds all pods that need to be
// scheduled.
func (factory *ConfigFactory) createUnassignedPodLW() *cache.ListWatch {
	return cache.NewListWatchFromClient(factory.Client, "pods", api.NamespaceAll,
		fields.Set{client.PodHost: ""}.AsSelector())
}
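// Together, createAssignedPodLW and createUnassignedPodLW partition all pods
// by whether the host field is set. A sketch of the two selector forms used
// above, not part of the original source:
//
//	parseSelectorOrDie(client.PodHost + "!=")    // assigned: host field is non-empty
//	fields.Set{client.PodHost: ""}.AsSelector()  // unassigned: host field is empty
//
// Both selectors are evaluated server-side by the apiserver, so each watcher
// only ever receives the pods in its own partition.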
// NewLoadBalancerController creates a controller for gce loadbalancers.
// - kubeClient: A kubernetes REST client.
// - clusterManager: A ClusterManager capable of creating all cloud resources
//   required for L7 loadbalancing.
// - resyncPeriod: Watchers relist from the Kubernetes API server this often.
func NewLoadBalancerController(kubeClient *client.Client, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*loadBalancerController, error) {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	lbc := loadBalancerController{
		client:         kubeClient,
		clusterManager: clusterManager,
		stopCh:         make(chan struct{}),
		recorder: eventBroadcaster.NewRecorder(
			api.EventSource{Component: "loadbalancer-controller"}),
	}
	lbc.nodeQueue = NewTaskQueue(lbc.syncNodes)
	lbc.ingQueue = NewTaskQueue(lbc.sync)

	// Ingress watch handlers
	pathHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			addIng := obj.(*extensions.Ingress)
			lbc.recorder.Eventf(addIng, "ADD", addIng.Name)
			lbc.ingQueue.enqueue(obj)
		},
		DeleteFunc: lbc.ingQueue.enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				glog.V(3).Infof("Ingress %v changed, syncing", cur.(*extensions.Ingress).Name)
			}
			lbc.ingQueue.enqueue(cur)
		},
	}
	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  ingressListFunc(lbc.client, namespace),
			WatchFunc: ingressWatchFunc(lbc.client, namespace),
		},
		&extensions.Ingress{}, resyncPeriod, pathHandlers)

	// Service watch handlers
	svcHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: lbc.enqueueIngressForService,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				lbc.enqueueIngressForService(cur)
			}
		},
		// Ingress deletes matter, service deletes don't.
	}
	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, svcHandlers)

	nodeHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    lbc.nodeQueue.enqueue,
		DeleteFunc: lbc.nodeQueue.enqueue,
		// Nodes are updated every 10s and we don't care, so no update handler.
	}

	// Node watch handlers
	lbc.nodeLister.Store, lbc.nodeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return lbc.client.Get().
					Resource("nodes").
					FieldsSelectorParam(fields.Everything()).
					Do().
					Get()
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return lbc.client.Get().
					Prefix("watch").
					Resource("nodes").
					FieldsSelectorParam(fields.Everything()).
					Param("resourceVersion", options.ResourceVersion).Watch()
			},
		},
		&api.Node{}, 0, nodeHandlers)
	lbc.tr = &gceTranslator{&lbc}
	glog.Infof("Created new loadbalancer controller")
	return &lbc, nil
}
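// ingressListFunc and ingressWatchFunc are referenced above but not shown
// here. A plausible sketch of the two closures, written as an assumption
// against the extensions client API of this era rather than the confirmed
// implementation: each binds the client and namespace and defers the actual
// list/watch call until the informer invokes it.
func ingressListFunc(c *client.Client, ns string) func() (runtime.Object, error) {
	return func() (runtime.Object, error) {
		return c.Extensions().Ingress(ns).List(api.ListOptions{})
	}
}

func ingressWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
	return func(options api.ListOptions) (watch.Interface, error) {
		return c.Extensions().Ingress(ns).Watch(options)
	}
}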