func Test_NewAttachDetachController_Positive(t *testing.T) {
	// Arrange
	fakeKubeClient := controllervolumetesting.CreateTestClient()
	resyncPeriod := 5 * time.Minute
	podInformer := informers.CreateSharedPodIndexInformer(fakeKubeClient, resyncPeriod)
	nodeInformer := informers.CreateSharedNodeIndexInformer(fakeKubeClient, resyncPeriod)
	pvcInformer := informers.CreateSharedPVCIndexInformer(fakeKubeClient, resyncPeriod)
	pvInformer := informers.CreateSharedPVIndexInformer(fakeKubeClient, resyncPeriod)

	// Act
	_, err := NewAttachDetachController(
		fakeKubeClient,
		podInformer,
		nodeInformer,
		pvcInformer,
		pvInformer,
		nil, /* cloud */
		nil /* plugins */)

	// Assert
	if err != nil {
		t.Fatalf("Run failed with error. Expected: <no error> Actual: <%v>", err)
	}
}
func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig *restclient.Config, stop <-chan struct{}) error {
	podInformer := informers.CreateSharedPodIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pod-informer")), ResyncPeriod(s)())
	nodeInformer := informers.CreateSharedNodeIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-informer")), ResyncPeriod(s)())
	pvcInformer := informers.CreateSharedPVCIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pvc-informer")), ResyncPeriod(s)())
	pvInformer := informers.CreateSharedPVIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pv-informer")), ResyncPeriod(s)())

	// Shared informers, keyed by the object type they watch. They are started together at the end of this function.
	informers := map[reflect.Type]framework.SharedIndexInformer{}
	informers[reflect.TypeOf(&api.Pod{})] = podInformer
	informers[reflect.TypeOf(&api.Node{})] = nodeInformer
	informers[reflect.TypeOf(&api.PersistentVolumeClaim{})] = pvcInformer
	informers[reflect.TypeOf(&api.PersistentVolume{})] = pvInformer

	go endpointcontroller.NewEndpointController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller"))).
		Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	go replicationcontroller.NewReplicationManager(
		podInformer,
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")),
		ResyncPeriod(s),
		replicationcontroller.BurstReplicas,
		int(s.LookupCacheSizeForRC),
	).Run(int(s.ConcurrentRCSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.TerminatedPodGCThreshold > 0 {
		go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), int(s.TerminatedPodGCThreshold)).
			Run(wait.NeverStop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	_, clusterCIDR, err := net.ParseCIDR(s.ClusterCIDR)
	if err != nil {
		glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", s.ClusterCIDR, err)
	}
	_, serviceCIDR, err := net.ParseCIDR(s.ServiceCIDR)
	if err != nil {
		glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", s.ServiceCIDR, err)
	}
	nodeController := nodecontroller.NewNodeController(cloud,
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
		s.PodEvictionTimeout.Duration,
		flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
		flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
		s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration,
		clusterCIDR, serviceCIDR, int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod.Duration)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	serviceController := servicecontroller.New(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod.Duration, s.NodeSyncPeriod.Duration); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.AllocateNodeCIDRs && s.ConfigureCloudRoutes {
		if cloud == nil {
			glog.Warning("configure-cloud-routes is set, but no cloud provider specified. Will not configure cloud provider routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
		} else {
			routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, clusterCIDR)
			routeController.Run(s.NodeSyncPeriod.Duration)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	} else if s.ConfigureCloudRoutes && !s.AllocateNodeCIDRs {
		glog.Warningf("allocate-node-cidrs set to %v, will not configure cloud provider routes.", s.AllocateNodeCIDRs)
	} else if s.AllocateNodeCIDRs && !s.ConfigureCloudRoutes {
		glog.Infof("configure-cloud-routes is set to %v, will not configure cloud provider routes.", s.ConfigureCloudRoutes)
	}

	resourceQuotaControllerClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "resourcequota-controller"))
	resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient)
	groupKindsToReplenish := []unversioned.GroupKind{
		api.Kind("Pod"),
		api.Kind("Service"),
		api.Kind("ReplicationController"),
		api.Kind("PersistentVolumeClaim"),
		api.Kind("Secret"),
		api.Kind("ConfigMap"),
	}
	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
		KubeClient:                resourceQuotaControllerClient,
		ResyncPeriod:              controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration),
		Registry:                  resourceQuotaRegistry,
		ControllerFactory:         resourcequotacontroller.NewReplenishmentControllerFactory(podInformer, resourceQuotaControllerClient),
		ReplenishmentResyncPeriod: ResyncPeriod(s),
		GroupKindsToReplenish:     groupKindsToReplenish,
	}
	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	var versionStrings []string
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		if versionStrings, err = restclient.ServerAPIVersions(kubeconfig); err == nil {
			return true, nil
		}
		glog.Errorf("Failed to get api versions from server: %v", err)
		return false, nil
	})
	if err != nil {
		glog.Fatalf("Failed to get api versions from server: %v", err)
	}
	versions := &unversioned.APIVersions{Versions: versionStrings}

	resourceMap, err := kubeClient.Discovery().ServerResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}

	// Find the list of namespaced resources via discovery that the namespace controller must manage
	namespaceKubeClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "namespace-controller"))
	namespaceClientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "namespace-controller"), dynamic.LegacyAPIPathResolverFunc)
	groupVersionResources, err := namespaceKubeClient.Discovery().ServerPreferredNamespacedResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}
	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
	go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			hpaClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
			metricsClient := metrics.NewHeapsterMetricsClient(
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
				Run(wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), int(s.LookupCacheSizeForDaemonSet)).
				Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller"))).
				Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)).
				Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "replicasets") {
			glog.Infof("Starting ReplicaSet controller")
			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
				Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	groupVersion = "apps/v1alpha1"
	resources, found = resourceMap[groupVersion]
	glog.Infof("Attempting to start petset, full resource map %+v", resourceMap)
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "petsets") {
			glog.Infof("Starting PetSet controller")
			resyncPeriod := ResyncPeriod(s)()
			go petset.NewPetSetController(
				podInformer,
				// TODO: Switch to using clientset
				kubeClient,
				resyncPeriod,
			).Run(1, wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}

	volumeController := persistentvolumecontroller.NewPersistentVolumeController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
		s.PVClaimBinderSyncPeriod.Duration,
		provisioner,
		ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
		cloud,
		s.ClusterName,
		nil, nil, nil,
	)
	volumeController.Run()
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	attachDetachController, attachDetachControllerErr := volume.NewAttachDetachController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")),
		podInformer,
		nodeInformer,
		pvcInformer,
		pvInformer,
		cloud,
		ProbeAttachableVolumePlugins(s.VolumeConfiguration))
	if attachDetachControllerErr != nil {
		glog.Fatalf("Failed to start attach/detach controller: %v", attachDetachControllerErr)
	} else {
		go attachDetachController.Run(wait.NeverStop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	var rootCA []byte

	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := crypto.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccountcontroller.NewTokensController(
				clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "tokens-controller")),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	serviceaccountcontroller.NewServiceAccountsController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-account-controller")),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.EnableGarbageCollector {
		gcClientset := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "generic-garbage-collector"))
		groupVersionResources, err := gcClientset.Discovery().ServerPreferredResources()
		if err != nil {
			glog.Fatalf("Failed to get supported resources from server: %v", err)
		}
		clientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "generic-garbage-collector"), dynamic.LegacyAPIPathResolverFunc)
		garbageCollector, err := garbagecollector.NewGarbageCollector(clientPool, groupVersionResources)
		if err != nil {
			glog.Errorf("Failed to start the generic garbage collector")
		} else {
			// TODO: make this a flag of kube-controller-manager
			workers := 5
			go garbageCollector.Run(workers, wait.NeverStop)
		}
	}

	// run the shared informers
	for _, informer := range informers {
		go informer.Run(wait.NeverStop)
	}

	// Block forever; the controllers started above run in their own goroutines.
	select {}
}