Example #1
// RunGarbageCollectorController starts generic garbage collection for the cluster.
func (c *MasterConfig) RunGarbageCollectorController(client *osclient.Client, config *restclient.Config) {
	if !c.ControllerManager.EnableGarbageCollector {
		return
	}

	groupVersionResources, err := client.Discovery().ServerPreferredResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}

	config = restclient.AddUserAgent(config, "generic-garbage-collector")
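	// Two dynamic client pools share this config: the first uses a metadata-only
	// codec so its watches decode just object metadata, then the serializer is
	// reset so the second pool falls back to full-object decoding.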
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	// TODO: needs to take GVR
	metaOnlyClientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	// TODO: needs to take GVR
	clientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
	garbageCollector, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, groupVersionResources)
	if err != nil {
		glog.Fatalf("Failed to start the garbage collector: %v", err)
	}

	workers := int(c.ControllerManager.ConcurrentGCSyncs)
	go garbageCollector.Run(workers, utilwait.NeverStop)
}
func setup(t *testing.T) (*httptest.Server, *garbagecollector.GarbageCollector, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.EnableCoreControllers = false
	masterConfig.EnableGarbageCollection = true
	_, s := framework.RunAMaster(masterConfig)

	clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
	if err != nil {
		t.Fatalf("Error in create clientset: %v", err)
	}
	groupVersionResources, err := clientSet.Discovery().ServerPreferredResources()
	if err != nil {
		t.Fatalf("Failed to get supported resources from server: %v", err)
	}
	config := &restclient.Config{Host: s.URL}
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	gc, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, registered.RESTMapper(), groupVersionResources)
	if err != nil {
		t.Fatalf("Failed to create garbage collector")
	}
	return s, gc, clientSet
}
Example #3
func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
	if !ctx.Options.EnableGarbageCollector {
		return false, nil
	}

	// TODO: should use a dynamic RESTMapper built from the discovery results.
	restMapper := api.Registry.RESTMapper()

	gcClientset := ctx.ClientBuilder.ClientOrDie("generic-garbage-collector")
	preferredResources, err := gcClientset.Discovery().ServerPreferredResources()
	if err != nil {
		return true, fmt.Errorf("failed to get supported resources from server: %v", err)
	}
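	// Only resources that support the "delete" verb can be managed by the
	// garbage collector, so filter the discovered set before converting it to
	// GroupVersionResources.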
	deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete"}}, preferredResources)
	deletableGroupVersionResources, err := discovery.GroupVersionResources(deletableResources)
	if err != nil {
		return true, fmt.Errorf("Failed to parse resources from server: %v", err)
	}

	config := ctx.ClientBuilder.ConfigOrDie("generic-garbage-collector")
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig = dynamic.ContentConfig()
	clientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
	garbageCollector, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, restMapper, deletableGroupVersionResources)
	if err != nil {
		return true, fmt.Errorf("Failed to start the generic garbage collector: %v", err)
	}
	workers := int(ctx.Options.ConcurrentGCSyncs)
	go garbageCollector.Run(workers, ctx.Stop)

	return true, nil
}
// TestDependentsRace relies on Go's data race detector to check whether there
// is a data race on the dependents field.
func TestDependentsRace(t *testing.T) {
	config := &restclient.Config{}
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
	podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, podResource)
	if err != nil {
		t.Fatal(err)
	}

	const updates = 100
	owner := &node{dependents: make(map[*node]struct{})}
	ownerUID := types.UID("owner")
	gc.propagator.uidToNode.Write(owner)
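	// One goroutine adds and removes dependents of the owner while another runs
	// the orphan finalizer over the same node; running the test with `go test
	// -race` surfaces any unsynchronized access to the dependents map.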
	go func() {
		for i := 0; i < updates; i++ {
			dependent := &node{}
			gc.propagator.addDependentToOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
			gc.propagator.removeDependentFromOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
		}
	}()
	go func() {
		gc.orphanQueue.Add(owner)
		for i := 0; i < updates; i++ {
			gc.orphanFinalizer()
		}
	}()
}
// Test that the processItem function performs the expected actions.
func TestProcessItem(t *testing.T) {
	pod := newDanglingPod()
	podBytes, err := json.Marshal(pod)
	if err != nil {
		t.Fatal(err)
	}
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
				200,
				podBytes,
			},
		},
	}
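	// The fake server returns 404 for the owning replication controller, so the
	// garbage collector should treat the pod as dangling and issue a DELETE for it.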
	podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	clientConfig.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(clientConfig, dynamic.LegacyAPIPathResolverFunc)
	clientConfig.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(clientConfig, dynamic.LegacyAPIPathResolverFunc)
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, podResource)
	if err != nil {
		t.Fatal(err)
	}
	item := &node{
		identity: objectReference{
			OwnerReference: metatypes.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The processItem routine should get the latest item from the server.
		owners: nil,
	}
	err = gc.processItem(item)
	if err != nil {
		t.Errorf("Unexpected Error: %v", err)
	}
	expectedActionSet := sets.NewString()
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
	expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")

	actualActionSet := sets.NewString()
	for _, action := range testHandler.actions {
		actualActionSet.Insert(action.String())
	}
	if !expectedActionSet.Equal(actualActionSet) {
		t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
			actualActionSet, expectedActionSet.Difference(actualActionSet))
	}
}
func setupGC(t *testing.T, config *restclient.Config) *GarbageCollector {
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	podResource := []schema.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, registered.RESTMapper(), podResource)
	if err != nil {
		t.Fatal(err)
	}
	return gc
}
func TestNewGarbageCollector(t *testing.T) {
	config := &restclient.Config{}
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
	podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, podResource)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 1, len(gc.monitors))
}
func setup(t *testing.T) (*garbagecollector.GarbageCollector, clientset.Interface) {
	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	// TODO: close the http server

	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.EnableCoreControllers = false
	m, err := master.New(masterConfig)
	if err != nil {
		t.Fatalf("Error in bringing up the master: %v", err)
	}

	framework.DeleteAllEtcdKeys()
	clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
	if err != nil {
		t.Fatalf("Error in create clientset: %v", err)
	}
	groupVersionResources, err := clientSet.Discovery().ServerPreferredResources()
	if err != nil {
		t.Fatalf("Failed to get supported resources from server: %v", err)
	}
	clientPool := dynamic.NewClientPool(&restclient.Config{Host: s.URL}, dynamic.LegacyAPIPathResolverFunc)
	gc, err := garbagecollector.NewGarbageCollector(clientPool, groupVersionResources)
	if err != nil {
		t.Fatalf("Failed to create garbage collector")
	}
	return gc, clientSet
}
Example #9
func startNamespaceController(ctx ControllerContext) (bool, error) {
	// TODO: should use a dynamic RESTMapper built from the discovery results.
	restMapper := api.Registry.RESTMapper()

	// Find the list of namespaced resources via discovery that the namespace controller must manage
	namespaceKubeClient := ctx.ClientBuilder.ClientOrDie("namespace-controller")
	namespaceClientPool := dynamic.NewClientPool(ctx.ClientBuilder.ConfigOrDie("namespace-controller"), restMapper, dynamic.LegacyAPIPathResolverFunc)
	// TODO: consider using a list-watch + cache here rather than polling
	resources, err := namespaceKubeClient.Discovery().ServerResources()
	if err != nil {
		return true, fmt.Errorf("failed to get preferred server resources: %v", err)
	}
	gvrs, err := discovery.GroupVersionResources(resources)
	if err != nil {
		return true, fmt.Errorf("failed to parse preferred server resources: %v", err)
	}
	discoverResourcesFn := namespaceKubeClient.Discovery().ServerPreferredNamespacedResources
	if _, found := gvrs[extensions.SchemeGroupVersion.WithResource("thirdpartyresource")]; !found {
		// make discovery static: without third-party resources the set of
		// namespaced resources should not change at runtime, so take a one-time
		// snapshot instead of re-querying on every sync.
		snapshot, err := discoverResourcesFn()
		if err != nil {
			return true, fmt.Errorf("failed to get server resources: %v", err)
		}
		discoverResourcesFn = func() ([]*metav1.APIResourceList, error) {
			return snapshot, nil
		}
	}
	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, discoverResourcesFn, ctx.Options.NamespaceSyncPeriod.Duration, v1.FinalizerKubernetes)
	go namespaceController.Run(int(ctx.Options.ConcurrentNamespaceSyncs), ctx.Stop)

	return true, nil

}
// TestDependentsRace relies on Go's data race detector to check whether there
// is a data race on the dependents field.
func TestDependentsRace(t *testing.T) {
	clientPool := dynamic.NewClientPool(&restclient.Config{}, dynamic.LegacyAPIPathResolverFunc)
	podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	gc, err := NewGarbageCollector(clientPool, podResource)
	if err != nil {
		t.Fatal(err)
	}

	const updates = 100
	owner := &node{dependentsLock: &sync.RWMutex{}, dependents: make(map[*node]struct{})}
	ownerUID := types.UID("owner")
	gc.propagator.uidToNode.Write(owner)
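	// Same pattern as the earlier TestDependentsRace: concurrent dependent
	// add/remove races against the orphan finalizer reading owner.dependents,
	// and the data race detector reports any unsynchronized access.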
	go func() {
		for i := 0; i < updates; i++ {
			dependent := &node{}
			gc.propagator.addDependentToOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
			gc.propagator.removeDependentFromOwners(dependent, []metatypes.OwnerReference{{UID: ownerUID}})
		}
	}()
	go func() {
		gc.orphanQueue.Add(owner)
		for i := 0; i < updates; i++ {
			gc.orphanFinalizer()
		}
	}()
}
func TestNewGarbageCollector(t *testing.T) {
	clientPool := dynamic.NewClientPool(&restclient.Config{}, dynamic.LegacyAPIPathResolverFunc)
	podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	gc, err := NewGarbageCollector(clientPool, podResource)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 1, len(gc.monitors))
}
Example #12
func startNamespaceController() {
	// Use the default QPS
	config := restclient.AddUserAgent(&restclient.Config{Host: framework.TestContext.Host}, "node-e2e-namespace-controller")
	client, err := clientset.NewForConfig(config)
	Expect(err).NotTo(HaveOccurred())
	clientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
	resources, err := client.Discovery().ServerPreferredNamespacedResources()
	Expect(err).NotTo(HaveOccurred())
	nc := namespacecontroller.NewNamespaceController(client, clientPool, resources, ncResyncPeriod, api.FinalizerKubernetes)
	go nc.Run(ncConcurrency, wait.NeverStop)
}
// Start starts the namespace controller.
func (n *NamespaceController) Start() error {
	// Use the default QPS
	config := restclient.AddUserAgent(&restclient.Config{Host: framework.TestContext.Host}, ncName)
	client, err := clientset.NewForConfig(config)
	if err != nil {
		return err
	}
	clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	discoverResourcesFn := client.Discovery().ServerPreferredNamespacedResources
	nc := namespacecontroller.NewNamespaceController(client, clientPool, discoverResourcesFn, ncResyncPeriod, v1.FinalizerKubernetes)
	go nc.Run(ncConcurrency, n.stopCh)
	return nil
}
Example #14
func setup(t *testing.T) (*httptest.Server, *garbagecollector.GarbageCollector, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.EnableCoreControllers = false
	_, s := framework.RunAMaster(masterConfig)

	clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
	if err != nil {
		t.Fatalf("Error in create clientset: %v", err)
	}
	groupVersionResources, err := clientSet.Discovery().ServerPreferredResources()
	if err != nil {
		t.Fatalf("Failed to get supported resources from server: %v", err)
	}
	clientPool := dynamic.NewClientPool(&restclient.Config{Host: s.URL}, dynamic.LegacyAPIPathResolverFunc)
	gc, err := garbagecollector.NewGarbageCollector(clientPool, groupVersionResources)
	if err != nil {
		t.Fatalf("Failed to create garbage collector")
	}
	return s, gc, clientSet
}
// Test that the list and watch functions correctly convert the ListOptions.
func TestGCListWatcher(t *testing.T) {
	testHandler := &fakeActionHandler{}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	clientPool := dynamic.NewClientPool(clientConfig, dynamic.LegacyAPIPathResolverFunc)
	podResource := unversioned.GroupVersionResource{Version: "v1", Resource: "pods"}
	client, err := clientPool.ClientForGroupVersion(podResource.GroupVersion())
	if err != nil {
		t.Fatal(err)
	}
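	// Issue one Watch and one List with ResourceVersion "1"; both requests should
	// hit the fake server with resourceVersion=1 in the query string.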
	lw := gcListWatcher(client, podResource)
	lw.Watch(api.ListOptions{ResourceVersion: "1"})
	lw.List(api.ListOptions{ResourceVersion: "1"})
	if e, a := 2, len(testHandler.actions); e != a {
		t.Errorf("expect %d requests, got %d", e, a)
	}
	if e, a := "resourceVersion=1", testHandler.actions[0].query; e != a {
		t.Errorf("expect %s, got %s", e, a)
	}
	if e, a := "resourceVersion=1", testHandler.actions[1].query; e != a {
		t.Errorf("expect %s, got %s", e, a)
	}
}
func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder) error {
	client := func(serviceAccountName string) clientset.Interface {
		return rootClientBuilder.ClientOrDie(serviceAccountName)
	}
	discoveryClient := client("controller-discovery").Discovery()
	sharedInformers := informers.NewSharedInformerFactory(client("shared-informers"), ResyncPeriod(s)())

	// always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			return fmt.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			var rootCA []byte
			if s.RootCAFile != "" {
				rootCA, err = ioutil.ReadFile(s.RootCAFile)
				if err != nil {
					return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
				}
				if _, err := certutil.ParseCertsPEM(rootCA); err != nil {
					return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
				}
			} else {
				rootCA = kubeconfig.CAData
			}

			go serviceaccountcontroller.NewTokensController(
				rootClientBuilder.ClientOrDie("tokens-controller"),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run(int(s.ConcurrentSATokenSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	go endpointcontroller.NewEndpointController(sharedInformers.Pods().Informer(), client("endpoint-controller")).
		Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	go replicationcontroller.NewReplicationManager(
		sharedInformers.Pods().Informer(),
		clientBuilder.ClientOrDie("replication-controller"),
		ResyncPeriod(s),
		replicationcontroller.BurstReplicas,
		int(s.LookupCacheSizeForRC),
		s.EnableGarbageCollector,
	).Run(int(s.ConcurrentRCSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.TerminatedPodGCThreshold > 0 {
		go podgc.NewPodGC(client("pod-garbage-collector"), sharedInformers.Pods().Informer(),
			int(s.TerminatedPodGCThreshold)).Run(wait.NeverStop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	_, clusterCIDR, err := net.ParseCIDR(s.ClusterCIDR)
	if err != nil {
		glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", s.ClusterCIDR, err)
	}
	_, serviceCIDR, err := net.ParseCIDR(s.ServiceCIDR)
	if err != nil {
		glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", s.ServiceCIDR, err)
	}
	nodeController, err := nodecontroller.NewNodeController(
		sharedInformers.Pods(), sharedInformers.Nodes(), sharedInformers.DaemonSets(),
		cloud, client("node-controller"),
		s.PodEvictionTimeout.Duration, s.NodeEvictionRate, s.SecondaryNodeEvictionRate, s.LargeClusterSizeThreshold, s.UnhealthyZoneThreshold, s.NodeMonitorGracePeriod.Duration,
		s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR,
		int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
	if err != nil {
		glog.Fatalf("Failed to initialize nodecontroller: %v", err)
	}
	nodeController.Run()
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	serviceController, err := servicecontroller.New(cloud, client("service-controller"), s.ClusterName)
	if err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	} else {
		serviceController.Run(int(s.ConcurrentServiceSyncs))
	}
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.AllocateNodeCIDRs && s.ConfigureCloudRoutes {
		if cloud == nil {
			glog.Warning("configure-cloud-routes is set, but no cloud provider specified. Will not configure cloud provider routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
		} else {
			routeController := routecontroller.New(routes, client("route-controller"), s.ClusterName, clusterCIDR)
			routeController.Run(s.RouteReconciliationPeriod.Duration)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	} else {
		glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", s.AllocateNodeCIDRs, s.ConfigureCloudRoutes)
	}

	resourceQuotaControllerClient := client("resourcequota-controller")
	resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient)
	groupKindsToReplenish := []unversioned.GroupKind{
		api.Kind("Pod"),
		api.Kind("Service"),
		api.Kind("ReplicationController"),
		api.Kind("PersistentVolumeClaim"),
		api.Kind("Secret"),
		api.Kind("ConfigMap"),
	}
	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
		KubeClient:                resourceQuotaControllerClient,
		ResyncPeriod:              controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration),
		Registry:                  resourceQuotaRegistry,
		ControllerFactory:         resourcequotacontroller.NewReplenishmentControllerFactory(sharedInformers.Pods().Informer(), resourceQuotaControllerClient),
		ReplenishmentResyncPeriod: ResyncPeriod(s),
		GroupKindsToReplenish:     groupKindsToReplenish,
	}
	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	var versionStrings []string
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		if versionStrings, err = restclient.ServerAPIVersions(kubeconfig); err == nil {
			return true, nil
		}
		glog.Errorf("Failed to get api versions from server: %v", err)
		return false, nil
	})
	if err != nil {
		glog.Fatalf("Failed to get api versions from server: %v", err)
	}
	versions := &unversioned.APIVersions{Versions: versionStrings}

	resourceMap, err := discoveryClient.ServerResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}

	// TODO: should use a dynamic RESTMapper built from the discovery results.
	restMapper := registered.RESTMapper()

	// Find the list of namespaced resources via discovery that the namespace controller must manage
	namespaceKubeClient := client("namespace-controller")
	namespaceClientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "namespace-controller"), restMapper, dynamic.LegacyAPIPathResolverFunc)
	groupVersionResources, err := namespaceKubeClient.Discovery().ServerPreferredNamespacedResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}
	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
	go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			hpaClient := client("horizontal-pod-autoscaler")
			metricsClient := metrics.NewHeapsterMetricsClient(
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient.Autoscaling(), metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
				Run(wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(sharedInformers.DaemonSets(), sharedInformers.Pods(), sharedInformers.Nodes(), client("daemon-set-controller"), int(s.LookupCacheSizeForDaemonSet)).
				Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(sharedInformers.Pods().Informer(), client("job-controller")).
				Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(client("deployment-controller"), ResyncPeriod(s)).
				Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "replicasets") {
			glog.Infof("Starting ReplicaSet controller")
			go replicaset.NewReplicaSetController(sharedInformers.Pods().Informer(), client("replicaset-controller"), ResyncPeriod(s), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS), s.EnableGarbageCollector).
				Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	groupVersion = "policy/v1alpha1"
	resources, found = resourceMap[groupVersion]
	glog.Infof("Attempting to start disruption controller, full resource map %+v", resourceMap)
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "poddisruptionbudgets") {
			glog.Infof("Starting disruption controller")
			go disruption.NewDisruptionController(sharedInformers.Pods().Informer(), client("disruption-controller")).Run(wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	groupVersion = "apps/v1alpha1"
	resources, found = resourceMap[groupVersion]
	glog.Infof("Attempting to start petset, full resource map %+v", resourceMap)
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "petsets") {
			glog.Infof("Starting PetSet controller")
			resyncPeriod := ResyncPeriod(s)()
			go petset.NewPetSetController(
				sharedInformers.Pods().Informer(),
				client("petset-controller"),
				resyncPeriod,
			).Run(1, wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	groupVersion = "batch/v2alpha1"
	resources, found = resourceMap[groupVersion]
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "scheduledjobs") {
			glog.Infof("Starting scheduledjob controller")
			// TODO: this is a temp fix to allow kubeClient to list v2alpha1 scheduledjobs; should switch to using clientset
			kubeconfig.ContentConfig.GroupVersion = &unversioned.GroupVersion{Group: batch.GroupName, Version: "v2alpha1"}
			go scheduledjob.NewScheduledJobController(client("scheduledjob-controller")).
				Run(wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	} else {
		glog.Infof("Not starting %s apis", groupVersion)
	}

	alphaProvisioner, err := NewAlphaVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		glog.Fatalf("An backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
	}
	params := persistentvolumecontroller.ControllerParameters{
		KubeClient:                client("persistent-volume-binder"),
		SyncPeriod:                s.PVClaimBinderSyncPeriod.Duration,
		AlphaProvisioner:          alphaProvisioner,
		VolumePlugins:             ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
		Cloud:                     cloud,
		ClusterName:               s.ClusterName,
		EnableDynamicProvisioning: s.VolumeConfiguration.EnableDynamicProvisioning,
	}
	volumeController := persistentvolumecontroller.NewController(params)
	volumeController.Run(wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	attachDetachController, attachDetachControllerErr :=
		attachdetach.NewAttachDetachController(
			client("attachdetach-controller"),
			sharedInformers.Pods().Informer(),
			sharedInformers.Nodes().Informer(),
			sharedInformers.PersistentVolumeClaims().Informer(),
			sharedInformers.PersistentVolumes().Informer(),
			cloud,
			ProbeAttachableVolumePlugins(s.VolumeConfiguration),
			recorder)
	if attachDetachControllerErr != nil {
		glog.Fatalf("Failed to start attach/detach controller: %v", attachDetachControllerErr)
	}
	go attachDetachController.Run(wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	groupVersion = "certificates.k8s.io/v1alpha1"
	resources, found = resourceMap[groupVersion]
	glog.Infof("Attempting to start certificates, full resource map %+v", resourceMap)
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "certificatesigningrequests") {
			glog.Infof("Starting certificate request controller")
			resyncPeriod := ResyncPeriod(s)()
			certController, err := certcontroller.NewCertificateController(
				client("certificate-controller"),
				resyncPeriod,
				s.ClusterSigningCertFile,
				s.ClusterSigningKeyFile,
				s.ApproveAllKubeletCSRsForGroup,
			)
			if err != nil {
				glog.Errorf("Failed to start certificate controller: %v", err)
			} else {
				go certController.Run(1, wait.NeverStop)
			}
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	serviceaccountcontroller.NewServiceAccountsController(
		client("service-account-controller"),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.EnableGarbageCollector {
		gcClientset := client("generic-garbage-collector")
		groupVersionResources, err := gcClientset.Discovery().ServerPreferredResources()
		if err != nil {
			glog.Fatalf("Failed to get supported resources from server: %v", err)
		}

		config := restclient.AddUserAgent(kubeconfig, "generic-garbage-collector")
		config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
		metaOnlyClientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
		config.ContentConfig.NegotiatedSerializer = nil
		clientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
		garbageCollector, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, restMapper, groupVersionResources)
		if err != nil {
			glog.Errorf("Failed to start the generic garbage collector: %v", err)
		} else {
			workers := int(s.ConcurrentGCSyncs)
			go garbageCollector.Run(workers, wait.NeverStop)
		}
	}

	sharedInformers.Start(stop)

	select {}
}
func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIVersions) {
	now := unversioned.Now()
	namespaceName := "test"
	testNamespacePendingFinalize := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:              namespaceName,
			ResourceVersion:   "1",
			DeletionTimestamp: &now,
		},
		Spec: api.NamespaceSpec{
			Finalizers: []api.FinalizerName{"kubernetes"},
		},
		Status: api.NamespaceStatus{
			Phase: api.NamespaceTerminating,
		},
	}
	testNamespaceFinalizeComplete := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:              namespaceName,
			ResourceVersion:   "1",
			DeletionTimestamp: &now,
		},
		Spec: api.NamespaceSpec{},
		Status: api.NamespaceStatus{
			Phase: api.NamespaceTerminating,
		},
	}

	// when deleting all of a namespace's content, we do a GET of the collection and a DELETE of the collection by default
	dynamicClientActionSet := sets.NewString()
	groupVersionResources := testGroupVersionResources()
	for _, groupVersionResource := range groupVersionResources {
		urlPath := path.Join([]string{
			dynamic.LegacyAPIPathResolverFunc(groupVersionResource.GroupVersion()),
			groupVersionResource.Group,
			groupVersionResource.Version,
			"namespaces",
			namespaceName,
			groupVersionResource.Resource,
		}...)
		dynamicClientActionSet.Insert((&fakeAction{method: "GET", path: urlPath}).String())
		dynamicClientActionSet.Insert((&fakeAction{method: "DELETE", path: urlPath}).String())
	}

	scenarios := map[string]struct {
		testNamespace          *api.Namespace
		kubeClientActionSet    sets.String
		dynamicClientActionSet sets.String
	}{
		"pending-finalize": {
			testNamespace: testNamespacePendingFinalize,
			kubeClientActionSet: sets.NewString(
				strings.Join([]string{"get", "namespaces", ""}, "-"),
				strings.Join([]string{"create", "namespaces", "finalize"}, "-"),
				strings.Join([]string{"list", "pods", ""}, "-"),
				strings.Join([]string{"delete", "namespaces", ""}, "-"),
			),
			dynamicClientActionSet: dynamicClientActionSet,
		},
		"complete-finalize": {
			testNamespace: testNamespaceFinalizeComplete,
			kubeClientActionSet: sets.NewString(
				strings.Join([]string{"get", "namespaces", ""}, "-"),
				strings.Join([]string{"delete", "namespaces", ""}, "-"),
			),
			dynamicClientActionSet: sets.NewString(),
		},
	}
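	// The pending-finalize namespace should trigger content deletion through the
	// dynamic client plus a finalize call, while the already-finalized namespace
	// needs only the namespace GET and DELETE from the kube client.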

	for scenario, testInput := range scenarios {
		testHandler := &fakeActionHandler{statusCode: 200}
		srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
		defer srv.Close()

		mockClient := fake.NewSimpleClientset(testInput.testNamespace)
		clientPool := dynamic.NewClientPool(clientConfig, dynamic.LegacyAPIPathResolverFunc)

		err := syncNamespace(mockClient, clientPool, &operationNotSupportedCache{m: make(map[operationKey]bool)}, groupVersionResources, testInput.testNamespace, api.FinalizerKubernetes)
		if err != nil {
			t.Errorf("scenario %s - Unexpected error when synching namespace %v", scenario, err)
		}

		// validate traffic from kube client
		actionSet := sets.NewString()
		for _, action := range mockClient.Actions() {
			actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
		}
		if !actionSet.Equal(testInput.kubeClientActionSet) {
			t.Errorf("scenario %s - mock client expected actions:\n%v\n but got:\n%v\nDifference:\n%v", scenario,
				testInput.kubeClientActionSet, actionSet, testInput.kubeClientActionSet.Difference(actionSet))
		}

		// validate traffic from dynamic client
		actionSet = sets.NewString()
		for _, action := range testHandler.actions {
			actionSet.Insert(action.String())
		}
		if !actionSet.Equal(testInput.dynamicClientActionSet) {
			t.Errorf("scenario %s - dynamic client expected actions:\n%v\n but got:\n%v\nDifference:\n%v", scenario,
				testInput.dynamicClientActionSet, actionSet, testInput.dynamicClientActionSet.Difference(actionSet))
		}
	}
}
Example #18
func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig *restclient.Config, stop <-chan struct{}) error {
	podInformer := informers.CreateSharedPodIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pod-informer")), ResyncPeriod(s)())
	nodeInformer := informers.CreateSharedNodeIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-informer")), ResyncPeriod(s)())
	pvcInformer := informers.CreateSharedPVCIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pvc-informer")), ResyncPeriod(s)())
	pvInformer := informers.CreateSharedPVIndexInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pv-informer")), ResyncPeriod(s)())
	informers := map[reflect.Type]framework.SharedIndexInformer{}
	informers[reflect.TypeOf(&api.Pod{})] = podInformer
	informers[reflect.TypeOf(&api.Node{})] = nodeInformer
	informers[reflect.TypeOf(&api.PersistentVolumeClaim{})] = pvcInformer
	informers[reflect.TypeOf(&api.PersistentVolume{})] = pvInformer

	go endpointcontroller.NewEndpointController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller"))).
		Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	go replicationcontroller.NewReplicationManager(
		podInformer,
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")),
		ResyncPeriod(s),
		replicationcontroller.BurstReplicas,
		int(s.LookupCacheSizeForRC),
	).Run(int(s.ConcurrentRCSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.TerminatedPodGCThreshold > 0 {
		go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), int(s.TerminatedPodGCThreshold)).
			Run(wait.NeverStop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	_, clusterCIDR, err := net.ParseCIDR(s.ClusterCIDR)
	if err != nil {
		glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", s.ClusterCIDR, err)
	}
	_, serviceCIDR, err := net.ParseCIDR(s.ServiceCIDR)
	if err != nil {
		glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", s.ServiceCIDR, err)
	}
	nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
		s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
		flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
		s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod.Duration)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	serviceController := servicecontroller.New(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod.Duration, s.NodeSyncPeriod.Duration); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.AllocateNodeCIDRs && s.ConfigureCloudRoutes {
		if cloud == nil {
			glog.Warning("configure-cloud-routes is set, but no cloud provider specified. Will not configure cloud provider routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
		} else {
			routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, clusterCIDR)
			routeController.Run(s.NodeSyncPeriod.Duration)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	} else if s.ConfigureCloudRoutes && !s.AllocateNodeCIDRs {
		glog.Warningf("allocate-node-cidrs set to %v, will not configure cloud provider routes.", s.AllocateNodeCIDRs)
	} else if s.AllocateNodeCIDRs && !s.ConfigureCloudRoutes {
		glog.Infof("configure-cloud-routes is set to %v, will not configure cloud provider routes.", s.ConfigureCloudRoutes)
	}

	resourceQuotaControllerClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "resourcequota-controller"))
	resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient)
	groupKindsToReplenish := []unversioned.GroupKind{
		api.Kind("Pod"),
		api.Kind("Service"),
		api.Kind("ReplicationController"),
		api.Kind("PersistentVolumeClaim"),
		api.Kind("Secret"),
		api.Kind("ConfigMap"),
	}
	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
		KubeClient:                resourceQuotaControllerClient,
		ResyncPeriod:              controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration),
		Registry:                  resourceQuotaRegistry,
		ControllerFactory:         resourcequotacontroller.NewReplenishmentControllerFactory(podInformer, resourceQuotaControllerClient),
		ReplenishmentResyncPeriod: ResyncPeriod(s),
		GroupKindsToReplenish:     groupKindsToReplenish,
	}
	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	var versionStrings []string
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		if versionStrings, err = restclient.ServerAPIVersions(kubeconfig); err == nil {
			return true, nil
		}
		glog.Errorf("Failed to get api versions from server: %v", err)
		return false, nil
	})
	if err != nil {
		glog.Fatalf("Failed to get api versions from server: %v", err)
	}
	versions := &unversioned.APIVersions{Versions: versionStrings}

	resourceMap, err := kubeClient.Discovery().ServerResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}

	// Find the list of namespaced resources via discovery that the namespace controller must manage
	namespaceKubeClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "namespace-controller"))
	namespaceClientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "namespace-controller"), dynamic.LegacyAPIPathResolverFunc)
	groupVersionResources, err := namespaceKubeClient.Discovery().ServerPreferredNamespacedResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}
	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
	go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			hpaClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
			metricsClient := metrics.NewHeapsterMetricsClient(
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
				Run(wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), int(s.LookupCacheSizeForDaemonSet)).
				Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller"))).
				Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)).
				Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}

		if containsResource(resources, "replicasets") {
			glog.Infof("Starting ReplicaSet controller")
			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
				Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	groupVersion = "apps/v1alpha1"
	resources, found = resourceMap[groupVersion]
	glog.Infof("Attempting to start petset, full resource map %+v", resourceMap)
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "petsets") {
			glog.Infof("Starting PetSet controller")
			resyncPeriod := ResyncPeriod(s)()
			go petset.NewPetSetController(
				podInformer,
				// TODO: Switch to using clientset
				kubeClient,
				resyncPeriod,
			).Run(1, wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}

	volumeController := persistentvolumecontroller.NewPersistentVolumeController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
		s.PVClaimBinderSyncPeriod.Duration,
		provisioner,
		ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
		cloud,
		s.ClusterName,
		nil, nil, nil,
	)
	volumeController.Run()
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	attachDetachController, attachDetachControllerErr :=
		volume.NewAttachDetachController(
			clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")),
			podInformer,
			nodeInformer,
			pvcInformer,
			pvInformer,
			cloud,
			ProbeAttachableVolumePlugins(s.VolumeConfiguration))
	if attachDetachControllerErr != nil {
		glog.Fatalf("Failed to start attach/detach controller: %v", attachDetachControllerErr)
	} else {
		go attachDetachController.Run(wait.NeverStop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	var rootCA []byte

	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := crypto.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccountcontroller.NewTokensController(
				clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "tokens-controller")),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	serviceaccountcontroller.NewServiceAccountsController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-account-controller")),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.EnableGarbageCollector {
		gcClientset := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "generic-garbage-collector"))
		groupVersionResources, err := gcClientset.Discovery().ServerPreferredResources()
		if err != nil {
			glog.Fatalf("Failed to get supported resources from server: %v", err)
		}
		clientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "generic-garbage-collector"), dynamic.LegacyAPIPathResolverFunc)
		garbageCollector, err := garbagecollector.NewGarbageCollector(clientPool, groupVersionResources)
		if err != nil {
			glog.Errorf("Failed to start the generic garbage collector")
		} else {
			// TODO: make this a flag of kube-controller-manager
			workers := 5
			go garbageCollector.Run(workers, wait.NeverStop)
		}
	}

	// run the shared informers
	for _, informer := range informers {
		go informer.Run(wait.NeverStop)
	}

	select {}
}
Example #19
func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig *restclient.Config, stop <-chan struct{}) error {
	go endpointcontroller.NewEndpointController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller")), ResyncPeriod(s)).
		Run(s.ConcurrentEndpointSyncs, wait.NeverStop)

	go replicationcontroller.NewReplicationManager(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")),
		ResyncPeriod(s),
		replicationcontroller.BurstReplicas,
		s.LookupCacheSizeForRC,
	).Run(s.ConcurrentRCSyncs, wait.NeverStop)

	if s.TerminatedPodGCThreshold > 0 {
		go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), s.TerminatedPodGCThreshold).
			Run(wait.NeverStop)
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	// this cidr has been validated already
	_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
	nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
		s.PodEvictionTimeout.Duration, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod.Duration)

	if len(s.NetworkProvider) > 0 {
		if err := ProbeNetworkProviders(s.NetworkProvider); err != nil {
			glog.Fatalf("Network provider could not be initialized: %v", err)
		}
		networkProvider, err := networkprovider.InitNetworkProvider(s.NetworkProvider)
		if err != nil {
			glog.Errorf("Network provider could not be initialized: %v", err)
		}

		if networkProvider != nil {
			networkController := networkcontroller.NewNetworkController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "network-controller")), networkProvider)
			go networkController.Run(wait.NeverStop)
		} else {
			glog.Errorf("NetController should not be run without a networkprovider.")
		}
	}

	serviceController := servicecontroller.New(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod.Duration, s.NodeSyncPeriod.Duration); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}

	if s.AllocateNodeCIDRs {
		if cloud == nil {
			glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
		} else {
			routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, clusterCIDR)
			routeController.Run(s.NodeSyncPeriod.Duration)
		}
	} else {
		glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.AllocateNodeCIDRs)
	}

	resourceQuotaControllerClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "resourcequota-controller"))
	resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient)
	groupKindsToReplenish := []unversioned.GroupKind{
		api.Kind("Pod"),
		api.Kind("Service"),
		api.Kind("ReplicationController"),
		api.Kind("PersistentVolumeClaim"),
		api.Kind("Secret"),
		api.Kind("ConfigMap"),
	}
	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
		KubeClient:                resourceQuotaControllerClient,
		ResyncPeriod:              controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration),
		Registry:                  resourceQuotaRegistry,
		ControllerFactory:         resourcequotacontroller.NewReplenishmentControllerFactory(resourceQuotaControllerClient),
		ReplenishmentResyncPeriod: ResyncPeriod(s),
		GroupKindsToReplenish:     groupKindsToReplenish,
	}
	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)

	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	var versionStrings []string
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		if versionStrings, err = restclient.ServerAPIVersions(kubeconfig); err == nil {
			return true, nil
		}
		glog.Errorf("Failed to get api versions from server: %v", err)
		return false, nil
	})
	if err != nil {
		glog.Fatalf("Failed to get api versions from server: %v", err)
	}
	versions := &unversioned.APIVersions{Versions: versionStrings}

	resourceMap, err := kubeClient.Discovery().ServerResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}

	// Find the list of namespaced resources via discovery that the namespace controller must manage
	namespaceKubeClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "namespace-controller"))
	namespaceClientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "namespace-controller"), dynamic.LegacyAPIPathResolverFunc)
	groupVersionResources, err := namespacecontroller.ServerPreferredNamespacedGroupVersionResources(namespaceKubeClient.Discovery())
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}
	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
	go namespaceController.Run(s.ConcurrentNamespaceSyncs, wait.NeverStop)

	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			hpaClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
			metricsClient := metrics.NewHeapsterMetricsClient(
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
				Run(wait.NeverStop)
		}

		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), s.LookupCacheSizeForDaemonSet).
				Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
		}

		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller")), ResyncPeriod(s)).
				Run(s.ConcurrentJobSyncs, wait.NeverStop)
		}

		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)).
				Run(s.ConcurrentDeploymentSyncs, wait.NeverStop)
		}

		if containsResource(resources, "replicasets") {
			glog.Infof("Starting ReplicaSet controller")
			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, s.LookupCacheSizeForRS).
				Run(s.ConcurrentRSSyncs, wait.NeverStop)
		}
	}

	volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}

	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration)
	pvclaimBinder.Run()

	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
		s.PVClaimBinderSyncPeriod.Duration,
		s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
		ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
		cloud,
	)
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()

	if provisioner != nil {
		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod.Duration, s.ClusterName, volumePlugins, provisioner, cloud)
		if err != nil {
			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
		}
		pvController.Run()
	}

	var rootCA []byte

	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccountcontroller.NewTokensController(
				clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "tokens-controller")),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}

	serviceaccountcontroller.NewServiceAccountsController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-account-controller")),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()

	select {}
}
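The function above gates the extensions/v1beta1 controllers on containsVersion and containsResource, which are defined elsewhere in the controller-manager package. A minimal sketch of what these helpers amount to, with signatures inferred from the call sites (versions being the *unversioned.APIVersions built from discovery, resources the *unversioned.APIResourceList for the group/version); treat it as an illustration rather than the exact upstream code:

// containsVersion reports whether the discovered API versions include version.
func containsVersion(versions *unversioned.APIVersions, version string) bool {
	for ix := range versions.Versions {
		if versions.Versions[ix] == version {
			return true
		}
	}
	return false
}

// containsResource reports whether the resource list for a group/version
// exposes a resource with the given name (e.g. "daemonsets").
func containsResource(resources *unversioned.APIResourceList, resourceName string) bool {
	for ix := range resources.APIResources {
		if resources.APIResources[ix].Name == resourceName {
			return true
		}
	}
	return false
}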
示例#20
0
func (s *CMServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}

	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			profile.InstallHandler(mux)
		}
		mux.Handle("/metrics", prometheus.Handler())
		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	endpoints := s.createEndpointController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller")))
	go endpoints.Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)

	go replicationcontroller.NewReplicationManagerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")), s.resyncPeriod, replicationcontroller.BurstReplicas, int(s.LookupCacheSizeForRC)).
		Run(int(s.ConcurrentRCSyncs), wait.NeverStop)

	if s.TerminatedPodGCThreshold > 0 {
		go podgc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "pod-garbage-collector")), s.resyncPeriod, int(s.TerminatedPodGCThreshold)).
			Run(wait.NeverStop)
	}

	//TODO(jdef) should eventually support more cloud providers here
	if s.CloudProvider != mesos.ProviderName {
		glog.Fatalf("Only provider %v is supported, you specified %v", mesos.ProviderName, s.CloudProvider)
	}
	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}
	_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
	_, serviceCIDR, _ := net.ParseCIDR(s.ServiceCIDR)
	nodeController, err := nodecontroller.NewNodeControllerFromClient(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
		s.PodEvictionTimeout.Duration, s.NodeEvictionRate, s.SecondaryNodeEvictionRate, s.LargeClusterSizeThreshold, s.UnhealthyZoneThreshold,
		s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
	if err != nil {
		glog.Fatalf("Failed to initialize nodecontroller: %v", err)
	}
	nodeController.Run()

	nodeStatusUpdaterController := node.NewStatusUpdater(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-status-controller")), s.NodeMonitorPeriod.Duration, time.Now)
	if err := nodeStatusUpdaterController.Run(wait.NeverStop); err != nil {
		glog.Fatalf("Failed to start node status update controller: %v", err)
	}

	serviceController, err := servicecontroller.New(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-controller")), s.ClusterName)
	if err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	} else {
		serviceController.Run(int(s.ConcurrentServiceSyncs))
	}

	if s.AllocateNodeCIDRs && s.ConfigureCloudRoutes {
		if cloud == nil {
			glog.Warning("configure-cloud-routes is set, but no cloud provider specified. Will not configure cloud provider routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
		} else {
			routeController := routecontroller.New(routes, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "route-controller")), s.ClusterName, clusterCIDR)
			routeController.Run(s.NodeSyncPeriod.Duration)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	} else {
		glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", s.AllocateNodeCIDRs, s.ConfigureCloudRoutes)
	}

	resourceQuotaControllerClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "resource-quota-controller"))
	resourceQuotaRegistry := quotainstall.NewRegistry(resourceQuotaControllerClient)
	groupKindsToReplenish := []unversioned.GroupKind{
		api.Kind("Pod"),
		api.Kind("Service"),
		api.Kind("ReplicationController"),
		api.Kind("PersistentVolumeClaim"),
		api.Kind("Secret"),
	}
	resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
		KubeClient:                resourceQuotaControllerClient,
		ResyncPeriod:              controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod.Duration),
		Registry:                  resourceQuotaRegistry,
		GroupKindsToReplenish:     groupKindsToReplenish,
		ReplenishmentResyncPeriod: s.resyncPeriod,
		ControllerFactory:         resourcequotacontroller.NewReplenishmentControllerFactoryFromClient(resourceQuotaControllerClient),
	}
	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop)

	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	var versionStrings []string
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		if versionStrings, err = restclient.ServerAPIVersions(kubeconfig); err == nil {
			return true, nil
		}
		glog.Errorf("Failed to get api versions from server: %v", err)
		return false, nil
	})
	if err != nil {
		glog.Fatalf("Failed to get api versions from server: %v", err)
	}
	versions := &unversioned.APIVersions{Versions: versionStrings}

	resourceMap, err := kubeClient.Discovery().ServerResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}

	// Find the list of namespaced resources via discovery that the namespace controller must manage
	namespaceKubeClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "namespace-controller"))
	namespaceClientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "namespace-controller"), dynamic.LegacyAPIPathResolverFunc)
	groupVersionResources, err := namespaceKubeClient.Discovery().ServerPreferredNamespacedResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}
	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
	go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)

	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO(k8s): this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			hpaClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
			metricsClient := metrics.NewHeapsterMetricsClient(
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
				Run(wait.NeverStop)
		}

		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod, int(s.LookupCacheSizeForDaemonSet)).
				Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
		}

		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller")), s.resyncPeriod).
				Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
		}

		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), s.resyncPeriod).
				Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
		}

		if containsResource(resources, "replicasets") {
			glog.Infof("Starting ReplicaSet controller")
			go replicaset.NewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), s.resyncPeriod, replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
				Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
		}
	}

	alphaProvisioner, err := kubecontrollermanager.NewAlphaVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		glog.Fatalf("An backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
	}
	volumeController := persistentvolumecontroller.NewPersistentVolumeController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
		s.PVClaimBinderSyncPeriod.Duration,
		alphaProvisioner,
		kubecontrollermanager.ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
		cloud,
		s.ClusterName,
		nil, // volumeSource
		nil, // claimSource
		nil, // classSource
		nil, // eventRecorder
		s.VolumeConfiguration.EnableDynamicProvisioning,
	)
	volumeController.Run(wait.NeverStop)

	var rootCA []byte

	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := crypto.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			go serviceaccountcontroller.NewTokensController(
				clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "tokens-controller")),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run(int(s.ConcurrentSATokenSyncs), wait.NeverStop)
		}
	}

	serviceaccountcontroller.NewServiceAccountsController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-account-controller")),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()

	select {}
}
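Both controller-manager variants above hand the controllers a resync-period factory (ResyncPeriod(s) in the first, s.resyncPeriod in the second) rather than a fixed duration, so that informer resyncs are jittered instead of firing in lockstep. A hedged sketch of such a factory, assuming a MinResyncPeriod option as kube-controller-manager of this era exposes (imports of math/rand and time assumed):

// ResyncPeriod returns a function that yields a duration in
// [MinResyncPeriod, 2*MinResyncPeriod), spreading resyncs out over time.
// Sketch only; the upstream helper may differ in detail.
func ResyncPeriod(s *options.CMServer) func() time.Duration {
	return func() time.Duration {
		factor := rand.Float64() + 1
		return time.Duration(float64(s.MinResyncPeriod.Nanoseconds()) * factor)
	}
}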
示例#21
0
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
	// The fact that we need this feels like a bug in ginkgo.
	// https://github.com/onsi/ginkgo/issues/222
	f.cleanupHandle = AddCleanupAction(f.AfterEach)
	if f.ClientSet == nil {
		By("Creating a kubernetes client")
		config, err := LoadConfig()
		Expect(err).NotTo(HaveOccurred())
		config.QPS = f.options.ClientQPS
		config.Burst = f.options.ClientBurst
		if f.options.GroupVersion != nil {
			config.GroupVersion = f.options.GroupVersion
		}
		if TestContext.KubeAPIContentType != "" {
			config.ContentType = TestContext.KubeAPIContentType
		}
		f.ClientSet, err = internalclientset.NewForConfig(config)
		Expect(err).NotTo(HaveOccurred())
		f.ClientSet_1_5, err = release_1_5.NewForConfig(config)
		Expect(err).NotTo(HaveOccurred())
		clientRepoConfig := getClientRepoConfig(config)
		f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
		Expect(err).NotTo(HaveOccurred())
		f.ClientPool = dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	}

	if f.federated {
		if f.FederationClientset_1_5 == nil {
			By("Creating a release 1.4 federation Clientset")
			var err error
			f.FederationClientset_1_5, err = LoadFederationClientset_1_5()
			Expect(err).NotTo(HaveOccurred())
		}
		By("Waiting for federation-apiserver to be ready")
		err := WaitForFederationApiserverReady(f.FederationClientset_1_5)
		Expect(err).NotTo(HaveOccurred())
		By("federation-apiserver is ready")

		By("Creating a federation namespace")
		ns, err := f.createFederationNamespace(f.BaseName)
		Expect(err).NotTo(HaveOccurred())
		f.FederationNamespace = ns
		By(fmt.Sprintf("Created federation namespace %s", ns.Name))
	}

	By("Building a namespace api object")
	namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
		"e2e-framework": f.BaseName,
	})
	Expect(err).NotTo(HaveOccurred())

	f.Namespace = namespace

	if TestContext.VerifyServiceAccount {
		By("Waiting for a default service account to be provisioned in namespace")
		err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
		Expect(err).NotTo(HaveOccurred())
	} else {
		Logf("Skipping waiting for service account")
	}

	if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
		f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
			inKubemark: ProviderIs("kubemark"),
			masterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
		})
		if err != nil {
			Logf("Error while creating NewResourceUsageGatherer: %v", err)
		} else {
			go f.gatherer.startGatheringData()
		}
	}

	if TestContext.GatherLogsSizes {
		f.logsSizeWaitGroup = sync.WaitGroup{}
		f.logsSizeWaitGroup.Add(1)
		f.logsSizeCloseChannel = make(chan bool)
		f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
		go func() {
			f.logsSizeVerifier.Run()
			f.logsSizeWaitGroup.Done()
		}()
	}
}
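For context, BeforeEach above is normally wired into a ginkgo suite through the framework's constructor, which registers BeforeEach and AfterEach on every spec. A minimal, hypothetical usage sketch; framework.NewDefaultFramework and framework.KubeDescribe are assumed helper names from the same e2e package:

var _ = framework.KubeDescribe("Example", func() {
	// NewDefaultFramework registers f.BeforeEach/f.AfterEach with ginkgo,
	// so each spec below runs against a freshly created namespace and clients.
	f := framework.NewDefaultFramework("example")

	It("provisions a namespace for the test", func() {
		Expect(f.Namespace).NotTo(BeNil())
	})
})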
示例#22
0
// startControllers launches the controllers
func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
	if oc.Options.Controllers == configapi.ControllersDisabled {
		return nil
	}

	go func() {
		oc.ControllerPlugStart()
		// when a manual shutdown (DELETE /controllers) or lease lost occurs, the process should exit
		// this ensures no code is still running as a controller, and allows a process manager to reset
		// the controller to come back into a candidate state and compete for the lease
		if err := oc.ControllerPlug.WaitForStop(); err != nil {
			glog.Fatalf("Controller shutdown due to lease being lost: %v", err)
		}
		glog.Fatalf("Controller graceful shutdown requested")
	}()

	oc.ControllerPlug.WaitForStart()
	glog.Infof("Controllers starting (%s)", oc.Options.Controllers)

	// Get configured options (or defaults) for k8s controllers
	controllerManagerOptions := cmapp.NewCMServer()
	if kc != nil && kc.ControllerManager != nil {
		controllerManagerOptions = kc.ControllerManager
	}

	// Start these first, because they provide credentials for other controllers' clients
	oc.RunServiceAccountsController()
	oc.RunServiceAccountTokensController(controllerManagerOptions)
	// used by admission controllers
	oc.RunServiceAccountPullSecretsControllers()
	oc.RunSecurityAllocationController()

	if kc != nil {
		_, _, rcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraReplicationControllerServiceAccountName)
		if err != nil {
			glog.Fatalf("Could not get client for replication controller: %v", err)
		}
		_, _, jobClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraJobControllerServiceAccountName)
		if err != nil {
			glog.Fatalf("Could not get client for job controller: %v", err)
		}
		_, hpaOClient, hpaKClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraHPAControllerServiceAccountName)
		if err != nil {
			glog.Fatalf("Could not get client for HPA controller: %v", err)
		}

		_, _, recyclerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeRecyclerControllerServiceAccountName)
		if err != nil {
			glog.Fatalf("Could not get client for persistent volume recycler controller: %v", err)
		}

		_, _, binderClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeBinderControllerServiceAccountName)
		if err != nil {
			glog.Fatalf("Could not get client for persistent volume binder controller: %v", err)
		}

		_, _, provisionerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeProvisionerControllerServiceAccountName)
		if err != nil {
			glog.Fatalf("Could not get client for persistent volume provisioner controller: %v", err)
		}

		_, _, daemonSetClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraDaemonSetControllerServiceAccountName)
		if err != nil {
			glog.Fatalf("Could not get client for daemonset controller: %v", err)
		}

		_, _, gcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraGCControllerServiceAccountName)
		if err != nil {
			glog.Fatalf("Could not get client for pod gc controller: %v", err)
		}

		_, _, serviceLoadBalancerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraServiceLoadBalancerControllerServiceAccountName)
		if err != nil {
			glog.Fatalf("Could not get client for pod gc controller: %v", err)
		}

		namespaceControllerClientConfig, _, namespaceControllerKubeClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraNamespaceControllerServiceAccountName)
		if err != nil {
			glog.Fatalf("Could not get client for namespace controller: %v", err)
		}
		namespaceControllerClientSet := clientadapter.FromUnversionedClient(namespaceControllerKubeClient)
		namespaceControllerClientPool := dynamic.NewClientPool(namespaceControllerClientConfig, dynamic.LegacyAPIPathResolverFunc)

		// called by admission control
		kc.RunResourceQuotaManager()
		oc.RunResourceQuotaManager(controllerManagerOptions)

		// no special order
		kc.RunNodeController()
		kc.RunScheduler()
		kc.RunReplicationController(rcClient)

		extensionsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, extensions.GroupName)) > 0

		// TODO: enable this check once the job controller can use the batch API if the extensions API is disabled
		// batchEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, batch.GroupName)) > 0
		if extensionsEnabled /*|| batchEnabled*/ {
			kc.RunJobController(jobClient)
		}
		// TODO: enable this check once the HPA controller can use the autoscaling API if the extensions API is disabled
		// autoscalingEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, autoscaling.GroupName)) > 0
		if extensionsEnabled /*|| autoscalingEnabled*/ {
			kc.RunHPAController(hpaOClient, hpaKClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace)
		}
		if extensionsEnabled {
			kc.RunDaemonSetsController(daemonSetClient)
		}

		kc.RunEndpointController()
		kc.RunNamespaceController(namespaceControllerClientSet, namespaceControllerClientPool)
		kc.RunPersistentVolumeClaimBinder(binderClient)
		if oc.Options.VolumeConfig.DynamicProvisioningEnabled {
			kc.RunPersistentVolumeProvisioner(provisionerClient)
		}
		kc.RunPersistentVolumeClaimRecycler(oc.ImageFor("recycler"), recyclerClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace)
		kc.RunGCController(gcClient)

		kc.RunServiceLoadBalancerController(serviceLoadBalancerClient)

		glog.Infof("Started Kubernetes Controllers")
	} else {
		oc.RunResourceQuotaManager(nil)
	}

	// no special order
	if configapi.IsBuildEnabled(&oc.Options) {
		oc.RunBuildController()
		oc.RunBuildPodController()
		oc.RunBuildConfigChangeController()
		oc.RunBuildImageChangeTriggerController()
	}
	oc.RunDeploymentController()
	oc.RunDeployerPodController()
	oc.RunDeploymentConfigController()
	oc.RunDeploymentTriggerController()
	oc.RunDeploymentImageChangeTriggerController()
	oc.RunImageImportController()
	oc.RunOriginNamespaceController()
	oc.RunSDNController()

	_, _, serviceServingCertClient, err := oc.GetServiceAccountClients(bootstrappolicy.ServiceServingCertServiceAccountName)
	if err != nil {
		glog.Fatalf("Could not get client: %v", err)
	}
	oc.RunServiceServingCertController(serviceServingCertClient)

	glog.Infof("Started Origin Controllers")

	return nil
}
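As a design note, the service-account client lookups in startControllers repeat the same fatal-on-error check for every infra controller. A small helper could fold those checks into one call site per client; fatalOnClientError below is a hypothetical name and is not part of the source:

// fatalOnClientError aborts startup when a service-account client could not
// be built, mirroring the glog.Fatalf pattern used above.
func fatalOnClientError(err error, controller string) {
	if err != nil {
		glog.Fatalf("Could not get client for %s controller: %v", controller, err)
	}
}

Used, for example, as: _, _, jobClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraJobControllerServiceAccountName); fatalOnClientError(err, "job").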
示例#23
0
func StartControllers(controllers map[string]InitFunc, s *options.CMServer, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) error {
	sharedInformers := informers.NewSharedInformerFactory(rootClientBuilder.ClientOrDie("shared-informers"), nil, ResyncPeriod(s)())

	// always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			return fmt.Errorf("error reading key for service account token controller: %v", err)
		} else {
			var rootCA []byte
			if s.RootCAFile != "" {
				rootCA, err = ioutil.ReadFile(s.RootCAFile)
				if err != nil {
					return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
				}
				if _, err := certutil.ParseCertsPEM(rootCA); err != nil {
					return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
				}
			} else {
				rootCA = rootClientBuilder.ConfigOrDie("tokens-controller").CAData
			}

			go serviceaccountcontroller.NewTokensController(
				rootClientBuilder.ClientOrDie("tokens-controller"),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run(int(s.ConcurrentSATokenSyncs), stop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}

	availableResources, err := getAvailableResources(clientBuilder)
	if err != nil {
		return err
	}

	ctx := ControllerContext{
		ClientBuilder:      clientBuilder,
		InformerFactory:    sharedInformers,
		Options:            *s,
		AvailableResources: availableResources,
		Stop:               stop,
	}

	for controllerName, initFn := range controllers {
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

		glog.V(1).Infof("Starting %q", controllerName)
		started, err := initFn(ctx)
		if err != nil {
			glog.Errorf("Error starting %q", controllerName)
			return err
		}
		if !started {
			glog.Warningf("Skipping %q", controllerName)
		}
		glog.Infof("Started %q", controllerName)
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		return fmt.Errorf("cloud provider could not be initialized: %v", err)
	}

	_, clusterCIDR, err := net.ParseCIDR(s.ClusterCIDR)
	if err != nil {
		glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", s.ClusterCIDR, err)
	}
	_, serviceCIDR, err := net.ParseCIDR(s.ServiceCIDR)
	if err != nil {
		glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", s.ServiceCIDR, err)
	}
	nodeController, err := nodecontroller.NewNodeController(
		sharedInformers.Pods(), sharedInformers.Nodes(), sharedInformers.DaemonSets(),
		cloud, clientBuilder.ClientOrDie("node-controller"),
		s.PodEvictionTimeout.Duration, s.NodeEvictionRate, s.SecondaryNodeEvictionRate, s.LargeClusterSizeThreshold, s.UnhealthyZoneThreshold, s.NodeMonitorGracePeriod.Duration,
		s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR,
		int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
	if err != nil {
		return fmt.Errorf("failed to initialize nodecontroller: %v", err)
	}
	nodeController.Run()
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	serviceController, err := servicecontroller.New(cloud, clientBuilder.ClientOrDie("service-controller"), s.ClusterName)
	if err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	} else {
		serviceController.Run(int(s.ConcurrentServiceSyncs))
	}
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.AllocateNodeCIDRs && s.ConfigureCloudRoutes {
		if cloud == nil {
			glog.Warning("configure-cloud-routes is set, but no cloud provider specified. Will not configure cloud provider routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
		} else {
			routeController := routecontroller.New(routes, clientBuilder.ClientOrDie("route-controller"), s.ClusterName, clusterCIDR)
			routeController.Run(s.RouteReconciliationPeriod.Duration)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	} else {
		glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", s.AllocateNodeCIDRs, s.ConfigureCloudRoutes)
	}

	if availableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"}] {
		go daemon.NewDaemonSetsController(sharedInformers.DaemonSets(), sharedInformers.Pods(), sharedInformers.Nodes(), clientBuilder.ClientOrDie("daemon-set-controller"), int(s.LookupCacheSizeForDaemonSet)).
			Run(int(s.ConcurrentDaemonSetSyncs), stop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	if availableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "jobs"}] {
		glog.Infof("Starting job controller")
		go job.NewJobController(sharedInformers.Pods().Informer(), sharedInformers.Jobs(), clientBuilder.ClientOrDie("job-controller")).
			Run(int(s.ConcurrentJobSyncs), stop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	if availableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] {
		glog.Infof("Starting deployment controller")
		go deployment.NewDeploymentController(sharedInformers.Deployments(), sharedInformers.ReplicaSets(), sharedInformers.Pods(), clientBuilder.ClientOrDie("deployment-controller")).
			Run(int(s.ConcurrentDeploymentSyncs), stop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	if availableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicasets"}] {
		glog.Infof("Starting ReplicaSet controller")
		go replicaset.NewReplicaSetController(sharedInformers.ReplicaSets(), sharedInformers.Pods(), clientBuilder.ClientOrDie("replicaset-controller"), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS), s.EnableGarbageCollector).
			Run(int(s.ConcurrentRSSyncs), stop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	if availableResources[schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"}] {
		glog.Infof("Starting horizontal pod autoscaler controller.")
		hpaClient := clientBuilder.ClientOrDie("horizontal-pod-autoscaler")
		metricsClient := metrics.NewHeapsterMetricsClient(
			hpaClient,
			metrics.DefaultHeapsterNamespace,
			metrics.DefaultHeapsterScheme,
			metrics.DefaultHeapsterService,
			metrics.DefaultHeapsterPort,
		)
		replicaCalc := podautoscaler.NewReplicaCalculator(metricsClient, hpaClient.Core())
		go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient.Autoscaling(), replicaCalc, s.HorizontalPodAutoscalerSyncPeriod.Duration).
			Run(stop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	if availableResources[schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"}] {
		glog.Infof("Starting disruption controller")
		go disruption.NewDisruptionController(sharedInformers.Pods().Informer(), clientBuilder.ClientOrDie("disruption-controller")).Run(stop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	if availableResources[schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "statefulsets"}] {
		glog.Infof("Starting StatefulSet controller")
		resyncPeriod := ResyncPeriod(s)()
		go petset.NewStatefulSetController(
			sharedInformers.Pods().Informer(),
			clientBuilder.ClientOrDie("statefulset-controller"),
			resyncPeriod,
		).Run(1, stop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	if availableResources[schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}] {
		glog.Infof("Starting cronjob controller")
		// TODO: this is a temporary fix to let the client list v2alpha1 ScheduledJobs/CronJobs; should switch to using the clientset
		cronjobConfig := rootClientBuilder.ConfigOrDie("cronjob-controller")
		cronjobConfig.ContentConfig.GroupVersion = &schema.GroupVersion{Group: batch.GroupName, Version: "v2alpha1"}
		go cronjob.NewCronJobController(clientset.NewForConfigOrDie(cronjobConfig)).Run(stop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	alphaProvisioner, err := NewAlphaVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		return fmt.Errorf("an backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
	}
	params := persistentvolumecontroller.ControllerParameters{
		KubeClient:                clientBuilder.ClientOrDie("persistent-volume-binder"),
		SyncPeriod:                s.PVClaimBinderSyncPeriod.Duration,
		AlphaProvisioner:          alphaProvisioner,
		VolumePlugins:             ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
		Cloud:                     cloud,
		ClusterName:               s.ClusterName,
		EnableDynamicProvisioning: s.VolumeConfiguration.EnableDynamicProvisioning,
	}
	volumeController := persistentvolumecontroller.NewController(params)
	volumeController.Run(stop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	attachDetachController, attachDetachControllerErr :=
		attachdetach.NewAttachDetachController(
			clientBuilder.ClientOrDie("attachdetach-controller"),
			sharedInformers.Pods().Informer(),
			sharedInformers.Nodes().Informer(),
			sharedInformers.PersistentVolumeClaims().Informer(),
			sharedInformers.PersistentVolumes().Informer(),
			cloud,
			ProbeAttachableVolumePlugins(s.VolumeConfiguration))
	if attachDetachControllerErr != nil {
		return fmt.Errorf("failed to start attach/detach controller: %v", attachDetachControllerErr)
	}
	go attachDetachController.Run(stop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if availableResources[schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1alpha1", Resource: "certificatesigningrequests"}] {
		glog.Infof("Starting certificate request controller")
		resyncPeriod := ResyncPeriod(s)()
		c := clientBuilder.ClientOrDie("certificate-controller")
		certController, err := certcontroller.NewCertificateController(
			c,
			resyncPeriod,
			s.ClusterSigningCertFile,
			s.ClusterSigningKeyFile,
			certcontroller.NewGroupApprover(c.Certificates().CertificateSigningRequests(), s.ApproveAllKubeletCSRsForGroup),
		)
		if err != nil {
			glog.Errorf("Failed to start certificate controller: %v", err)
		} else {
			go certController.Run(1, stop)
		}
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}

	go serviceaccountcontroller.NewServiceAccountsController(
		sharedInformers.ServiceAccounts(), sharedInformers.Namespaces(),
		clientBuilder.ClientOrDie("service-account-controller"),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run(1, stop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

	if s.EnableGarbageCollector {
		// TODO: should use a dynamic RESTMapper built from the discovery results.
		restMapper := registered.RESTMapper()

		gcClientset := clientBuilder.ClientOrDie("generic-garbage-collector")
		preferredResources, err := gcClientset.Discovery().ServerPreferredResources()
		if err != nil {
			return fmt.Errorf("failed to get supported resources from server: %v", err)
		}
		deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete"}}, preferredResources)
		deletableGroupVersionResources, err := discovery.GroupVersionResources(deletableResources)
		if err != nil {
			glog.Errorf("Failed to parse resources from server: %v", err)
		}

		config := rootClientBuilder.ConfigOrDie("generic-garbage-collector")
		config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
		metaOnlyClientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
		config.ContentConfig = dynamic.ContentConfig()
		clientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
		garbageCollector, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, restMapper, deletableGroupVersionResources)
		if err != nil {
			glog.Errorf("Failed to start the generic garbage collector: %v", err)
		} else {
			workers := int(s.ConcurrentGCSyncs)
			go garbageCollector.Run(workers, stop)
		}
	}

	sharedInformers.Start(stop)

	select {}
}
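StartControllers receives controllers map[string]InitFunc and invokes each entry with the shared ControllerContext. A sketch of how such a map is typically assembled; the InitFunc signature is inferred from the initFn(ctx) call above, while newControllerInitializers and the individual start functions are assumed names used for illustration:

// InitFunc starts one controller against the shared context, reporting
// whether the controller was enabled and any error encountered starting it.
type InitFunc func(ctx ControllerContext) (bool, error)

// newControllerInitializers builds the name->InitFunc map handed to
// StartControllers. The entries below are illustrative.
func newControllerInitializers() map[string]InitFunc {
	controllers := map[string]InitFunc{}
	controllers["endpoint"] = startEndpointController
	controllers["podgc"] = startPodGCController
	controllers["resourcequota"] = startResourceQuotaController
	controllers["namespace"] = startNamespaceController
	return controllers
}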