func TestUpdateClusterStatusOK(t *testing.T) {
	clusterName := "foobarCluster"
	// create dummy httpserver
	testClusterServer := httptest.NewServer(createHttptestFakeHandlerForCluster(true))
	defer testClusterServer.Close()
	federationCluster := newCluster(clusterName, testClusterServer.URL)
	federationClusterList := newClusterList(federationCluster)

	testFederationServer := httptest.NewServer(createHttptestFakeHandlerForFederation(federationClusterList, true))
	defer testFederationServer.Close()

	restClientCfg, err := clientcmd.BuildConfigFromFlags(testFederationServer.URL, "")
	if err != nil {
		t.Errorf("Failed to build client config")
	}
	federationClientSet := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, "cluster-controller"))

	manager := NewclusterController(federationClientSet, 5)
	err = manager.UpdateClusterStatus()
	if err != nil {
		t.Errorf("Failed to Update Cluster Status: %v", err)
	}
	clusterStatus, found := manager.clusterClusterStatusMap[clusterName]
	if !found {
		t.Errorf("Failed to Update Cluster Status")
	} else {
		if (clusterStatus.Conditions[1].Status != v1.ConditionFalse) || (clusterStatus.Conditions[1].Type != federation_v1alpha1.ClusterOffline) {
			t.Errorf("Failed to Update Cluster Status")
		}
	}
}
func BuildClusterConfig(c *federation_v1beta1.Cluster) (*restclient.Config, error) {
	var serverAddress string
	var clusterConfig *restclient.Config
	hostIP, err := utilnet.ChooseHostInterface()
	if err != nil {
		return nil, err
	}

	for _, item := range c.Spec.ServerAddressByClientCIDRs {
		_, cidrnet, err := net.ParseCIDR(item.ClientCIDR)
		if err != nil {
			return nil, err
		}
		myaddr := net.ParseIP(hostIP.String())
		if cidrnet.Contains(myaddr) {
			serverAddress = item.ServerAddress
			break
		}
	}
	if serverAddress != "" {
		if c.Spec.SecretRef == nil {
			glog.Infof("didn't find secretRef for cluster %s. Trying insecure access", c.Name)
			clusterConfig, err = clientcmd.BuildConfigFromFlags(serverAddress, "")
		} else {
			kubeconfigGetter := KubeconfigGetterForCluster(c)
			clusterConfig, err = clientcmd.BuildConfigFromKubeconfigGetter(serverAddress, kubeconfigGetter)
		}
		if err != nil {
			return nil, err
		}
		clusterConfig.QPS = KubeAPIQPS
		clusterConfig.Burst = KubeAPIBurst
	}
	return clusterConfig, nil
}
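// The following is a hypothetical caller sketch, not part of the original source:
// it shows how a federation controller might turn the *restclient.Config returned
// by BuildClusterConfig above into a per-cluster clientset, reusing the
// restclient.AddUserAgent / NewForConfigOrDie pattern used elsewhere in this
// section. The function name, the kubeclientset alias, and the error message are
// illustrative assumptions.
func newClusterClientsetFromConfig(cluster *federation_v1beta1.Cluster) (*kubeclientset.Clientset, error) {
	clusterConfig, err := BuildClusterConfig(cluster)
	if err != nil {
		return nil, err
	}
	if clusterConfig == nil {
		// BuildClusterConfig returns a nil config when no server address matches
		// the host's CIDR, so guard against that before building a client.
		return nil, fmt.Errorf("no reachable server address for cluster %s", cluster.Name)
	}
	return kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, UserAgentName)), nil
}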
func NewClusterClientSet(c *federation_v1alpha1.Cluster) (*ClusterClient, error) {
	var serverAddress string
	hostIP, err := utilnet.ChooseHostInterface()
	if err != nil {
		return nil, err
	}

	for _, item := range c.Spec.ServerAddressByClientCIDRs {
		_, cidrnet, err := net.ParseCIDR(item.ClientCIDR)
		if err != nil {
			return nil, err
		}
		myaddr := net.ParseIP(hostIP.String())
		if cidrnet.Contains(myaddr) {
			serverAddress = item.ServerAddress
			break
		}
	}
	var clusterClientSet = ClusterClient{}
	if serverAddress != "" {
		clusterConfig, err := clientcmd.BuildConfigFromFlags(serverAddress, "")
		if err != nil {
			return nil, err
		}
		clusterConfig.QPS = KubeAPIQPS
		clusterConfig.Burst = KubeAPIBurst
		clusterClientSet.discoveryClient = discovery.NewDiscoveryClientForConfigOrDie(restclient.AddUserAgent(clusterConfig, UserAgentName))
		if clusterClientSet.discoveryClient == nil {
			return nil, nil
		}
	}
	return &clusterClientSet, err
}
func newClusterClientset(c *federation.Cluster) (*clientset.Clientset, error) {
	clusterConfig, err := clientcmd.BuildConfigFromFlags(c.Spec.ServerAddressByClientCIDRs[0].ServerAddress, "")
	if err != nil {
		return nil, err
	}
	clusterConfig.QPS = KubeAPIQPS
	clusterConfig.Burst = KubeAPIBurst
	clientset := clientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, UserAgentName))
	return clientset, nil
}
// TODO(jdef): hacked from plugin/cmd/kube-scheduler/app/server.go
func (s *SchedulerServer) createAPIServerClientConfig() (*restclient.Config, error) {
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.apiServerList[0], s.kubeconfig)
	if err != nil {
		return nil, err
	}

	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.kubeAPIQPS
	kubeconfig.Burst = s.kubeAPIBurst
	return kubeconfig, nil
}
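// Hypothetical caller sketch, not from the original source: wires the config
// produced by createAPIServerClientConfig above into the client.New call used by
// the other scheduler/controller entry points in this section. The method name
// newAPIServerClient is illustrative.
func (s *SchedulerServer) newAPIServerClient() (*client.Client, error) {
	kubeconfig, err := s.createAPIServerClientConfig()
	if err != nil {
		return nil, err
	}
	// client.New mirrors the construction pattern used in the Run functions below.
	return client.New(kubeconfig)
}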
// Run runs the specified SchedulerServer. This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		return err
	}

	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = s.KubeAPIBurst

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(kubeClient, util.NewTokenBucketRateLimiter(s.BindPodsQPS, s.BindPodsBurst))
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	select {}
}
func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) {
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		return nil, fmt.Errorf("unable to build config from flags: %v", err)
	}

	kubeconfig.ContentType = s.ContentType
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = int(s.KubeAPIBurst)

	cli, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election"))
	if err != nil {
		return nil, fmt.Errorf("invalid API configuration: %v", err)
	}
	return cli, nil
}
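// Hypothetical usage sketch, not part of the original source: a fail-fast wrapper
// around createClient above, matching the glog.Fatalf style used by the Run
// functions in this section. The function name mustCreateClient is illustrative.
func mustCreateClient(s *options.SchedulerServer) *clientset.Clientset {
	cli, err := createClient(s)
	if err != nil {
		glog.Fatalf("unable to create kube client: %v", err)
	}
	return cli
}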
func TestUpdateClusterStatusOK(t *testing.T) {
	clusterName := "foobarCluster"
	// create dummy httpserver
	testClusterServer := httptest.NewServer(createHttptestFakeHandlerForCluster(true))
	defer testClusterServer.Close()
	federationCluster := newCluster(clusterName, testClusterServer.URL)
	federationClusterList := newClusterList(federationCluster)

	testFederationServer := httptest.NewServer(createHttptestFakeHandlerForFederation(federationClusterList, true))
	defer testFederationServer.Close()

	restClientCfg, err := clientcmd.BuildConfigFromFlags(testFederationServer.URL, "")
	if err != nil {
		t.Errorf("Failed to build client config")
	}
	federationClientSet := federationclientset.NewForConfigOrDie(restclient.AddUserAgent(restClientCfg, "cluster-controller"))

	// Override KubeconfigGetterForCluster to avoid having to setup service accounts and mount files with secret tokens.
	originalGetter := controllerutil.KubeconfigGetterForCluster
	controllerutil.KubeconfigGetterForCluster = func(c *federationv1beta1.Cluster) clientcmd.KubeconfigGetter {
		return func() (*clientcmdapi.Config, error) {
			return &clientcmdapi.Config{}, nil
		}
	}

	manager := NewclusterController(federationClientSet, 5)
	err = manager.UpdateClusterStatus()
	if err != nil {
		t.Errorf("Failed to Update Cluster Status: %v", err)
	}
	clusterStatus, found := manager.clusterClusterStatusMap[clusterName]
	if !found {
		t.Errorf("Failed to Update Cluster Status")
	} else {
		if (clusterStatus.Conditions[1].Status != v1.ConditionFalse) || (clusterStatus.Conditions[1].Type != federationv1beta1.ClusterOffline) {
			t.Errorf("Failed to Update Cluster Status")
		}
	}

	// Reset KubeconfigGetterForCluster
	controllerutil.KubeconfigGetterForCluster = originalGetter
}
// Run runs the CMServer. This should never exit.
func Run(s *options.CMServer) error {
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(s.ControllerManagerConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}
	restClientCfg, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		return err
	}

	// Override restClientCfg qps/burst settings from flags
	restClientCfg.QPS = s.APIServerQPS
	restClientCfg.Burst = s.APIServerBurst

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	run := func() {
		err := StartControllers(s, restClientCfg)
		glog.Fatalf("error running controllers: %v", err)
		panic("unreachable")
	}
	run()
	panic("unreachable")
}
// Run runs the CMServer. This should never exit.
func Run(s *options.CMServer) error {
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(s.KubeControllerManagerConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		return err
	}

	kubeconfig.ContentConfig.ContentType = s.ContentType
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = int(s.KubeAPIBurst)
	kubeClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "controller-manager"))
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		configz.InstallHandler(mux)
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controller-manager"})

	run := func(stop <-chan struct{}) {
		rootClientBuilder := controller.SimpleControllerClientBuilder{
			ClientConfig: kubeconfig,
		}
		var clientBuilder controller.ControllerClientBuilder
		if len(s.ServiceAccountKeyFile) > 0 {
			clientBuilder = controller.SAControllerClientBuilder{
				ClientConfig: restclient.AnonymousClientConfig(kubeconfig),
				CoreClient:   kubeClient.Core(),
				Namespace:    "kube-system",
			}
		} else {
			clientBuilder = rootClientBuilder
		}

		err := StartControllers(s, kubeconfig, rootClientBuilder, clientBuilder, stop, recorder)
		glog.Fatalf("error running controllers: %v", err)
		panic("unreachable")
	}

	if !s.LeaderElection.LeaderElect {
		run(nil)
		panic("unreachable")
	}

	id, err := os.Hostname()
	if err != nil {
		return err
	}

	// TODO: enable other lock types
	rl := resourcelock.EndpointsLock{
		EndpointsMeta: api.ObjectMeta{
			Namespace: "kube-system",
			Name:      "kube-controller-manager",
		},
		Client: leaderElectionClient,
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      id,
			EventRecorder: recorder,
		},
	}

	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
		Lock:          &rl,
		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() {
				glog.Fatalf("leaderelection lost")
			},
		},
	})
	panic("unreachable")
}
// Run runs the specified SchedulerServer. This should never exit.
func Run(s *options.SchedulerServer) error {
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(s.KubeSchedulerConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		return err
	}

	kubeconfig.ContentType = s.ContentType
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = int(s.KubeAPIBurst)

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		configz.InstallHandler(mux)
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(kubeClient, s.SchedulerName)
	config, err := createConfig(s, configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: s.SchedulerName})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	sched := scheduler.New(config)

	run := func(_ <-chan struct{}) {
		sched.Run()
		select {}
	}

	if !s.LeaderElection.LeaderElect {
		run(nil)
		glog.Fatal("this statement is unreachable")
		panic("unreachable")
	}

	id, err := os.Hostname()
	if err != nil {
		return err
	}

	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
		EndpointsMeta: api.ObjectMeta{
			Namespace: "kube-system",
			Name:      "kube-scheduler",
		},
		Client:        kubeClient,
		Identity:      id,
		EventRecorder: config.Recorder,
		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() {
				glog.Fatalf("lost master")
			},
		},
	})

	glog.Fatal("this statement is unreachable")
	panic("unreachable")
}
// Run runs the CMServer. This should never exit.
func Run(s *options.CMServer) error {
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(s.KubeControllerManagerConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		return err
	}

	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = s.KubeAPIBurst

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	run := func(stop <-chan struct{}) {
		err := StartControllers(s, kubeClient, kubeconfig, stop)
		glog.Fatalf("error running controllers: %v", err)
		panic("unreachable")
	}

	if !s.LeaderElection.LeaderElect {
		run(nil)
		panic("unreachable")
	}

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controller-manager"})

	id, err := os.Hostname()
	if err != nil {
		return err
	}

	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
		EndpointsMeta: api.ObjectMeta{
			Namespace: "kube-system",
			Name:      "kube-controller-manager",
		},
		Client:        kubeClient,
		Identity:      id,
		EventRecorder: recorder,
		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() {
				glog.Fatalf("leaderelection lost")
			},
		},
	})
	panic("unreachable")
}
// Run runs the CMServer. This should never exit.
func Run(s *options.CMServer) error {
	glog.Infof("%+v", version.Get())
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(s.ControllerManagerConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}

	// If s.Kubeconfig flag is empty, try with the deprecated name in 1.5.
	// TODO(madhusudancs): Remove this in 1.6.
	var restClientCfg *restclient.Config
	var err error
	if len(s.Kubeconfig) <= 0 {
		restClientCfg, err = restClientConfigFromSecret(s.Master)
		if err != nil {
			return err
		}
	} else {
		// Create the config to talk to federation-apiserver.
		restClientCfg, err = clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
		if err != nil || restClientCfg == nil {
			// Retry with the deprecated name in 1.5.
			// TODO(madhusudancs): Remove this in 1.6.
			glog.V(2).Infof("Couldn't build the rest client config from flags: %v", err)
			glog.V(2).Infof("Trying with deprecated secret: %s", DeprecatedKubeconfigSecretName)
			restClientCfg, err = restClientConfigFromSecret(s.Master)
			if err != nil {
				return err
			}
		}
	}

	// Override restClientCfg qps/burst settings from flags
	restClientCfg.QPS = s.APIServerQPS
	restClientCfg.Burst = s.APIServerBurst

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	run := func() {
		err := StartControllers(s, restClientCfg)
		glog.Fatalf("error running controllers: %v", err)
		panic("unreachable")
	}
	run()
	panic("unreachable")
}
// Run runs the CMServer. This should never exit.
func (s *CMServer) Run(_ []string) error {
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		return err
	}

	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = s.KubeAPIBurst

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	go endpointcontroller.NewEndpointController(clientForUserAgentOrDie(*kubeconfig, "endpoint-controller"), s.ResyncPeriod).
		Run(s.ConcurrentEndpointSyncs, util.NeverStop)

	go replicationcontroller.NewReplicationManager(
		clientForUserAgentOrDie(*kubeconfig, "replication-controller"),
		s.ResyncPeriod,
		replicationcontroller.BurstReplicas,
	).Run(s.ConcurrentRCSyncs, util.NeverStop)

	if s.TerminatedPodGCThreshold > 0 {
		go gc.New(clientForUserAgentOrDie(*kubeconfig, "garbage-collector"), s.ResyncPeriod, s.TerminatedPodGCThreshold).
			Run(util.NeverStop)
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	nodeController := nodecontroller.NewNodeController(cloud, clientForUserAgentOrDie(*kubeconfig, "node-controller"),
		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)

	serviceController := servicecontroller.New(cloud, clientForUserAgentOrDie(*kubeconfig, "service-controller"), s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}

	if s.AllocateNodeCIDRs {
		if cloud == nil {
			glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
		} else {
			routeController := routecontroller.New(routes, clientForUserAgentOrDie(*kubeconfig, "route-controller"), s.ClusterName, &s.ClusterCIDR)
			routeController.Run(s.NodeSyncPeriod)
		}
	} else {
		glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.AllocateNodeCIDRs)
	}

	go resourcequotacontroller.NewResourceQuotaController(
		clientForUserAgentOrDie(*kubeconfig, "resourcequota-controller"),
		controller.StaticResyncPeriodFunc(s.ResourceQuotaSyncPeriod)).Run(s.ConcurrentResourceQuotaSyncs, util.NeverStop)

	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	var versionStrings []string
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		if versionStrings, err = client.ServerAPIVersions(kubeconfig); err == nil {
			return true, nil
		}
		glog.Errorf("Failed to get api versions from server: %v", err)
		return false, nil
	})
	if err != nil {
		glog.Fatalf("Failed to get api versions from server: %v", err)
	}
	versions := &unversioned.APIVersions{Versions: versionStrings}

	resourceMap, err := kubeClient.Discovery().ServerResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}

	namespacecontroller.NewNamespaceController(clientForUserAgentOrDie(*kubeconfig, "namespace-controller"), versions, s.NamespaceSyncPeriod).Run()

	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			hpaClient := clientForUserAgentOrDie(*kubeconfig, "horizontal-pod-autoscaler")
			metricsClient := metrics.NewHeapsterMetricsClient(
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			podautoscaler.NewHorizontalController(hpaClient, metricsClient).
				Run(s.HorizontalPodAutoscalerSyncPeriod)
		}

		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(clientForUserAgentOrDie(*kubeconfig, "daemon-set-controller"), s.ResyncPeriod).
				Run(s.ConcurrentDSCSyncs, util.NeverStop)
		}

		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(clientForUserAgentOrDie(*kubeconfig, "job-controller"), s.ResyncPeriod).
				Run(s.ConcurrentJobSyncs, util.NeverStop)
		}

		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			deployment.New(clientForUserAgentOrDie(*kubeconfig, "deployment-controller")).
				Run(s.DeploymentControllerSyncPeriod)
		}
	}

	volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfigFlags)
	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfigFlags)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}

	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-binder"), s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()

	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-recycler"), s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags), cloud)
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()

	if provisioner != nil {
		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-provisioner")), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
		if err != nil {
			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
		}
		pvController.Run()
	}

	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				clientForUserAgentOrDie(*kubeconfig, "tokens-controller"),
				serviceaccount.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}

	serviceaccount.NewServiceAccountsController(
		clientForUserAgentOrDie(*kubeconfig, "service-account-controller"),
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()

	select {}
}
// Run runs the specified SchedulerServer. This should never exit.
func Run(s *options.SchedulerServer) error {
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(s.KubeSchedulerConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		glog.Errorf("unable to build config from flags: %v", err)
		return err
	}

	kubeconfig.ContentType = s.ContentType
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = int(s.KubeAPIBurst)

	leaderElectionClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election"))
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
			if s.EnableContentionProfiling {
				goruntime.SetBlockProfileRate(1)
			}
		}
		configz.InstallHandler(mux)
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(leaderElectionClient, s.SchedulerName, s.HardPodAffinitySymmetricWeight, s.FailureDomains)
	config, err := createConfig(s, configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: s.SchedulerName})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: leaderElectionClient.Core().Events("")})

	sched := scheduler.New(config)

	run := func(_ <-chan struct{}) {
		sched.Run()
		select {}
	}

	if !s.LeaderElection.LeaderElect {
		run(nil)
		glog.Fatal("this statement is unreachable")
		panic("unreachable")
	}

	id, err := os.Hostname()
	if err != nil {
		glog.Errorf("unable to get hostname: %v", err)
		return err
	}

	// TODO: enable other lock types
	rl := resourcelock.EndpointsLock{
		EndpointsMeta: v1.ObjectMeta{
			Namespace: "kube-system",
			Name:      "kube-scheduler",
		},
		Client: leaderElectionClient,
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      id,
			EventRecorder: config.Recorder,
		},
	}

	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
		Lock:          &rl,
		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() {
				glog.Fatalf("lost master")
			},
		},
	})

	glog.Fatal("this statement is unreachable")
	panic("unreachable")
}