// startControllers launches the controllers func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error { if oc.Options.Controllers == configapi.ControllersDisabled { return nil } go func() { oc.ControllerPlugStart() // when a manual shutdown (DELETE /controllers) or lease lost occurs, the process should exit // this ensures no code is still running as a controller, and allows a process manager to reset // the controller to come back into a candidate state and compete for the lease if err := oc.ControllerPlug.WaitForStop(); err != nil { glog.Fatalf("Controller shutdown due to lease being lost: %v", err) } glog.Fatalf("Controller graceful shutdown requested") }() oc.ControllerPlug.WaitForStart() glog.Infof("Controllers starting (%s)", oc.Options.Controllers) // Get configured options (or defaults) for k8s controllers controllerManagerOptions := cmapp.NewCMServer() if kc != nil && kc.ControllerManager != nil { controllerManagerOptions = kc.ControllerManager } // Start these first, because they provide credentials for other controllers' clients oc.RunServiceAccountsController() oc.RunServiceAccountTokensController(controllerManagerOptions) // used by admission controllers oc.RunServiceAccountPullSecretsControllers() oc.RunSecurityAllocationController() if kc != nil { _, _, rcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraReplicationControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for replication controller: %v", err) } _, _, jobClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraJobControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for job controller: %v", err) } _, hpaOClient, hpaKClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraHPAControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for HPA controller: %v", err) } _, _, recyclerClient, err := 
oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeRecyclerControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for persistent volume recycler controller: %v", err) } _, _, binderClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeBinderControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for persistent volume binder controller: %v", err) } _, _, provisionerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeProvisionerControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for persistent volume provisioner controller: %v", err) } _, _, daemonSetClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraDaemonSetControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for daemonset controller: %v", err) } _, _, gcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraGCControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for pod gc controller: %v", err) } _, _, serviceLoadBalancerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraServiceLoadBalancerControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for pod gc controller: %v", err) } namespaceControllerClientConfig, _, namespaceControllerKubeClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraNamespaceControllerServiceAccountName) if err != nil { glog.Fatalf("Could not get client for namespace controller: %v", err) } namespaceControllerClientSet := clientadapter.FromUnversionedClient(namespaceControllerKubeClient) namespaceControllerClientPool := dynamic.NewClientPool(namespaceControllerClientConfig, dynamic.LegacyAPIPathResolverFunc) // called by admission control kc.RunResourceQuotaManager() oc.RunResourceQuotaManager(controllerManagerOptions) // no special order kc.RunNodeController() kc.RunScheduler() kc.RunReplicationController(rcClient) 
extensionsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, extensions.GroupName)) > 0 // TODO: enable this check once the job controller can use the batch API if the extensions API is disabled // batchEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, batch.GroupName)) > 0 if extensionsEnabled /*|| batchEnabled*/ { kc.RunJobController(jobClient) } // TODO: enable this check once the HPA controller can use the autoscaling API if the extensions API is disabled // autoscalingEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, autoscaling.GroupName)) > 0 if extensionsEnabled /*|| autoscalingEnabled*/ { kc.RunHPAController(hpaOClient, hpaKClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace) } if extensionsEnabled { kc.RunDaemonSetsController(daemonSetClient) } kc.RunEndpointController() kc.RunNamespaceController(namespaceControllerClientSet, namespaceControllerClientPool) kc.RunPersistentVolumeClaimBinder(binderClient) if oc.Options.VolumeConfig.DynamicProvisioningEnabled { kc.RunPersistentVolumeProvisioner(provisionerClient) } kc.RunPersistentVolumeClaimRecycler(oc.ImageFor("recycler"), recyclerClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace) kc.RunGCController(gcClient) kc.RunServiceLoadBalancerController(serviceLoadBalancerClient) glog.Infof("Started Kubernetes Controllers") } else { oc.RunResourceQuotaManager(nil) } // no special order if configapi.IsBuildEnabled(&oc.Options) { oc.RunBuildController() oc.RunBuildPodController() oc.RunBuildConfigChangeController() oc.RunBuildImageChangeTriggerController() } oc.RunDeploymentController() oc.RunDeployerPodController() oc.RunDeploymentConfigController() oc.RunDeploymentTriggerController() oc.RunDeploymentImageChangeTriggerController() oc.RunImageImportController() oc.RunOriginNamespaceController() oc.RunSDNController() _, _, serviceServingCertClient, err := oc.GetServiceAccountClients(bootstrappolicy.ServiceServingCertServiceAccountName) if 
err != nil { glog.Fatalf("Could not get client: %v", err) } oc.RunServiceServingCertController(serviceServingCertClient) glog.Infof("Started Origin Controllers") return nil }
func startHealth(openshiftConfig *origin.MasterConfig) error { openshiftConfig.RunHealth() return nil }
// StartAPI starts the components of the master that are considered part of the API - the Kubernetes
// API and core controllers, the Origin API, the group, policy, project, and authorization caches,
// etcd, the asset server (for the UI), the OAuth server endpoints, and the DNS server.
// TODO: allow to be more granularly targeted
func StartAPI(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
	// start etcd
	if oc.Options.EtcdConfig != nil {
		etcdserver.RunEtcd(oc.Options.EtcdConfig)
	}

	// verify we can connect to etcd with the provided config
	if etcdClient, err := etcd.GetAndTestEtcdClient(oc.Options.EtcdClientInfo); err != nil {
		return err
	} else {
		// The client was only needed for the connectivity test; release it.
		etcdClient.Close()
	}

	// Must start policy caching immediately
	oc.RunGroupCache()
	oc.RunPolicyCache()
	oc.RunProjectCache()

	unprotectedInstallers := []origin.APIInstaller{}

	if oc.Options.OAuthConfig != nil {
		authConfig, err := origin.BuildAuthConfig(oc)
		if err != nil {
			return err
		}
		// OAuth endpoints are installed into the unprotected installer set.
		unprotectedInstallers = append(unprotectedInstallers, authConfig)
	}

	var standaloneAssetConfig *origin.AssetConfig
	if oc.WebConsoleEnabled() {
		// Read the ClusterResourceOverride admission plugin config file, if one
		// is configured, and hand it to the asset (web console) config.
		var overrideConfig *overrideapi.ClusterResourceOverrideConfig = nil
		if oc.Options.KubernetesMasterConfig != nil { // external kube gets you a nil pointer here
			if overridePluginConfigFile, err := pluginconfig.GetPluginConfigFile(oc.Options.KubernetesMasterConfig.AdmissionConfig.PluginConfig, overrideapi.PluginName, ""); err != nil {
				return err
			} else if overridePluginConfigFile != "" {
				configFile, err := os.Open(overridePluginConfigFile)
				if err != nil {
					return err
				}
				if overrideConfig, err = override.ReadConfig(configFile); err != nil {
					return err
				}
			}
		}

		config, err := origin.NewAssetConfig(*oc.Options.AssetConfig, overrideConfig)
		if err != nil {
			return err
		}

		// When the console shares the API bind address it is installed into the
		// same server; otherwise it runs as a standalone server further below.
		if oc.Options.AssetConfig.ServingInfo.BindAddress == oc.Options.ServingInfo.BindAddress {
			unprotectedInstallers = append(unprotectedInstallers, config)
		} else {
			standaloneAssetConfig = config
		}
	}

	if kc != nil {
		oc.Run([]origin.APIInstaller{kc}, unprotectedInstallers)
	} else {
		// No local Kubernetes master: install a proxy to the external cluster
		// named by the master-clients kubeconfig.
		_, kubeClientConfig, err := configapi.GetKubeClient(oc.Options.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return err
		}
		proxy := &kubernetes.ProxyConfig{
			ClientConfig: kubeClientConfig,
		}
		oc.Run([]origin.APIInstaller{proxy}, unprotectedInstallers)
	}

	oc.InitializeObjects()

	if standaloneAssetConfig != nil {
		standaloneAssetConfig.Run()
	}
	if oc.Options.DNSConfig != nil {
		oc.RunDNSServer()
	}

	oc.RunProjectAuthorizationCache()
	return nil
}
// startControllers launches the controllers.
// Fatal startup errors are reported via glog.Fatalf, which exits the process;
// the error return is used only for the "controllers disabled" fast path.
func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
	// Controllers may be disabled entirely via configuration.
	if oc.Options.Controllers == configapi.ControllersDisabled {
		return nil
	}

	go func() {
		oc.ControllerPlugStart()
		// when a manual shutdown (DELETE /controllers) or lease lost occurs, the process should exit
		// this ensures no code is still running as a controller, and allows a process manager to reset
		// the controller to come back into a candidate state and compete for the lease
		oc.ControllerPlug.WaitForStop()
		glog.Fatalf("Controller shutdown requested")
	}()
	// Block until this process is permitted to run controllers (plug started).
	oc.ControllerPlug.WaitForStart()
	glog.Infof("Controllers starting (%s)", oc.Options.Controllers)

	// Start these first, because they provide credentials for other controllers' clients
	oc.RunServiceAccountsController()
	oc.RunServiceAccountTokensController()
	// used by admission controllers
	oc.RunServiceAccountPullSecretsControllers()
	oc.RunSecurityAllocationController()

	if kc != nil {
		// Fetch a dedicated service-account client for each kube controller.
		_, rcClient, err := oc.GetServiceAccountClients(oc.ReplicationControllerServiceAccount)
		if err != nil {
			glog.Fatalf("Could not get client for replication controller: %v", err)
		}
		_, jobClient, err := oc.GetServiceAccountClients(oc.JobControllerServiceAccount)
		if err != nil {
			glog.Fatalf("Could not get client for job controller: %v", err)
		}
		hpaOClient, hpaKClient, err := oc.GetServiceAccountClients(oc.HPAControllerServiceAccount)
		if err != nil {
			glog.Fatalf("Could not get client for HPA controller: %v", err)
		}
		_, pvKClient, err := oc.GetServiceAccountClients(oc.PersistentVolumeControllerServiceAccount)
		if err != nil {
			glog.Fatalf("Could not get client for persistent volume controller: %v", err)
		}

		// called by admission control
		kc.RunResourceQuotaManager()

		// no special order
		kc.RunNodeController()
		kc.RunScheduler()
		kc.RunReplicationController(rcClient)
		// Job and HPA controllers are gated on the experimental API being enabled.
		if kc.Master.EnableExp {
			kc.RunJobController(jobClient)
			kc.RunHPAController(hpaOClient, hpaKClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace)
		}
		kc.RunEndpointController()
		kc.RunNamespaceController()
		kc.RunPersistentVolumeClaimBinder()
		kc.RunPersistentVolumeClaimRecycler(oc.ImageFor("recycler"), pvKClient)
		glog.Infof("Started Kubernetes Controllers")
	}

	// no special order
	if configapi.IsBuildEnabled(&oc.Options) {
		oc.RunBuildController()
		oc.RunBuildPodController()
		oc.RunBuildConfigChangeController()
		oc.RunBuildImageChangeTriggerController()
	}
	oc.RunDeploymentController()
	oc.RunDeployerPodController()
	oc.RunDeploymentConfigController()
	oc.RunDeploymentConfigChangeController()
	oc.RunDeploymentImageChangeTriggerController()
	oc.RunImageImportController()
	oc.RunOriginNamespaceController()
	oc.RunSDNController()

	glog.Infof("Started Origin Controllers")
	return nil
}
func BuildKubernetesMasterConfig(openshiftConfig *origin.MasterConfig) (*kubernetes.MasterConfig, error) { if openshiftConfig.Options.KubernetesMasterConfig == nil { return nil, nil } kubeConfig, err := kubernetes.BuildKubernetesMasterConfig(openshiftConfig.Options, openshiftConfig.RequestContextMapper, openshiftConfig.KubeClient(), openshiftConfig.Informers, openshiftConfig.PluginInitializer) return kubeConfig, err }
// startAPI starts the components of the master that are considered part of the API - the Kubernetes
// API and core controllers, the Origin API, the group, policy, project, and authorization caches,
// etcd, the asset server (for the UI), the OAuth server endpoints, and the DNS server.
// TODO: allow to be more granularly targeted
func startAPI(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
	// start etcd
	if oc.Options.EtcdConfig != nil {
		etcd.RunEtcd(oc.Options.EtcdConfig)
	}

	// verify we can connect to etcd with the provided config
	if err := etcd.TestEtcdClient(oc.EtcdClient); err != nil {
		return err
	}

	// Must start policy caching immediately
	oc.RunGroupCache()
	oc.RunPolicyCache()
	oc.RunProjectCache()

	unprotectedInstallers := []origin.APIInstaller{}

	if oc.Options.OAuthConfig != nil {
		authConfig, err := origin.BuildAuthConfig(oc.Options)
		if err != nil {
			return err
		}
		// OAuth endpoints go into the unprotected installer set.
		unprotectedInstallers = append(unprotectedInstallers, authConfig)
	}

	var standaloneAssetConfig *origin.AssetConfig
	if oc.WebConsoleEnabled() {
		config, err := origin.BuildAssetConfig(*oc.Options.AssetConfig)
		if err != nil {
			return err
		}
		// Serve the console from the API server when they share a bind address;
		// otherwise run it standalone further below.
		if oc.Options.AssetConfig.ServingInfo.BindAddress == oc.Options.ServingInfo.BindAddress {
			unprotectedInstallers = append(unprotectedInstallers, config)
		} else {
			standaloneAssetConfig = config
		}
	}

	if kc != nil {
		oc.Run([]origin.APIInstaller{kc}, unprotectedInstallers)
	} else {
		// No local Kubernetes master: install a proxy to the external cluster.
		_, kubeClientConfig, err := configapi.GetKubeClient(oc.Options.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return err
		}
		proxy := &kubernetes.ProxyConfig{
			ClientConfig: kubeClientConfig,
		}
		oc.Run([]origin.APIInstaller{proxy}, unprotectedInstallers)
	}

	oc.InitializeObjects()

	if standaloneAssetConfig != nil {
		standaloneAssetConfig.Run()
	}
	if oc.Options.DNSConfig != nil {
		oc.RunDNSServer()
	}

	oc.RunProjectAuthorizationCache()
	return nil
}
func buildKubernetesMasterConfig(openshiftConfig *origin.MasterConfig) (*kubernetes.MasterConfig, error) { if openshiftConfig.Options.KubernetesMasterConfig == nil { return nil, nil } kubeConfig, err := kubernetes.BuildKubernetesMasterConfig(openshiftConfig.Options, openshiftConfig.RequestContextMapper, openshiftConfig.KubeClient()) return kubeConfig, err }
// StartAPI starts the components of the master that are considered part of the API - the Kubernetes
// API and core controllers, the Origin API, the group, policy, project, and authorization caches,
// etcd, the asset server (for the UI), the OAuth server endpoints, and the DNS server.
// TODO: allow to be more granularly targeted
func StartAPI(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
	// start etcd
	if oc.Options.EtcdConfig != nil {
		etcdserver.RunEtcd(oc.Options.EtcdConfig)
	}

	// verify we can connect to etcd with the provided config
	if _, err := etcd.GetAndTestEtcdClient(oc.Options.EtcdClientInfo); err != nil {
		return err
	}

	// Must start policy caching immediately
	oc.Informers.StartCore(utilwait.NeverStop)
	oc.RunClusterQuotaMappingController()
	oc.RunGroupCache()
	oc.RunProjectCache()

	unprotectedInstallers := []origin.APIInstaller{}

	if oc.Options.OAuthConfig != nil {
		authConfig, err := origin.BuildAuthConfig(oc)
		if err != nil {
			return err
		}
		// OAuth endpoints go into the unprotected installer set.
		unprotectedInstallers = append(unprotectedInstallers, authConfig)
	}

	var standaloneAssetConfig *origin.AssetConfig
	if oc.WebConsoleEnabled() {
		// Resource-override admission config is surfaced to the web console.
		overrideConfig, err := getResourceOverrideConfig(oc)
		if err != nil {
			return err
		}
		config, err := origin.NewAssetConfig(*oc.Options.AssetConfig, overrideConfig)
		if err != nil {
			return err
		}

		// Serve the console from the API server when they share a bind address;
		// otherwise run it standalone further below.
		if oc.Options.AssetConfig.ServingInfo.BindAddress == oc.Options.ServingInfo.BindAddress {
			unprotectedInstallers = append(unprotectedInstallers, config)
		} else {
			standaloneAssetConfig = config
		}
	}

	if kc != nil {
		oc.Run([]origin.APIInstaller{kc}, unprotectedInstallers)
	} else {
		// No local Kubernetes master: install a proxy to the external cluster,
		// honoring any client connection overrides from the master config.
		_, kubeClientConfig, err := configapi.GetKubeClient(oc.Options.MasterClients.ExternalKubernetesKubeConfig, oc.Options.MasterClients.ExternalKubernetesClientConnectionOverrides)
		if err != nil {
			return err
		}
		proxy := &kubernetes.ProxyConfig{
			ClientConfig: kubeClientConfig,
		}
		oc.Run([]origin.APIInstaller{proxy}, unprotectedInstallers)
	}

	// start up the informers that we're trying to use in the API server
	oc.Informers.Start(utilwait.NeverStop)
	oc.InitializeObjects()

	if standaloneAssetConfig != nil {
		standaloneAssetConfig.Run()
	}
	if oc.Options.DNSConfig != nil {
		oc.RunDNSServer()
	}

	oc.RunProjectAuthorizationCache()
	return nil
}
func BuildKubernetesMasterConfig(openshiftConfig *origin.MasterConfig) (*kubernetes.MasterConfig, error) { if openshiftConfig.Options.KubernetesMasterConfig == nil { return nil, nil } kubeConfig, err := kubernetes.BuildKubernetesMasterConfig(openshiftConfig.Options, openshiftConfig.RequestContextMapper, openshiftConfig.KubeClient(), openshiftConfig.Informers, openshiftConfig.KubeAdmissionControl, openshiftConfig.Authenticator) return kubeConfig, err }
// StartControllers launches the controllers.
// Fatal startup errors are reported via glog.Fatalf, which exits the process;
// the error return is used only for the "controllers disabled" fast path.
func StartControllers(openshiftConfig *origin.MasterConfig, kubeMasterConfig *kubernetes.MasterConfig) error {
	// Controllers may be disabled entirely via configuration.
	if openshiftConfig.Options.Controllers == configapi.ControllersDisabled {
		return nil
	}

	go func() {
		// When a shutdown is requested through the controller plug, exit the
		// whole process so nothing continues running as a controller.
		openshiftConfig.ControllerPlug.WaitForStop()
		glog.Fatalf("Master shutdown requested")
	}()
	// Block until this process is permitted to run controllers (plug started).
	openshiftConfig.ControllerPlug.WaitForStart()
	glog.Infof("Master controllers starting (%s)", openshiftConfig.Options.Controllers)

	// Start these first, because they provide credentials for other controllers' clients
	openshiftConfig.RunServiceAccountsController()
	openshiftConfig.RunServiceAccountTokensController()
	// used by admission controllers
	openshiftConfig.RunServiceAccountPullSecretsControllers()
	openshiftConfig.RunSecurityAllocationController()

	if kubeMasterConfig != nil {
		_, rcClient, err := openshiftConfig.GetServiceAccountClients(openshiftConfig.ReplicationControllerServiceAccount)
		if err != nil {
			glog.Fatalf("Could not get client for replication controller: %v", err)
		}

		// called by admission control
		kubeMasterConfig.RunResourceQuotaManager()

		// no special order
		kubeMasterConfig.RunNodeController()
		kubeMasterConfig.RunScheduler()
		kubeMasterConfig.RunReplicationController(rcClient)
		kubeMasterConfig.RunEndpointController()
		kubeMasterConfig.RunNamespaceController()
		kubeMasterConfig.RunPersistentVolumeClaimBinder()
		// NOTE(review): this variant passes the "deployer" image to the recycler,
		// while sibling variants in this file use ImageFor("recycler") — confirm
		// which image is intended here.
		kubeMasterConfig.RunPersistentVolumeClaimRecycler(openshiftConfig.ImageFor("deployer"))
	}

	// no special order
	if configapi.IsBuildEnabled(&openshiftConfig.Options) {
		openshiftConfig.RunBuildController()
		openshiftConfig.RunBuildPodController()
		openshiftConfig.RunBuildConfigChangeController()
		openshiftConfig.RunBuildImageChangeTriggerController()
	}
	openshiftConfig.RunDeploymentController()
	openshiftConfig.RunDeployerPodController()
	openshiftConfig.RunDeploymentConfigController()
	openshiftConfig.RunDeploymentConfigChangeController()
	openshiftConfig.RunDeploymentImageChangeTriggerController()
	openshiftConfig.RunImageImportController()
	openshiftConfig.RunOriginNamespaceController()
	openshiftConfig.RunSDNController()

	return nil
}