// NewControllersCommand provides a CLI handler for the 'controller-manager' command
func NewControllersCommand(name, fullName string, out io.Writer) *cobra.Command {
    controllerOptions := controlleroptions.NewCMServer()

    cmd := &cobra.Command{
        Use:   name,
        Short: "Launch Kubernetes controller manager (kube-controller-manager)",
        Long:  controllersLong,
        Run: func(c *cobra.Command, args []string) {
            startProfiler()

            util.InitLogs()
            defer util.FlushLogs()

            if err := controllerapp.Run(controllerOptions); err != nil {
                fmt.Fprintf(os.Stderr, "%v\n", err)
                os.Exit(1)
            }
        },
    }
    cmd.SetOutput(out)

    flags := cmd.Flags()
    flags.SetNormalizeFunc(kflag.WordSepNormalizeFunc)
    flags.AddGoFlagSet(flag.CommandLine)
    controllerOptions.AddFlags(flags)

    return cmd
}
// NewCMServer creates a new CMServer with a default config.
func NewCMServer() *CMServer {
    s := &CMServer{
        CMServer: options.NewCMServer(),
    }
    s.CloudProvider = mesos.ProviderName
    s.UseHostPortEndpoints = true
    return s
}
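The Mesos variant above overrides upstream defaults by embedding the stock CMServer. A minimal, self-contained sketch of that embed-and-override pattern (all types and values here are hypothetical stand-ins, not the real Kubernetes/Mesos packages):

package main

import "fmt"

// Options stands in for the upstream defaults struct (hypothetical).
type Options struct {
    CloudProvider        string
    UseHostPortEndpoints bool
}

// NewOptions mirrors options.NewCMServer: it returns upstream defaults.
func NewOptions() *Options {
    return &Options{CloudProvider: "", UseHostPortEndpoints: false}
}

// Server embeds Options and adjusts a few defaults, as the Mesos
// CMServer does with mesos.ProviderName and UseHostPortEndpoints.
type Server struct {
    *Options
}

func NewServer() *Server {
    s := &Server{Options: NewOptions()}
    s.CloudProvider = "mesos" // override the embedded default
    s.UseHostPortEndpoints = true
    return s
}

func main() {
    fmt.Println(NewServer().CloudProvider) // mesos
}

Because the upstream struct is embedded rather than copied, every field and flag the upstream adds later is inherited automatically; only the overridden defaults live in the wrapper.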
// NewKubeControllerManager creates a new hyperkube Server object that includes the
// description and flags.
func NewKubeControllerManager() *Server {
    s := options.NewCMServer()
    hks := Server{
        SimpleUsage: "controller-manager",
        Long:        "A server that runs a set of active components. This includes replication controllers, service endpoints and nodes.",
        Run: func(_ *Server, args []string) error {
            return app.Run(s)
        },
    }
    s.AddFlags(hks.Flags())
    return &hks
}
func main() {
    s := options.NewCMServer()
    s.AddFlags(pflag.CommandLine, app.KnownControllers(), app.ControllersDisabledByDefault.List())

    flag.InitFlags()
    logs.InitLogs()
    defer logs.FlushLogs()

    verflag.PrintAndExitIfRequested()

    if err := app.Run(s); err != nil {
        fmt.Fprintf(os.Stderr, "%v\n", err)
        os.Exit(1)
    }
}
func main() {
    s := options.NewCMServer()
    s.AddFlags(pflag.CommandLine)

    flag.InitFlags()
    logs.InitLogs()
    defer logs.FlushLogs()

    verflag.PrintAndExitIfRequested()

    if err := app.Run(s); err != nil {
        fmt.Fprintf(os.Stderr, "%v\n", err)
        os.Exit(1)
    }
}
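This main, like the variants above and below it, follows one lifecycle: build default options, register flags, parse, initialize logging, then hand off to the long-running loop. A standard-library sketch of that sequence (run and the --config flag are hypothetical stand-ins for app.Run and the real flags):

package main

import (
    "flag"
    "fmt"
    "log"
    "os"
)

// run stands in for app.Run (hypothetical); it receives parsed options.
func run(configFile string) error {
    if configFile == "" {
        return fmt.Errorf("no config file given")
    }
    log.Printf("running with config %s", configFile)
    return nil
}

func main() {
    // 1. register flags (the analogue of s.AddFlags(pflag.CommandLine))
    configFile := flag.String("config", "", "path to config file")
    // 2. parse them (the analogue of flag.InitFlags)
    flag.Parse()
    // 3. hand the options to the server loop and exit non-zero on failure
    if err := run(*configFile); err != nil {
        fmt.Fprintf(os.Stderr, "%v\n", err)
        os.Exit(1)
    }
}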
func Test_nodeWithUpdatedStatus(t *testing.T) {
    now := time.Now()
    testNode := func(d time.Duration, s api.ConditionStatus, r string) *api.Node {
        return &api.Node{
            Status: api.NodeStatus{
                Conditions: []api.NodeCondition{{
                    Type: api.NodeOutOfDisk,
                }, {
                    Type:               api.NodeReady,
                    Status:             s,
                    Reason:             r,
                    Message:            "some message we don't care about here",
                    LastTransitionTime: unversioned.Time{Time: now.Add(-time.Minute)},
                    LastHeartbeatTime:  unversioned.Time{Time: now.Add(d)},
                }},
            },
        }
    }

    cm := cmoptions.NewCMServer()
    kubecfg := kubeletoptions.NewKubeletServer()
    assert.True(t, kubecfg.NodeStatusUpdateFrequency.Duration*3 < cm.NodeControllerOptions.NodeMonitorGracePeriod) // sanity check for defaults

    n := testNode(0, api.ConditionTrue, "KubeletReady")
    su := NewStatusUpdater(nil, cm.NodeControllerOptions.NodeMonitorPeriod, func() time.Time { return now })
    _, updated, err := su.nodeWithUpdatedStatus(n)
    assert.NoError(t, err)
    assert.False(t, updated, "no update expected b/c kubelet updated heartbeat just now")

    n = testNode(-cm.NodeControllerOptions.NodeMonitorGracePeriod, api.ConditionTrue, "KubeletReady")
    n2, updated, err := su.nodeWithUpdatedStatus(n)
    assert.NoError(t, err)
    assert.True(t, updated, "update expected b/c kubelet's update is older than DefaultNodeMonitorGracePeriod")
    assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Reason, slaveReadyReason)
    assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Message, slaveReadyMessage)

    n = testNode(-kubecfg.NodeStatusUpdateFrequency.Duration, api.ConditionTrue, "KubeletReady")
    n2, updated, err = su.nodeWithUpdatedStatus(n)
    assert.NoError(t, err)
    assert.False(t, updated, "no update expected b/c kubelet's update was missed only once")

    n = testNode(-kubecfg.NodeStatusUpdateFrequency.Duration*3, api.ConditionTrue, "KubeletReady")
    n2, updated, err = su.nodeWithUpdatedStatus(n)
    assert.NoError(t, err)
    assert.True(t, updated, "update expected b/c kubelet's update is older than 3*DefaultNodeStatusUpdateFrequency")
    assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Reason, slaveReadyReason)
    assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Message, slaveReadyMessage)
}
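The test pins "now" by injecting a func() time.Time into NewStatusUpdater, which is what makes the heartbeat-age assertions deterministic. A small sketch of the same clock-injection technique (Checker and its fields are hypothetical):

package main

import (
    "fmt"
    "time"
)

// Checker reports whether a heartbeat is stale; the clock is injected
// so tests can control "now", exactly as the StatusUpdater test does.
type Checker struct {
    gracePeriod time.Duration
    now         func() time.Time
}

func (c Checker) Stale(lastHeartbeat time.Time) bool {
    return c.now().Sub(lastHeartbeat) > c.gracePeriod
}

func main() {
    fixed := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
    c := Checker{gracePeriod: 40 * time.Second, now: func() time.Time { return fixed }}
    fmt.Println(c.Stale(fixed.Add(-time.Minute))) // true: heartbeat older than the grace period
    fmt.Println(c.Stale(fixed))                   // false: heartbeat is current
}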
func StartControllerManagerServer(lk LocalkubeServer) func() error {
    config := options.NewCMServer()
    config.Master = lk.GetAPIServerInsecureURL()

    // defaults from command
    config.DeletingPodsQps = 0.1
    config.DeletingPodsBurst = 10
    config.EnableProfiling = true

    config.ServiceAccountKeyFile = lk.GetPrivateKeyCertPath()
    config.RootCAFile = lk.GetCAPublicKeyCertPath()

    return func() error {
        return controllerManager.Run(config)
    }
}
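StartControllerManagerServer does all configuration eagerly and defers execution behind the returned func() error, so the caller decides when, and in which goroutine, the server actually runs. A sketch of composing such starters (starter and startEcho are hypothetical):

package main

import (
    "fmt"
    "log"
)

// starter mirrors the shape returned by StartControllerManagerServer:
// configuration happens eagerly, execution is deferred to the closure.
type starter func() error

func startEcho(msg string) starter {
    // configuration phase: capture and validate inputs up front
    return func() error {
        // execution phase: runs only when the caller invokes the closure
        log.Printf("started: %s", msg)
        return nil
    }
}

func main() {
    servers := []starter{startEcho("controller-manager"), startEcho("scheduler")}
    for _, s := range servers {
        if err := s(); err != nil {
            fmt.Println(err)
        }
    }
}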
func main() {
    runtime.GOMAXPROCS(runtime.NumCPU())
    s := options.NewCMServer()
    s.AddFlags(pflag.CommandLine)

    util.InitFlags()
    util.InitLogs()
    defer util.FlushLogs()

    verflag.PrintAndExitIfRequested()

    if err := app.Run(s); err != nil {
        fmt.Fprintf(os.Stderr, "%v\n", err)
        os.Exit(1)
    }
}
func NewBootkube(config Config) (*bootkube, error) {
    apiServer := apiserver.NewAPIServer()
    fs := pflag.NewFlagSet("apiserver", pflag.ExitOnError)
    apiServer.AddFlags(fs)
    fs.Parse([]string{
        "--bind-address=0.0.0.0",
        "--secure-port=443",
        "--insecure-port=8081", // NOTE: temp hack for single-apiserver
        "--allow-privileged=true",
        "--tls-private-key-file=" + filepath.Join(config.AssetDir, asset.AssetPathAPIServerKey),
        "--tls-cert-file=" + filepath.Join(config.AssetDir, asset.AssetPathAPIServerCert),
        "--client-ca-file=" + filepath.Join(config.AssetDir, asset.AssetPathCACert),
        "--etcd-servers=" + config.EtcdServer.String(),
        "--service-cluster-ip-range=10.3.0.0/24",
        "--service-account-key-file=" + filepath.Join(config.AssetDir, asset.AssetPathServiceAccountPubKey),
        "--admission-control=ServiceAccount",
        "--runtime-config=extensions/v1beta1/deployments=true,extensions/v1beta1/daemonsets=true",
    })

    cmServer := controller.NewCMServer()
    fs = pflag.NewFlagSet("controllermanager", pflag.ExitOnError)
    cmServer.AddFlags(fs)
    fs.Parse([]string{
        "--master=" + insecureAPIAddr,
        "--service-account-private-key-file=" + filepath.Join(config.AssetDir, asset.AssetPathServiceAccountPrivKey),
        "--root-ca-file=" + filepath.Join(config.AssetDir, asset.AssetPathCACert),
        "--leader-elect=true",
    })

    schedServer := scheduler.NewSchedulerServer()
    fs = pflag.NewFlagSet("scheduler", pflag.ExitOnError)
    schedServer.AddFlags(fs)
    fs.Parse([]string{
        "--master=" + insecureAPIAddr,
        "--leader-elect=true",
    })

    return &bootkube{
        apiServer:  apiServer,
        controller: cmServer,
        scheduler:  schedServer,
        assetDir:   config.AssetDir,
    }, nil
}
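NewBootkube configures each in-process component by formatting values as CLI arguments and parsing them through the component's own FlagSet, so programmatic configuration can never drift from the flag surface the binary documents. The same round-trip with only the standard library (the flag names here are illustrative):

package main

import (
    "flag"
    "fmt"
)

func main() {
    // Build the component's flag set, then "parse" hard-coded arguments,
    // analogous to cmServer.AddFlags(fs) followed by fs.Parse(...) above.
    fs := flag.NewFlagSet("controllermanager", flag.ExitOnError)
    master := fs.String("master", "", "API server address")
    leaderElect := fs.Bool("leader-elect", false, "enable leader election")

    fs.Parse([]string{
        "--master=http://127.0.0.1:8081",
        "--leader-elect=true",
    })
    fmt.Println(*master, *leaderElect)
}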
// NewControllerManagerCommand creates a *cobra.Command object with default parameters
func NewControllerManagerCommand() *cobra.Command {
    s := options.NewCMServer()
    s.AddFlags(pflag.CommandLine)
    cmd := &cobra.Command{
        Use: "kube-controller-manager",
        Long: `The Kubernetes controller manager is a daemon that embeds
the core control loops shipped with Kubernetes. In applications of robotics and
automation, a control loop is a non-terminating loop that regulates the state of
the system. In Kubernetes, a controller is a control loop that watches the shared
state of the cluster through the apiserver and makes changes attempting to move the
current state towards the desired state. Examples of controllers that ship with
Kubernetes today are the replication controller, endpoints controller, namespace
controller, and serviceaccounts controller.`,
        Run: func(cmd *cobra.Command, args []string) {
        },
    }

    return cmd
}
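Note the empty Run: flags are registered on the shared pflag.CommandLine and the real work is wired up by the caller. If the work lived inside the command instead, it might look like this sketch (runControllerManager is a hypothetical stand-in for app.Run):

package main

import (
    "fmt"
    "os"

    "github.com/spf13/cobra"
)

// runControllerManager stands in for app.Run(s) (hypothetical).
func runControllerManager() error { return nil }

func main() {
    cmd := &cobra.Command{
        Use:   "kube-controller-manager",
        Short: "Runs the core Kubernetes control loops",
        // Unlike the snippet above, the work happens inside Run so that
        // cmd.Execute() alone drives the program.
        Run: func(cmd *cobra.Command, args []string) {
            if err := runControllerManager(); err != nil {
                fmt.Fprintf(os.Stderr, "%v\n", err)
                os.Exit(1)
            }
        },
    }
    if err := cmd.Execute(); err != nil {
        os.Exit(1)
    }
}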
// startControllers launches the controllers
func startControllers(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
    if oc.Options.Controllers == configapi.ControllersDisabled {
        return nil
    }

    go func() {
        oc.ControllerPlugStart()
        // when a manual shutdown (DELETE /controllers) or lease loss occurs, the process should exit
        // this ensures no code is still running as a controller, and allows a process manager to reset
        // the controller to come back into a candidate state and compete for the lease
        if err := oc.ControllerPlug.WaitForStop(); err != nil {
            glog.Fatalf("Controller shutdown due to lease being lost: %v", err)
        }
        glog.Fatalf("Controller graceful shutdown requested")
    }()

    oc.ControllerPlug.WaitForStart()
    glog.Infof("Controllers starting (%s)", oc.Options.Controllers)

    // Get configured options (or defaults) for k8s controllers
    controllerManagerOptions := cmapp.NewCMServer()
    if kc != nil && kc.ControllerManager != nil {
        controllerManagerOptions = kc.ControllerManager
    }

    // Start these first, because they provide credentials for other controllers' clients
    oc.RunServiceAccountsController()
    oc.RunServiceAccountTokensController(controllerManagerOptions)
    // used by admission controllers
    oc.RunServiceAccountPullSecretsControllers()
    oc.RunSecurityAllocationController()

    if kc != nil {
        _, _, rcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraReplicationControllerServiceAccountName)
        if err != nil {
            glog.Fatalf("Could not get client for replication controller: %v", err)
        }
        _, _, jobClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraJobControllerServiceAccountName)
        if err != nil {
            glog.Fatalf("Could not get client for job controller: %v", err)
        }
        _, hpaOClient, hpaKClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraHPAControllerServiceAccountName)
        if err != nil {
            glog.Fatalf("Could not get client for HPA controller: %v", err)
        }
        _, _, recyclerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeRecyclerControllerServiceAccountName)
        if err != nil {
            glog.Fatalf("Could not get client for persistent volume recycler controller: %v", err)
        }
        _, _, binderClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeBinderControllerServiceAccountName)
        if err != nil {
            glog.Fatalf("Could not get client for persistent volume binder controller: %v", err)
        }
        _, _, provisionerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraPersistentVolumeProvisionerControllerServiceAccountName)
        if err != nil {
            glog.Fatalf("Could not get client for persistent volume provisioner controller: %v", err)
        }
        _, _, daemonSetClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraDaemonSetControllerServiceAccountName)
        if err != nil {
            glog.Fatalf("Could not get client for daemonset controller: %v", err)
        }
        _, _, gcClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraGCControllerServiceAccountName)
        if err != nil {
            glog.Fatalf("Could not get client for pod gc controller: %v", err)
        }
        _, _, serviceLoadBalancerClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraServiceLoadBalancerControllerServiceAccountName)
        if err != nil {
            glog.Fatalf("Could not get client for service load balancer controller: %v", err)
        }
        namespaceControllerClientConfig, _, namespaceControllerKubeClient, err := oc.GetServiceAccountClients(bootstrappolicy.InfraNamespaceControllerServiceAccountName)
        if err != nil {
            glog.Fatalf("Could not get client for namespace controller: %v", err)
        }
        namespaceControllerClientSet := clientadapter.FromUnversionedClient(namespaceControllerKubeClient)
        namespaceControllerClientPool := dynamic.NewClientPool(namespaceControllerClientConfig, dynamic.LegacyAPIPathResolverFunc)

        // called by admission control
        kc.RunResourceQuotaManager()
        oc.RunResourceQuotaManager(controllerManagerOptions)

        // no special order
        kc.RunNodeController()
        kc.RunScheduler()
        kc.RunReplicationController(rcClient)

        extensionsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, extensions.GroupName)) > 0
        // TODO: enable this check once the job controller can use the batch API if the extensions API is disabled
        // batchEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, batch.GroupName)) > 0
        if extensionsEnabled /*|| batchEnabled*/ {
            kc.RunJobController(jobClient)
        }
        // TODO: enable this check once the HPA controller can use the autoscaling API if the extensions API is disabled
        // autoscalingEnabled := len(configapi.GetEnabledAPIVersionsForGroup(kc.Options, autoscaling.GroupName)) > 0
        if extensionsEnabled /*|| autoscalingEnabled*/ {
            kc.RunHPAController(hpaOClient, hpaKClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace)
        }
        if extensionsEnabled {
            kc.RunDaemonSetsController(daemonSetClient)
        }

        kc.RunEndpointController()
        kc.RunNamespaceController(namespaceControllerClientSet, namespaceControllerClientPool)
        kc.RunPersistentVolumeClaimBinder(binderClient)
        if oc.Options.VolumeConfig.DynamicProvisioningEnabled {
            kc.RunPersistentVolumeProvisioner(provisionerClient)
        }
        kc.RunPersistentVolumeClaimRecycler(oc.ImageFor("recycler"), recyclerClient, oc.Options.PolicyConfig.OpenShiftInfrastructureNamespace)
        kc.RunGCController(gcClient)
        kc.RunServiceLoadBalancerController(serviceLoadBalancerClient)

        glog.Infof("Started Kubernetes Controllers")
    } else {
        oc.RunResourceQuotaManager(nil)
    }

    // no special order
    if configapi.IsBuildEnabled(&oc.Options) {
        oc.RunBuildController()
        oc.RunBuildPodController()
        oc.RunBuildConfigChangeController()
        oc.RunBuildImageChangeTriggerController()
    }
    oc.RunDeploymentController()
    oc.RunDeployerPodController()
    oc.RunDeploymentConfigController()
    oc.RunDeploymentTriggerController()
    oc.RunDeploymentImageChangeTriggerController()
    oc.RunImageImportController()
    oc.RunOriginNamespaceController()
    oc.RunSDNController()

    _, _, serviceServingCertClient, err := oc.GetServiceAccountClients(bootstrappolicy.ServiceServingCertServiceAccountName)
    if err != nil {
        glog.Fatalf("Could not get client: %v", err)
    }
    oc.RunServiceServingCertController(serviceServingCertClient)

    glog.Infof("Started Origin Controllers")

    return nil
}
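The goroutine at the top of startControllers encodes a lease-based lifecycle: block until leadership is won, then treat any stop, graceful or not, as fatal so a process manager can restart the binary into a fresh candidate state. A toy sketch of that plug contract (the plug type here is hypothetical):

package main

import (
    "fmt"
    "time"
)

// plug is a toy stand-in for oc.ControllerPlug: controllers block on
// WaitForStart and the process exits when the lease is lost.
type plug struct {
    started chan struct{}
    stopped chan error
}

func (p *plug) WaitForStart()      { <-p.started }
func (p *plug) WaitForStop() error { return <-p.stopped }

func main() {
    p := &plug{started: make(chan struct{}), stopped: make(chan error, 1)}
    go func() {
        // leader election won: release the waiting controllers
        close(p.started)
        time.Sleep(10 * time.Millisecond)
        p.stopped <- fmt.Errorf("lease lost")
    }()
    p.WaitForStart()
    fmt.Println("controllers starting")
    fmt.Println("shutdown:", p.WaitForStop())
}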
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, pluginInitializer oadmission.PluginInitializer) (*MasterConfig, error) {
    if options.KubernetesMasterConfig == nil {
        return nil, errors.New("insufficient information to build KubernetesMasterConfig")
    }

    // Connect and setup etcd interfaces
    etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)
    kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
    if err != nil {
        return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
    }

    // in-order list of plug-ins that should intercept admission decisions
    // TODO: Push node environment support to upstream in future

    _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, err
    }
    port, err := strconv.Atoi(portString)
    if err != nil {
        return nil, err
    }

    portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
    if err != nil {
        return nil, err
    }

    podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
    if err != nil {
        return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
    }

    // Defaults are tested in TestAPIServerDefaults
    server := apiserveroptions.NewAPIServer()
    // Adjust defaults
    server.EventTTL = 2 * time.Hour
    server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
    server.ServiceNodePortRange = *portRange
    server.AdmissionControl = strings.Join(AdmissionPlugins, ",")
    server.EnableLogsSupport = false // don't expose server logs

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    if len(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride) > 0 {
        server.AdmissionControl = strings.Join(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride, ",")
    }

    // Defaults are tested in TestCMServerDefaults
    cmserver := cmapp.NewCMServer()
    // Adjust defaults
    cmserver.Address = "" // no healthz endpoint
    cmserver.Port = 0     // no healthz endpoint
    cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
    }

    plugins := []admission.Interface{}
    for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
        switch pluginName {
        case serviceadmit.ExternalIPPluginName:
            // this needs to be moved upstream to be part of core config
            reject, admit, err := serviceadmit.ParseCIDRRules(options.NetworkConfig.ExternalIPNetworkCIDRs)
            if err != nil {
                // should have been caught with validation
                return nil, err
            }
            plugins = append(plugins, serviceadmit.NewExternalIPRanger(reject, admit))
        case saadmit.PluginName:
            // we need to set some custom parameters on the service account admission controller, so create that one by hand
            saAdmitter := saadmit.NewServiceAccount(internalclientset.FromUnversionedClient(kubeClient))
            saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
            saAdmitter.Run()
            plugins = append(plugins, saAdmitter)
        default:
            configFile, err := pluginconfig.GetPluginConfigFile(options.KubernetesMasterConfig.AdmissionConfig.PluginConfig, pluginName, server.AdmissionControlConfigFile)
            if err != nil {
                return nil, err
            }
            plugin := admission.InitPlugin(pluginName, internalclientset.FromUnversionedClient(kubeClient), configFile)
            if plugin != nil {
                plugins = append(plugins, plugin)
            }
        }
    }
    pluginInitializer.Initialize(plugins)
    // ensure that plugins have been properly initialized
    if err := oadmission.Validate(plugins); err != nil {
        return nil, err
    }
    admissionController := admission.NewChainHandler(plugins...)

    var proxyClientCerts []tls.Certificate
    if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
        clientCert, err := tls.LoadX509KeyPair(
            options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
            options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
        )
        if err != nil {
            return nil, err
        }
        proxyClientCerts = append(proxyClientCerts, clientCert)
    }

    // TODO you have to know every APIGroup you're enabling or upstream will panic. Its alternative to panicking is Fataling
    // It needs a refactor to return errors
    storageDestinations := genericapiserver.NewStorageDestinations()
    // storageVersions is a map from API group to allowed versions that must be a version exposed by the REST API or it breaks.
    // We need to fix the upstream to stop using the storage version as a preferred api version.
    storageVersions := map[string]string{}

    enabledKubeVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupKube)
    if len(enabledKubeVersions) > 0 {
        kubeStorageVersion := unversioned.GroupVersion{Group: configapi.APIGroupKube, Version: options.EtcdStorageConfig.KubernetesStorageVersion}
        databaseStorage, err := NewEtcdStorage(etcdClient, kubeStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
        if err != nil {
            return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
        }
        storageDestinations.AddAPIGroup(configapi.APIGroupKube, databaseStorage)
        storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion
    }

    // enable this if extensions API is enabled (or batch or autoscaling, since they persist to extensions/v1beta1 for now)
    // TODO: replace this with a loop over configured storage versions
    extensionsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions)) > 0
    batchEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupBatch)) > 0
    autoscalingEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupAutoscaling)) > 0
    if extensionsEnabled || autoscalingEnabled || batchEnabled {
        // TODO: replace this with a configured storage version for extensions once configuration exposes this
        extensionsStorageVersion := unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"}
        databaseStorage, err := NewEtcdStorage(etcdClient, extensionsStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
        if err != nil {
            return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", err)
        }
        storageDestinations.AddAPIGroup(configapi.APIGroupExtensions, databaseStorage)
        storageVersions[configapi.APIGroupExtensions] = extensionsStorageVersion.String()
    }

    // Preserve previous behavior of using the first non-loopback address
    // TODO: Deprecate this behavior and just require a valid value to be passed in
    publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
    if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
        hostIP, err := knet.ChooseHostInterface()
        if err != nil {
            glog.Fatalf("Unable to find suitable network address.error='%v'. Set the masterIP directly to avoid this error.", err)
        }
        publicAddress = hostIP
        glog.Infof("Will report %v as public IP address.", publicAddress)
    }

    m := &master.Config{
        Config: &genericapiserver.Config{
            PublicAddress: publicAddress,
            ReadWritePort: port,

            Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
            AdmissionControl: admissionController,

            StorageDestinations: storageDestinations,
            StorageVersions:     storageVersions,

            ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
            ServiceNodePortRange:  server.ServiceNodePortRange,

            RequestContextMapper: requestContextMapper,

            APIGroupVersionOverrides: getAPIGroupVersionOverrides(options),
            APIPrefix:                KubeAPIPrefix,
            APIGroupPrefix:           KubeAPIGroupPrefix,

            MasterCount: options.KubernetesMasterConfig.MasterCount,

            // Set the TLS options for proxying to pods and services
            // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
            ProxyTLSClientConfig: &tls.Config{
                // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
                InsecureSkipVerify: true,
                Certificates:       proxyClientCerts,
            },

            Serializer: kapi.Codecs,
        },

        EventTTL: server.EventTTL,
        //MinRequestTimeout: server.MinRequestTimeout,

        KubeletClient: kubeletClient,

        EnableCoreControllers: true,
    }

    if options.DNSConfig != nil {
        _, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
        if err != nil {
            return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
        }
        dnsPort, err := strconv.Atoi(dnsPortStr)
        if err != nil {
            return nil, fmt.Errorf("invalid DNS port: %v", err)
        }
        m.ExtraServicePorts = append(m.ExtraServicePorts,
            kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
            kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
        )
        m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
            kapi.EndpointPort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP},
            kapi.EndpointPort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP},
        )
    }

    kmaster := &MasterConfig{
        Options: *options.KubernetesMasterConfig,

        KubeClient: kubeClient,

        Master:            m,
        ControllerManager: cmserver,
        CloudProvider:     cloud,
    }

    return kmaster, nil
}
func TestCMServerDefaults(t *testing.T) {
    defaults := cmapp.NewCMServer()

    // This is a snapshot of the default config
    // If the default changes (new fields are added, or default values change), we want to know
    // Once we've reacted to the changes appropriately in BuildKubernetesMasterConfig(), update this expected default to match the new upstream defaults
    expectedDefaults := &cmapp.CMServer{
        KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{
            Port:                              10252, // disabled
            Address:                           "0.0.0.0",
            ConcurrentEndpointSyncs:           5,
            ConcurrentRCSyncs:                 5,
            ConcurrentRSSyncs:                 5,
            ConcurrentDaemonSetSyncs:          2,
            ConcurrentJobSyncs:                5,
            ConcurrentResourceQuotaSyncs:      5,
            ConcurrentDeploymentSyncs:         5,
            ConcurrentNamespaceSyncs:          2,
            ConcurrentSATokenSyncs:            5,
            LookupCacheSizeForRC:              4096,
            LookupCacheSizeForRS:              4096,
            LookupCacheSizeForDaemonSet:       1024,
            ConfigureCloudRoutes:              true,
            NodeCIDRMaskSize:                  24,
            ServiceSyncPeriod:                 unversioned.Duration{Duration: 5 * time.Minute},
            NodeSyncPeriod:                    unversioned.Duration{Duration: 10 * time.Second},
            ResourceQuotaSyncPeriod:           unversioned.Duration{Duration: 5 * time.Minute},
            NamespaceSyncPeriod:               unversioned.Duration{Duration: 5 * time.Minute},
            PVClaimBinderSyncPeriod:           unversioned.Duration{Duration: 15 * time.Second},
            HorizontalPodAutoscalerSyncPeriod: unversioned.Duration{Duration: 30 * time.Second},
            DeploymentControllerSyncPeriod:    unversioned.Duration{Duration: 30 * time.Second},
            MinResyncPeriod:                   unversioned.Duration{Duration: 12 * time.Hour},
            RegisterRetryCount:                10,
            PodEvictionTimeout:                unversioned.Duration{Duration: 5 * time.Minute},
            NodeMonitorGracePeriod:            unversioned.Duration{Duration: 40 * time.Second},
            NodeStartupGracePeriod:            unversioned.Duration{Duration: 60 * time.Second},
            NodeMonitorPeriod:                 unversioned.Duration{Duration: 5 * time.Second},
            ClusterName:                       "kubernetes",
            TerminatedPodGCThreshold:          12500,
            VolumeConfiguration: componentconfig.VolumeConfiguration{
                EnableDynamicProvisioning:  true,
                EnableHostPathProvisioning: false,
                FlexVolumePluginDir:        "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
                PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{
                    MaximumRetry:             3,
                    MinimumTimeoutNFS:        300,
                    IncrementTimeoutNFS:      30,
                    MinimumTimeoutHostPath:   60,
                    IncrementTimeoutHostPath: 30,
                },
            },
            ContentType:  "application/vnd.kubernetes.protobuf",
            KubeAPIQPS:   20.0,
            KubeAPIBurst: 30,
            LeaderElection: componentconfig.LeaderElectionConfiguration{
                LeaderElect:   false,
                LeaseDuration: unversioned.Duration{Duration: 15 * time.Second},
                RenewDeadline: unversioned.Duration{Duration: 10 * time.Second},
                RetryPeriod:   unversioned.Duration{Duration: 2 * time.Second},
            },
        },
    }

    if !reflect.DeepEqual(defaults, expectedDefaults) {
        t.Logf("expected defaults, actual defaults: \n%s", diff.ObjectReflectDiff(expectedDefaults, defaults))
        t.Errorf("Got different defaults than expected, adjust in BuildKubernetesMasterConfig and update expectedDefaults")
    }
}
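This is a snapshot test: the expected struct is a hand-maintained copy of the upstream defaults, so any upstream change fails the test and forces a review of the overrides in BuildKubernetesMasterConfig. A reduced sketch of the pattern (Config and its fields are hypothetical):

package main

import (
    "fmt"
    "reflect"
    "time"
)

type Config struct {
    Port       int
    SyncPeriod time.Duration
}

// NewConfig stands in for cmapp.NewCMServer (hypothetical defaults).
func NewConfig() *Config {
    return &Config{Port: 10252, SyncPeriod: 5 * time.Minute}
}

func main() {
    defaults := NewConfig()
    // The expected value is a hand-maintained snapshot; when upstream
    // defaults move, DeepEqual fails and forces a conscious review.
    expected := &Config{Port: 10252, SyncPeriod: 5 * time.Minute}
    if !reflect.DeepEqual(defaults, expected) {
        fmt.Println("defaults drifted; review overrides before updating the snapshot")
    }
}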
func ValidateControllerExtendedArguments(config api.ExtendedArguments, fldPath *field.Path) field.ErrorList {
    return ValidateExtendedArguments(config, controlleroptions.NewCMServer().AddFlags, fldPath)
}
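Passing NewCMServer().AddFlags means validation accepts exactly the arguments the controller manager itself would parse. A sketch of how such flag-backed validation can work (validateArgs is a hypothetical helper, not the real ValidateExtendedArguments):

package main

import (
    "flag"
    "fmt"
)

// validateArgs checks that every key names a registered flag and that
// its value parses, mirroring the approach of validating extended
// arguments against the component's own flag set.
func validateArgs(args map[string]string, register func(*flag.FlagSet)) []error {
    fs := flag.NewFlagSet("validate", flag.ContinueOnError)
    register(fs)
    var errs []error
    for name, value := range args {
        f := fs.Lookup(name)
        if f == nil {
            errs = append(errs, fmt.Errorf("unknown argument %q", name))
            continue
        }
        if err := f.Value.Set(value); err != nil {
            errs = append(errs, fmt.Errorf("invalid value for %q: %v", name, err))
        }
    }
    return errs
}

func main() {
    register := func(fs *flag.FlagSet) { fs.Int("port", 10252, "serving port") }
    fmt.Println(validateArgs(map[string]string{"port": "not-a-number"}, register))
}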
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, informers shared.InformerFactory, admissionControl admission.Interface, originAuthenticator authenticator.Request) (*MasterConfig, error) {
    if options.KubernetesMasterConfig == nil {
        return nil, errors.New("insufficient information to build KubernetesMasterConfig")
    }

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)
    kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
    if err != nil {
        return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
    }

    // in-order list of plug-ins that should intercept admission decisions
    // TODO: Push node environment support to upstream in future

    podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
    if err != nil {
        return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
    }

    // Defaults are tested in TestCMServerDefaults
    cmserver := cmapp.NewCMServer()
    // Adjust defaults
    cmserver.Address = ""                   // no healthz endpoint
    cmserver.Port = 0                       // no healthz endpoint
    cmserver.EnableGarbageCollector = false // disabled until we add the controller
    cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}
    cmserver.VolumeConfiguration.EnableDynamicProvisioning = options.VolumeConfig.DynamicProvisioningEnabled

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    schedulerserver := scheduleroptions.NewSchedulerServer()
    schedulerserver.PolicyConfigFile = options.KubernetesMasterConfig.SchedulerConfigFile
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.SchedulerArguments, schedulerserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
    }

    var proxyClientCerts []tls.Certificate
    if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
        clientCert, err := tls.LoadX509KeyPair(
            options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
            options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
        )
        if err != nil {
            return nil, err
        }
        proxyClientCerts = append(proxyClientCerts, clientCert)
    }

    server, storageFactory, err := BuildDefaultAPIServer(options)
    if err != nil {
        return nil, err
    }

    // Preserve previous behavior of using the first non-loopback address
    // TODO: Deprecate this behavior and just require a valid value to be passed in
    publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
    if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
        hostIP, err := knet.ChooseHostInterface()
        if err != nil {
            glog.Fatalf("Unable to find suitable network address.error='%v'. Set the masterIP directly to avoid this error.", err)
        }
        publicAddress = hostIP
        glog.Infof("Will report %v as public IP address.", publicAddress)
    }

    m := &master.Config{
        Config: &genericapiserver.Config{
            PublicAddress: publicAddress,
            ReadWritePort: server.SecurePort,

            Authenticator:    originAuthenticator, // this is used to fulfill the tokenreviews endpoint which is used by node authentication
            Authorizer:       authorizer.NewAlwaysAllowAuthorizer(),
            AdmissionControl: admissionControl,

            StorageFactory: storageFactory,

            ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
            ServiceNodePortRange:  server.ServiceNodePortRange,

            RequestContextMapper: requestContextMapper,

            APIResourceConfigSource: getAPIResourceConfig(options),
            APIPrefix:               server.APIPrefix,
            APIGroupPrefix:          server.APIGroupPrefix,

            MasterCount: server.MasterCount,

            // Set the TLS options for proxying to pods and services
            // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
            ProxyTLSClientConfig: &tls.Config{
                // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
                InsecureSkipVerify: true,
                Certificates:       proxyClientCerts,
            },

            Serializer: kapi.Codecs,

            EnableLogsSupport:         server.EnableLogsSupport,
            EnableProfiling:           server.EnableProfiling,
            EnableWatchCache:          server.EnableWatchCache,
            MasterServiceNamespace:    server.MasterServiceNamespace,
            ExternalHost:              server.ExternalHost,
            MinRequestTimeout:         server.MinRequestTimeout,
            KubernetesServiceNodePort: server.KubernetesServiceNodePort,
        },

        EventTTL: server.EventTTL,

        KubeletClient: kubeletClient,

        EnableCoreControllers: true,

        DeleteCollectionWorkers: server.DeleteCollectionWorkers,
    }

    if server.EnableWatchCache {
        cachesize.SetWatchCacheSizes(server.WatchCacheSizes)
    }

    if options.DNSConfig != nil {
        _, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
        if err != nil {
            return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
        }
        dnsPort, err := strconv.Atoi(dnsPortStr)
        if err != nil {
            return nil, fmt.Errorf("invalid DNS port: %v", err)
        }
        m.ExtraServicePorts = append(m.ExtraServicePorts,
            kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
            kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
        )
        m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
            kapi.EndpointPort{Name: "dns", Port: int32(dnsPort), Protocol: kapi.ProtocolUDP},
            kapi.EndpointPort{Name: "dns-tcp", Port: int32(dnsPort), Protocol: kapi.ProtocolTCP},
        )
    }

    kmaster := &MasterConfig{
        Options: *options.KubernetesMasterConfig,

        KubeClient: kubeClient,

        Master:            m,
        ControllerManager: cmserver,
        CloudProvider:     cloud,
        SchedulerServer:   schedulerserver,
        Informers:         informers,
    }

    return kmaster, nil
}
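The publicAddress fallback above preserves old behavior: honor a configured MasterIP only if it is a usable unicast address, otherwise pick a host interface. A standard-library sketch of that selection (choosePublicIP is hypothetical; the real code delegates to knet.ChooseHostInterface):

package main

import (
    "fmt"
    "net"
)

// choosePublicIP mirrors the fallback: use the configured IP if it is a
// real unicast address, otherwise pick the first non-loopback IPv4.
func choosePublicIP(configured string) (net.IP, error) {
    ip := net.ParseIP(configured)
    if ip != nil && !ip.IsUnspecified() && !ip.IsLoopback() {
        return ip, nil
    }
    addrs, err := net.InterfaceAddrs()
    if err != nil {
        return nil, err
    }
    for _, a := range addrs {
        if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() && ipnet.IP.To4() != nil {
            return ipnet.IP, nil
        }
    }
    return nil, fmt.Errorf("no suitable network address found; set masterIP explicitly")
}

func main() {
    ip, err := choosePublicIP("0.0.0.0") // unspecified, so fall back to an interface
    fmt.Println(ip, err)
}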
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, informers shared.InformerFactory, pluginInitializer oadmission.PluginInitializer) (*MasterConfig, error) {
    if options.KubernetesMasterConfig == nil {
        return nil, errors.New("insufficient information to build KubernetesMasterConfig")
    }

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)
    kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
    if err != nil {
        return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
    }

    // in-order list of plug-ins that should intercept admission decisions
    // TODO: Push node environment support to upstream in future

    _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, err
    }
    port, err := strconv.Atoi(portString)
    if err != nil {
        return nil, err
    }

    portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
    if err != nil {
        return nil, err
    }

    podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
    if err != nil {
        return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
    }

    // Defaults are tested in TestAPIServerDefaults
    server := apiserveroptions.NewAPIServer()
    // Adjust defaults
    server.EventTTL = 2 * time.Hour
    server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
    server.ServiceNodePortRange = *portRange
    server.AdmissionControl = strings.Join(AdmissionPlugins, ",")
    server.EnableLogsSupport = false // don't expose server logs
    server.EnableProfiling = false
    server.APIPrefix = KubeAPIPrefix
    server.APIGroupPrefix = KubeAPIGroupPrefix
    server.SecurePort = port
    server.MasterCount = options.KubernetesMasterConfig.MasterCount

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    if len(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride) > 0 {
        server.AdmissionControl = strings.Join(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride, ",")
    }

    // Defaults are tested in TestCMServerDefaults
    cmserver := cmapp.NewCMServer()
    // Adjust defaults
    cmserver.Address = "" // no healthz endpoint
    cmserver.Port = 0     // no healthz endpoint
    cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}
    cmserver.VolumeConfiguration.EnableDynamicProvisioning = options.VolumeConfig.DynamicProvisioningEnabled

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
    }

    plugins := []admission.Interface{}
    for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
        switch pluginName {
        case lifecycle.PluginName:
            // We need to include our infrastructure and shared resource namespaces in the immortal namespaces list
            immortalNamespaces := sets.NewString(kapi.NamespaceDefault)
            if len(options.PolicyConfig.OpenShiftSharedResourcesNamespace) > 0 {
                immortalNamespaces.Insert(options.PolicyConfig.OpenShiftSharedResourcesNamespace)
            }
            if len(options.PolicyConfig.OpenShiftInfrastructureNamespace) > 0 {
                immortalNamespaces.Insert(options.PolicyConfig.OpenShiftInfrastructureNamespace)
            }
            plugins = append(plugins, lifecycle.NewLifecycle(clientadapter.FromUnversionedClient(kubeClient), immortalNamespaces))
        case serviceadmit.ExternalIPPluginName:
            // this needs to be moved upstream to be part of core config
            reject, admit, err := serviceadmit.ParseCIDRRules(options.NetworkConfig.ExternalIPNetworkCIDRs)
            if err != nil {
                // should have been caught with validation
                return nil, err
            }
            plugins = append(plugins, serviceadmit.NewExternalIPRanger(reject, admit))
        case saadmit.PluginName:
            // we need to set some custom parameters on the service account admission controller, so create that one by hand
            saAdmitter := saadmit.NewServiceAccount(clientadapter.FromUnversionedClient(kubeClient))
            saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
            saAdmitter.Run()
            plugins = append(plugins, saAdmitter)
        default:
            configFile, err := pluginconfig.GetPluginConfigFile(options.KubernetesMasterConfig.AdmissionConfig.PluginConfig, pluginName, server.AdmissionControlConfigFile)
            if err != nil {
                return nil, err
            }
            plugin := admission.InitPlugin(pluginName, clientadapter.FromUnversionedClient(kubeClient), configFile)
            if plugin != nil {
                plugins = append(plugins, plugin)
            }
        }
    }
    pluginInitializer.Initialize(plugins)
    // ensure that plugins have been properly initialized
    if err := oadmission.Validate(plugins); err != nil {
        return nil, err
    }
    admissionController := admission.NewChainHandler(plugins...)

    var proxyClientCerts []tls.Certificate
    if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
        clientCert, err := tls.LoadX509KeyPair(
            options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
            options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
        )
        if err != nil {
            return nil, err
        }
        proxyClientCerts = append(proxyClientCerts, clientCert)
    }

    resourceEncodingConfig := genericapiserver.NewDefaultResourceEncodingConfig()
    resourceEncodingConfig.SetVersionEncoding(
        kapi.GroupName,
        unversioned.GroupVersion{Group: kapi.GroupName, Version: options.EtcdStorageConfig.KubernetesStorageVersion},
        kapi.SchemeGroupVersion,
    )
    resourceEncodingConfig.SetVersionEncoding(
        extensions.GroupName,
        unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"},
        extensions.SchemeGroupVersion,
    )
    resourceEncodingConfig.SetVersionEncoding(
        batch.GroupName,
        unversioned.GroupVersion{Group: batch.GroupName, Version: "v1"},
        batch.SchemeGroupVersion,
    )
    resourceEncodingConfig.SetVersionEncoding(
        autoscaling.GroupName,
        unversioned.GroupVersion{Group: autoscaling.GroupName, Version: "v1"},
        autoscaling.SchemeGroupVersion,
    )

    etcdConfig := storagebackend.Config{
        Prefix:                   options.EtcdStorageConfig.KubernetesStoragePrefix,
        ServerList:               options.EtcdClientInfo.URLs,
        KeyFile:                  options.EtcdClientInfo.ClientCert.KeyFile,
        CertFile:                 options.EtcdClientInfo.ClientCert.CertFile,
        CAFile:                   options.EtcdClientInfo.CA,
        DeserializationCacheSize: genericapiserveroptions.DefaultDeserializationCacheSize,
    }
    storageFactory := genericapiserver.NewDefaultStorageFactory(etcdConfig, "", kapi.Codecs, resourceEncodingConfig, master.DefaultAPIResourceConfigSource())
    // the order here is important, it defines which version will be used for storage
    storageFactory.AddCohabitatingResources(extensions.Resource("jobs"), batch.Resource("jobs"))
    storageFactory.AddCohabitatingResources(extensions.Resource("horizontalpodautoscalers"), autoscaling.Resource("horizontalpodautoscalers"))

    // Preserve previous behavior of using the first non-loopback address
    // TODO: Deprecate this behavior and just require a valid value to be passed in
    publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
    if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
        hostIP, err := knet.ChooseHostInterface()
        if err != nil {
            glog.Fatalf("Unable to find suitable network address.error='%v'. Set the masterIP directly to avoid this error.", err)
        }
        publicAddress = hostIP
        glog.Infof("Will report %v as public IP address.", publicAddress)
    }

    m := &master.Config{
        Config: &genericapiserver.Config{
            PublicAddress: publicAddress,
            ReadWritePort: port,

            Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
            AdmissionControl: admissionController,

            StorageFactory: storageFactory,

            ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
            ServiceNodePortRange:  server.ServiceNodePortRange,

            RequestContextMapper: requestContextMapper,

            APIResourceConfigSource: getAPIResourceConfig(options),
            APIPrefix:               server.APIPrefix,
            APIGroupPrefix:          server.APIGroupPrefix,

            MasterCount: server.MasterCount,

            // Set the TLS options for proxying to pods and services
            // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
            ProxyTLSClientConfig: &tls.Config{
                // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
                InsecureSkipVerify: true,
                Certificates:       proxyClientCerts,
            },

            Serializer: kapi.Codecs,

            EnableLogsSupport:         server.EnableLogsSupport,
            EnableProfiling:           server.EnableProfiling,
            EnableWatchCache:          server.EnableWatchCache,
            MasterServiceNamespace:    server.MasterServiceNamespace,
            ExternalHost:              server.ExternalHost,
            MinRequestTimeout:         server.MinRequestTimeout,
            KubernetesServiceNodePort: server.KubernetesServiceNodePort,
        },

        EventTTL: server.EventTTL,

        KubeletClient: kubeletClient,

        EnableCoreControllers: true,

        DeleteCollectionWorkers: server.DeleteCollectionWorkers,
    }

    if server.EnableWatchCache {
        cachesize.SetWatchCacheSizes(server.WatchCacheSizes)
    }

    if options.DNSConfig != nil {
        _, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
        if err != nil {
            return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
        }
        dnsPort, err := strconv.Atoi(dnsPortStr)
        if err != nil {
            return nil, fmt.Errorf("invalid DNS port: %v", err)
        }
        m.ExtraServicePorts = append(m.ExtraServicePorts,
            kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
            kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
        )
        m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
            kapi.EndpointPort{Name: "dns", Port: int32(dnsPort), Protocol: kapi.ProtocolUDP},
            kapi.EndpointPort{Name: "dns-tcp", Port: int32(dnsPort), Protocol: kapi.ProtocolTCP},
        )
    }

    kmaster := &MasterConfig{
        Options: *options.KubernetesMasterConfig,

        KubeClient: kubeClient,

        Master:            m,
        ControllerManager: cmserver,
        CloudProvider:     cloud,
        Informers:         informers,
    }

    return kmaster, nil
}