// Run runs the specified APIServer. This should never exit.
func Run(s *options.APIServer) error {
	verifyClusterIPFlags(s)

	// If advertise-address is not specified, use bind-address. If bind-address
	// is not usable (unset, 0.0.0.0, or loopback), we will use the host's default
	// interface as valid public addr for master (see: util/net#ValidPublicAddrForMaster)
	if s.AdvertiseAddress == nil || s.AdvertiseAddress.IsUnspecified() {
		hostIP, err := utilnet.ChooseBindAddress(s.BindAddress)
		if err != nil {
			glog.Fatalf("Unable to find suitable network address.error='%v' . "+
				"Try to set the AdvertiseAddress directly or provide a valid BindAddress to fix this.", err)
		}
		s.AdvertiseAddress = hostIP
	}
	glog.Infof("Will report %v as public IP address.", s.AdvertiseAddress)

	if len(s.EtcdServerList) == 0 {
		glog.Fatalf("--etcd-servers must be specified")
	}

	if s.KubernetesServiceNodePort > 0 && !s.ServiceNodePortRange.Contains(s.KubernetesServiceNodePort) {
		glog.Fatalf("Kubernetes service port range %v doesn't contain %v", s.ServiceNodePortRange, (s.KubernetesServiceNodePort))
	}

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: s.AllowPrivileged,
		// TODO(vmarmol): Implement support for HostNetworkSources.
		PrivilegedSources: capabilities.PrivilegedSources{
			HostNetworkSources: []string{},
			HostPIDSources:     []string{},
			HostIPCSources:     []string{},
		},
		PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
	})

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	// Setup tunneler if needed
	var tunneler master.Tunneler
	var proxyDialerFn apiserver.ProxyDialerFunc
	if len(s.SSHUser) > 0 {
		// Get ssh key distribution func, if supported
		var installSSH master.InstallSSHKey
		if cloud != nil {
			if instances, supported := cloud.Instances(); supported {
				installSSH = instances.AddSSHKeyToAllInstances
			}
		}
		// Set up the tunneler
		tunneler = master.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, installSSH)

		// Use the tunneler's dialer to connect to the kubelet
		s.KubeletConfig.Dial = tunneler.Dial
		// Use the tunneler's dialer when proxying to pods, services, and nodes
		proxyDialerFn = tunneler.Dial
	}

	// Proxying to pods and services is IP-based... don't expect to be able to verify the hostname
	proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}

	kubeletClient, err := kubeletclient.NewStaticKubeletClient(&s.KubeletConfig)
	if err != nil {
		glog.Fatalf("Failure to start kubelet client: %v", err)
	}

	apiGroupVersionOverrides, err := parseRuntimeConfig(s)
	if err != nil {
		glog.Fatalf("error in parsing runtime-config: %s", err)
	}

	clientConfig := &client.Config{
		Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
	}
	if len(s.DeprecatedStorageVersion) != 0 {
		gv, err := unversioned.ParseGroupVersion(s.DeprecatedStorageVersion)
		if err != nil {
			glog.Fatalf("error in parsing group version: %s", err)
		}
		clientConfig.GroupVersion = &gv
	}

	client, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	legacyV1Group, err := registered.Group(api.GroupName)
	if err != nil {
		return err
	}

	storageDestinations := genericapiserver.NewStorageDestinations()

	storageVersions := generateStorageVersionMap(s.DeprecatedStorageVersion, s.StorageVersions)
	if _, found := storageVersions[legacyV1Group.GroupVersion.Group]; !found {
		glog.Fatalf("Couldn't find the storage version for group: %q in storageVersions: %v", legacyV1Group.GroupVersion.Group, storageVersions)
	}
	etcdStorage, err := newEtcd(s.EtcdServerList, api.Codecs, storageVersions[legacyV1Group.GroupVersion.Group], s.EtcdPathPrefix)
	if err != nil {
		glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
	}
	storageDestinations.AddAPIGroup("", etcdStorage)

	if !apiGroupVersionOverrides["extensions/v1beta1"].Disable {
		expGroup, err := registered.Group(extensions.GroupName)
		if err != nil {
			glog.Fatalf("Extensions API is enabled in runtime config, but not enabled in the environment variable KUBE_API_VERSIONS. Error: %v", err)
		}
		if _, found := storageVersions[expGroup.GroupVersion.Group]; !found {
			glog.Fatalf("Couldn't find the storage version for group: %q in storageVersions: %v", expGroup.GroupVersion.Group, storageVersions)
		}
		expEtcdStorage, err := newEtcd(s.EtcdServerList, api.Codecs, storageVersions[expGroup.GroupVersion.Group], s.EtcdPathPrefix)
		if err != nil {
			glog.Fatalf("Invalid extensions storage version or misconfigured etcd: %v", err)
		}
		storageDestinations.AddAPIGroup(extensions.GroupName, expEtcdStorage)
	}

	updateEtcdOverrides(s.EtcdServersOverrides, storageVersions, s.EtcdPathPrefix, &storageDestinations, newEtcd)

	n := s.ServiceClusterIPRange

	// Default to the private server key for service account token signing
	if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" {
		if authenticator.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) {
			s.ServiceAccountKeyFile = s.TLSPrivateKeyFile
		} else {
			glog.Warning("No RSA key provided, service account token authentication disabled")
		}
	}

	var serviceAccountGetter serviceaccount.ServiceAccountTokenGetter
	if s.ServiceAccountLookup {
		// If we need to look up service accounts and tokens,
		// go directly to etcd to avoid recursive auth insanity
		serviceAccountGetter = serviceaccountcontroller.NewGetterFromStorageInterface(etcdStorage)
	}

	authenticator, err := authenticator.New(authenticator.AuthenticatorConfig{
		BasicAuthFile:             s.BasicAuthFile,
		ClientCAFile:              s.ClientCAFile,
		TokenAuthFile:             s.TokenAuthFile,
		OIDCIssuerURL:             s.OIDCIssuerURL,
		OIDCClientID:              s.OIDCClientID,
		OIDCCAFile:                s.OIDCCAFile,
		OIDCUsernameClaim:         s.OIDCUsernameClaim,
		ServiceAccountKeyFile:     s.ServiceAccountKeyFile,
		ServiceAccountLookup:      s.ServiceAccountLookup,
		ServiceAccountTokenGetter: serviceAccountGetter,
		KeystoneURL:               s.KeystoneURL,
	})
	if err != nil {
		glog.Fatalf("Invalid Authentication Config: %v", err)
	}

	authorizationModeNames := strings.Split(s.AuthorizationMode, ",")
	authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, s.AuthorizationPolicyFile)
	if err != nil {
		glog.Fatalf("Invalid Authorization Config: %v", err)
	}

	admissionControlPluginNames := strings.Split(s.AdmissionControl, ",")
	admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile)

	if len(s.ExternalHost) == 0 {
		// TODO: extend for other providers
		if s.CloudProvider == "gce" {
			instances, supported := cloud.Instances()
			if !supported {
				glog.Fatalf("GCE cloud provider has no instances. this shouldn't happen. exiting.")
			}
			name, err := os.Hostname()
			if err != nil {
				glog.Fatalf("Failed to get hostname: %v", err)
			}
			addrs, err := instances.NodeAddresses(name)
			if err != nil {
				glog.Warningf("Unable to obtain external host address from cloud provider: %v", err)
			} else {
				for _, addr := range addrs {
					if addr.Type == api.NodeExternalIP {
						s.ExternalHost = addr.Address
					}
				}
			}
		}
	}

	config := &master.Config{
		Config: &genericapiserver.Config{
			StorageDestinations:      storageDestinations,
			StorageVersions:          storageVersions,
			ServiceClusterIPRange:    &n,
			EnableLogsSupport:        s.EnableLogsSupport,
			EnableUISupport:          true,
			EnableSwaggerSupport:     true,
			EnableProfiling:          s.EnableProfiling,
			EnableWatchCache:         s.EnableWatchCache,
			EnableIndex:              true,
			APIPrefix:                s.APIPrefix,
			APIGroupPrefix:           s.APIGroupPrefix,
			CorsAllowedOriginList:    s.CorsAllowedOriginList,
			ReadWritePort:            s.SecurePort,
			PublicAddress:            s.AdvertiseAddress,
			Authenticator:            authenticator,
			SupportsBasicAuth:        len(s.BasicAuthFile) > 0,
			Authorizer:               authorizer,
			AdmissionControl:         admissionController,
			APIGroupVersionOverrides: apiGroupVersionOverrides,
			MasterServiceNamespace:   s.MasterServiceNamespace,
			MasterCount:              s.MasterCount,
			ExternalHost:             s.ExternalHost,
			MinRequestTimeout:        s.MinRequestTimeout,
			ProxyDialer:              proxyDialerFn,
			ProxyTLSClientConfig:     proxyTLSClientConfig,
			ServiceNodePortRange:     s.ServiceNodePortRange,
			KubernetesServiceNodePort: s.KubernetesServiceNodePort,
			Serializer:               api.Codecs,
		},
		EnableCoreControllers: true,
		EventTTL:              s.EventTTL,
		KubeletClient:         kubeletClient,
		Tunneler:              tunneler,
	}
	m := master.New(config)
	m.Run(s.ServerRunOptions)
	return nil
}
// Run runs the specified APIServer. This should never exit. func Run(s *options.APIServer) error { verifyClusterIPFlags(s) // If advertise-address is not specified, use bind-address. If bind-address // is not usable (unset, 0.0.0.0, or loopback), we will use the host's default // interface as valid public addr for master (see: util/net#ValidPublicAddrForMaster) if s.AdvertiseAddress == nil || s.AdvertiseAddress.IsUnspecified() { hostIP, err := utilnet.ChooseBindAddress(s.BindAddress) if err != nil { glog.Fatalf("Unable to find suitable network address.error='%v' . "+ "Try to set the AdvertiseAddress directly or provide a valid BindAddress to fix this.", err) } s.AdvertiseAddress = hostIP } glog.Infof("Will report %v as public IP address.", s.AdvertiseAddress) if len(s.EtcdConfig.ServerList) == 0 { glog.Fatalf("--etcd-servers must be specified") } if s.KubernetesServiceNodePort > 0 && !s.ServiceNodePortRange.Contains(s.KubernetesServiceNodePort) { glog.Fatalf("Kubernetes service port range %v doesn't contain %v", s.ServiceNodePortRange, (s.KubernetesServiceNodePort)) } capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: s.AllowPrivileged, // TODO(vmarmol): Implement support for HostNetworkSources. PrivilegedSources: capabilities.PrivilegedSources{ HostNetworkSources: []string{}, HostPIDSources: []string{}, HostIPCSources: []string{}, }, PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec, }) cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) } // Setup tunneler if needed var tunneler master.Tunneler var proxyDialerFn apiserver.ProxyDialerFunc if len(s.SSHUser) > 0 { // Get ssh key distribution func, if supported var installSSH master.InstallSSHKey if cloud != nil { if instances, supported := cloud.Instances(); supported { installSSH = instances.AddSSHKeyToAllInstances } } if s.KubeletConfig.Port == 0 { glog.Fatalf("Must enable kubelet port if proxy ssh-tunneling is specified.") } // Set up the tunneler // TODO(cjcullen): If we want this to handle per-kubelet ports or other // kubelet listen-addresses, we need to plumb through options. healthCheckPath := &url.URL{ Scheme: "https", Host: net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)), Path: "healthz", } tunneler = master.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH) // Use the tunneler's dialer to connect to the kubelet s.KubeletConfig.Dial = tunneler.Dial // Use the tunneler's dialer when proxying to pods, services, and nodes proxyDialerFn = tunneler.Dial } // Proxying to pods and services is IP-based... don't expect to be able to verify the hostname proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true} kubeletClient, err := kubeletclient.NewStaticKubeletClient(&s.KubeletConfig) if err != nil { glog.Fatalf("Failure to start kubelet client: %v", err) } apiResourceConfigSource, err := parseRuntimeConfig(s) if err != nil { glog.Fatalf("error in parsing runtime-config: %s", err) } clientConfig := &restclient.Config{ Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)), // Increase QPS limits. The client is currently passed to all admission plugins, // and those can be throttled in case of higher load on apiserver - see #22340 and #22422 // for more details. Once #22422 is fixed, we may want to remove it. 
QPS: 50, Burst: 100, } if len(s.DeprecatedStorageVersion) != 0 { gv, err := unversioned.ParseGroupVersion(s.DeprecatedStorageVersion) if err != nil { glog.Fatalf("error in parsing group version: %s", err) } clientConfig.GroupVersion = &gv } client, err := clientset.NewForConfig(clientConfig) if err != nil { glog.Errorf("Failed to create clientset: %v", err) } legacyV1Group, err := registered.Group(api.GroupName) if err != nil { return err } storageDestinations := genericapiserver.NewStorageDestinations() storageVersions := s.StorageGroupsToGroupVersions() if _, found := storageVersions[legacyV1Group.GroupVersion.Group]; !found { glog.Fatalf("Couldn't find the storage version for group: %q in storageVersions: %v", legacyV1Group.GroupVersion.Group, storageVersions) } etcdStorage, err := newEtcd(api.Codecs, storageVersions[legacyV1Group.GroupVersion.Group], "/__internal", s.EtcdConfig) if err != nil { glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err) } storageDestinations.AddAPIGroup("", etcdStorage) if apiResourceConfigSource.AnyResourcesForVersionEnabled(extensionsapiv1beta1.SchemeGroupVersion) { glog.Infof("Configuring extensions/v1beta1 storage destination") expGroup, err := registered.Group(extensions.GroupName) if err != nil { glog.Fatalf("Extensions API is enabled in runtime config, but not enabled in the environment variable KUBE_API_VERSIONS. Error: %v", err) } if _, found := storageVersions[expGroup.GroupVersion.Group]; !found { glog.Fatalf("Couldn't find the storage version for group: %q in storageVersions: %v", expGroup.GroupVersion.Group, storageVersions) } expEtcdStorage, err := newEtcd(api.Codecs, storageVersions[expGroup.GroupVersion.Group], "extensions/__internal", s.EtcdConfig) if err != nil { glog.Fatalf("Invalid extensions storage version or misconfigured etcd: %v", err) } storageDestinations.AddAPIGroup(extensions.GroupName, expEtcdStorage) // Since HPA has been moved to the autoscaling group, we need to make // sure autoscaling has a storage destination. If the autoscaling group // itself is on, it will overwrite this decision below. storageDestinations.AddAPIGroup(autoscaling.GroupName, expEtcdStorage) // Since Job has been moved to the batch group, we need to make // sure batch has a storage destination. If the batch group // itself is on, it will overwrite this decision below. storageDestinations.AddAPIGroup(batch.GroupName, expEtcdStorage) } // autoscaling/v1/horizontalpodautoscalers is a move from extensions/v1beta1/horizontalpodautoscalers. // The storage version needs to be either extensions/v1beta1 or autoscaling/v1. // Users must roll forward while using 1.2, because we will require the latter for 1.3. if apiResourceConfigSource.AnyResourcesForVersionEnabled(autoscalingapiv1.SchemeGroupVersion) { glog.Infof("Configuring autoscaling/v1 storage destination") autoscalingGroup, err := registered.Group(autoscaling.GroupName) if err != nil { glog.Fatalf("Autoscaling API is enabled in runtime config, but not enabled in the environment variable KUBE_API_VERSIONS. Error: %v", err) } // Figure out what storage group/version we should use. 
storageGroupVersion, found := storageVersions[autoscalingGroup.GroupVersion.Group] if !found { glog.Fatalf("Couldn't find the storage version for group: %q in storageVersions: %v", autoscalingGroup.GroupVersion.Group, storageVersions) } if storageGroupVersion != "autoscaling/v1" && storageGroupVersion != "extensions/v1beta1" { glog.Fatalf("The storage version for autoscaling must be either 'autoscaling/v1' or 'extensions/v1beta1'") } glog.Infof("Using %v for autoscaling group storage version", storageGroupVersion) autoscalingEtcdStorage, err := newEtcd(api.Codecs, storageGroupVersion, "extensions/__internal", s.EtcdConfig) if err != nil { glog.Fatalf("Invalid extensions storage version or misconfigured etcd: %v", err) } storageDestinations.AddAPIGroup(autoscaling.GroupName, autoscalingEtcdStorage) } // batch/v1/job is a move from extensions/v1beta1/job. The storage // version needs to be either extensions/v1beta1 or batch/v1. Users // must roll forward while using 1.2, because we will require the // latter for 1.3. if apiResourceConfigSource.AnyResourcesForVersionEnabled(batchapiv1.SchemeGroupVersion) { glog.Infof("Configuring batch/v1 storage destination") batchGroup, err := registered.Group(batch.GroupName) if err != nil { glog.Fatalf("Batch API is enabled in runtime config, but not enabled in the environment variable KUBE_API_VERSIONS. Error: %v", err) } // Figure out what storage group/version we should use. storageGroupVersion, found := storageVersions[batchGroup.GroupVersion.Group] if !found { glog.Fatalf("Couldn't find the storage version for group: %q in storageVersions: %v", batchGroup.GroupVersion.Group, storageVersions) } if storageGroupVersion != "batch/v1" && storageGroupVersion != "extensions/v1beta1" { glog.Fatalf("The storage version for batch must be either 'batch/v1' or 'extensions/v1beta1'") } glog.Infof("Using %v for batch group storage version", storageGroupVersion) batchEtcdStorage, err := newEtcd(api.Codecs, storageGroupVersion, "extensions/__internal", s.EtcdConfig) if err != nil { glog.Fatalf("Invalid extensions storage version or misconfigured etcd: %v", err) } storageDestinations.AddAPIGroup(batch.GroupName, batchEtcdStorage) } updateEtcdOverrides(s.EtcdServersOverrides, storageVersions, s.EtcdConfig, &storageDestinations, newEtcd) n := s.ServiceClusterIPRange // Default to the private server key for service account token signing if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" { if authenticator.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) { s.ServiceAccountKeyFile = s.TLSPrivateKeyFile } else { glog.Warning("No RSA key provided, service account token authentication disabled") } } var serviceAccountGetter serviceaccount.ServiceAccountTokenGetter if s.ServiceAccountLookup { // If we need to look up service accounts and tokens, // go directly to etcd to avoid recursive auth insanity serviceAccountGetter = serviceaccountcontroller.NewGetterFromStorageInterface(etcdStorage) } authenticator, err := authenticator.New(authenticator.AuthenticatorConfig{ BasicAuthFile: s.BasicAuthFile, ClientCAFile: s.ClientCAFile, TokenAuthFile: s.TokenAuthFile, OIDCIssuerURL: s.OIDCIssuerURL, OIDCClientID: s.OIDCClientID, OIDCCAFile: s.OIDCCAFile, OIDCUsernameClaim: s.OIDCUsernameClaim, OIDCGroupsClaim: s.OIDCGroupsClaim, ServiceAccountKeyFile: s.ServiceAccountKeyFile, ServiceAccountLookup: s.ServiceAccountLookup, ServiceAccountTokenGetter: serviceAccountGetter, KeystoneURL: s.KeystoneURL, }) if err != nil { glog.Fatalf("Invalid Authentication Config: %v", err) } 
authorizationModeNames := strings.Split(s.AuthorizationMode, ",") authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, s.AuthorizationConfig) if err != nil { glog.Fatalf("Invalid Authorization Config: %v", err) } admissionControlPluginNames := strings.Split(s.AdmissionControl, ",") admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile) if len(s.ExternalHost) == 0 { // TODO: extend for other providers if s.CloudProvider == "gce" { instances, supported := cloud.Instances() if !supported { glog.Fatalf("GCE cloud provider has no instances. this shouldn't happen. exiting.") } name, err := os.Hostname() if err != nil { glog.Fatalf("Failed to get hostname: %v", err) } addrs, err := instances.NodeAddresses(name) if err != nil { glog.Warningf("Unable to obtain external host address from cloud provider: %v", err) } else { for _, addr := range addrs { if addr.Type == api.NodeExternalIP { s.ExternalHost = addr.Address } } } } } config := &master.Config{ Config: &genericapiserver.Config{ StorageDestinations: storageDestinations, StorageVersions: storageVersions, ServiceClusterIPRange: &n, EnableLogsSupport: s.EnableLogsSupport, EnableUISupport: true, EnableSwaggerSupport: true, EnableSwaggerUI: s.EnableSwaggerUI, EnableProfiling: s.EnableProfiling, EnableWatchCache: s.EnableWatchCache, EnableIndex: true, APIPrefix: s.APIPrefix, APIGroupPrefix: s.APIGroupPrefix, CorsAllowedOriginList: s.CorsAllowedOriginList, ReadWritePort: s.SecurePort, PublicAddress: s.AdvertiseAddress, Authenticator: authenticator, SupportsBasicAuth: len(s.BasicAuthFile) > 0, Authorizer: authorizer, AdmissionControl: admissionController, APIResourceConfigSource: apiResourceConfigSource, MasterServiceNamespace: s.MasterServiceNamespace, MasterCount: s.MasterCount, ExternalHost: s.ExternalHost, MinRequestTimeout: s.MinRequestTimeout, ProxyDialer: proxyDialerFn, ProxyTLSClientConfig: proxyTLSClientConfig, ServiceNodePortRange: s.ServiceNodePortRange, KubernetesServiceNodePort: s.KubernetesServiceNodePort, Serializer: api.Codecs, }, EnableCoreControllers: true, DeleteCollectionWorkers: s.DeleteCollectionWorkers, EventTTL: s.EventTTL, KubeletClient: kubeletClient, Tunneler: tunneler, } if s.EnableWatchCache { cachesize.SetWatchCacheSizes(s.WatchCacheSizes) } m, err := master.New(config) if err != nil { return err } m.Run(s.ServerRunOptions) return nil }
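
The version above builds the SSH tunneler's kubelet health-check URL from the kubelet port with url.URL, net.JoinHostPort, and strconv.FormatUint. A self-contained sketch of just that construction is shown below; the port value 10250 is an assumption standing in for s.KubeletConfig.Port.

package main

import (
	"fmt"
	"net"
	"net/url"
	"strconv"
)

func main() {
	// Assumed kubelet HTTPS port; the real value comes from s.KubeletConfig.Port.
	var kubeletPort uint = 10250

	healthCheckPath := &url.URL{
		Scheme: "https",
		Host:   net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(kubeletPort), 10)),
		Path:   "healthz",
	}
	// url.URL.String() inserts the missing "/" before a relative path when a
	// host is set, so this prints: https://127.0.0.1:10250/healthz
	fmt.Println(healthCheckPath.String())
}
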
// Run runs the specified APIServer. This should never exit. func Run(s *options.APIServer) error { genericvalidation.VerifyEtcdServersList(s.ServerRunOptions) genericapiserver.DefaultAndValidateRunOptions(s.ServerRunOptions) capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: s.AllowPrivileged, // TODO(vmarmol): Implement support for HostNetworkSources. PrivilegedSources: capabilities.PrivilegedSources{ HostNetworkSources: []string{}, HostPIDSources: []string{}, HostIPCSources: []string{}, }, PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec, }) // Setup tunneler if needed var tunneler genericapiserver.Tunneler var proxyDialerFn apiserver.ProxyDialerFunc if len(s.SSHUser) > 0 { // Get ssh key distribution func, if supported var installSSH genericapiserver.InstallSSHKey cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) } if cloud != nil { if instances, supported := cloud.Instances(); supported { installSSH = instances.AddSSHKeyToAllInstances } } if s.KubeletConfig.Port == 0 { glog.Fatalf("Must enable kubelet port if proxy ssh-tunneling is specified.") } // Set up the tunneler // TODO(cjcullen): If we want this to handle per-kubelet ports or other // kubelet listen-addresses, we need to plumb through options. healthCheckPath := &url.URL{ Scheme: "https", Host: net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)), Path: "healthz", } tunneler = genericapiserver.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH) // Use the tunneler's dialer to connect to the kubelet s.KubeletConfig.Dial = tunneler.Dial // Use the tunneler's dialer when proxying to pods, services, and nodes proxyDialerFn = tunneler.Dial } // Proxying to pods and services is IP-based... 
don't expect to be able to verify the hostname proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true} kubeletClient, err := kubeletclient.NewStaticKubeletClient(&s.KubeletConfig) if err != nil { glog.Fatalf("Failed to start kubelet client: %v", err) } storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion() if err != nil { glog.Fatalf("error generating storage version map: %s", err) } storageFactory, err := genericapiserver.BuildDefaultStorageFactory( s.StorageConfig, s.DefaultStorageMediaType, api.Codecs, genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion, // FIXME: this GroupVersionResource override should be configurable []unversioned.GroupVersionResource{batch.Resource("scheduledjobs").WithVersion("v2alpha1")}, master.DefaultAPIResourceConfigSource(), s.RuntimeConfig) if err != nil { glog.Fatalf("error in initializing storage factory: %s", err) } storageFactory.AddCohabitatingResources(batch.Resource("jobs"), extensions.Resource("jobs")) storageFactory.AddCohabitatingResources(autoscaling.Resource("horizontalpodautoscalers"), extensions.Resource("horizontalpodautoscalers")) for _, override := range s.EtcdServersOverrides { tokens := strings.Split(override, "#") if len(tokens) != 2 { glog.Errorf("invalid value of etcd server overrides: %s", override) continue } apiresource := strings.Split(tokens[0], "/") if len(apiresource) != 2 { glog.Errorf("invalid resource definition: %s", tokens[0]) continue } group := apiresource[0] resource := apiresource[1] groupResource := unversioned.GroupResource{Group: group, Resource: resource} servers := strings.Split(tokens[1], ";") storageFactory.SetEtcdLocation(groupResource, servers) } // Default to the private server key for service account token signing if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" { if authenticator.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) { s.ServiceAccountKeyFile = s.TLSPrivateKeyFile } else { glog.Warning("No RSA key provided, service account token authentication disabled") } } var serviceAccountGetter serviceaccount.ServiceAccountTokenGetter if s.ServiceAccountLookup { // If we need to look up service accounts and tokens, // go directly to etcd to avoid recursive auth insanity storageConfig, err := storageFactory.NewConfig(api.Resource("serviceaccounts")) if err != nil { glog.Fatalf("Unable to get serviceaccounts storage: %v", err) } serviceAccountGetter = serviceaccountcontroller.NewGetterFromStorageInterface(storageConfig, storageFactory.ResourcePrefix(api.Resource("serviceaccounts")), storageFactory.ResourcePrefix(api.Resource("secrets"))) } apiAuthenticator, err := authenticator.New(authenticator.AuthenticatorConfig{ Anonymous: s.AnonymousAuth, AnyToken: s.EnableAnyToken, BasicAuthFile: s.BasicAuthFile, ClientCAFile: s.ClientCAFile, TokenAuthFile: s.TokenAuthFile, OIDCIssuerURL: s.OIDCIssuerURL, OIDCClientID: s.OIDCClientID, OIDCCAFile: s.OIDCCAFile, OIDCUsernameClaim: s.OIDCUsernameClaim, OIDCGroupsClaim: s.OIDCGroupsClaim, ServiceAccountKeyFile: s.ServiceAccountKeyFile, ServiceAccountLookup: s.ServiceAccountLookup, ServiceAccountTokenGetter: serviceAccountGetter, KeystoneURL: s.KeystoneURL, WebhookTokenAuthnConfigFile: s.WebhookTokenAuthnConfigFile, WebhookTokenAuthnCacheTTL: s.WebhookTokenAuthnCacheTTL, }) if err != nil { glog.Fatalf("Invalid Authentication Config: %v", err) } authorizationModeNames := strings.Split(s.AuthorizationMode, ",") modeEnabled := func(mode string) bool { for _, m := range authorizationModeNames { if m == mode { 
return true } } return false } authorizationConfig := authorizer.AuthorizationConfig{ PolicyFile: s.AuthorizationPolicyFile, WebhookConfigFile: s.AuthorizationWebhookConfigFile, WebhookCacheAuthorizedTTL: s.AuthorizationWebhookCacheAuthorizedTTL, WebhookCacheUnauthorizedTTL: s.AuthorizationWebhookCacheUnauthorizedTTL, RBACSuperUser: s.AuthorizationRBACSuperUser, } if modeEnabled(genericoptions.ModeRBAC) { mustGetRESTOptions := func(resource string) generic.RESTOptions { config, err := storageFactory.NewConfig(rbac.Resource(resource)) if err != nil { glog.Fatalf("Unable to get %s storage: %v", resource, err) } return generic.RESTOptions{StorageConfig: config, Decorator: generic.UndecoratedStorage, ResourcePrefix: storageFactory.ResourcePrefix(rbac.Resource(resource))} } // For initial bootstrapping go directly to etcd to avoid privillege escalation check. authorizationConfig.RBACRoleRegistry = role.NewRegistry(roleetcd.NewREST(mustGetRESTOptions("roles"))) authorizationConfig.RBACRoleBindingRegistry = rolebinding.NewRegistry(rolebindingetcd.NewREST(mustGetRESTOptions("rolebindings"))) authorizationConfig.RBACClusterRoleRegistry = clusterrole.NewRegistry(clusterroleetcd.NewREST(mustGetRESTOptions("clusterroles"))) authorizationConfig.RBACClusterRoleBindingRegistry = clusterrolebinding.NewRegistry(clusterrolebindingetcd.NewREST(mustGetRESTOptions("clusterrolebindings"))) } apiAuthorizer, err := authorizer.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, authorizationConfig) if err != nil { glog.Fatalf("Invalid Authorization Config: %v", err) } admissionControlPluginNames := strings.Split(s.AdmissionControl, ",") privilegedLoopbackToken := uuid.NewRandom().String() client, err := s.NewSelfClient(privilegedLoopbackToken) if err != nil { glog.Errorf("Failed to create clientset: %v", err) } // TODO(dims): We probably need to add an option "EnableLoopbackToken" if apiAuthenticator != nil { var uid = uuid.NewRandom().String() tokens := make(map[string]*user.DefaultInfo) tokens[privilegedLoopbackToken] = &user.DefaultInfo{ Name: "system:apiserver", UID: uid, Groups: []string{"system:masters"}, } tokenAuthenticator := authenticator.NewAuthenticatorFromTokens(tokens) apiAuthenticator = authenticatorunion.New(tokenAuthenticator, apiAuthenticator) tokenAuthorizer := authorizer.NewPrivilegedGroups("system:masters") apiAuthorizer = authorizerunion.New(tokenAuthorizer, apiAuthorizer) } sharedInformers := informers.NewSharedInformerFactory(client, 10*time.Minute) pluginInitializer := admission.NewPluginInitializer(sharedInformers) admissionController, err := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile, pluginInitializer) if err != nil { glog.Fatalf("Failed to initialize plugins: %v", err) } genericConfig := genericapiserver.NewConfig(s.ServerRunOptions) // TODO: Move the following to generic api server as well. 
genericConfig.Authenticator = apiAuthenticator genericConfig.SupportsBasicAuth = len(s.BasicAuthFile) > 0 genericConfig.Authorizer = apiAuthorizer genericConfig.AuthorizerRBACSuperUser = s.AuthorizationRBACSuperUser genericConfig.AdmissionControl = admissionController genericConfig.APIResourceConfigSource = storageFactory.APIResourceConfigSource genericConfig.MasterServiceNamespace = s.MasterServiceNamespace genericConfig.ProxyDialer = proxyDialerFn genericConfig.ProxyTLSClientConfig = proxyTLSClientConfig genericConfig.Serializer = api.Codecs genericConfig.OpenAPIInfo.Title = "Kubernetes" genericConfig.OpenAPIDefinitions = openapi.OpenAPIDefinitions genericConfig.EnableOpenAPISupport = true config := &master.Config{ GenericConfig: genericConfig, StorageFactory: storageFactory, EnableWatchCache: s.EnableWatchCache, EnableCoreControllers: true, DeleteCollectionWorkers: s.DeleteCollectionWorkers, EventTTL: s.EventTTL, KubeletClient: kubeletClient, EnableUISupport: true, EnableLogsSupport: true, Tunneler: tunneler, } if s.EnableWatchCache { glog.V(2).Infof("Initalizing cache sizes based on %dMB limit", s.TargetRAMMB) cachesize.InitializeWatchCacheSizes(s.TargetRAMMB) cachesize.SetWatchCacheSizes(s.WatchCacheSizes) } m, err := config.Complete().New() if err != nil { return err } sharedInformers.Start(wait.NeverStop) m.Run(s.ServerRunOptions) return nil }
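
The version above mints a random privileged loopback token, maps it to a "system:apiserver" identity in the "system:masters" group, and unions that with the configured authenticator and authorizer. The sketch below illustrates only the token-to-identity idea with a plain map and a crypto/rand token; userInfo and newLoopbackToken are illustrative stand-ins, not the real uuid-based token or union authenticator.

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// userInfo is an illustrative stand-in for the user.DefaultInfo type used above.
type userInfo struct {
	Name   string
	Groups []string
}

// newLoopbackToken mints a random bearer token for in-process use.
func newLoopbackToken() (string, error) {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

func main() {
	token, err := newLoopbackToken()
	if err != nil {
		panic(err)
	}
	// The token maps to a privileged identity so the server can call its own
	// API without going through external authentication.
	tokens := map[string]*userInfo{
		token: {Name: "system:apiserver", Groups: []string{"system:masters"}},
	}
	if u, ok := tokens[token]; ok {
		fmt.Printf("loopback token authenticates as %s %v\n", u.Name, u.Groups)
	}
}
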
// Run runs the specified APIServer. This should never exit. func Run(s *options.APIServer) error { verifyClusterIPFlags(s) // If advertise-address is not specified, use bind-address. If bind-address // is not usable (unset, 0.0.0.0, or loopback), we will use the host's default // interface as valid public addr for master (see: util#ValidPublicAddrForMaster) if s.AdvertiseAddress == nil || s.AdvertiseAddress.IsUnspecified() { hostIP, err := util.ValidPublicAddrForMaster(s.BindAddress) if err != nil { glog.Fatalf("Unable to find suitable network address.error='%v' . "+ "Try to set the AdvertiseAddress directly or provide a valid BindAddress to fix this.", err) } s.AdvertiseAddress = hostIP } glog.Infof("Will report %v as public IP address.", s.AdvertiseAddress) if len(s.EtcdServerList) == 0 { glog.Fatalf("--etcd-servers must be specified") } if s.KubernetesServiceNodePort > 0 && !s.ServiceNodePortRange.Contains(s.KubernetesServiceNodePort) { glog.Fatalf("Kubernetes service port range %v doesn't contain %v", s.ServiceNodePortRange, (s.KubernetesServiceNodePort)) } capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: s.AllowPrivileged, // TODO(vmarmol): Implement support for HostNetworkSources. PrivilegedSources: capabilities.PrivilegedSources{ HostNetworkSources: []string{}, HostPIDSources: []string{}, HostIPCSources: []string{}, }, PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec, }) cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) } // Setup tunneler if needed var tunneler master.Tunneler var proxyDialerFn apiserver.ProxyDialerFunc if len(s.SSHUser) > 0 { // Get ssh key distribution func, if supported var installSSH master.InstallSSHKey if cloud != nil { if instances, supported := cloud.Instances(); supported { installSSH = instances.AddSSHKeyToAllInstances } } // Set up the tunneler tunneler = master.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, installSSH) // Use the tunneler's dialer to connect to the kubelet s.KubeletConfig.Dial = tunneler.Dial // Use the tunneler's dialer when proxying to pods, services, and nodes proxyDialerFn = tunneler.Dial } // Proxying to pods and services is IP-based... 
don't expect to be able to verify the hostname proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true} kubeletClient, err := kubeletclient.NewStaticKubeletClient(&s.KubeletConfig) if err != nil { glog.Fatalf("Failure to start kubelet client: %v", err) } apiGroupVersionOverrides, err := parseRuntimeConfig(s) if err != nil { glog.Fatalf("error in parsing runtime-config: %s", err) } clientConfig := &client.Config{ Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)), } if len(s.DeprecatedStorageVersion) != 0 { gv, err := unversioned.ParseGroupVersion(s.DeprecatedStorageVersion) if err != nil { glog.Fatalf("error in parsing group version: %s", err) } clientConfig.GroupVersion = &gv } client, err := client.New(clientConfig) if err != nil { glog.Fatalf("Invalid server address: %v", err) } legacyV1Group, err := latest.Group(api.GroupName) if err != nil { return err } storageDestinations := genericapiserver.NewStorageDestinations() storageVersions := generateStorageVersionMap(s.DeprecatedStorageVersion, s.StorageVersions) if _, found := storageVersions[legacyV1Group.GroupVersion.Group]; !found { glog.Fatalf("Couldn't find the storage version for group: %q in storageVersions: %v", legacyV1Group.GroupVersion.Group, storageVersions) } etcdStorage, err := newEtcd(s.EtcdServerList, legacyV1Group.InterfacesFor, storageVersions[legacyV1Group.GroupVersion.Group], s.EtcdPathPrefix) if err != nil { glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err) } storageDestinations.AddAPIGroup("", etcdStorage) if !apiGroupVersionOverrides["extensions/v1beta1"].Disable { expGroup, err := latest.Group(extensions.GroupName) if err != nil { glog.Fatalf("Extensions API is enabled in runtime config, but not enabled in the environment variable KUBE_API_VERSIONS. 
Error: %v", err) } if _, found := storageVersions[expGroup.GroupVersion.Group]; !found { glog.Fatalf("Couldn't find the storage version for group: %q in storageVersions: %v", expGroup.GroupVersion.Group, storageVersions) } expEtcdStorage, err := newEtcd(s.EtcdServerList, expGroup.InterfacesFor, storageVersions[expGroup.GroupVersion.Group], s.EtcdPathPrefix) if err != nil { glog.Fatalf("Invalid extensions storage version or misconfigured etcd: %v", err) } storageDestinations.AddAPIGroup(extensions.GroupName, expEtcdStorage) } updateEtcdOverrides(s.EtcdServersOverrides, storageVersions, s.EtcdPathPrefix, &storageDestinations, newEtcd) n := s.ServiceClusterIPRange // Default to the private server key for service account token signing if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" { if authenticator.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) { s.ServiceAccountKeyFile = s.TLSPrivateKeyFile } else { glog.Warning("No RSA key provided, service account token authentication disabled") } } var serviceAccountGetter serviceaccount.ServiceAccountTokenGetter if s.ServiceAccountLookup { // If we need to look up service accounts and tokens, // go directly to etcd to avoid recursive auth insanity serviceAccountGetter = serviceaccountcontroller.NewGetterFromStorageInterface(etcdStorage) } authenticator, err := authenticator.New(authenticator.AuthenticatorConfig{ BasicAuthFile: s.BasicAuthFile, ClientCAFile: s.ClientCAFile, TokenAuthFile: s.TokenAuthFile, OIDCIssuerURL: s.OIDCIssuerURL, OIDCClientID: s.OIDCClientID, OIDCCAFile: s.OIDCCAFile, OIDCUsernameClaim: s.OIDCUsernameClaim, ServiceAccountKeyFile: s.ServiceAccountKeyFile, ServiceAccountLookup: s.ServiceAccountLookup, ServiceAccountTokenGetter: serviceAccountGetter, KeystoneURL: s.KeystoneURL, }) if err != nil { glog.Fatalf("Invalid Authentication Config: %v", err) } authorizationModeNames := strings.Split(s.AuthorizationMode, ",") authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, s.AuthorizationPolicyFile) if err != nil { glog.Fatalf("Invalid Authorization Config: %v", err) } admissionControlPluginNames := strings.Split(s.AdmissionControl, ",") admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile) if len(s.ExternalHost) == 0 { // TODO: extend for other providers if s.CloudProvider == "gce" { instances, supported := cloud.Instances() if !supported { glog.Fatalf("GCE cloud provider has no instances. this shouldn't happen. 
exiting.") } name, err := os.Hostname() if err != nil { glog.Fatalf("Failed to get hostname: %v", err) } addrs, err := instances.NodeAddresses(name) if err != nil { glog.Warningf("Unable to obtain external host address from cloud provider: %v", err) } else { for _, addr := range addrs { if addr.Type == api.NodeExternalIP { s.ExternalHost = addr.Address } } } } } config := &master.Config{ Config: &genericapiserver.Config{ StorageDestinations: storageDestinations, StorageVersions: storageVersions, ServiceClusterIPRange: &n, EnableLogsSupport: s.EnableLogsSupport, EnableUISupport: true, EnableSwaggerSupport: true, EnableProfiling: s.EnableProfiling, EnableWatchCache: s.EnableWatchCache, EnableIndex: true, APIPrefix: s.APIPrefix, APIGroupPrefix: s.APIGroupPrefix, CorsAllowedOriginList: s.CorsAllowedOriginList, ReadWritePort: s.SecurePort, PublicAddress: s.AdvertiseAddress, Authenticator: authenticator, SupportsBasicAuth: len(s.BasicAuthFile) > 0, Authorizer: authorizer, AdmissionControl: admissionController, APIGroupVersionOverrides: apiGroupVersionOverrides, MasterServiceNamespace: s.MasterServiceNamespace, MasterCount: s.MasterCount, ExternalHost: s.ExternalHost, MinRequestTimeout: s.MinRequestTimeout, ProxyDialer: proxyDialerFn, ProxyTLSClientConfig: proxyTLSClientConfig, ServiceNodePortRange: s.ServiceNodePortRange, KubernetesServiceNodePort: s.KubernetesServiceNodePort, }, EnableCoreControllers: true, EventTTL: s.EventTTL, KubeletClient: kubeletClient, Tunneler: tunneler, } m := master.New(config) // We serve on 2 ports. See docs/accessing_the_api.md secureLocation := "" if s.SecurePort != 0 { secureLocation = net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.SecurePort)) } insecureLocation := net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)) // See the flag commentary to understand our assumptions when opening the read-only and read-write ports. var sem chan bool if s.MaxRequestsInFlight > 0 { sem = make(chan bool, s.MaxRequestsInFlight) } longRunningRE := regexp.MustCompile(s.LongRunningRequestRE) longRunningTimeout := func(req *http.Request) (<-chan time.Time, string) { // TODO unify this with apiserver.MaxInFlightLimit if longRunningRE.MatchString(req.URL.Path) || req.URL.Query().Get("watch") == "true" { return nil, "" } return time.After(time.Minute), "" } if secureLocation != "" { handler := apiserver.TimeoutHandler(m.Handler, longRunningTimeout) secureServer := &http.Server{ Addr: secureLocation, Handler: apiserver.MaxInFlightLimit(sem, longRunningRE, apiserver.RecoverPanics(handler)), MaxHeaderBytes: 1 << 20, TLSConfig: &tls.Config{ // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) MinVersion: tls.VersionTLS10, }, } if len(s.ClientCAFile) > 0 { clientCAs, err := util.CertPoolFromFile(s.ClientCAFile) if err != nil { glog.Fatalf("Unable to load client CA file: %v", err) } // Populate PeerCertificates in requests, but don't reject connections without certificates // This allows certificates to be validated by authenticators, while still allowing other auth types secureServer.TLSConfig.ClientAuth = tls.RequestClientCert // Specify allowed CAs for client certificates secureServer.TLSConfig.ClientCAs = clientCAs } glog.Infof("Serving securely on %s", secureLocation) if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" { s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt") s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key") // TODO (cjcullen): Is PublicAddress the right address to sign a cert with? 
alternateIPs := []net.IP{config.ServiceReadWriteIP} alternateDNS := []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"} // It would be nice to set a fqdn subject alt name, but only the kubelets know, the apiserver is clueless // alternateDNS = append(alternateDNS, "kubernetes.default.svc.CLUSTER.DNS.NAME") if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile, alternateIPs, alternateDNS); err != nil { glog.Errorf("Unable to generate self signed cert: %v", err) } else { glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile) } } go func() { defer util.HandleCrash() for { // err == systemd.SdNotifyNoSocket when not running on a systemd system if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket { glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err) } if err := secureServer.ListenAndServeTLS(s.TLSCertFile, s.TLSPrivateKeyFile); err != nil { glog.Errorf("Unable to listen for secure (%v); will try again.", err) } time.Sleep(15 * time.Second) } }() } handler := apiserver.TimeoutHandler(m.InsecureHandler, longRunningTimeout) http := &http.Server{ Addr: insecureLocation, Handler: apiserver.RecoverPanics(handler), MaxHeaderBytes: 1 << 20, } if secureLocation == "" { // err == systemd.SdNotifyNoSocket when not running on a systemd system if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket { glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err) } } glog.Infof("Serving insecurely on %s", insecureLocation) glog.Fatal(http.ListenAndServe()) return nil }
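
The secure server in the version above loads the client CA file into a cert pool and sets ClientAuth to tls.RequestClientCert, so client certificates are requested but not required and can be validated later by the authenticators. A minimal standard-library sketch of that TLS setup follows; the file names "ca.crt", "apiserver.crt", and "apiserver.key" and the :6443 address are assumptions standing in for the corresponding flags.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"net/http"
	"os"
)

func main() {
	// "ca.crt" is a placeholder for s.ClientCAFile.
	caPEM, err := os.ReadFile("ca.crt")
	if err != nil {
		log.Fatalf("Unable to load client CA file: %v", err)
	}
	clientCAs := x509.NewCertPool()
	if !clientCAs.AppendCertsFromPEM(caPEM) {
		log.Fatal("no certificates found in client CA file")
	}

	server := &http.Server{
		Addr:           ":6443",
		MaxHeaderBytes: 1 << 20,
		TLSConfig: &tls.Config{
			MinVersion: tls.VersionTLS10, // matches the POODLE mitigation above
			// Populate PeerCertificates in requests without rejecting
			// connections that present no certificate.
			ClientAuth: tls.RequestClientCert,
			ClientCAs:  clientCAs,
		},
	}
	// Placeholders for s.TLSCertFile and s.TLSPrivateKeyFile.
	log.Fatal(server.ListenAndServeTLS("apiserver.crt", "apiserver.key"))
}
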
// Run runs the specified APIServer. This should never exit. func Run(s *options.ServerRunOptions) error { genericvalidation.VerifyEtcdServersList(s.GenericServerRunOptions) genericapiserver.DefaultAndValidateRunOptions(s.GenericServerRunOptions) genericConfig := genericapiserver.NewConfig(). // create the new config ApplyOptions(s.GenericServerRunOptions). // apply the options selected Complete() // set default values based on the known values serviceIPRange, apiServerServiceIP, err := genericapiserver.DefaultServiceIPRange(s.GenericServerRunOptions.ServiceClusterIPRange) if err != nil { glog.Fatalf("Error determining service IP ranges: %v", err) } if err := genericConfig.MaybeGenerateServingCerts(apiServerServiceIP); err != nil { glog.Fatalf("Failed to generate service certificate: %v", err) } capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: s.AllowPrivileged, // TODO(vmarmol): Implement support for HostNetworkSources. PrivilegedSources: capabilities.PrivilegedSources{ HostNetworkSources: []string{}, HostPIDSources: []string{}, HostIPCSources: []string{}, }, PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec, }) // Setup tunneler if needed var tunneler genericapiserver.Tunneler var proxyDialerFn apiserver.ProxyDialerFunc if len(s.SSHUser) > 0 { // Get ssh key distribution func, if supported var installSSH genericapiserver.InstallSSHKey cloud, err := cloudprovider.InitCloudProvider(s.GenericServerRunOptions.CloudProvider, s.GenericServerRunOptions.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) } if cloud != nil { if instances, supported := cloud.Instances(); supported { installSSH = instances.AddSSHKeyToAllInstances } } if s.KubeletConfig.Port == 0 { glog.Fatalf("Must enable kubelet port if proxy ssh-tunneling is specified.") } // Set up the tunneler // TODO(cjcullen): If we want this to handle per-kubelet ports or other // kubelet listen-addresses, we need to plumb through options. healthCheckPath := &url.URL{ Scheme: "https", Host: net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)), Path: "healthz", } tunneler = genericapiserver.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH) // Use the tunneler's dialer to connect to the kubelet s.KubeletConfig.Dial = tunneler.Dial // Use the tunneler's dialer when proxying to pods, services, and nodes proxyDialerFn = tunneler.Dial } // Proxying to pods and services is IP-based... don't expect to be able to verify the hostname proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true} if s.GenericServerRunOptions.StorageConfig.DeserializationCacheSize == 0 { // When size of cache is not explicitly set, estimate its size based on // target memory usage. glog.V(2).Infof("Initalizing deserialization cache size based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB) // This is the heuristics that from memory capacity is trying to infer // the maximum number of nodes in the cluster and set cache sizes based // on that value. // From our documentation, we officially recomment 120GB machines for // 2000 nodes, and we scale from that point. Thus we assume ~60MB of // capacity per node. // TODO: We may consider deciding that some percentage of memory will // be used for the deserialization cache and divide it by the max object // size to compute its size. We may even go further and measure // collective sizes of the objects in the cache. 
clusterSize := s.GenericServerRunOptions.TargetRAMMB / 60 s.GenericServerRunOptions.StorageConfig.DeserializationCacheSize = 25 * clusterSize if s.GenericServerRunOptions.StorageConfig.DeserializationCacheSize < 1000 { s.GenericServerRunOptions.StorageConfig.DeserializationCacheSize = 1000 } } storageGroupsToEncodingVersion, err := s.GenericServerRunOptions.StorageGroupsToEncodingVersion() if err != nil { glog.Fatalf("error generating storage version map: %s", err) } storageFactory, err := genericapiserver.BuildDefaultStorageFactory( s.GenericServerRunOptions.StorageConfig, s.GenericServerRunOptions.DefaultStorageMediaType, api.Codecs, genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion, // FIXME: this GroupVersionResource override should be configurable []schema.GroupVersionResource{batch.Resource("cronjobs").WithVersion("v2alpha1")}, master.DefaultAPIResourceConfigSource(), s.GenericServerRunOptions.RuntimeConfig) if err != nil { glog.Fatalf("error in initializing storage factory: %s", err) } storageFactory.AddCohabitatingResources(batch.Resource("jobs"), extensions.Resource("jobs")) storageFactory.AddCohabitatingResources(autoscaling.Resource("horizontalpodautoscalers"), extensions.Resource("horizontalpodautoscalers")) for _, override := range s.GenericServerRunOptions.EtcdServersOverrides { tokens := strings.Split(override, "#") if len(tokens) != 2 { glog.Errorf("invalid value of etcd server overrides: %s", override) continue } apiresource := strings.Split(tokens[0], "/") if len(apiresource) != 2 { glog.Errorf("invalid resource definition: %s", tokens[0]) continue } group := apiresource[0] resource := apiresource[1] groupResource := schema.GroupResource{Group: group, Resource: resource} servers := strings.Split(tokens[1], ";") storageFactory.SetEtcdLocation(groupResource, servers) } // Default to the private server key for service account token signing if len(s.ServiceAccountKeyFiles) == 0 && s.GenericServerRunOptions.TLSPrivateKeyFile != "" { if authenticator.IsValidServiceAccountKeyFile(s.GenericServerRunOptions.TLSPrivateKeyFile) { s.ServiceAccountKeyFiles = []string{s.GenericServerRunOptions.TLSPrivateKeyFile} } else { glog.Warning("No TLS key provided, service account token authentication disabled") } } var serviceAccountGetter serviceaccount.ServiceAccountTokenGetter if s.ServiceAccountLookup { // If we need to look up service accounts and tokens, // go directly to etcd to avoid recursive auth insanity storageConfig, err := storageFactory.NewConfig(api.Resource("serviceaccounts")) if err != nil { glog.Fatalf("Unable to get serviceaccounts storage: %v", err) } serviceAccountGetter = serviceaccountcontroller.NewGetterFromStorageInterface(storageConfig, storageFactory.ResourcePrefix(api.Resource("serviceaccounts")), storageFactory.ResourcePrefix(api.Resource("secrets"))) } apiAuthenticator, securityDefinitions, err := authenticator.New(authenticator.AuthenticatorConfig{ Anonymous: s.GenericServerRunOptions.AnonymousAuth, AnyToken: s.GenericServerRunOptions.EnableAnyToken, BasicAuthFile: s.GenericServerRunOptions.BasicAuthFile, ClientCAFile: s.GenericServerRunOptions.ClientCAFile, TokenAuthFile: s.GenericServerRunOptions.TokenAuthFile, OIDCIssuerURL: s.GenericServerRunOptions.OIDCIssuerURL, OIDCClientID: s.GenericServerRunOptions.OIDCClientID, OIDCCAFile: s.GenericServerRunOptions.OIDCCAFile, OIDCUsernameClaim: s.GenericServerRunOptions.OIDCUsernameClaim, OIDCGroupsClaim: s.GenericServerRunOptions.OIDCGroupsClaim, ServiceAccountKeyFiles: 
s.ServiceAccountKeyFiles, ServiceAccountLookup: s.ServiceAccountLookup, ServiceAccountTokenGetter: serviceAccountGetter, KeystoneURL: s.GenericServerRunOptions.KeystoneURL, KeystoneCAFile: s.GenericServerRunOptions.KeystoneCAFile, WebhookTokenAuthnConfigFile: s.WebhookTokenAuthnConfigFile, WebhookTokenAuthnCacheTTL: s.WebhookTokenAuthnCacheTTL, RequestHeaderConfig: s.GenericServerRunOptions.AuthenticationRequestHeaderConfig(), }) if err != nil { glog.Fatalf("Invalid Authentication Config: %v", err) } privilegedLoopbackToken := uuid.NewRandom().String() selfClientConfig, err := s.GenericServerRunOptions.NewSelfClientConfig(privilegedLoopbackToken) if err != nil { glog.Fatalf("Failed to create clientset: %v", err) } client, err := s.GenericServerRunOptions.NewSelfClient(privilegedLoopbackToken) if err != nil { glog.Errorf("Failed to create clientset: %v", err) } sharedInformers := informers.NewSharedInformerFactory(nil, client, 10*time.Minute) authorizationConfig := authorizer.AuthorizationConfig{ PolicyFile: s.GenericServerRunOptions.AuthorizationPolicyFile, WebhookConfigFile: s.GenericServerRunOptions.AuthorizationWebhookConfigFile, WebhookCacheAuthorizedTTL: s.GenericServerRunOptions.AuthorizationWebhookCacheAuthorizedTTL, WebhookCacheUnauthorizedTTL: s.GenericServerRunOptions.AuthorizationWebhookCacheUnauthorizedTTL, RBACSuperUser: s.GenericServerRunOptions.AuthorizationRBACSuperUser, InformerFactory: sharedInformers, } authorizationModeNames := strings.Split(s.GenericServerRunOptions.AuthorizationMode, ",") apiAuthorizer, err := authorizer.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, authorizationConfig) if err != nil { glog.Fatalf("Invalid Authorization Config: %v", err) } admissionControlPluginNames := strings.Split(s.GenericServerRunOptions.AdmissionControl, ",") // TODO(dims): We probably need to add an option "EnableLoopbackToken" if apiAuthenticator != nil { var uid = uuid.NewRandom().String() tokens := make(map[string]*user.DefaultInfo) tokens[privilegedLoopbackToken] = &user.DefaultInfo{ Name: user.APIServerUser, UID: uid, Groups: []string{user.SystemPrivilegedGroup}, } tokenAuthenticator := authenticator.NewAuthenticatorFromTokens(tokens) apiAuthenticator = authenticatorunion.New(tokenAuthenticator, apiAuthenticator) tokenAuthorizer := authorizer.NewPrivilegedGroups(user.SystemPrivilegedGroup) apiAuthorizer = authorizerunion.New(tokenAuthorizer, apiAuthorizer) } pluginInitializer := admission.NewPluginInitializer(sharedInformers, apiAuthorizer) admissionController, err := admission.NewFromPlugins(client, admissionControlPluginNames, s.GenericServerRunOptions.AdmissionControlConfigFile, pluginInitializer) if err != nil { glog.Fatalf("Failed to initialize plugins: %v", err) } proxyTransport := utilnet.SetTransportDefaults(&http.Transport{ Dial: proxyDialerFn, TLSClientConfig: proxyTLSClientConfig, }) kubeVersion := version.Get() genericConfig.Version = &kubeVersion genericConfig.LoopbackClientConfig = selfClientConfig genericConfig.Authenticator = apiAuthenticator genericConfig.Authorizer = apiAuthorizer genericConfig.AdmissionControl = admissionController genericConfig.APIResourceConfigSource = storageFactory.APIResourceConfigSource genericConfig.OpenAPIConfig.Info.Title = "Kubernetes" genericConfig.OpenAPIConfig.Definitions = generatedopenapi.OpenAPIDefinitions genericConfig.EnableOpenAPISupport = true genericConfig.EnableMetrics = true genericConfig.OpenAPIConfig.SecurityDefinitions = securityDefinitions config := &master.Config{ GenericConfig: 
genericConfig.Config, StorageFactory: storageFactory, EnableWatchCache: s.GenericServerRunOptions.EnableWatchCache, EnableCoreControllers: true, DeleteCollectionWorkers: s.GenericServerRunOptions.DeleteCollectionWorkers, EventTTL: s.EventTTL, KubeletClientConfig: s.KubeletConfig, EnableUISupport: true, EnableLogsSupport: true, ProxyTransport: proxyTransport, Tunneler: tunneler, ServiceIPRange: serviceIPRange, APIServerServiceIP: apiServerServiceIP, APIServerServicePort: 443, ServiceNodePortRange: s.GenericServerRunOptions.ServiceNodePortRange, KubernetesServiceNodePort: s.GenericServerRunOptions.KubernetesServiceNodePort, MasterCount: s.GenericServerRunOptions.MasterCount, } if s.GenericServerRunOptions.EnableWatchCache { glog.V(2).Infof("Initalizing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB) cachesize.InitializeWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) cachesize.SetWatchCacheSizes(s.GenericServerRunOptions.WatchCacheSizes) } m, err := config.Complete().New() if err != nil { return err } sharedInformers.Start(wait.NeverStop) m.GenericAPIServer.PrepareRun().Run(wait.NeverStop) return nil }
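
The version above sizes the etcd deserialization cache from target memory: it infers cluster size at roughly 60MB per node, allows 25 cache entries per inferred node, and floors the result at 1000. The small sketch below reproduces just that arithmetic; the sample TargetRAMMB values are illustrative.

package main

import "fmt"

// deserializationCacheSize mirrors the heuristic above: clusterSize is
// inferred from target memory (about 60MB per node), the cache gets 25
// entries per inferred node, with a floor of 1000 entries.
func deserializationCacheSize(targetRAMMB int) int {
	clusterSize := targetRAMMB / 60
	size := 25 * clusterSize
	if size < 1000 {
		size = 1000
	}
	return size
}

func main() {
	for _, mb := range []int{1000, 16000, 120000} {
		fmt.Printf("TargetRAMMB=%d -> cache size %d\n", mb, deserializationCacheSize(mb))
	}
}
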
// Run runs the specified APIServer. This should never exit. func Run(s *options.APIServer) error { genericapiserver.DefaultAndValidateRunOptions(s.ServerRunOptions) if len(s.StorageConfig.ServerList) == 0 { glog.Fatalf("--etcd-servers must be specified") } capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: s.AllowPrivileged, // TODO(vmarmol): Implement support for HostNetworkSources. PrivilegedSources: capabilities.PrivilegedSources{ HostNetworkSources: []string{}, HostPIDSources: []string{}, HostIPCSources: []string{}, }, PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec, }) cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) } // Setup tunneler if needed var tunneler genericapiserver.Tunneler var proxyDialerFn apiserver.ProxyDialerFunc if len(s.SSHUser) > 0 { // Get ssh key distribution func, if supported var installSSH genericapiserver.InstallSSHKey if cloud != nil { if instances, supported := cloud.Instances(); supported { installSSH = instances.AddSSHKeyToAllInstances } } if s.KubeletConfig.Port == 0 { glog.Fatalf("Must enable kubelet port if proxy ssh-tunneling is specified.") } // Set up the tunneler // TODO(cjcullen): If we want this to handle per-kubelet ports or other // kubelet listen-addresses, we need to plumb through options. healthCheckPath := &url.URL{ Scheme: "https", Host: net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)), Path: "healthz", } tunneler = genericapiserver.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH) // Use the tunneler's dialer to connect to the kubelet s.KubeletConfig.Dial = tunneler.Dial // Use the tunneler's dialer when proxying to pods, services, and nodes proxyDialerFn = tunneler.Dial } // Proxying to pods and services is IP-based... don't expect to be able to verify the hostname proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true} kubeletClient, err := kubeletclient.NewStaticKubeletClient(&s.KubeletConfig) if err != nil { glog.Fatalf("Failure to start kubelet client: %v", err) } apiResourceConfigSource, err := parseRuntimeConfig(s) if err != nil { glog.Fatalf("error in parsing runtime-config: %s", err) } clientConfig := &restclient.Config{ Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)), // Increase QPS limits. The client is currently passed to all admission plugins, // and those can be throttled in case of higher load on apiserver - see #22340 and #22422 // for more details. Once #22422 is fixed, we may want to remove it. 
QPS: 50, Burst: 100, } if len(s.DeprecatedStorageVersion) != 0 { gv, err := unversioned.ParseGroupVersion(s.DeprecatedStorageVersion) if err != nil { glog.Fatalf("error in parsing group version: %s", err) } clientConfig.GroupVersion = &gv } client, err := clientset.NewForConfig(clientConfig) if err != nil { glog.Errorf("Failed to create clientset: %v", err) } resourceEncoding := genericapiserver.NewDefaultResourceEncodingConfig() groupToEncoding, err := s.StorageGroupsToEncodingVersion() if err != nil { glog.Fatalf("error getting group encoding: %s", err) } for group, storageEncodingVersion := range groupToEncoding { resourceEncoding.SetVersionEncoding(group, storageEncodingVersion, unversioned.GroupVersion{Group: group, Version: runtime.APIVersionInternal}) } storageFactory := genericapiserver.NewDefaultStorageFactory(s.StorageConfig, api.Codecs, resourceEncoding, apiResourceConfigSource) storageFactory.AddCohabitatingResources(batch.Resource("jobs"), extensions.Resource("jobs")) storageFactory.AddCohabitatingResources(autoscaling.Resource("horizontalpodautoscalers"), extensions.Resource("horizontalpodautoscalers")) for _, override := range s.EtcdServersOverrides { tokens := strings.Split(override, "#") if len(tokens) != 2 { glog.Errorf("invalid value of etcd server overrides: %s", override) continue } apiresource := strings.Split(tokens[0], "/") if len(apiresource) != 2 { glog.Errorf("invalid resource definition: %s", tokens[0]) continue } group := apiresource[0] resource := apiresource[1] groupResource := unversioned.GroupResource{Group: group, Resource: resource} servers := strings.Split(tokens[1], ";") storageFactory.SetEtcdLocation(groupResource, servers) } // Default to the private server key for service account token signing if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" { if authenticator.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) { s.ServiceAccountKeyFile = s.TLSPrivateKeyFile } else { glog.Warning("No RSA key provided, service account token authentication disabled") } } var serviceAccountGetter serviceaccount.ServiceAccountTokenGetter if s.ServiceAccountLookup { // If we need to look up service accounts and tokens, // go directly to etcd to avoid recursive auth insanity storage, err := storageFactory.New(api.Resource("serviceaccounts")) if err != nil { glog.Fatalf("Unable to get serviceaccounts storage: %v", err) } serviceAccountGetter = serviceaccountcontroller.NewGetterFromStorageInterface(storage) } authenticator, err := authenticator.New(authenticator.AuthenticatorConfig{ BasicAuthFile: s.BasicAuthFile, ClientCAFile: s.ClientCAFile, TokenAuthFile: s.TokenAuthFile, OIDCIssuerURL: s.OIDCIssuerURL, OIDCClientID: s.OIDCClientID, OIDCCAFile: s.OIDCCAFile, OIDCUsernameClaim: s.OIDCUsernameClaim, OIDCGroupsClaim: s.OIDCGroupsClaim, ServiceAccountKeyFile: s.ServiceAccountKeyFile, ServiceAccountLookup: s.ServiceAccountLookup, ServiceAccountTokenGetter: serviceAccountGetter, KeystoneURL: s.KeystoneURL, }) if err != nil { glog.Fatalf("Invalid Authentication Config: %v", err) } authorizationModeNames := strings.Split(s.AuthorizationMode, ",") authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, s.AuthorizationConfig) if err != nil { glog.Fatalf("Invalid Authorization Config: %v", err) } admissionControlPluginNames := strings.Split(s.AdmissionControl, ",") admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile) if len(s.ExternalHost) == 0 { // TODO: extend for other 
providers
		if s.CloudProvider == "gce" {
			instances, supported := cloud.Instances()
			if !supported {
				glog.Fatalf("GCE cloud provider has no instances. this shouldn't happen. exiting.")
			}
			name, err := os.Hostname()
			if err != nil {
				glog.Fatalf("Failed to get hostname: %v", err)
			}
			addrs, err := instances.NodeAddresses(name)
			if err != nil {
				glog.Warningf("Unable to obtain external host address from cloud provider: %v", err)
			} else {
				for _, addr := range addrs {
					if addr.Type == api.NodeExternalIP {
						s.ExternalHost = addr.Address
					}
				}
			}
		}
	}

	genericConfig := genericapiserver.NewConfig(s.ServerRunOptions)
	// TODO: Move the following to generic api server as well.
	genericConfig.StorageFactory = storageFactory
	genericConfig.Authenticator = authenticator
	genericConfig.SupportsBasicAuth = len(s.BasicAuthFile) > 0
	genericConfig.Authorizer = authorizer
	genericConfig.AdmissionControl = admissionController
	genericConfig.APIResourceConfigSource = apiResourceConfigSource
	genericConfig.MasterServiceNamespace = s.MasterServiceNamespace
	genericConfig.ProxyDialer = proxyDialerFn
	genericConfig.ProxyTLSClientConfig = proxyTLSClientConfig
	genericConfig.Serializer = api.Codecs

	config := &master.Config{
		Config:                  genericConfig,
		EnableCoreControllers:   true,
		DeleteCollectionWorkers: s.DeleteCollectionWorkers,
		EventTTL:                s.EventTTL,
		KubeletClient:           kubeletClient,
		Tunneler:                tunneler,
	}

	if s.EnableWatchCache {
		cachesize.SetWatchCacheSizes(s.WatchCacheSizes)
	}

	m, err := master.New(config)
	if err != nil {
		return err
	}
	m.Run(s.ServerRunOptions)
	return nil
}
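// A minimal standalone sketch of the kubelet health-check URL construction used
// when setting up the SSH tunneler above. The package wrapper and the 10250
// placeholder port are illustrative assumptions only; the server takes the port
// from s.KubeletConfig.Port.
package main

import (
	"fmt"
	"net"
	"net/url"
	"strconv"
)

func main() {
	// Placeholder port for illustration; the kubelet conventionally listens on 10250.
	var kubeletPort uint = 10250

	// Same construction as above: https scheme, loopback host:port, healthz path.
	healthCheckPath := &url.URL{
		Scheme: "https",
		Host:   net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(kubeletPort), 10)),
		Path:   "healthz",
	}
	fmt.Println(healthCheckPath.String()) // prints https://127.0.0.1:10250/healthz
}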
// Run runs the specified APIServer. This should never exit. func Run(s *options.APIServer) error { genericapiserver.DefaultAndValidateRunOptions(s.ServerRunOptions) capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: s.AllowPrivileged, // TODO(vmarmol): Implement support for HostNetworkSources. PrivilegedSources: capabilities.PrivilegedSources{ HostNetworkSources: []string{}, HostPIDSources: []string{}, HostIPCSources: []string{}, }, PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec, }) // Setup tunneler if needed var tunneler genericapiserver.Tunneler var proxyDialerFn apiserver.ProxyDialerFunc if len(s.SSHUser) > 0 { // Get ssh key distribution func, if supported var installSSH genericapiserver.InstallSSHKey cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) } if cloud != nil { if instances, supported := cloud.Instances(); supported { installSSH = instances.AddSSHKeyToAllInstances } } if s.KubeletConfig.Port == 0 { glog.Fatalf("Must enable kubelet port if proxy ssh-tunneling is specified.") } // Set up the tunneler // TODO(cjcullen): If we want this to handle per-kubelet ports or other // kubelet listen-addresses, we need to plumb through options. healthCheckPath := &url.URL{ Scheme: "https", Host: net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)), Path: "healthz", } tunneler = genericapiserver.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH) // Use the tunneler's dialer to connect to the kubelet s.KubeletConfig.Dial = tunneler.Dial // Use the tunneler's dialer when proxying to pods, services, and nodes proxyDialerFn = tunneler.Dial } // Proxying to pods and services is IP-based... 
don't expect to be able to verify the hostname proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true} kubeletClient, err := kubeletclient.NewStaticKubeletClient(&s.KubeletConfig) if err != nil { glog.Fatalf("Failure to start kubelet client: %v", err) } storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion() if err != nil { glog.Fatalf("error generating storage version map: %s", err) } storageFactory, err := genericapiserver.BuildDefaultStorageFactory( s.StorageConfig, s.DefaultStorageMediaType, api.Codecs, genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion, master.DefaultAPIResourceConfigSource(), s.RuntimeConfig) if err != nil { glog.Fatalf("error in initializing storage factory: %s", err) } storageFactory.AddCohabitatingResources(batch.Resource("jobs"), extensions.Resource("jobs")) storageFactory.AddCohabitatingResources(autoscaling.Resource("horizontalpodautoscalers"), extensions.Resource("horizontalpodautoscalers")) for _, override := range s.EtcdServersOverrides { tokens := strings.Split(override, "#") if len(tokens) != 2 { glog.Errorf("invalid value of etcd server overrides: %s", override) continue } apiresource := strings.Split(tokens[0], "/") if len(apiresource) != 2 { glog.Errorf("invalid resource definition: %s", tokens[0]) continue } group := apiresource[0] resource := apiresource[1] groupResource := unversioned.GroupResource{Group: group, Resource: resource} servers := strings.Split(tokens[1], ";") storageFactory.SetEtcdLocation(groupResource, servers) } // Default to the private server key for service account token signing if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" { if authenticator.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) { s.ServiceAccountKeyFile = s.TLSPrivateKeyFile } else { glog.Warning("No RSA key provided, service account token authentication disabled") } } var serviceAccountGetter serviceaccount.ServiceAccountTokenGetter if s.ServiceAccountLookup { // If we need to look up service accounts and tokens, // go directly to etcd to avoid recursive auth insanity storage, err := storageFactory.New(api.Resource("serviceaccounts")) if err != nil { glog.Fatalf("Unable to get serviceaccounts storage: %v", err) } serviceAccountGetter = serviceaccountcontroller.NewGetterFromStorageInterface(storage) } authenticator, err := authenticator.New(authenticator.AuthenticatorConfig{ BasicAuthFile: s.BasicAuthFile, ClientCAFile: s.ClientCAFile, TokenAuthFile: s.TokenAuthFile, OIDCIssuerURL: s.OIDCIssuerURL, OIDCClientID: s.OIDCClientID, OIDCCAFile: s.OIDCCAFile, OIDCUsernameClaim: s.OIDCUsernameClaim, OIDCGroupsClaim: s.OIDCGroupsClaim, ServiceAccountKeyFile: s.ServiceAccountKeyFile, ServiceAccountLookup: s.ServiceAccountLookup, ServiceAccountTokenGetter: serviceAccountGetter, KeystoneURL: s.KeystoneURL, WebhookTokenAuthnConfigFile: s.WebhookTokenAuthnConfigFile, WebhookTokenAuthnCacheTTL: s.WebhookTokenAuthnCacheTTL, }) if err != nil { glog.Fatalf("Invalid Authentication Config: %v", err) } authorizationModeNames := strings.Split(s.AuthorizationMode, ",") authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, s.AuthorizationConfig) if err != nil { glog.Fatalf("Invalid Authorization Config: %v", err) } admissionControlPluginNames := strings.Split(s.AdmissionControl, ",") client, err := s.NewSelfClient() if err != nil { glog.Errorf("Failed to create clientset: %v", err) } admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, 
s.AdmissionControlConfigFile)

	genericConfig := genericapiserver.NewConfig(s.ServerRunOptions)
	// TODO: Move the following to generic api server as well.
	genericConfig.StorageFactory = storageFactory
	genericConfig.Authenticator = authenticator
	genericConfig.SupportsBasicAuth = len(s.BasicAuthFile) > 0
	genericConfig.Authorizer = authorizer
	genericConfig.AdmissionControl = admissionController
	genericConfig.APIResourceConfigSource = storageFactory.APIResourceConfigSource
	genericConfig.MasterServiceNamespace = s.MasterServiceNamespace
	genericConfig.ProxyDialer = proxyDialerFn
	genericConfig.ProxyTLSClientConfig = proxyTLSClientConfig
	genericConfig.Serializer = api.Codecs

	config := &master.Config{
		Config:                  genericConfig,
		EnableCoreControllers:   true,
		DeleteCollectionWorkers: s.DeleteCollectionWorkers,
		EventTTL:                s.EventTTL,
		KubeletClient:           kubeletClient,
		Tunneler:                tunneler,
	}

	if s.EnableWatchCache {
		cachesize.SetWatchCacheSizes(s.WatchCacheSizes)
	}

	m, err := master.New(config)
	if err != nil {
		return err
	}
	m.Run(s.ServerRunOptions)
	return nil
}
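// A minimal standalone sketch of the --etcd-servers-overrides parsing performed in
// the loop above, which expects the form "group/resource#server1;server2". The
// package wrapper and the sample override value are illustrative assumptions only;
// the real code hands the result to storageFactory.SetEtcdLocation.
package main

import (
	"fmt"
	"strings"
)

// parseEtcdOverride mirrors the splitting logic above: '#' separates the resource
// from its server list, '/' separates group from resource, ';' separates servers.
func parseEtcdOverride(override string) (group, resource string, servers []string, ok bool) {
	tokens := strings.Split(override, "#")
	if len(tokens) != 2 {
		return "", "", nil, false
	}
	apiresource := strings.Split(tokens[0], "/")
	if len(apiresource) != 2 {
		return "", "", nil, false
	}
	return apiresource[0], apiresource[1], strings.Split(tokens[1], ";"), true
}

func main() {
	// Hypothetical override value, for illustration only.
	group, resource, servers, ok := parseEtcdOverride("extensions/daemonsets#https://127.0.0.1:2379")
	fmt.Println(group, resource, servers, ok) // prints: extensions daemonsets [https://127.0.0.1:2379] true
}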
// Run runs the specified APIServer. This should never exit. func Run(s *options.ServerRunOptions) error { if errs := s.Etcd.Validate(); len(errs) > 0 { return utilerrors.NewAggregate(errs) } if err := s.GenericServerRunOptions.DefaultExternalAddress(s.SecureServing, s.InsecureServing); err != nil { return err } serviceIPRange, apiServerServiceIP, err := master.DefaultServiceIPRange(s.GenericServerRunOptions.ServiceClusterIPRange) if err != nil { return fmt.Errorf("error determining service IP ranges: %v", err) } if err := s.SecureServing.MaybeDefaultWithSelfSignedCerts(s.GenericServerRunOptions.AdvertiseAddress.String(), apiServerServiceIP); err != nil { return fmt.Errorf("error creating self-signed certificates: %v", err) } genericapiserver.DefaultAndValidateRunOptions(s.GenericServerRunOptions) genericConfig := genericapiserver.NewConfig(). // create the new config ApplyOptions(s.GenericServerRunOptions). // apply the options selected ApplyInsecureServingOptions(s.InsecureServing) if _, err := genericConfig.ApplySecureServingOptions(s.SecureServing); err != nil { return fmt.Errorf("failed to configure https: %s", err) } if _, err = genericConfig.ApplyAuthenticationOptions(s.Authentication); err != nil { return fmt.Errorf("failed to configure authentication: %s", err) } capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: s.AllowPrivileged, // TODO(vmarmol): Implement support for HostNetworkSources. PrivilegedSources: capabilities.PrivilegedSources{ HostNetworkSources: []string{}, HostPIDSources: []string{}, HostIPCSources: []string{}, }, PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec, }) // Setup tunneler if needed var tunneler genericapiserver.Tunneler var proxyDialerFn apiserver.ProxyDialerFunc if len(s.SSHUser) > 0 { // Get ssh key distribution func, if supported var installSSH genericapiserver.InstallSSHKey cloud, err := cloudprovider.InitCloudProvider(s.GenericServerRunOptions.CloudProvider, s.GenericServerRunOptions.CloudConfigFile) if err != nil { return fmt.Errorf("cloud provider could not be initialized: %v", err) } if cloud != nil { if instances, supported := cloud.Instances(); supported { installSSH = instances.AddSSHKeyToAllInstances } } if s.KubeletConfig.Port == 0 { return fmt.Errorf("must enable kubelet port if proxy ssh-tunneling is specified") } // Set up the tunneler // TODO(cjcullen): If we want this to handle per-kubelet ports or other // kubelet listen-addresses, we need to plumb through options. healthCheckPath := &url.URL{ Scheme: "https", Host: net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)), Path: "healthz", } tunneler = genericapiserver.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH) // Use the tunneler's dialer to connect to the kubelet s.KubeletConfig.Dial = tunneler.Dial // Use the tunneler's dialer when proxying to pods, services, and nodes proxyDialerFn = tunneler.Dial } // Proxying to pods and services is IP-based... don't expect to be able to verify the hostname proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true} if s.Etcd.StorageConfig.DeserializationCacheSize == 0 { // When size of cache is not explicitly set, estimate its size based on // target memory usage. glog.V(2).Infof("Initalizing deserialization cache size based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB) // This is the heuristics that from memory capacity is trying to infer // the maximum number of nodes in the cluster and set cache sizes based // on that value. 
// From our documentation, we officially recommend 120GB machines for // 2000 nodes, and we scale from that point. Thus we assume ~60MB of // capacity per node. // TODO: We may consider deciding that some percentage of memory will // be used for the deserialization cache and divide it by the max object // size to compute its size. We may even go further and measure // collective sizes of the objects in the cache. clusterSize := s.GenericServerRunOptions.TargetRAMMB / 60 s.Etcd.StorageConfig.DeserializationCacheSize = 25 * clusterSize if s.Etcd.StorageConfig.DeserializationCacheSize < 1000 { s.Etcd.StorageConfig.DeserializationCacheSize = 1000 } } storageGroupsToEncodingVersion, err := s.GenericServerRunOptions.StorageGroupsToEncodingVersion() if err != nil { return fmt.Errorf("error generating storage version map: %s", err) } storageFactory, err := genericapiserver.BuildDefaultStorageFactory( s.Etcd.StorageConfig, s.GenericServerRunOptions.DefaultStorageMediaType, api.Codecs, genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion, // FIXME: this GroupVersionResource override should be configurable []schema.GroupVersionResource{batch.Resource("cronjobs").WithVersion("v2alpha1")}, master.DefaultAPIResourceConfigSource(), s.GenericServerRunOptions.RuntimeConfig) if err != nil { return fmt.Errorf("error in initializing storage factory: %s", err) } storageFactory.AddCohabitatingResources(batch.Resource("jobs"), extensions.Resource("jobs")) storageFactory.AddCohabitatingResources(autoscaling.Resource("horizontalpodautoscalers"), extensions.Resource("horizontalpodautoscalers")) for _, override := range s.Etcd.EtcdServersOverrides { tokens := strings.Split(override, "#") if len(tokens) != 2 { glog.Errorf("invalid value of etcd server overrides: %s", override) continue } apiresource := strings.Split(tokens[0], "/") if len(apiresource) != 2 { glog.Errorf("invalid resource definition: %s", tokens[0]) continue } group := apiresource[0] resource := apiresource[1] groupResource := schema.GroupResource{Group: group, Resource: resource} servers := strings.Split(tokens[1], ";") storageFactory.SetEtcdLocation(groupResource, servers) } // Default to the private server key for service account token signing if len(s.Authentication.ServiceAccounts.KeyFiles) == 0 && s.SecureServing.ServerCert.CertKey.KeyFile != "" { if authenticator.IsValidServiceAccountKeyFile(s.SecureServing.ServerCert.CertKey.KeyFile) { s.Authentication.ServiceAccounts.KeyFiles = []string{s.SecureServing.ServerCert.CertKey.KeyFile} } else { glog.Warning("No TLS key provided, service account token authentication disabled") } } authenticatorConfig := s.Authentication.ToAuthenticationConfig() if s.Authentication.ServiceAccounts.Lookup { // If we need to look up service accounts and tokens, // go directly to etcd to avoid recursive auth insanity storageConfig, err := storageFactory.NewConfig(api.Resource("serviceaccounts")) if err != nil { return fmt.Errorf("unable to get serviceaccounts storage: %v", err) } authenticatorConfig.ServiceAccountTokenGetter = serviceaccountcontroller.NewGetterFromStorageInterface(storageConfig, storageFactory.ResourcePrefix(api.Resource("serviceaccounts")), storageFactory.ResourcePrefix(api.Resource("secrets"))) } apiAuthenticator, securityDefinitions, err := authenticator.New(authenticatorConfig) if err != nil { return fmt.Errorf("invalid Authentication Config: %v", err) } privilegedLoopbackToken := uuid.NewRandom().String() selfClientConfig, err :=
genericapiserver.NewSelfClientConfig(genericConfig.SecureServingInfo, genericConfig.InsecureServingInfo, privilegedLoopbackToken) if err != nil { return fmt.Errorf("failed to create clientset: %v", err) } client, err := internalclientset.NewForConfig(selfClientConfig) if err != nil { kubeAPIVersions := os.Getenv("KUBE_API_VERSIONS") if len(kubeAPIVersions) == 0 { return fmt.Errorf("failed to create clientset: %v", err) } // KUBE_API_VERSIONS is used in test-update-storage-objects.sh, disabling a number of API // groups. This leads to a nil client above and undefined behaviour further down. // TODO: get rid of KUBE_API_VERSIONS or define sane behaviour if set glog.Errorf("Failed to create clientset with KUBE_API_VERSIONS=%q. KUBE_API_VERSIONS is only for testing. Things will break.", kubeAPIVersions) } sharedInformers := informers.NewSharedInformerFactory(nil, client, 10*time.Minute) authorizationConfig := s.Authorization.ToAuthorizationConfig(sharedInformers) apiAuthorizer, err := authorizer.NewAuthorizerFromAuthorizationConfig(authorizationConfig) if err != nil { return fmt.Errorf("invalid Authorization Config: %v", err) } admissionControlPluginNames := strings.Split(s.GenericServerRunOptions.AdmissionControl, ",") pluginInitializer := admission.NewPluginInitializer(sharedInformers, apiAuthorizer) admissionController, err := admission.NewFromPlugins(client, admissionControlPluginNames, s.GenericServerRunOptions.AdmissionControlConfigFile, pluginInitializer) if err != nil { return fmt.Errorf("failed to initialize plugins: %v", err) } proxyTransport := utilnet.SetTransportDefaults(&http.Transport{ Dial: proxyDialerFn, TLSClientConfig: proxyTLSClientConfig, }) kubeVersion := version.Get() genericConfig.Version = &kubeVersion genericConfig.LoopbackClientConfig = selfClientConfig genericConfig.Authenticator = apiAuthenticator genericConfig.Authorizer = apiAuthorizer genericConfig.AdmissionControl = admissionController genericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.OpenAPIDefinitions) genericConfig.OpenAPIConfig.SecurityDefinitions = securityDefinitions genericConfig.OpenAPIConfig.Info.Title = "Kubernetes" genericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig() genericConfig.EnableMetrics = true genericConfig.LongRunningFunc = filters.BasicLongRunningRequestCheck( sets.NewString("watch", "proxy"), sets.NewString("attach", "exec", "proxy", "log", "portforward"), ) config := &master.Config{ GenericConfig: genericConfig, APIResourceConfigSource: storageFactory.APIResourceConfigSource, StorageFactory: storageFactory, EnableWatchCache: s.GenericServerRunOptions.EnableWatchCache, EnableCoreControllers: true, DeleteCollectionWorkers: s.GenericServerRunOptions.DeleteCollectionWorkers, EventTTL: s.EventTTL, KubeletClientConfig: s.KubeletConfig, EnableUISupport: true, EnableLogsSupport: true, ProxyTransport: proxyTransport, Tunneler: tunneler, ServiceIPRange: serviceIPRange, APIServerServiceIP: apiServerServiceIP, APIServerServicePort: 443, ServiceNodePortRange: s.GenericServerRunOptions.ServiceNodePortRange, KubernetesServiceNodePort: s.GenericServerRunOptions.KubernetesServiceNodePort, MasterCount: s.GenericServerRunOptions.MasterCount, } if s.GenericServerRunOptions.EnableWatchCache { glog.V(2).Infof("Initalizing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB) cachesize.InitializeWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) cachesize.SetWatchCacheSizes(s.GenericServerRunOptions.WatchCacheSizes) } m, err := 
config.Complete().New()
	if err != nil {
		return err
	}

	sharedInformers.Start(wait.NeverStop)
	m.GenericAPIServer.PrepareRun().Run(wait.NeverStop)
	return nil
}
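// A minimal standalone sketch of the deserialization cache sizing heuristic applied
// above when the cache size is not explicitly set: infer the cluster size from target
// RAM at roughly 60MB per node, budget 25 cache entries per inferred node, and floor
// the result at 1000. The package wrapper and the 120000MB sample value are
// illustrative assumptions only (120GB corresponds to the 2000-node guidance quoted
// in the comment above).
package main

import "fmt"

// deserializationCacheSize mirrors the arithmetic above.
func deserializationCacheSize(targetRAMMB int) int {
	clusterSize := targetRAMMB / 60
	size := 25 * clusterSize
	if size < 1000 {
		size = 1000
	}
	return size
}

func main() {
	fmt.Println(deserializationCacheSize(120000)) // prints 50000 for a ~2000-node cluster
}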