// RunBuildController starts the build sync loop for builds and buildConfig processing.
func (c *MasterConfig) RunBuildController(informers shared.InformerFactory) error {
    // initialize build controller
    dockerImage := c.ImageFor("docker-builder")
    stiImage := c.ImageFor("sti-builder")

    storageVersion := c.Options.EtcdStorageConfig.OpenShiftStorageVersion
    groupVersion := unversioned.GroupVersion{Group: "", Version: storageVersion}
    codec := kapi.Codecs.LegacyCodec(groupVersion)

    admissionControl := admission.InitPlugin("SecurityContextConstraint", clientadapter.FromUnversionedClient(c.PrivilegedLoopbackKubernetesClient), "")
    if wantsInformers, ok := admissionControl.(cmdadmission.WantsInformers); ok {
        wantsInformers.SetInformers(informers)
    }

    buildDefaults, err := builddefaults.NewBuildDefaults(c.Options.AdmissionConfig.PluginConfig)
    if err != nil {
        return err
    }
    buildOverrides, err := buildoverrides.NewBuildOverrides(c.Options.AdmissionConfig.PluginConfig)
    if err != nil {
        return err
    }

    osclient, kclient := c.BuildControllerClients()
    factory := buildcontrollerfactory.BuildControllerFactory{
        KubeClient:   kclient,
        OSClient:     osclient,
        BuildUpdater: buildclient.NewOSClientBuildClient(osclient),
        BuildLister:  buildclient.NewOSClientBuildClient(osclient),
        DockerBuildStrategy: &buildstrategy.DockerBuildStrategy{
            Image: dockerImage,
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: codec,
        },
        SourceBuildStrategy: &buildstrategy.SourceBuildStrategy{
            Image: stiImage,
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec:            codec,
            AdmissionControl: admissionControl,
        },
        CustomBuildStrategy: &buildstrategy.CustomBuildStrategy{
            // TODO: this will be set to --storage-version (the internal schema we use)
            Codec: codec,
        },
        BuildDefaults:  buildDefaults,
        BuildOverrides: buildOverrides,
    }

    controller := factory.Create()
    controller.Run()
    deleteController := factory.CreateDeleteController()
    deleteController.Run()
    return nil
}
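// The sketch below is illustrative only: it shows how a caller might start the
// build controllers during master startup. startBuildControllers is a
// hypothetical wrapper, not part of the code above.
func startBuildControllers(c *MasterConfig, informers shared.InformerFactory) {
    // RunBuildController wires up the build and build-deletion controllers and
    // returns an error only if the build defaulters/overriders fail to load.
    if err := c.RunBuildController(informers); err != nil {
        glog.Fatalf("could not start build controllers: %v", err)
    }
}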
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
    if options.KubernetesMasterConfig == nil {
        return nil, errors.New("insufficient information to build KubernetesMasterConfig")
    }

    // Connect and setup etcd interfaces
    etcdClient, err := etcd.EtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)
    kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
    if err != nil {
        return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
    }

    // in-order list of plug-ins that should intercept admission decisions
    // TODO: Push node environment support to upstream in future

    _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, err
    }
    port, err := strconv.Atoi(portString)
    if err != nil {
        return nil, err
    }

    portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
    if err != nil {
        return nil, err
    }

    podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
    if err != nil {
        return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
    }

    server := app.NewAPIServer()
    server.EventTTL = 2 * time.Hour
    server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
    server.ServiceNodePortRange = *portRange
    server.AdmissionControl = strings.Join(AdmissionPlugins, ",")

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cmserver := cmapp.NewCMServer()
    cmserver.PodEvictionTimeout = podEvictionTimeout
    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
    }

    plugins := []admission.Interface{}
    for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
        switch pluginName {
        case saadmit.PluginName:
            // we need to set some custom parameters on the service account admission controller, so create that one by hand
            saAdmitter := saadmit.NewServiceAccount(kubeClient)
            saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
            saAdmitter.Run()
            plugins = append(plugins, saAdmitter)
        default:
            plugin := admission.InitPlugin(pluginName, kubeClient, server.AdmissionControlConfigFile)
            if plugin != nil {
                plugins = append(plugins, plugin)
            }
        }
    }
    admissionController := admission.NewChainHandler(plugins...)

    var proxyClientCerts []tls.Certificate
    if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
        clientCert, err := tls.LoadX509KeyPair(
            options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
            options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
        )
        if err != nil {
            return nil, err
        }
        proxyClientCerts = append(proxyClientCerts, clientCert)
    }

    // TODO: you have to know every APIGroup you're enabling or upstream will panic; its alternative to panicking is calling Fatal.
    // This needs a refactor to return errors.
    storageDestinations := master.NewStorageDestinations()
    // storageVersions is a map from API group to allowed versions; each value must be a version exposed by the REST API or it breaks.
    // We need to fix the upstream to stop using the storage version as a preferred api version.
    storageVersions := map[string]string{}

    enabledKubeVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupKube)
    enabledKubeVersionSet := sets.NewString(enabledKubeVersions...)
    if len(enabledKubeVersions) > 0 {
        databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesForLegacyGroup, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
        if err != nil {
            return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
        }
        storageDestinations.AddAPIGroup(configapi.APIGroupKube, databaseStorage)
        storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion
    }

    enabledExtensionsVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions)
    if len(enabledExtensionsVersions) > 0 {
        groupMeta, err := kapilatest.Group(configapi.APIGroupExtensions)
        if err != nil {
            return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", err)
        }
        // TODO: expose storage version options for api groups
        databaseStorage, err := master.NewEtcdStorage(etcdClient, groupMeta.InterfacesFor, groupMeta.GroupVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
        if err != nil {
            return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", err)
        }
        storageDestinations.AddAPIGroup(configapi.APIGroupExtensions, databaseStorage)
        storageVersions[configapi.APIGroupExtensions] = enabledExtensionsVersions[0]
    }

    m := &master.Config{
        PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
        ReadWritePort: port,

        StorageDestinations: storageDestinations,
        StorageVersions:     storageVersions,

        EventTTL: server.EventTTL,
        //MinRequestTimeout: server.MinRequestTimeout,

        ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
        ServiceNodePortRange:  server.ServiceNodePortRange,

        RequestContextMapper: requestContextMapper,

        KubeletClient:  kubeletClient,
        APIPrefix:      KubeAPIPrefix,
        APIGroupPrefix: KubeAPIGroupPrefix,

        EnableCoreControllers: true,

        MasterCount: options.KubernetesMasterConfig.MasterCount,

        Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
        AdmissionControl: admissionController,

        EnableExp: len(enabledExtensionsVersions) > 0,
        DisableV1: !enabledKubeVersionSet.Has("v1"),

        // Set the TLS options for proxying to pods and services
        // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
        ProxyTLSClientConfig: &tls.Config{
            // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
            InsecureSkipVerify: true,
            Certificates:       proxyClientCerts,
        },
    }

    // set for consistency -- Origin only uses m.EnableExp
    cmserver.EnableExperimental = m.EnableExp

    if options.DNSConfig != nil {
        _, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
        if err != nil {
            return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
        }
        dnsPort, err := strconv.Atoi(dnsPortStr)
        if err != nil {
            return nil, fmt.Errorf("invalid DNS port: %v", err)
        }
        m.ExtraServicePorts = append(m.ExtraServicePorts,
            kapi.ServicePort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP, TargetPort: util.NewIntOrStringFromInt(dnsPort)},
            kapi.ServicePort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP, TargetPort: util.NewIntOrStringFromInt(dnsPort)},
        )
        m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
            kapi.EndpointPort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP},
            kapi.EndpointPort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP},
        )
    }

    kmaster := &MasterConfig{
        Options:           *options.KubernetesMasterConfig,
        KubeClient:        kubeClient,
        Master:            m,
        ControllerManager: cmserver,
        CloudProvider:     cloud,
    }

    return kmaster, nil
}
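// A minimal, hypothetical call site for the constructor above, showing the
// inputs it expects; newKubeMasterConfig and its wiring are assumptions, not
// part of the original code.
func newKubeMasterConfig(options configapi.MasterConfig, kubeClient *kclient.Client) (*MasterConfig, error) {
    // The request context mapper is normally shared with the Origin master.
    requestContextMapper := kapi.NewRequestContextMapper()
    return BuildKubernetesMasterConfig(options, requestContextMapper, kubeClient)
}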
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
    if options.KubernetesMasterConfig == nil {
        return nil, errors.New("insufficient information to build KubernetesMasterConfig")
    }

    // Connect and setup etcd interfaces
    etcdClient, err := etcd.EtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesFor, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
    if err != nil {
        return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
    }

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)
    kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
    if err != nil {
        return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
    }

    // in-order list of plug-ins that should intercept admission decisions
    // TODO: Push node environment support to upstream in future

    _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, err
    }
    port, err := strconv.Atoi(portString)
    if err != nil {
        return nil, err
    }

    portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
    if err != nil {
        return nil, err
    }

    podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
    if err != nil {
        return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
    }

    server := app.NewAPIServer()
    server.EventTTL = 2 * time.Hour
    server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
    server.ServiceNodePortRange = *portRange
    server.AdmissionControl = strings.Join(AdmissionPlugins, ",")

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cmserver := cmapp.NewCMServer()
    cmserver.PodEvictionTimeout = podEvictionTimeout
    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
    if err != nil {
        return nil, err
    }

    plugins := []admission.Interface{}
    for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
        switch pluginName {
        case saadmit.PluginName:
            // we need to set some custom parameters on the service account admission controller, so create that one by hand
            saAdmitter := saadmit.NewServiceAccount(kubeClient)
            saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
            saAdmitter.Run()
            plugins = append(plugins, saAdmitter)
        default:
            plugin := admission.InitPlugin(pluginName, kubeClient, server.AdmissionControlConfigFile)
            if plugin != nil {
                plugins = append(plugins, plugin)
            }
        }
    }
    admissionController := admission.NewChainHandler(plugins...)

    var proxyClientCerts []tls.Certificate
    if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
        clientCert, err := tls.LoadX509KeyPair(
            options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
            options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
        )
        if err != nil {
            return nil, err
        }
        proxyClientCerts = append(proxyClientCerts, clientCert)
    }

    m := &master.Config{
        PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
        ReadWritePort: port,

        DatabaseStorage:    databaseStorage,
        ExpDatabaseStorage: databaseStorage,

        EventTTL: server.EventTTL,
        //MinRequestTimeout: server.MinRequestTimeout,

        ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
        ServiceNodePortRange:  server.ServiceNodePortRange,

        RequestContextMapper: requestContextMapper,

        KubeletClient: kubeletClient,
        APIPrefix:     KubeAPIPrefix,

        EnableCoreControllers: true,

        MasterCount: options.KubernetesMasterConfig.MasterCount,

        Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
        AdmissionControl: admissionController,

        EnableV1Beta3: configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1beta3"),
        DisableV1:     !configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1"),

        // Set the TLS options for proxying to pods and services
        // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
        ProxyTLSClientConfig: &tls.Config{
            // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
            InsecureSkipVerify: true,
            Certificates:       proxyClientCerts,
        },
    }

    kmaster := &MasterConfig{
        Options:           *options.KubernetesMasterConfig,
        KubeClient:        kubeClient,
        Master:            m,
        ControllerManager: cmserver,
        CloudProvider:     cloud,
    }

    return kmaster, nil
}
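// Illustrative only: the version gating used above, isolated into a
// hypothetical helper so the v1beta3/v1 toggles are easier to see.
func kubeAPILevelToggles(options configapi.MasterConfig) (enableV1Beta3, disableV1 bool) {
    // A level is enabled only if it appears in the configured Kubernetes API levels.
    enableV1Beta3 = configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1beta3")
    disableV1 = !configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1")
    return enableV1Beta3, disableV1
}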
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
    client, err := etcd.EtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
    etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
    if err != nil {
        return nil, fmt.Errorf("Error setting up server storage: %v", err)
    }

    clientCAs, err := configapi.GetClientCertCAPool(options)
    if err != nil {
        return nil, err
    }
    apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
    if err != nil {
        return nil, err
    }

    privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }
    privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    policyCache, policyClient := newReadOnlyCacheAndClient(etcdHelper)
    requestContextMapper := kapi.NewRequestContextMapper()

    groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupstorage.NewREST(etcdHelper)))
    projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)

    // in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
    admissionControlPluginNames := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}
    if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
        admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
    }

    pluginInitializer := oadmission.PluginInitializer{
        OpenshiftClient: privilegedLoopbackOpenShiftClient,
        ProjectCache:    projectCache,
    }

    plugins := []admission.Interface{}
    for _, pluginName := range admissionControlPluginNames {
        configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
        if err != nil {
            return nil, err
        }
        plugin := admission.InitPlugin(pluginName, privilegedLoopbackKubeClient, configFile)
        if plugin != nil {
            plugins = append(plugins, plugin)
        }
    }
    pluginInitializer.Initialize(plugins)
    // ensure that plugins have been properly initialized
    if err := oadmission.Validate(plugins); err != nil {
        return nil, err
    }
    admissionController := admission.NewChainHandler(plugins...)

    serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
    if err != nil {
        return nil, err
    }

    plug, plugStart := newControllerPlug(options, client)

    authorizer := newAuthorizer(policyClient, options.ProjectConfig.ProjectRequestMessage)

    config := &MasterConfig{
        Options: options,

        Authenticator:                 newAuthenticator(options, etcdHelper, serviceAccountTokenGetter, apiClientCAs, groupCache),
        Authorizer:                    authorizer,
        AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

        PolicyCache:               policyCache,
        GroupCache:                groupCache,
        ProjectAuthorizationCache: newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, policyClient),
        ProjectCache:              projectCache,

        RequestContextMapper: requestContextMapper,

        AdmissionControl: admissionController,

        TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

        ControllerPlug:      plug,
        ControllerPlugStart: plugStart,

        ImageFor:            imageTemplate.ExpandOrDie,
        EtcdHelper:          etcdHelper,
        KubeletClientConfig: kubeletClientConfig,

        ClientCAs:    clientCAs,
        APIClientCAs: apiClientCAs,

        PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
        PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
        PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
    }

    return config, nil
}
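// A sketch of the expected call pattern, assuming the caller has already read
// and validated a master config file; readAndValidateMasterConfig is a
// hypothetical loader, not an API shown above.
func buildOriginMaster(configFile string) (*MasterConfig, error) {
    // Hypothetical: returns a populated configapi.MasterConfig or an error.
    options, err := readAndValidateMasterConfig(configFile)
    if err != nil {
        return nil, err
    }
    return BuildMasterConfig(options)
}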
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, pluginInitializer oadmission.PluginInitializer) (*MasterConfig, error) {
    if options.KubernetesMasterConfig == nil {
        return nil, errors.New("insufficient information to build KubernetesMasterConfig")
    }

    // Connect and setup etcd interfaces
    etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)
    kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
    if err != nil {
        return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
    }

    // in-order list of plug-ins that should intercept admission decisions
    // TODO: Push node environment support to upstream in future

    _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, err
    }
    port, err := strconv.Atoi(portString)
    if err != nil {
        return nil, err
    }

    portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
    if err != nil {
        return nil, err
    }

    podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
    if err != nil {
        return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
    }

    // Defaults are tested in TestAPIServerDefaults
    server := apiserveroptions.NewAPIServer()
    // Adjust defaults
    server.EventTTL = 2 * time.Hour
    server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
    server.ServiceNodePortRange = *portRange
    server.AdmissionControl = strings.Join(AdmissionPlugins, ",")
    server.EnableLogsSupport = false // don't expose server logs

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    if len(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride) > 0 {
        server.AdmissionControl = strings.Join(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride, ",")
    }

    // Defaults are tested in TestCMServerDefaults
    cmserver := cmapp.NewCMServer()
    // Adjust defaults
    cmserver.Address = "" // no healthz endpoint
    cmserver.Port = 0     // no healthz endpoint
    cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
    }

    plugins := []admission.Interface{}
    for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
        switch pluginName {
        case serviceadmit.ExternalIPPluginName:
            // this needs to be moved upstream to be part of core config
            reject, admit, err := serviceadmit.ParseCIDRRules(options.NetworkConfig.ExternalIPNetworkCIDRs)
            if err != nil {
                // should have been caught with validation
                return nil, err
            }
            plugins = append(plugins, serviceadmit.NewExternalIPRanger(reject, admit))
        case saadmit.PluginName:
            // we need to set some custom parameters on the service account admission controller, so create that one by hand
            saAdmitter := saadmit.NewServiceAccount(internalclientset.FromUnversionedClient(kubeClient))
            saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
            saAdmitter.Run()
            plugins = append(plugins, saAdmitter)
        default:
            configFile, err := pluginconfig.GetPluginConfigFile(options.KubernetesMasterConfig.AdmissionConfig.PluginConfig, pluginName, server.AdmissionControlConfigFile)
            if err != nil {
                return nil, err
            }
            plugin := admission.InitPlugin(pluginName, internalclientset.FromUnversionedClient(kubeClient), configFile)
            if plugin != nil {
                plugins = append(plugins, plugin)
            }
        }
    }
    pluginInitializer.Initialize(plugins)
    // ensure that plugins have been properly initialized
    if err := oadmission.Validate(plugins); err != nil {
        return nil, err
    }
    admissionController := admission.NewChainHandler(plugins...)

    var proxyClientCerts []tls.Certificate
    if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
        clientCert, err := tls.LoadX509KeyPair(
            options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
            options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
        )
        if err != nil {
            return nil, err
        }
        proxyClientCerts = append(proxyClientCerts, clientCert)
    }

    // TODO: you have to know every APIGroup you're enabling or upstream will panic; its alternative to panicking is calling Fatal.
    // This needs a refactor to return errors.
    storageDestinations := genericapiserver.NewStorageDestinations()
    // storageVersions is a map from API group to allowed versions; each value must be a version exposed by the REST API or it breaks.
    // We need to fix the upstream to stop using the storage version as a preferred api version.
    storageVersions := map[string]string{}

    enabledKubeVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupKube)
    if len(enabledKubeVersions) > 0 {
        kubeStorageVersion := unversioned.GroupVersion{Group: configapi.APIGroupKube, Version: options.EtcdStorageConfig.KubernetesStorageVersion}
        databaseStorage, err := NewEtcdStorage(etcdClient, kubeStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
        if err != nil {
            return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
        }
        storageDestinations.AddAPIGroup(configapi.APIGroupKube, databaseStorage)
        storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion
    }

    // enable this if the extensions API is enabled (or batch or autoscaling, since they persist to extensions/v1beta1 for now)
    // TODO: replace this with a loop over configured storage versions
    extensionsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions)) > 0
    batchEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupBatch)) > 0
    autoscalingEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupAutoscaling)) > 0
    if extensionsEnabled || autoscalingEnabled || batchEnabled {
        // TODO: replace this with a configured storage version for extensions once configuration exposes this
        extensionsStorageVersion := unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"}
        databaseStorage, err := NewEtcdStorage(etcdClient, extensionsStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
        if err != nil {
            return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", err)
        }
        storageDestinations.AddAPIGroup(configapi.APIGroupExtensions, databaseStorage)
        storageVersions[configapi.APIGroupExtensions] = extensionsStorageVersion.String()
    }

    // Preserve previous behavior of using the first non-loopback address
    // TODO: Deprecate this behavior and just require a valid value to be passed in
    publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
    if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
        hostIP, err := knet.ChooseHostInterface()
        if err != nil {
            glog.Fatalf("Unable to find suitable network address. error='%v'. Set the masterIP directly to avoid this error.", err)
        }
        publicAddress = hostIP
        glog.Infof("Will report %v as public IP address.", publicAddress)
    }

    m := &master.Config{
        Config: &genericapiserver.Config{
            PublicAddress: publicAddress,
            ReadWritePort: port,

            Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
            AdmissionControl: admissionController,

            StorageDestinations: storageDestinations,
            StorageVersions:     storageVersions,

            ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
            ServiceNodePortRange:  server.ServiceNodePortRange,

            RequestContextMapper: requestContextMapper,

            APIGroupVersionOverrides: getAPIGroupVersionOverrides(options),
            APIPrefix:                KubeAPIPrefix,
            APIGroupPrefix:           KubeAPIGroupPrefix,

            MasterCount: options.KubernetesMasterConfig.MasterCount,

            // Set the TLS options for proxying to pods and services
            // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
            ProxyTLSClientConfig: &tls.Config{
                // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
                InsecureSkipVerify: true,
                Certificates:       proxyClientCerts,
            },

            Serializer: kapi.Codecs,
        },

        EventTTL: server.EventTTL,
        //MinRequestTimeout: server.MinRequestTimeout,

        KubeletClient: kubeletClient,

        EnableCoreControllers: true,
    }

    if options.DNSConfig != nil {
        _, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
        if err != nil {
            return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
        }
        dnsPort, err := strconv.Atoi(dnsPortStr)
        if err != nil {
            return nil, fmt.Errorf("invalid DNS port: %v", err)
        }
        m.ExtraServicePorts = append(m.ExtraServicePorts,
            kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
            kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
        )
        m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
            kapi.EndpointPort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP},
            kapi.EndpointPort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP},
        )
    }

    kmaster := &MasterConfig{
        Options:           *options.KubernetesMasterConfig,
        KubeClient:        kubeClient,
        Master:            m,
        ControllerManager: cmserver,
        CloudProvider:     cloud,
    }

    return kmaster, nil
}
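// Hypothetical call site for this revision, which now threads the Origin
// admission plugin initializer into the Kubernetes master; the wrapper name
// and its wiring are assumptions.
func newKubeMasterWithAdmission(options configapi.MasterConfig, kubeClient *kclient.Client, pluginInitializer oadmission.PluginInitializer) (*MasterConfig, error) {
    requestContextMapper := kapi.NewRequestContextMapper()
    return BuildKubernetesMasterConfig(options, requestContextMapper, kubeClient, pluginInitializer)
}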
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
    client, err := etcd.EtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
    etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
    if err != nil {
        return nil, fmt.Errorf("Error setting up server storage: %v", err)
    }

    restOptsGetter := restoptions.NewConfigGetter(options)

    clientCAs, err := configapi.GetClientCertCAPool(options)
    if err != nil {
        return nil, err
    }
    apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
    if err != nil {
        return nil, err
    }

    privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }
    privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
    if err != nil {
        return nil, err
    }

    customListerWatchers := shared.DefaultListerWatcherOverrides{}
    if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
        return nil, err
    }
    informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

    imageTemplate := variable.NewDefaultImageTemplate()
    imageTemplate.Format = options.ImageConfig.Format
    imageTemplate.Latest = options.ImageConfig.Latest

    requestContextMapper := kapi.NewRequestContextMapper()

    groupStorage, err := groupstorage.NewREST(restOptsGetter)
    if err != nil {
        return nil, err
    }
    groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
    projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
    clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)

    // in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
    admissionControlPluginNames := []string{
        "ProjectRequestLimit",
        "OriginNamespaceLifecycle",
        "PodNodeConstraints",
        "JenkinsBootstrapper",
        "BuildByStrategy",
        imageadmission.PluginName,
        quotaadmission.PluginName,
    }
    if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
        admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
    }

    quotaRegistry := quota.NewOriginQuotaRegistry(privilegedLoopbackOpenShiftClient)
    ruleResolver := rulevalidation.NewDefaultRuleResolver(
        informerFactory.Policies().Lister(),
        informerFactory.PolicyBindings().Lister(),
        informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
        informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
    )
    authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

    pluginInitializer := oadmission.PluginInitializer{
        OpenshiftClient:       privilegedLoopbackOpenShiftClient,
        ProjectCache:          projectCache,
        OriginQuotaRegistry:   quotaRegistry,
        Authorizer:            authorizer,
        JenkinsPipelineConfig: options.JenkinsPipelineConfig,
        RESTClientConfig:      *privilegedLoopbackClientConfig,
    }

    plugins := []admission.Interface{}
    clientsetClient := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
    for _, pluginName := range admissionControlPluginNames {
        configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
        if err != nil {
            return nil, err
        }
        plugin := admission.InitPlugin(pluginName, clientsetClient, configFile)
        if plugin != nil {
            plugins = append(plugins, plugin)
        }
    }
    pluginInitializer.Initialize(plugins)
    // ensure that plugins have been properly initialized
    if err := oadmission.Validate(plugins); err != nil {
        return nil, err
    }
    admissionController := admission.NewChainHandler(plugins...)

    // TODO: look up storage by resource
    serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
    if err != nil {
        return nil, err
    }

    authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
    if err != nil {
        return nil, err
    }

    plug, plugStart := newControllerPlug(options, client)

    config := &MasterConfig{
        Options: options,

        RESTOptionsGetter: restOptsGetter,

        RuleResolver:                  ruleResolver,
        Authenticator:                 authenticator,
        Authorizer:                    authorizer,
        AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

        GroupCache:                    groupCache,
        ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
        ProjectCache:                  projectCache,
        ClusterQuotaMappingController: clusterQuotaMappingController,

        RequestContextMapper: requestContextMapper,

        AdmissionControl: admissionController,

        TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

        ControllerPlug:      plug,
        ControllerPlugStart: plugStart,

        ImageFor:            imageTemplate.ExpandOrDie,
        EtcdHelper:          etcdHelper,
        KubeletClientConfig: kubeletClientConfig,

        ClientCAs:    clientCAs,
        APIClientCAs: apiClientCAs,

        PluginInitializer: pluginInitializer,

        PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
        PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
        PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

        Informers: informerFactory,
    }

    return config, nil
}
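// Distilled, illustrative form of the admission loop above. The helper name
// initAdmissionPlugins is an assumption; the pluginconfig/admission calls are
// the ones used in BuildMasterConfig, and the client parameter type mirrors
// the adapter returned by clientadapter.FromUnversionedClient (an assumption).
func initAdmissionPlugins(pluginNames []string, pluginConfig map[string]configapi.AdmissionPluginConfig, client internalclientset.Interface) ([]admission.Interface, error) {
    plugins := []admission.Interface{}
    for _, pluginName := range pluginNames {
        // Each plugin may carry its own config stanza, materialized to a config file path.
        configFile, err := pluginconfig.GetPluginConfig(pluginConfig[pluginName])
        if err != nil {
            return nil, err
        }
        // InitPlugin returns nil for unknown plugins, which are simply skipped.
        if plugin := admission.InitPlugin(pluginName, client, configFile); plugin != nil {
            plugins = append(plugins, plugin)
        }
    }
    return plugins, nil
}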
func newAdmissionChain(pluginNames []string, admissionConfigFilename string, pluginConfig map[string]configapi.AdmissionPluginConfig, options configapi.MasterConfig, kubeClientSet *internalclientset.Clientset, pluginInitializer oadmission.PluginInitializer) (admission.Interface, error) {
    plugins := []admission.Interface{}
    for _, pluginName := range pluginNames {
        switch pluginName {
        case lifecycle.PluginName:
            // We need to include our infrastructure and shared resource namespaces in the immortal namespaces list
            immortalNamespaces := sets.NewString(kapi.NamespaceDefault)
            if len(options.PolicyConfig.OpenShiftSharedResourcesNamespace) > 0 {
                immortalNamespaces.Insert(options.PolicyConfig.OpenShiftSharedResourcesNamespace)
            }
            if len(options.PolicyConfig.OpenShiftInfrastructureNamespace) > 0 {
                immortalNamespaces.Insert(options.PolicyConfig.OpenShiftInfrastructureNamespace)
            }
            plugins = append(plugins, lifecycle.NewLifecycle(kubeClientSet, immortalNamespaces))
        case serviceadmit.ExternalIPPluginName:
            // this needs to be moved upstream to be part of core config
            reject, admit, err := serviceadmit.ParseRejectAdmitCIDRRules(options.NetworkConfig.ExternalIPNetworkCIDRs)
            if err != nil {
                // should have been caught with validation
                return nil, err
            }
            plugins = append(plugins, serviceadmit.NewExternalIPRanger(reject, admit))
        case serviceadmit.RestrictedEndpointsPluginName:
            // we need to set some custom parameters, so create this one by hand
            restrictedNetworks, err := serviceadmit.ParseSimpleCIDRRules([]string{options.NetworkConfig.ClusterNetworkCIDR, options.NetworkConfig.ServiceNetworkCIDR})
            if err != nil {
                // should have been caught with validation
                return nil, err
            }
            plugins = append(plugins, serviceadmit.NewRestrictedEndpointsAdmission(restrictedNetworks))
        case saadmit.PluginName:
            // we need to set some custom parameters on the service account admission controller, so create that one by hand
            saAdmitter := saadmit.NewServiceAccount(kubeClientSet)
            saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
            saAdmitter.Run()
            plugins = append(plugins, saAdmitter)
        default:
            configFile, err := pluginconfig.GetPluginConfigFile(pluginConfig, pluginName, admissionConfigFilename)
            if err != nil {
                return nil, err
            }
            plugin := admission.InitPlugin(pluginName, kubeClientSet, configFile)
            if plugin != nil {
                plugins = append(plugins, plugin)
            }
        }
    }

    pluginInitializer.Initialize(plugins)
    // ensure that plugins have been properly initialized
    if err := oadmission.Validate(plugins); err != nil {
        return nil, err
    }
    return admission.NewChainHandler(plugins...), nil
}
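// A sketch of how newAdmissionChain might be invoked from the master builder;
// buildAdmissionControl and its parameter list are hypothetical.
func buildAdmissionControl(options configapi.MasterConfig, pluginNames []string, admissionConfigFile string, kubeClientSet *internalclientset.Clientset, pluginInitializer oadmission.PluginInitializer) (admission.Interface, error) {
    // The kube-side plugin config map is threaded through, matching the
    // per-plugin config lookup done in the default branch above.
    return newAdmissionChain(
        pluginNames,
        admissionConfigFile,
        options.KubernetesMasterConfig.AdmissionConfig.PluginConfig,
        options,
        kubeClientSet,
        pluginInitializer,
    )
}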
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, informers shared.InformerFactory, pluginInitializer oadmission.PluginInitializer) (*MasterConfig, error) {
    if options.KubernetesMasterConfig == nil {
        return nil, errors.New("insufficient information to build KubernetesMasterConfig")
    }

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)
    kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
    if err != nil {
        return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
    }

    // in-order list of plug-ins that should intercept admission decisions
    // TODO: Push node environment support to upstream in future

    _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, err
    }
    port, err := strconv.Atoi(portString)
    if err != nil {
        return nil, err
    }

    portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
    if err != nil {
        return nil, err
    }

    podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
    if err != nil {
        return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
    }

    // Defaults are tested in TestAPIServerDefaults
    server := apiserveroptions.NewAPIServer()
    // Adjust defaults
    server.EventTTL = 2 * time.Hour
    server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
    server.ServiceNodePortRange = *portRange
    server.AdmissionControl = strings.Join(AdmissionPlugins, ",")
    server.EnableLogsSupport = false // don't expose server logs
    server.EnableProfiling = false
    server.APIPrefix = KubeAPIPrefix
    server.APIGroupPrefix = KubeAPIGroupPrefix
    server.SecurePort = port
    server.MasterCount = options.KubernetesMasterConfig.MasterCount

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    if len(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride) > 0 {
        server.AdmissionControl = strings.Join(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride, ",")
    }

    // Defaults are tested in TestCMServerDefaults
    cmserver := cmapp.NewCMServer()
    // Adjust defaults
    cmserver.Address = "" // no healthz endpoint
    cmserver.Port = 0     // no healthz endpoint
    cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}
    cmserver.VolumeConfiguration.EnableDynamicProvisioning = options.VolumeConfig.DynamicProvisioningEnabled

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide
    // proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", cmserver.CloudProvider, cmserver.CloudConfigFile)
    }

    plugins := []admission.Interface{}
    for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
        switch pluginName {
        case lifecycle.PluginName:
            // We need to include our infrastructure and shared resource namespaces in the immortal namespaces list
            immortalNamespaces := sets.NewString(kapi.NamespaceDefault)
            if len(options.PolicyConfig.OpenShiftSharedResourcesNamespace) > 0 {
                immortalNamespaces.Insert(options.PolicyConfig.OpenShiftSharedResourcesNamespace)
            }
            if len(options.PolicyConfig.OpenShiftInfrastructureNamespace) > 0 {
                immortalNamespaces.Insert(options.PolicyConfig.OpenShiftInfrastructureNamespace)
            }
            plugins = append(plugins, lifecycle.NewLifecycle(clientadapter.FromUnversionedClient(kubeClient), immortalNamespaces))
        case serviceadmit.ExternalIPPluginName:
            // this needs to be moved upstream to be part of core config
            reject, admit, err := serviceadmit.ParseCIDRRules(options.NetworkConfig.ExternalIPNetworkCIDRs)
            if err != nil {
                // should have been caught with validation
                return nil, err
            }
            plugins = append(plugins, serviceadmit.NewExternalIPRanger(reject, admit))
        case saadmit.PluginName:
            // we need to set some custom parameters on the service account admission controller, so create that one by hand
            saAdmitter := saadmit.NewServiceAccount(clientadapter.FromUnversionedClient(kubeClient))
            saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
            saAdmitter.Run()
            plugins = append(plugins, saAdmitter)
        default:
            configFile, err := pluginconfig.GetPluginConfigFile(options.KubernetesMasterConfig.AdmissionConfig.PluginConfig, pluginName, server.AdmissionControlConfigFile)
            if err != nil {
                return nil, err
            }
            plugin := admission.InitPlugin(pluginName, clientadapter.FromUnversionedClient(kubeClient), configFile)
            if plugin != nil {
                plugins = append(plugins, plugin)
            }
        }
    }
    pluginInitializer.Initialize(plugins)
    // ensure that plugins have been properly initialized
    if err := oadmission.Validate(plugins); err != nil {
        return nil, err
    }
    admissionController := admission.NewChainHandler(plugins...)

    var proxyClientCerts []tls.Certificate
    if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
        clientCert, err := tls.LoadX509KeyPair(
            options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
            options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
        )
        if err != nil {
            return nil, err
        }
        proxyClientCerts = append(proxyClientCerts, clientCert)
    }

    resourceEncodingConfig := genericapiserver.NewDefaultResourceEncodingConfig()
    resourceEncodingConfig.SetVersionEncoding(
        kapi.GroupName,
        unversioned.GroupVersion{Group: kapi.GroupName, Version: options.EtcdStorageConfig.KubernetesStorageVersion},
        kapi.SchemeGroupVersion,
    )
    resourceEncodingConfig.SetVersionEncoding(
        extensions.GroupName,
        unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"},
        extensions.SchemeGroupVersion,
    )
    resourceEncodingConfig.SetVersionEncoding(
        batch.GroupName,
        unversioned.GroupVersion{Group: batch.GroupName, Version: "v1"},
        batch.SchemeGroupVersion,
    )
    resourceEncodingConfig.SetVersionEncoding(
        autoscaling.GroupName,
        unversioned.GroupVersion{Group: autoscaling.GroupName, Version: "v1"},
        autoscaling.SchemeGroupVersion,
    )

    etcdConfig := storagebackend.Config{
        Prefix:     options.EtcdStorageConfig.KubernetesStoragePrefix,
        ServerList: options.EtcdClientInfo.URLs,
        KeyFile:    options.EtcdClientInfo.ClientCert.KeyFile,
        CertFile:   options.EtcdClientInfo.ClientCert.CertFile,
        CAFile:     options.EtcdClientInfo.CA,
        DeserializationCacheSize: genericapiserveroptions.DefaultDeserializationCacheSize,
    }
    storageFactory := genericapiserver.NewDefaultStorageFactory(etcdConfig, "", kapi.Codecs, resourceEncodingConfig, master.DefaultAPIResourceConfigSource())
    // the order here is important, it defines which version will be used for storage
    storageFactory.AddCohabitatingResources(extensions.Resource("jobs"), batch.Resource("jobs"))
    storageFactory.AddCohabitatingResources(extensions.Resource("horizontalpodautoscalers"), autoscaling.Resource("horizontalpodautoscalers"))

    // Preserve previous behavior of using the first non-loopback address
    // TODO: Deprecate this behavior and just require a valid value to be passed in
    publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
    if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
        hostIP, err := knet.ChooseHostInterface()
        if err != nil {
            glog.Fatalf("Unable to find suitable network address. error='%v'. Set the masterIP directly to avoid this error.", err)
        }
        publicAddress = hostIP
        glog.Infof("Will report %v as public IP address.", publicAddress)
    }

    m := &master.Config{
        Config: &genericapiserver.Config{
            PublicAddress: publicAddress,
            ReadWritePort: port,

            Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
            AdmissionControl: admissionController,

            StorageFactory: storageFactory,

            ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
            ServiceNodePortRange:  server.ServiceNodePortRange,

            RequestContextMapper: requestContextMapper,

            APIResourceConfigSource: getAPIResourceConfig(options),
            APIPrefix:               server.APIPrefix,
            APIGroupPrefix:          server.APIGroupPrefix,

            MasterCount: server.MasterCount,

            // Set the TLS options for proxying to pods and services
            // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
            ProxyTLSClientConfig: &tls.Config{
                // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
                InsecureSkipVerify: true,
                Certificates:       proxyClientCerts,
            },

            Serializer: kapi.Codecs,

            EnableLogsSupport:         server.EnableLogsSupport,
            EnableProfiling:           server.EnableProfiling,
            EnableWatchCache:          server.EnableWatchCache,
            MasterServiceNamespace:    server.MasterServiceNamespace,
            ExternalHost:              server.ExternalHost,
            MinRequestTimeout:         server.MinRequestTimeout,
            KubernetesServiceNodePort: server.KubernetesServiceNodePort,
        },

        EventTTL: server.EventTTL,

        KubeletClient: kubeletClient,

        EnableCoreControllers: true,

        DeleteCollectionWorkers: server.DeleteCollectionWorkers,
    }

    if server.EnableWatchCache {
        cachesize.SetWatchCacheSizes(server.WatchCacheSizes)
    }

    if options.DNSConfig != nil {
        _, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
        if err != nil {
            return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
        }
        dnsPort, err := strconv.Atoi(dnsPortStr)
        if err != nil {
            return nil, fmt.Errorf("invalid DNS port: %v", err)
        }
        m.ExtraServicePorts = append(m.ExtraServicePorts,
            kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
            kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
        )
        m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
            kapi.EndpointPort{Name: "dns", Port: int32(dnsPort), Protocol: kapi.ProtocolUDP},
            kapi.EndpointPort{Name: "dns-tcp", Port: int32(dnsPort), Protocol: kapi.ProtocolTCP},
        )
    }

    kmaster := &MasterConfig{
        Options:           *options.KubernetesMasterConfig,
        KubeClient:        kubeClient,
        Master:            m,
        ControllerManager: cmserver,
        CloudProvider:     cloud,
        Informers:         informers,
    }

    return kmaster, nil
}
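// Illustrative composition of the two builders in this section, assuming an
// all-in-one startup path; startAllInOne is hypothetical, and the fields read
// from openshiftConfig mirror the MasterConfig populated by BuildMasterConfig.
func startAllInOne(options configapi.MasterConfig) error {
    openshiftConfig, err := BuildMasterConfig(options)
    if err != nil {
        return err
    }
    // The Kubernetes master reuses the Origin master's context mapper, loopback
    // client, shared informers, and admission plugin initializer.
    _, err = BuildKubernetesMasterConfig(
        options,
        openshiftConfig.RequestContextMapper,
        openshiftConfig.PrivilegedLoopbackKubernetesClient,
        openshiftConfig.Informers,
        openshiftConfig.PluginInitializer,
    )
    return err
}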