// BuildSerializeableKubeMasterConfig creates a fully specified kubernetes master startup configuration based on MasterArgs
func (args MasterArgs) BuildSerializeableKubeMasterConfig() (*configapi.KubernetesMasterConfig, error) {
	servicesSubnet := net.IPNet(args.PortalNet)

	masterAddr, err := args.GetMasterAddress()
	if err != nil {
		return nil, err
	}
	masterHost, _, err := net.SplitHostPort(masterAddr.Host)
	if err != nil {
		masterHost = masterAddr.Host
	}
	masterIP := ""
	if ip := net.ParseIP(masterHost); ip != nil {
		masterIP = ip.String()
	}

	staticNodeList := util.NewStringSet(args.NodeList...)
	staticNodeList.Delete("")

	config := &configapi.KubernetesMasterConfig{
		MasterIP:            masterIP,
		ServicesSubnet:      servicesSubnet.String(),
		StaticNodeNames:     staticNodeList.List(),
		SchedulerConfigFile: args.SchedulerConfigFile,
	}

	return config, nil
}
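The fallback around net.SplitHostPort above is easy to miss: when the master address carries no explicit port, SplitHostPort returns an error and the raw host is used as-is. A minimal, standalone sketch of that pattern (addresses here are only illustrative):

package main

import (
	"fmt"
	"net"
)

// hostOnly extracts the host part of an address, tolerating a missing port.
func hostOnly(addr string) string {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		// No port present (or otherwise unparseable); fall back to the raw value.
		return addr
	}
	return host
}

func main() {
	fmt.Println(hostOnly("10.0.0.1:8443")) // 10.0.0.1
	fmt.Println(hostOnly("10.0.0.1"))      // 10.0.0.1 (SplitHostPort errors, fallback used)
}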
// getIPAMConfLibnetwork returns the Libnetwork specific IPAM configuration.
func (d *Daemon) getIPAMConfLibnetwork(ln ipam.IPAMReq) (*ipam.IPAMConfigRep, error) {
	if ln.RequestPoolRequest != nil {
		var poolID, pool, gw string
		if ln.RequestPoolRequest.V6 == false {
			poolID = ipam.LibnetworkDefaultPoolV4
			pool = ipam.LibnetworkDummyV4AllocPool
			gw = ipam.LibnetworkDummyV4Gateway
		} else {
			subnetGo := net.IPNet(d.ipamConf.IPAMConfig.Subnet)
			poolID = ipam.LibnetworkDefaultPoolV6
			pool = subnetGo.String()
			gw = d.ipamConf.IPAMConfig.Gateway.String() + "/128"
		}
		return &ipam.IPAMConfigRep{
			RequestPoolResponse: &lnAPI.RequestPoolResponse{
				PoolID: poolID,
				Pool:   pool,
				Data: map[string]string{
					"com.docker.network.gateway": gw,
				},
			},
		}, nil
	}

	ciliumV6Routes := []ipam.Route{}
	for _, r := range d.ipamConf.IPAMConfig.Routes {
		if r.Dst.IP.To4() == nil {
			ciliumRoute := ipam.NewRoute(r.Dst, r.GW)
			ciliumV6Routes = append(ciliumV6Routes, *ciliumRoute)
		}
	}

	rep := &ipam.IPAMConfigRep{
		IPAMConfig: &ipam.IPAMRep{
			IP6: &ipam.IPConfig{
				Gateway: d.ipamConf.IPAMConfig.Gateway,
				Routes:  ciliumV6Routes,
			},
		},
	}

	if d.conf.IPv4Enabled {
		ciliumV4Routes := []ipam.Route{}
		for _, r := range d.ipamConf.IPAMConfig.Routes {
			if r.Dst.IP.To4() != nil {
				ciliumRoute := ipam.NewRoute(r.Dst, r.GW)
				ciliumV4Routes = append(ciliumV4Routes, *ciliumRoute)
			}
		}
		rep.IPAMConfig.IP4 = &ipam.IPConfig{
			Gateway: d.conf.NodeAddress.IPv4Address.IP(),
			Routes:  ciliumV4Routes,
		}
	}

	return rep, nil
}
func (r *Route) UnmarshalJSON(data []byte) error {
	rt := route{}
	if err := json.Unmarshal(data, &rt); err != nil {
		return err
	}
	r.Dst = net.IPNet(rt.Dst)
	r.GW = rt.GW
	return nil
}
func (c *IPConfig) UnmarshalJSON(data []byte) error {
	ipc := ipConfig{}
	if err := json.Unmarshal(data, &ipc); err != nil {
		return err
	}
	c.IP = net.IPNet(ipc.IP)
	c.Gateway = ipc.Gateway
	c.Routes = ipc.Routes
	return nil
}
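Both UnmarshalJSON methods above follow the same pattern: decode into an unexported mirror struct, then convert the fields back onto the exported type (here via net.IPNet(...)). A minimal, self-contained sketch of that pattern with hypothetical types, not the ones used above:

package main

import (
	"encoding/json"
	"fmt"
	"net"
)

// Endpoint is the exported type; its subnet arrives on the wire as a CIDR string.
type Endpoint struct {
	IP     net.IP
	Subnet net.IPNet
}

// endpoint mirrors Endpoint with JSON-friendly field types and is used only while decoding.
type endpoint struct {
	IP     net.IP `json:"ip"`
	Subnet string `json:"subnet"`
}

func (e *Endpoint) UnmarshalJSON(data []byte) error {
	raw := endpoint{}
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	_, subnet, err := net.ParseCIDR(raw.Subnet)
	if err != nil {
		return err
	}
	// Copy field-by-field back onto the exported type, as the snippets above do.
	e.IP = raw.IP
	e.Subnet = *subnet
	return nil
}

func main() {
	var e Endpoint
	if err := json.Unmarshal([]byte(`{"ip":"10.0.0.2","subnet":"10.0.0.0/24"}`), &e); err != nil {
		panic(err)
	}
	fmt.Println(e.IP, e.Subnet.String()) // 10.0.0.2 10.0.0.0/24
}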
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) { if options.KubernetesMasterConfig == nil { return nil, errors.New("insufficient information to build KubernetesMasterConfig") } // Connect and setup etcd interfaces etcdClient, err := etcd.EtcdClient(options.EtcdClientInfo) if err != nil { return nil, err } kubeletClientConfig := configapi.GetKubeletClientConfig(options) kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig) if err != nil { return nil, fmt.Errorf("unable to configure Kubelet client: %v", err) } // in-order list of plug-ins that should intercept admission decisions // TODO: Push node environment support to upstream in future _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress) if err != nil { return nil, err } port, err := strconv.Atoi(portString) if err != nil { return nil, err } portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange) if err != nil { return nil, err } podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout) if err != nil { return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err) } server := app.NewAPIServer() server.EventTTL = 2 * time.Hour server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet)) server.ServiceNodePortRange = *portRange server.AdmissionControl = strings.Join(AdmissionPlugins, ",") // resolve extended arguments // TODO: this should be done in config validation (along with the above) so we can provide // proper errors if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 { return nil, kerrors.NewAggregate(err) } cmserver := cmapp.NewCMServer() cmserver.PodEvictionTimeout = podEvictionTimeout // resolve extended arguments // TODO: this should be done in config validation (along with the above) so we can provide // proper errors if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 { return nil, kerrors.NewAggregate(err) } cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile) if err != nil { return nil, err } if cloud != nil { glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile) } plugins := []admission.Interface{} for _, pluginName := range strings.Split(server.AdmissionControl, ",") { switch pluginName { case saadmit.PluginName: // we need to set some custom parameters on the service account admission controller, so create that one by hand saAdmitter := saadmit.NewServiceAccount(kubeClient) saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences saAdmitter.Run() plugins = append(plugins, saAdmitter) default: plugin := admission.InitPlugin(pluginName, kubeClient, server.AdmissionControlConfigFile) if plugin != nil { plugins = append(plugins, plugin) } } } admissionController := admission.NewChainHandler(plugins...) 
var proxyClientCerts []tls.Certificate if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 { clientCert, err := tls.LoadX509KeyPair( options.KubernetesMasterConfig.ProxyClientInfo.CertFile, options.KubernetesMasterConfig.ProxyClientInfo.KeyFile, ) if err != nil { return nil, err } proxyClientCerts = append(proxyClientCerts, clientCert) } // TODO you have to know every APIGroup you're enabling or upstream will panic. It's alternative to panicing is Fataling // It needs a refactor to return errors storageDestinations := master.NewStorageDestinations() // storageVersions is a map from API group to allowed versions that must be a version exposed by the REST API or it breaks. // We need to fix the upstream to stop using the storage version as a preferred api version. storageVersions := map[string]string{} enabledKubeVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupKube) enabledKubeVersionSet := sets.NewString(enabledKubeVersions...) if len(enabledKubeVersions) > 0 { databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesForLegacyGroup, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix) if err != nil { return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err) } storageDestinations.AddAPIGroup(configapi.APIGroupKube, databaseStorage) storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion } enabledExtensionsVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions) if len(enabledExtensionsVersions) > 0 { groupMeta, err := kapilatest.Group(configapi.APIGroupExtensions) if err != nil { return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", err) } // TODO expose storage version options for api groups databaseStorage, err := master.NewEtcdStorage(etcdClient, groupMeta.InterfacesFor, groupMeta.GroupVersion, options.EtcdStorageConfig.KubernetesStoragePrefix) if err != nil { return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", err) } storageDestinations.AddAPIGroup(configapi.APIGroupExtensions, databaseStorage) storageVersions[configapi.APIGroupExtensions] = enabledExtensionsVersions[0] } m := &master.Config{ PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP), ReadWritePort: port, StorageDestinations: storageDestinations, StorageVersions: storageVersions, EventTTL: server.EventTTL, //MinRequestTimeout: server.MinRequestTimeout, ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange), ServiceNodePortRange: server.ServiceNodePortRange, RequestContextMapper: requestContextMapper, KubeletClient: kubeletClient, APIPrefix: KubeAPIPrefix, APIGroupPrefix: KubeAPIGroupPrefix, EnableCoreControllers: true, MasterCount: options.KubernetesMasterConfig.MasterCount, Authorizer: apiserver.NewAlwaysAllowAuthorizer(), AdmissionControl: admissionController, EnableExp: len(enabledExtensionsVersions) > 0, DisableV1: !enabledKubeVersionSet.Has("v1"), // Set the TLS options for proxying to pods and services // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname) ProxyTLSClientConfig: &tls.Config{ // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs InsecureSkipVerify: true, Certificates: proxyClientCerts, }, } // set for consistency -- Origin only used m.EnableExp 
cmserver.EnableExperimental = m.EnableExp if options.DNSConfig != nil { _, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress) if err != nil { return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err) } dnsPort, err := strconv.Atoi(dnsPortStr) if err != nil { return nil, fmt.Errorf("invalid DNS port: %v", err) } m.ExtraServicePorts = append(m.ExtraServicePorts, kapi.ServicePort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP, TargetPort: util.NewIntOrStringFromInt(dnsPort)}, kapi.ServicePort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP, TargetPort: util.NewIntOrStringFromInt(dnsPort)}, ) m.ExtraEndpointPorts = append(m.ExtraEndpointPorts, kapi.EndpointPort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP}, kapi.EndpointPort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP}, ) } kmaster := &MasterConfig{ Options: *options.KubernetesMasterConfig, KubeClient: kubeClient, Master: m, ControllerManager: cmserver, CloudProvider: cloud, } return kmaster, nil }
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) { if options.KubernetesMasterConfig == nil { return nil, errors.New("insufficient information to build KubernetesMasterConfig") } // Connect and setup etcd interfaces etcdClient, err := etcd.EtcdClient(options.EtcdClientInfo) if err != nil { return nil, err } databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesFor, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix) if err != nil { return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err) } kubeletClientConfig := configapi.GetKubeletClientConfig(options) kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig) if err != nil { return nil, fmt.Errorf("unable to configure Kubelet client: %v", err) } // in-order list of plug-ins that should intercept admission decisions // TODO: Push node environment support to upstream in future _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress) if err != nil { return nil, err } port, err := strconv.Atoi(portString) if err != nil { return nil, err } portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange) if err != nil { return nil, err } podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout) if err != nil { return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err) } server := app.NewAPIServer() server.EventTTL = 2 * time.Hour server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet)) server.ServiceNodePortRange = *portRange server.AdmissionControl = strings.Join(AdmissionPlugins, ",") // resolve extended arguments // TODO: this should be done in config validation (along with the above) so we can provide // proper errors if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 { return nil, kerrors.NewAggregate(err) } cmserver := cmapp.NewCMServer() cmserver.PodEvictionTimeout = podEvictionTimeout // resolve extended arguments // TODO: this should be done in config validation (along with the above) so we can provide // proper errors if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 { return nil, kerrors.NewAggregate(err) } cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile) if err != nil { return nil, err } plugins := []admission.Interface{} for _, pluginName := range strings.Split(server.AdmissionControl, ",") { switch pluginName { case saadmit.PluginName: // we need to set some custom parameters on the service account admission controller, so create that one by hand saAdmitter := saadmit.NewServiceAccount(kubeClient) saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences saAdmitter.Run() plugins = append(plugins, saAdmitter) default: plugin := admission.InitPlugin(pluginName, kubeClient, server.AdmissionControlConfigFile) if plugin != nil { plugins = append(plugins, plugin) } } } admissionController := admission.NewChainHandler(plugins...) 
var proxyClientCerts []tls.Certificate if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 { clientCert, err := tls.LoadX509KeyPair( options.KubernetesMasterConfig.ProxyClientInfo.CertFile, options.KubernetesMasterConfig.ProxyClientInfo.KeyFile, ) if err != nil { return nil, err } proxyClientCerts = append(proxyClientCerts, clientCert) } m := &master.Config{ PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP), ReadWritePort: port, DatabaseStorage: databaseStorage, ExpDatabaseStorage: databaseStorage, EventTTL: server.EventTTL, //MinRequestTimeout: server.MinRequestTimeout, ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange), ServiceNodePortRange: server.ServiceNodePortRange, RequestContextMapper: requestContextMapper, KubeletClient: kubeletClient, APIPrefix: KubeAPIPrefix, EnableCoreControllers: true, MasterCount: options.KubernetesMasterConfig.MasterCount, Authorizer: apiserver.NewAlwaysAllowAuthorizer(), AdmissionControl: admissionController, EnableV1Beta3: configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1beta3"), DisableV1: !configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1"), // Set the TLS options for proxying to pods and services // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname) ProxyTLSClientConfig: &tls.Config{ // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs InsecureSkipVerify: true, Certificates: proxyClientCerts, }, } kmaster := &MasterConfig{ Options: *options.KubernetesMasterConfig, KubeClient: kubeClient, Master: m, ControllerManager: cmserver, CloudProvider: cloud, } return kmaster, nil }
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, pluginInitializer oadmission.PluginInitializer) (*MasterConfig, error) { if options.KubernetesMasterConfig == nil { return nil, errors.New("insufficient information to build KubernetesMasterConfig") } // Connect and setup etcd interfaces etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo) if err != nil { return nil, err } kubeletClientConfig := configapi.GetKubeletClientConfig(options) kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig) if err != nil { return nil, fmt.Errorf("unable to configure Kubelet client: %v", err) } // in-order list of plug-ins that should intercept admission decisions // TODO: Push node environment support to upstream in future _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress) if err != nil { return nil, err } port, err := strconv.Atoi(portString) if err != nil { return nil, err } portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange) if err != nil { return nil, err } podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout) if err != nil { return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err) } // Defaults are tested in TestAPIServerDefaults server := apiserveroptions.NewAPIServer() // Adjust defaults server.EventTTL = 2 * time.Hour server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet)) server.ServiceNodePortRange = *portRange server.AdmissionControl = strings.Join(AdmissionPlugins, ",") server.EnableLogsSupport = false // don't expose server logs // resolve extended arguments // TODO: this should be done in config validation (along with the above) so we can provide // proper errors if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 { return nil, kerrors.NewAggregate(err) } if len(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride) > 0 { server.AdmissionControl = strings.Join(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride, ",") } // Defaults are tested in TestCMServerDefaults cmserver := cmapp.NewCMServer() // Adjust defaults cmserver.Address = "" // no healthz endpoint cmserver.Port = 0 // no healthz endpoint cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout} // resolve extended arguments // TODO: this should be done in config validation (along with the above) so we can provide // proper errors if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 { return nil, kerrors.NewAggregate(err) } cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile) if err != nil { return nil, err } if cloud != nil { glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile) } plugins := []admission.Interface{} for _, pluginName := range strings.Split(server.AdmissionControl, ",") { switch pluginName { case serviceadmit.ExternalIPPluginName: // this needs to be moved upstream to be part of core config reject, admit, err := serviceadmit.ParseCIDRRules(options.NetworkConfig.ExternalIPNetworkCIDRs) if err != nil { // should have been caught with validation return nil, err } plugins = append(plugins, serviceadmit.NewExternalIPRanger(reject, admit)) 
case saadmit.PluginName: // we need to set some custom parameters on the service account admission controller, so create that one by hand saAdmitter := saadmit.NewServiceAccount(internalclientset.FromUnversionedClient(kubeClient)) saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences saAdmitter.Run() plugins = append(plugins, saAdmitter) default: configFile, err := pluginconfig.GetPluginConfigFile(options.KubernetesMasterConfig.AdmissionConfig.PluginConfig, pluginName, server.AdmissionControlConfigFile) if err != nil { return nil, err } plugin := admission.InitPlugin(pluginName, internalclientset.FromUnversionedClient(kubeClient), configFile) if plugin != nil { plugins = append(plugins, plugin) } } } pluginInitializer.Initialize(plugins) // ensure that plugins have been properly initialized if err := oadmission.Validate(plugins); err != nil { return nil, err } admissionController := admission.NewChainHandler(plugins...) var proxyClientCerts []tls.Certificate if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 { clientCert, err := tls.LoadX509KeyPair( options.KubernetesMasterConfig.ProxyClientInfo.CertFile, options.KubernetesMasterConfig.ProxyClientInfo.KeyFile, ) if err != nil { return nil, err } proxyClientCerts = append(proxyClientCerts, clientCert) } // TODO you have to know every APIGroup you're enabling or upstream will panic. It's alternative to panicing is Fataling // It needs a refactor to return errors storageDestinations := genericapiserver.NewStorageDestinations() // storageVersions is a map from API group to allowed versions that must be a version exposed by the REST API or it breaks. // We need to fix the upstream to stop using the storage version as a preferred api version. storageVersions := map[string]string{} enabledKubeVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupKube) if len(enabledKubeVersions) > 0 { kubeStorageVersion := unversioned.GroupVersion{Group: configapi.APIGroupKube, Version: options.EtcdStorageConfig.KubernetesStorageVersion} databaseStorage, err := NewEtcdStorage(etcdClient, kubeStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix) if err != nil { return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err) } storageDestinations.AddAPIGroup(configapi.APIGroupKube, databaseStorage) storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion } // enable this if extensions API is enabled (or batch or autoscaling, since they persist to extensions/v1beta1 for now) // TODO: replace this with a loop over configured storage versions extensionsEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions)) > 0 batchEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupBatch)) > 0 autoscalingEnabled := len(configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupAutoscaling)) > 0 if extensionsEnabled || autoscalingEnabled || batchEnabled { // TODO: replace this with a configured storage version for extensions once configuration exposes this extensionsStorageVersion := unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"} databaseStorage, err := NewEtcdStorage(etcdClient, extensionsStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix) if err != nil { return nil, fmt.Errorf("Error setting up Kubernetes extensions server storage: %v", 
err) } storageDestinations.AddAPIGroup(configapi.APIGroupExtensions, databaseStorage) storageVersions[configapi.APIGroupExtensions] = extensionsStorageVersion.String() } // Preserve previous behavior of using the first non-loopback address // TODO: Deprecate this behavior and just require a valid value to be passed in publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP) if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() { hostIP, err := knet.ChooseHostInterface() if err != nil { glog.Fatalf("Unable to find suitable network address.error='%v'. Set the masterIP directly to avoid this error.", err) } publicAddress = hostIP glog.Infof("Will report %v as public IP address.", publicAddress) } m := &master.Config{ Config: &genericapiserver.Config{ PublicAddress: publicAddress, ReadWritePort: port, Authorizer: apiserver.NewAlwaysAllowAuthorizer(), AdmissionControl: admissionController, StorageDestinations: storageDestinations, StorageVersions: storageVersions, ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange), ServiceNodePortRange: server.ServiceNodePortRange, RequestContextMapper: requestContextMapper, APIGroupVersionOverrides: getAPIGroupVersionOverrides(options), APIPrefix: KubeAPIPrefix, APIGroupPrefix: KubeAPIGroupPrefix, MasterCount: options.KubernetesMasterConfig.MasterCount, // Set the TLS options for proxying to pods and services // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname) ProxyTLSClientConfig: &tls.Config{ // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs InsecureSkipVerify: true, Certificates: proxyClientCerts, }, Serializer: kapi.Codecs, }, EventTTL: server.EventTTL, //MinRequestTimeout: server.MinRequestTimeout, KubeletClient: kubeletClient, EnableCoreControllers: true, } if options.DNSConfig != nil { _, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress) if err != nil { return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err) } dnsPort, err := strconv.Atoi(dnsPortStr) if err != nil { return nil, fmt.Errorf("invalid DNS port: %v", err) } m.ExtraServicePorts = append(m.ExtraServicePorts, kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)}, kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)}, ) m.ExtraEndpointPorts = append(m.ExtraEndpointPorts, kapi.EndpointPort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP}, kapi.EndpointPort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP}, ) } kmaster := &MasterConfig{ Options: *options.KubernetesMasterConfig, KubeClient: kubeClient, Master: m, ControllerManager: cmserver, CloudProvider: cloud, } return kmaster, nil }
func main() {
	flag.Parse()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()
	verifyPortalFlags()

	if (*etcdConfigFile != "" && len(etcdServerList) != 0) || (*etcdConfigFile == "" && len(etcdServerList) == 0) {
		glog.Fatalf("specify either -etcd_servers or -etcd_config")
	}

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: *allowPrivileged,
	})

	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)

	kubeletClient, err := client.NewKubeletClient(&kubeletConfig)
	if err != nil {
		glog.Fatalf("Failure to start kubelet client: %v", err)
	}

	_, v1beta3 := runtimeConfig["api/v1beta3"]

	// TODO: expose same flags as client.BindClientConfigFlags but for a server
	clientConfig := &client.Config{
		Host:    net.JoinHostPort(address.String(), strconv.Itoa(int(*port))),
		Version: *storageVersion,
	}
	client, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	helper, err := newEtcd(*etcdConfigFile, etcdServerList)
	if err != nil {
		glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
	}

	n := net.IPNet(portalNet)

	authenticator, err := apiserver.NewAuthenticatorFromTokenFile(*tokenAuthFile)
	if err != nil {
		glog.Fatalf("Invalid Authentication Config: %v", err)
	}

	authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(*authorizationMode, *authorizationPolicyFile)
	if err != nil {
		glog.Fatalf("Invalid Authorization Config: %v", err)
	}

	admissionControlPluginNames := strings.Split(*admissionControl, ",")
	admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, *admissionControlConfigFile)

	config := &master.Config{
		Client:                 client,
		Cloud:                  cloud,
		EtcdHelper:             helper,
		HealthCheckMinions:     *healthCheckMinions,
		EventTTL:               *eventTTL,
		KubeletClient:          kubeletClient,
		PortalNet:              &n,
		EnableLogsSupport:      *enableLogsSupport,
		EnableUISupport:        true,
		EnableSwaggerSupport:   true,
		APIPrefix:              *apiPrefix,
		CorsAllowedOriginList:  corsAllowedOriginList,
		ReadOnlyPort:           *readOnlyPort,
		ReadWritePort:          *port,
		PublicAddress:          *publicAddressOverride,
		Authenticator:          authenticator,
		Authorizer:             authorizer,
		AdmissionControl:       admissionController,
		EnableV1Beta3:          v1beta3,
		MasterServiceNamespace: *masterServiceNamespace,
	}
	m := master.New(config)

	// We serve on 3 ports. See docs/reaching_the_api.md
	roLocation := ""
	if *readOnlyPort != 0 {
		roLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(config.ReadOnlyPort))
	}
	secureLocation := ""
	if *securePort != 0 {
		secureLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(*securePort))
	}
	rwLocation := net.JoinHostPort(address.String(), strconv.Itoa(int(*port)))

	// See the flag commentary to understand our assumptions when opening the read-only and read-write ports.

	if roLocation != "" {
		// Allow 1 read-only request per second, allow up to 20 in a burst before enforcing.
		rl := util.NewTokenBucketRateLimiter(1.0, 20)
		readOnlyServer := &http.Server{
			Addr:           roLocation,
			Handler:        apiserver.RecoverPanics(apiserver.ReadOnly(apiserver.RateLimit(rl, m.InsecureHandler))),
			ReadTimeout:    5 * time.Minute,
			WriteTimeout:   5 * time.Minute,
			MaxHeaderBytes: 1 << 20,
		}
		glog.Infof("Serving read-only insecurely on %s", roLocation)
		go func() {
			defer util.HandleCrash()
			for {
				if err := readOnlyServer.ListenAndServe(); err != nil {
					glog.Errorf("Unable to listen for read only traffic (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}

	if secureLocation != "" {
		secureServer := &http.Server{
			Addr:           secureLocation,
			Handler:        apiserver.RecoverPanics(m.Handler),
			ReadTimeout:    5 * time.Minute,
			WriteTimeout:   5 * time.Minute,
			MaxHeaderBytes: 1 << 20,
			TLSConfig: &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
				// Populate PeerCertificates in requests, but don't reject connections without certificates
				// This allows certificates to be validated by authenticators, while still allowing other auth types
				ClientAuth: tls.RequestClientCert,
			},
		}
		glog.Infof("Serving securely on %s", secureLocation)
		go func() {
			defer util.HandleCrash()
			for {
				if *tlsCertFile == "" && *tlsPrivateKeyFile == "" {
					*tlsCertFile = "/var/run/kubernetes/apiserver.crt"
					*tlsPrivateKeyFile = "/var/run/kubernetes/apiserver.key"
					if err := util.GenerateSelfSignedCert(config.PublicAddress, *tlsCertFile, *tlsPrivateKeyFile); err != nil {
						glog.Errorf("Unable to generate self signed cert: %v", err)
					} else {
						glog.Infof("Using self-signed cert (%s, %s)", *tlsCertFile, *tlsPrivateKeyFile)
					}
				}
				if err := secureServer.ListenAndServeTLS(*tlsCertFile, *tlsPrivateKeyFile); err != nil {
					glog.Errorf("Unable to listen for secure (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}

	s := &http.Server{
		Addr:           rwLocation,
		Handler:        apiserver.RecoverPanics(m.InsecureHandler),
		ReadTimeout:    5 * time.Minute,
		WriteTimeout:   5 * time.Minute,
		MaxHeaderBytes: 1 << 20,
	}
	glog.Infof("Serving insecurely on %s", rwLocation)
	glog.Fatal(s.ListenAndServe())
}
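The serving loops above (and in the later APIServer.Run variants) all use the same pattern: start each listener in its own goroutine and, if ListenAndServe ever returns, log the error and retry after a delay instead of exiting. A stripped-down sketch of that retry loop using only net/http (address and delay are illustrative):

package main

import (
	"log"
	"net/http"
	"time"
)

// serveWithRetry keeps an HTTP server listening, retrying after transient failures.
func serveWithRetry(srv *http.Server, retryDelay time.Duration) {
	go func() {
		for {
			// ListenAndServe only returns on error (e.g. the port is taken);
			// log it and try again after a pause instead of exiting.
			if err := srv.ListenAndServe(); err != nil {
				log.Printf("unable to listen on %s (%v); will try again", srv.Addr, err)
			}
			time.Sleep(retryDelay)
		}
	}()
}

func main() {
	srv := &http.Server{Addr: "127.0.0.1:8080", Handler: http.NotFoundHandler()}
	serveWithRetry(srv, 15*time.Second)
	select {} // block forever, as the real servers do behind their secure listener
}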
// Run runs the specified APIServer. This should never exit. func (s *APIServer) Run(_ []string) error { s.verifyClusterIPFlags() // If advertise-address is not specified, use bind-address. If bind-address // is also unset (or 0.0.0.0), setDefaults() in pkg/master/master.go will // do the right thing and use the host's default interface. if s.AdvertiseAddress == nil || net.IP(s.AdvertiseAddress).IsUnspecified() { s.AdvertiseAddress = s.BindAddress } if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) { glog.Fatalf("specify either --etcd-servers or --etcd-config") } capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: s.AllowPrivileged, // TODO(vmarmol): Implement support for HostNetworkSources. HostNetworkSources: []string{}, }) cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) } kubeletClient, err := client.NewKubeletClient(&s.KubeletConfig) if err != nil { glog.Fatalf("Failure to start kubelet client: %v", err) } // "api/all=false" allows users to selectively enable specific api versions. disableAllAPIs := false allAPIFlagValue, ok := s.RuntimeConfig["api/all"] if ok && allAPIFlagValue == "false" { disableAllAPIs = true } // "api/legacy=false" allows users to disable legacy api versions. disableLegacyAPIs := false legacyAPIFlagValue, ok := s.RuntimeConfig["api/legacy"] if ok && legacyAPIFlagValue == "false" { disableLegacyAPIs = true } _ = disableLegacyAPIs // hush the compiler while we don't have legacy APIs to disable. // v1beta3 is disabled by default. Users can enable it using "api/v1beta3=true" enableV1beta3 := s.getRuntimeConfigValue("api/v1beta3", false) // "api/v1={true|false} allows users to enable/disable v1 API. // This takes preference over api/all and api/legacy, if specified. disableV1 := disableAllAPIs disableV1 = !s.getRuntimeConfigValue("api/v1", !disableV1) // TODO: expose same flags as client.BindClientConfigFlags but for a server clientConfig := &client.Config{ Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)), Version: s.StorageVersion, } client, err := client.New(clientConfig) if err != nil { glog.Fatalf("Invalid server address: %v", err) } helper, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, s.StorageVersion, s.EtcdPathPrefix) if err != nil { glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err) } // TODO Is this the right place for migration to happen? Must *both* old and // new etcd prefix params be supplied for this to be valid? 
if s.OldEtcdPathPrefix != "" { if err = helper.MigrateKeys(s.OldEtcdPathPrefix); err != nil { glog.Fatalf("Migration of old etcd keys failed: %v", err) } } n := net.IPNet(s.ServiceClusterIPRange) // Default to the private server key for service account token signing if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" { if apiserver.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) { s.ServiceAccountKeyFile = s.TLSPrivateKeyFile } else { glog.Warning("no RSA key provided, service account token authentication disabled") } } authenticator, err := apiserver.NewAuthenticator(s.BasicAuthFile, s.ClientCAFile, s.TokenAuthFile, s.ServiceAccountKeyFile, s.ServiceAccountLookup, helper) if err != nil { glog.Fatalf("Invalid Authentication Config: %v", err) } authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(s.AuthorizationMode, s.AuthorizationPolicyFile) if err != nil { glog.Fatalf("Invalid Authorization Config: %v", err) } admissionControlPluginNames := strings.Split(s.AdmissionControl, ",") admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile) if len(s.ExternalHost) == 0 { // TODO: extend for other providers if s.CloudProvider == "gce" { instances, supported := cloud.Instances() if !supported { glog.Fatalf("gce cloud provider has no instances. this shouldn't happen. exiting.") } name, err := os.Hostname() if err != nil { glog.Fatalf("failed to get hostname: %v", err) } addrs, err := instances.NodeAddresses(name) if err != nil { glog.Warningf("unable to obtain external host address from cloud provider: %v", err) } else { for _, addr := range addrs { if addr.Type == api.NodeExternalIP { s.ExternalHost = addr.Address } } } } } var installSSH master.InstallSSHKey if cloud != nil { if instances, supported := cloud.Instances(); supported { installSSH = instances.AddSSHKeyToAllInstances } } config := &master.Config{ EtcdHelper: helper, EventTTL: s.EventTTL, KubeletClient: kubeletClient, ServiceClusterIPRange: &n, EnableCoreControllers: true, EnableLogsSupport: s.EnableLogsSupport, EnableUISupport: true, EnableSwaggerSupport: true, EnableProfiling: s.EnableProfiling, EnableIndex: true, APIPrefix: s.APIPrefix, CorsAllowedOriginList: s.CorsAllowedOriginList, ReadWritePort: s.SecurePort, PublicAddress: net.IP(s.AdvertiseAddress), Authenticator: authenticator, SupportsBasicAuth: len(s.BasicAuthFile) > 0, Authorizer: authorizer, AdmissionControl: admissionController, EnableV1Beta3: enableV1beta3, DisableV1: disableV1, MasterServiceNamespace: s.MasterServiceNamespace, ClusterName: s.ClusterName, ExternalHost: s.ExternalHost, MinRequestTimeout: s.MinRequestTimeout, SSHUser: s.SSHUser, SSHKeyfile: s.SSHKeyfile, InstallSSHKey: installSSH, ServiceNodePortRange: s.ServiceNodePortRange, } m := master.New(config) // We serve on 2 ports. See docs/accessing_the_api.md secureLocation := "" if s.SecurePort != 0 { secureLocation = net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.SecurePort)) } insecureLocation := net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)) // See the flag commentary to understand our assumptions when opening the read-only and read-write ports. 
var sem chan bool if s.MaxRequestsInFlight > 0 { sem = make(chan bool, s.MaxRequestsInFlight) } longRunningRE := regexp.MustCompile(s.LongRunningRequestRE) if secureLocation != "" { secureServer := &http.Server{ Addr: secureLocation, Handler: apiserver.MaxInFlightLimit(sem, longRunningRE, apiserver.RecoverPanics(m.Handler)), ReadTimeout: ReadWriteTimeout, WriteTimeout: ReadWriteTimeout, MaxHeaderBytes: 1 << 20, TLSConfig: &tls.Config{ // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) MinVersion: tls.VersionTLS10, }, } if len(s.ClientCAFile) > 0 { clientCAs, err := util.CertPoolFromFile(s.ClientCAFile) if err != nil { glog.Fatalf("unable to load client CA file: %v", err) } // Populate PeerCertificates in requests, but don't reject connections without certificates // This allows certificates to be validated by authenticators, while still allowing other auth types secureServer.TLSConfig.ClientAuth = tls.RequestClientCert // Specify allowed CAs for client certificates secureServer.TLSConfig.ClientCAs = clientCAs } glog.Infof("Serving securely on %s", secureLocation) go func() { defer util.HandleCrash() for { if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" { s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt") s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key") // TODO (cjcullen): Is PublicAddress the right address to sign a cert with? alternateIPs := []net.IP{config.ServiceReadWriteIP} alternateDNS := []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"} // It would be nice to set a fqdn subject alt name, but only the kubelets know, the apiserver is clueless // alternateDNS = append(alternateDNS, "kubernetes.default.svc.CLUSTER.DNS.NAME") if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile, alternateIPs, alternateDNS); err != nil { glog.Errorf("Unable to generate self signed cert: %v", err) } else { glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile) } } // err == systemd.SdNotifyNoSocket when not running on a systemd system if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket { glog.Errorf("Unable to send systemd daemon sucessful start message: %v\n", err) } if err := secureServer.ListenAndServeTLS(s.TLSCertFile, s.TLSPrivateKeyFile); err != nil { glog.Errorf("Unable to listen for secure (%v); will try again.", err) } time.Sleep(15 * time.Second) } }() } http := &http.Server{ Addr: insecureLocation, Handler: apiserver.RecoverPanics(m.InsecureHandler), ReadTimeout: ReadWriteTimeout, WriteTimeout: ReadWriteTimeout, MaxHeaderBytes: 1 << 20, } if secureLocation == "" { // err == systemd.SdNotifyNoSocket when not running on a systemd system if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket { glog.Errorf("Unable to send systemd daemon sucessful start message: %v\n", err) } } glog.Infof("Serving insecurely on %s", insecureLocation) glog.Fatal(http.ListenAndServe()) return nil }
func (ipnet IPNet) String() string {
	n := net.IPNet(ipnet)
	return n.String()
}
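This method (and the ipNetValue variant below) relies on a plain type conversion: a locally defined type whose underlying type is net.IPNet can be converted back so the standard library's String is reused. A minimal sketch with a hypothetical wrapper type:

package main

import (
	"fmt"
	"net"
)

// IPNet is a local named type with net.IPNet as its underlying type,
// e.g. so it can carry flag-parsing or JSON methods of its own.
type IPNet net.IPNet

// String delegates to net.IPNet.String via a type conversion.
func (ipnet IPNet) String() string {
	n := net.IPNet(ipnet)
	return n.String()
}

func main() {
	_, cidr, _ := net.ParseCIDR("172.30.0.0/16")
	fmt.Println(IPNet(*cidr).String()) // 172.30.0.0/16
}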
// Run runs the specified APIServer. This should never exit. func (s *APIServer) Run(_ []string) error { s.verifyPortalFlags() if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) { glog.Fatalf("specify either --etcd-servers or --etcd-config") } capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: s.AllowPrivileged, // TODO(vmarmol): Implement support for HostNetworkSources. HostNetworkSources: []string{}, }) cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) if err != nil { glog.Fatalf("Cloud provider could not be initialized: %v", err) } kubeletClient, err := client.NewKubeletClient(&s.KubeletConfig) if err != nil { glog.Fatalf("Failure to start kubelet client: %v", err) } // "api/all=false" allows users to selectively enable specific api versions. disableAllAPIs := false allAPIFlagValue, ok := s.RuntimeConfig["api/all"] if ok && allAPIFlagValue == "false" { disableAllAPIs = true } // "api/legacy=false" allows users to disable legacy api versions. // Right now, v1beta1 and v1beta2 are considered legacy. disableLegacyAPIs := false legacyAPIFlagValue, ok := s.RuntimeConfig["api/legacy"] if ok && legacyAPIFlagValue == "false" { disableLegacyAPIs = true } // "api/v1beta1={true|false} allows users to enable/disable v1beta1 API. // This takes preference over api/all and api/legacy, if specified. disableV1beta1 := disableAllAPIs || disableLegacyAPIs disableV1beta1 = !s.getRuntimeConfigValue("api/v1beta1", !disableV1beta1) // "api/v1beta2={true|false} allows users to enable/disable v1beta2 API. // This takes preference over api/all and api/legacy, if specified. disableV1beta2 := disableAllAPIs || disableLegacyAPIs disableV1beta2 = !s.getRuntimeConfigValue("api/v1beta2", !disableV1beta2) // "api/v1beta3={true|false} allows users to enable/disable v1beta3 API. // This takes preference over api/all and api/legacy, if specified. disableV1beta3 := disableAllAPIs disableV1beta3 = !s.getRuntimeConfigValue("api/v1beta3", !disableV1beta3) // V1 is disabled by default. Users can enable it using "api/v1={true}". _, enableV1 := s.RuntimeConfig["api/v1"] // TODO: expose same flags as client.BindClientConfigFlags but for a server clientConfig := &client.Config{ Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)), Version: s.StorageVersion, } client, err := client.New(clientConfig) if err != nil { glog.Fatalf("Invalid server address: %v", err) } helper, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, s.StorageVersion, s.EtcdPathPrefix) if err != nil { glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err) } // TODO Is this the right place for migration to happen? Must *both* old and // new etcd prefix params be supplied for this to be valid? 
if s.OldEtcdPathPrefix != "" { if err = helper.MigrateKeys(s.OldEtcdPathPrefix); err != nil { glog.Fatalf("Migration of old etcd keys failed: %v", err) } } n := net.IPNet(s.PortalNet) // Default to the private server key for service account token signing if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" { s.ServiceAccountKeyFile = s.TLSPrivateKeyFile } authenticator, err := apiserver.NewAuthenticator(s.BasicAuthFile, s.ClientCAFile, s.TokenAuthFile, s.ServiceAccountKeyFile, s.ServiceAccountLookup, helper) if err != nil { glog.Fatalf("Invalid Authentication Config: %v", err) } authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(s.AuthorizationMode, s.AuthorizationPolicyFile) if err != nil { glog.Fatalf("Invalid Authorization Config: %v", err) } admissionControlPluginNames := strings.Split(s.AdmissionControl, ",") admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile) if len(s.ExternalHost) == 0 { // TODO: extend for other providers if s.CloudProvider == "gce" { instances, supported := cloud.Instances() if !supported { glog.Fatalf("gce cloud provider has no instances. this shouldn't happen. exiting.") } name, err := os.Hostname() if err != nil { glog.Fatalf("failed to get hostname: %v", err) } addrs, err := instances.NodeAddresses(name) if err != nil { glog.Warningf("unable to obtain external host address from cloud provider: %v", err) } else { for _, addr := range addrs { if addr.Type == api.NodeExternalIP { s.ExternalHost = addr.Address } } } } } config := &master.Config{ EtcdHelper: helper, EventTTL: s.EventTTL, KubeletClient: kubeletClient, PortalNet: &n, EnableCoreControllers: true, EnableLogsSupport: s.EnableLogsSupport, EnableUISupport: true, EnableSwaggerSupport: true, EnableProfiling: s.EnableProfiling, EnableIndex: true, APIPrefix: s.APIPrefix, CorsAllowedOriginList: s.CorsAllowedOriginList, ReadOnlyPort: s.ReadOnlyPort, ReadWritePort: s.SecurePort, PublicAddress: net.IP(s.BindAddress), Authenticator: authenticator, SupportsBasicAuth: len(s.BasicAuthFile) > 0, Authorizer: authorizer, AdmissionControl: admissionController, DisableV1Beta1: disableV1beta1, DisableV1Beta2: disableV1beta2, DisableV1Beta3: disableV1beta3, EnableV1: enableV1, MasterServiceNamespace: s.MasterServiceNamespace, ClusterName: s.ClusterName, ExternalHost: s.ExternalHost, } m := master.New(config) // We serve on 3 ports. See docs/accessing_the_api.md roLocation := "" if s.ReadOnlyPort != 0 { roLocation = net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.ReadOnlyPort)) } secureLocation := "" if s.SecurePort != 0 { secureLocation = net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.SecurePort)) } insecureLocation := net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)) // See the flag commentary to understand our assumptions when opening the read-only and read-write ports. var sem chan bool if s.MaxRequestsInFlight > 0 { sem = make(chan bool, s.MaxRequestsInFlight) } longRunningRE := regexp.MustCompile(s.LongRunningRequestRE) if roLocation != "" { // Default settings allow 1 read-only request per second, allow up to 20 in a burst before enforcing. 
rl := util.NewTokenBucketRateLimiter(s.APIRate, s.APIBurst) readOnlyServer := &http.Server{ Addr: roLocation, Handler: apiserver.MaxInFlightLimit(sem, longRunningRE, apiserver.RecoverPanics(apiserver.ReadOnly(apiserver.RateLimit(rl, m.InsecureHandler)))), ReadTimeout: ReadWriteTimeout, WriteTimeout: ReadWriteTimeout, MaxHeaderBytes: 1 << 20, } glog.Infof("Serving read-only insecurely on %s", roLocation) go func() { defer util.HandleCrash() for { if err := readOnlyServer.ListenAndServe(); err != nil { glog.Errorf("Unable to listen for read only traffic (%v); will try again.", err) } time.Sleep(15 * time.Second) } }() } if secureLocation != "" { secureServer := &http.Server{ Addr: secureLocation, Handler: apiserver.MaxInFlightLimit(sem, longRunningRE, apiserver.RecoverPanics(m.Handler)), ReadTimeout: ReadWriteTimeout, WriteTimeout: ReadWriteTimeout, MaxHeaderBytes: 1 << 20, TLSConfig: &tls.Config{ // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) MinVersion: tls.VersionTLS10, }, } if len(s.ClientCAFile) > 0 { clientCAs, err := util.CertPoolFromFile(s.ClientCAFile) if err != nil { glog.Fatalf("unable to load client CA file: %v", err) } // Populate PeerCertificates in requests, but don't reject connections without certificates // This allows certificates to be validated by authenticators, while still allowing other auth types secureServer.TLSConfig.ClientAuth = tls.RequestClientCert // Specify allowed CAs for client certificates secureServer.TLSConfig.ClientCAs = clientCAs } glog.Infof("Serving securely on %s", secureLocation) go func() { defer util.HandleCrash() for { if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" { s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt") s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key") if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile); err != nil { glog.Errorf("Unable to generate self signed cert: %v", err) } else { glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile) } } if err := secureServer.ListenAndServeTLS(s.TLSCertFile, s.TLSPrivateKeyFile); err != nil { glog.Errorf("Unable to listen for secure (%v); will try again.", err) } time.Sleep(15 * time.Second) } }() } http := &http.Server{ Addr: insecureLocation, Handler: apiserver.RecoverPanics(m.InsecureHandler), ReadTimeout: ReadWriteTimeout, WriteTimeout: ReadWriteTimeout, MaxHeaderBytes: 1 << 20, } glog.Infof("Serving insecurely on %s", insecureLocation) glog.Fatal(http.ListenAndServe()) return nil }
func (ipnet ipNetValue) String() string {
	n := net.IPNet(ipnet)
	return n.String()
}
// BuildDefaultAPIServer constructs the appropriate APIServer and StorageFactory for the kubernetes server.
// It returns an error if no KubernetesMasterConfig was defined.
func BuildDefaultAPIServer(options configapi.MasterConfig) (*apiserveroptions.APIServer, genericapiserver.StorageFactory, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, nil, fmt.Errorf("no kubernetesMasterConfig defined, unable to load settings")
	}

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, nil, err
	}

	portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, nil, err
	}

	// Defaults are tested in TestAPIServerDefaults
	server := apiserveroptions.NewAPIServer()
	// Adjust defaults
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.EnableLogsSupport = false // don't expose server logs
	server.EnableProfiling = false
	server.APIPrefix = KubeAPIPrefix
	server.APIGroupPrefix = KubeAPIGroupPrefix
	server.SecurePort = port
	server.MasterCount = options.KubernetesMasterConfig.MasterCount

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, nil, kerrors.NewAggregate(err)
	}

	resourceEncodingConfig := genericapiserver.NewDefaultResourceEncodingConfig()
	resourceEncodingConfig.SetVersionEncoding(
		kapi.GroupName,
		unversioned.GroupVersion{Group: kapi.GroupName, Version: options.EtcdStorageConfig.KubernetesStorageVersion},
		kapi.SchemeGroupVersion,
	)

	resourceEncodingConfig.SetVersionEncoding(
		extensions.GroupName,
		unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"},
		extensions.SchemeGroupVersion,
	)

	resourceEncodingConfig.SetVersionEncoding(
		batch.GroupName,
		unversioned.GroupVersion{Group: batch.GroupName, Version: "v1"},
		batch.SchemeGroupVersion,
	)

	resourceEncodingConfig.SetVersionEncoding(
		autoscaling.GroupName,
		unversioned.GroupVersion{Group: autoscaling.GroupName, Version: "v1"},
		autoscaling.SchemeGroupVersion,
	)

	storageGroupsToEncodingVersion, err := server.StorageGroupsToEncodingVersion()
	if err != nil {
		return nil, nil, err
	}

	// use the stock storage config based on args, but override bits from our config where appropriate
	etcdConfig := server.StorageConfig
	etcdConfig.Prefix = options.EtcdStorageConfig.KubernetesStoragePrefix
	etcdConfig.ServerList = options.EtcdClientInfo.URLs
	etcdConfig.KeyFile = options.EtcdClientInfo.ClientCert.KeyFile
	etcdConfig.CertFile = options.EtcdClientInfo.ClientCert.CertFile
	etcdConfig.CAFile = options.EtcdClientInfo.CA

	storageFactory, err := genericapiserver.BuildDefaultStorageFactory(
		etcdConfig, server.DefaultStorageMediaType, kapi.Codecs,
		genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,
		// FIXME: this GroupVersionResource override should be configurable
		[]unversioned.GroupVersionResource{batch.Resource("scheduledjobs").WithVersion("v2alpha1")},
		master.DefaultAPIResourceConfigSource(), server.RuntimeConfig,
	)
	if err != nil {
		return nil, nil, err
	}

	/*storageFactory := genericapiserver.NewDefaultStorageFactory(
		etcdConfig,
		server.DefaultStorageMediaType,
		kapi.Codecs,
		resourceEncodingConfig,
		master.DefaultAPIResourceConfigSource(),
	)*/

	// the order here is important, it defines which version will be used for storage
	storageFactory.AddCohabitatingResources(extensions.Resource("jobs"), batch.Resource("jobs"))
	storageFactory.AddCohabitatingResources(extensions.Resource("horizontalpodautoscalers"), autoscaling.Resource("horizontalpodautoscalers"))

	return server, storageFactory, nil
}
// BuildKubernetesMasterConfig builds the Kubernetes API server and controller-manager
// configuration from the OpenShift master options.
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, informers shared.InformerFactory, admissionControl admission.Interface, originAuthenticator authenticator.Request) (*MasterConfig, error) {
    if options.KubernetesMasterConfig == nil {
        return nil, errors.New("insufficient information to build KubernetesMasterConfig")
    }

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)
    kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
    if err != nil {
        return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
    }

    // in-order list of plug-ins that should intercept admission decisions
    // TODO: Push node environment support to upstream in future

    _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, err
    }
    port, err := strconv.Atoi(portString)
    if err != nil {
        return nil, err
    }

    portRange, err := knet.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
    if err != nil {
        return nil, err
    }

    podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
    if err != nil {
        return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
    }

    // Defaults are tested in TestAPIServerDefaults
    server := apiserveroptions.NewAPIServer()
    // Adjust defaults
    server.EventTTL = 2 * time.Hour
    server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
    server.ServiceNodePortRange = *portRange
    server.EnableLogsSupport = false // don't expose server logs
    server.EnableProfiling = false
    server.APIPrefix = KubeAPIPrefix
    server.APIGroupPrefix = KubeAPIGroupPrefix
    server.SecurePort = port
    server.MasterCount = options.KubernetesMasterConfig.MasterCount

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    // Defaults are tested in TestCMServerDefaults
    cmserver := cmapp.NewCMServer()
    // Adjust defaults
    cmserver.Address = "" // no healthz endpoint
    cmserver.Port = 0     // no healthz endpoint
    cmserver.PodEvictionTimeout = unversioned.Duration{Duration: podEvictionTimeout}
    cmserver.VolumeConfiguration.EnableDynamicProvisioning = options.VolumeConfig.DynamicProvisioningEnabled

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
    if err != nil {
        return nil, err
    }
    if cloud != nil {
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q", cmserver.CloudProvider, cmserver.CloudConfigFile)
    }

    var proxyClientCerts []tls.Certificate
    if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
        clientCert, err := tls.LoadX509KeyPair(
            options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
            options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
        )
        if err != nil {
            return nil, err
        }
        proxyClientCerts = append(proxyClientCerts, clientCert)
    }

    resourceEncodingConfig := genericapiserver.NewDefaultResourceEncodingConfig()
    resourceEncodingConfig.SetVersionEncoding(
        kapi.GroupName,
        unversioned.GroupVersion{Group: kapi.GroupName, Version: options.EtcdStorageConfig.KubernetesStorageVersion},
        kapi.SchemeGroupVersion,
    )
    resourceEncodingConfig.SetVersionEncoding(
        extensions.GroupName,
        unversioned.GroupVersion{Group: extensions.GroupName, Version: "v1beta1"},
        extensions.SchemeGroupVersion,
    )
    resourceEncodingConfig.SetVersionEncoding(
        batch.GroupName,
        unversioned.GroupVersion{Group: batch.GroupName, Version: "v1"},
        batch.SchemeGroupVersion,
    )
    resourceEncodingConfig.SetVersionEncoding(
        autoscaling.GroupName,
        unversioned.GroupVersion{Group: autoscaling.GroupName, Version: "v1"},
        autoscaling.SchemeGroupVersion,
    )

    etcdConfig := storagebackend.Config{
        Prefix:                   options.EtcdStorageConfig.KubernetesStoragePrefix,
        ServerList:               options.EtcdClientInfo.URLs,
        KeyFile:                  options.EtcdClientInfo.ClientCert.KeyFile,
        CertFile:                 options.EtcdClientInfo.ClientCert.CertFile,
        CAFile:                   options.EtcdClientInfo.CA,
        DeserializationCacheSize: genericapiserveroptions.DefaultDeserializationCacheSize,
    }

    storageFactory := genericapiserver.NewDefaultStorageFactory(etcdConfig, "", kapi.Codecs, resourceEncodingConfig, master.DefaultAPIResourceConfigSource())
    // the order here is important, it defines which version will be used for storage
    storageFactory.AddCohabitatingResources(extensions.Resource("jobs"), batch.Resource("jobs"))
    storageFactory.AddCohabitatingResources(extensions.Resource("horizontalpodautoscalers"), autoscaling.Resource("horizontalpodautoscalers"))

    // Preserve previous behavior of using the first non-loopback address
    // TODO: Deprecate this behavior and just require a valid value to be passed in
    publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
    if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
        hostIP, err := knet.ChooseHostInterface()
        if err != nil {
            glog.Fatalf("Unable to find suitable network address: %v. Set the masterIP directly to avoid this error.", err)
        }
        publicAddress = hostIP
        glog.Infof("Will report %v as public IP address.", publicAddress)
    }

    m := &master.Config{
        Config: &genericapiserver.Config{
            PublicAddress: publicAddress,
            ReadWritePort: port,

            Authenticator:    originAuthenticator, // this is used to fulfill the tokenreviews endpoint which is used by node authentication
            Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
            AdmissionControl: admissionControl,

            StorageFactory: storageFactory,

            ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
            ServiceNodePortRange:  server.ServiceNodePortRange,

            RequestContextMapper: requestContextMapper,

            APIResourceConfigSource: getAPIResourceConfig(options),
            APIPrefix:               server.APIPrefix,
            APIGroupPrefix:          server.APIGroupPrefix,

            MasterCount: server.MasterCount,

            // Set the TLS options for proxying to pods and services
            // Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
            ProxyTLSClientConfig: &tls.Config{
                // Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
                InsecureSkipVerify: true,
                Certificates:       proxyClientCerts,
            },

            Serializer: kapi.Codecs,

            EnableLogsSupport:         server.EnableLogsSupport,
            EnableProfiling:           server.EnableProfiling,
            EnableWatchCache:          server.EnableWatchCache,
            MasterServiceNamespace:    server.MasterServiceNamespace,
            ExternalHost:              server.ExternalHost,
            MinRequestTimeout:         server.MinRequestTimeout,
            KubernetesServiceNodePort: server.KubernetesServiceNodePort,
        },

        EventTTL: server.EventTTL,

        KubeletClient: kubeletClient,

        EnableCoreControllers: true,

        DeleteCollectionWorkers: server.DeleteCollectionWorkers,
    }

    if server.EnableWatchCache {
        cachesize.SetWatchCacheSizes(server.WatchCacheSizes)
    }

    if options.DNSConfig != nil {
        _, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
        if err != nil {
            return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
        }
        dnsPort, err := strconv.Atoi(dnsPortStr)
        if err != nil {
            return nil, fmt.Errorf("invalid DNS port: %v", err)
        }
        m.ExtraServicePorts = append(m.ExtraServicePorts,
            kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
            kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
        )
        m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
            kapi.EndpointPort{Name: "dns", Port: int32(dnsPort), Protocol: kapi.ProtocolUDP},
            kapi.EndpointPort{Name: "dns-tcp", Port: int32(dnsPort), Protocol: kapi.ProtocolTCP},
        )
    }

    kmaster := &MasterConfig{
        Options: *options.KubernetesMasterConfig,

        KubeClient: kubeClient,

        Master:            m,
        ControllerManager: cmserver,
        CloudProvider:     cloud,
        Informers:         informers,
    }

    return kmaster, nil
}
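Two small parsing steps in the builder above are easy to get wrong: splitting ServingInfo.BindAddress into host and port, and carrying the services subnet around as a net.IPNet value before handing the master a *net.IPNet. The standalone sketch below shows only that pattern; it is stdlib-only, uses net.ParseCIDR in place of the flagtypes helper, and the address and subnet literals are made-up examples.

package main

import (
    "fmt"
    "log"
    "net"
    "strconv"
)

func main() {
    // Hypothetical values standing in for options.ServingInfo.BindAddress and
    // options.KubernetesMasterConfig.ServicesSubnet.
    bindAddress := "0.0.0.0:8443"
    servicesSubnet := "172.30.0.0/16"

    // Same split-then-Atoi sequence the builder uses to recover the secure port.
    _, portString, err := net.SplitHostPort(bindAddress)
    if err != nil {
        log.Fatalf("unable to parse bind address %q: %v", bindAddress, err)
    }
    port, err := strconv.Atoi(portString)
    if err != nil {
        log.Fatalf("invalid port %q: %v", portString, err)
    }

    // The subnet is kept as a net.IPNet value and converted to a pointer where
    // the master config expects *net.IPNet.
    _, subnet, err := net.ParseCIDR(servicesSubnet)
    if err != nil {
        log.Fatalf("invalid services subnet %q: %v", servicesSubnet, err)
    }
    serviceClusterIPRange := *subnet

    fmt.Printf("secure port: %d, service CIDR: %s\n", port, (&serviceClusterIPRange).String())
}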
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
    if options.KubernetesMasterConfig == nil {
        return nil, errors.New("insufficient information to build KubernetesMasterConfig")
    }

    // Connect and setup etcd interfaces
    etcdClient, err := etcd.EtcdClient(options.EtcdClientInfo)
    if err != nil {
        return nil, err
    }
    databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesFor, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
    if err != nil {
        return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
    }

    kubeletClientConfig := configapi.GetKubeletClientConfig(options)
    kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
    if err != nil {
        return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
    }

    // in-order list of plug-ins that should intercept admission decisions
    // TODO: Push node environment support to upstream in future

    _, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
    if err != nil {
        return nil, err
    }
    port, err := strconv.Atoi(portString)
    if err != nil {
        return nil, err
    }

    portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
    if err != nil {
        return nil, err
    }

    podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
    if err != nil {
        return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
    }

    server := app.NewAPIServer()
    server.EventTTL = 2 * time.Hour
    server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
    server.ServiceNodePortRange = *portRange
    server.AdmissionControl = strings.Join([]string{
        "NamespaceExists",
        "NamespaceLifecycle",
        "OriginPodNodeEnvironment",
        "LimitRanger",
        "ServiceAccount",
        "SecurityContextConstraint",
        "ResourceQuota",
    }, ",")

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cmserver := cmapp.NewCMServer()
    cmserver.PodEvictionTimeout = podEvictionTimeout

    // resolve extended arguments
    // TODO: this should be done in config validation (along with the above) so we can provide proper errors
    if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
        return nil, kerrors.NewAggregate(err)
    }

    cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
    if err != nil {
        return nil, err
    }

    admissionController := admission.NewFromPlugins(kubeClient, strings.Split(server.AdmissionControl, ","), server.AdmissionControlConfigFile)

    m := &master.Config{
        PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
        ReadWritePort: port,

        DatabaseStorage:    databaseStorage,
        ExpDatabaseStorage: databaseStorage,

        EventTTL: server.EventTTL,
        //MinRequestTimeout: server.MinRequestTimeout,

        ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
        ServiceNodePortRange:  server.ServiceNodePortRange,

        RequestContextMapper: requestContextMapper,

        KubeletClient: kubeletClient,
        APIPrefix:     KubeAPIPrefix,

        EnableCoreControllers: true,

        MasterCount: options.KubernetesMasterConfig.MasterCount,

        Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
        AdmissionControl: admissionController,

        EnableV1Beta3: configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1beta3"),
        DisableV1:     !configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1"),
    }

    kmaster := &MasterConfig{
        Options: *options.KubernetesMasterConfig,

        KubeClient: kubeClient,

        Master:            m,
        ControllerManager: cmserver,
        CloudProvider:     cloud,
    }

    return kmaster, nil
}
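Both builders wire the API server to an ordered list of admission plug-ins; a request is admitted only if every plug-in in the chain accepts it. The toy sketch below is not the upstream admission package: the Interface shape, the chain type, and the NamespaceExists-style plug-in are simplified stand-ins used only to illustrate that chain-of-responsibility pattern.

package main

import (
    "errors"
    "fmt"
    "strings"
)

// Interface is a simplified stand-in for an admission plug-in: Admit returns a
// non-nil error to reject the request.
type Interface interface {
    Admit(resource string) error
}

// chain runs every plug-in in order and stops at the first rejection.
type chain []Interface

func (c chain) Admit(resource string) error {
    for _, plugin := range c {
        if err := plugin.Admit(resource); err != nil {
            return err
        }
    }
    return nil
}

// namespaceExists is a toy analogue of the "NamespaceExists" plug-in.
type namespaceExists struct{ known map[string]bool }

func (n namespaceExists) Admit(resource string) error {
    ns := strings.SplitN(resource, "/", 2)[0]
    if !n.known[ns] {
        return errors.New("namespace does not exist: " + ns)
    }
    return nil
}

func main() {
    admit := chain{namespaceExists{known: map[string]bool{"default": true}}}
    fmt.Println(admit.Admit("default/mypod")) // <nil>
    fmt.Println(admit.Admit("missing/mypod")) // namespace does not exist: missing
}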
func main() {
    flag.Parse()
    util.InitLogs()
    defer util.FlushLogs()

    verflag.PrintAndExitIfRequested()
    verifyPortalFlags()

    if (*etcdConfigFile != "" && len(etcdServerList) != 0) || (*etcdConfigFile == "" && len(etcdServerList) == 0) {
        glog.Fatalf("specify exactly one of -etcd_servers and -etcd_config")
    }

    capabilities.Initialize(capabilities.Capabilities{
        AllowPrivileged: *allowPrivileged,
    })

    cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)

    kubeletClient, err := client.NewKubeletClient(&kubeletConfig)
    if err != nil {
        glog.Fatalf("Failure to start kubelet client: %v", err)
    }

    // TODO: expose same flags as client.BindClientConfigFlags but for a server
    clientConfig := &client.Config{
        Host:    net.JoinHostPort(address.String(), strconv.Itoa(int(*port))),
        Version: *storageVersion,
    }
    client, err := client.New(clientConfig)
    if err != nil {
        glog.Fatalf("Invalid server address: %v", err)
    }

    helper, err := newEtcd(*etcdConfigFile, etcdServerList)
    if err != nil {
        glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
    }

    n := net.IPNet(portalNet)

    authenticator, err := apiserver.NewAuthenticatorFromTokenFile(*tokenAuthFile)
    if err != nil {
        glog.Fatalf("Invalid Authentication Config: %v", err)
    }

    authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(*authorizationMode, *authorizationPolicyFile)
    if err != nil {
        glog.Fatalf("Invalid Authorization Config: %v", err)
    }

    config := &master.Config{
        Client:                client,
        Cloud:                 cloud,
        EtcdHelper:            helper,
        HealthCheckMinions:    *healthCheckMinions,
        EventTTL:              *eventTTL,
        KubeletClient:         kubeletClient,
        PortalNet:             &n,
        EnableLogsSupport:     *enableLogsSupport,
        EnableUISupport:       true,
        APIPrefix:             *apiPrefix,
        CorsAllowedOriginList: corsAllowedOriginList,
        ReadOnlyPort:          *readOnlyPort,
        ReadWritePort:         *port,
        PublicAddress:         *publicAddressOverride,
        Authenticator:         authenticator,
        Authorizer:            authorizer,
    }
    m := master.New(config)

    // We serve on 3 ports. See docs/reaching_the_api.md
    roLocation := ""
    if *readOnlyPort != 0 {
        roLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(config.ReadOnlyPort))
    }
    secureLocation := ""
    if *securePort != 0 {
        secureLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(*securePort))
    }
    rwLocation := net.JoinHostPort(address.String(), strconv.Itoa(int(*port)))

    // See the flag commentary to understand our assumptions when opening the read-only and read-write ports.
    if roLocation != "" {
        // Allow 1 read-only request per second, allow up to 20 in a burst before enforcing.
        rl := util.NewTokenBucketRateLimiter(1.0, 20)
        readOnlyServer := &http.Server{
            Addr:           roLocation,
            Handler:        apiserver.RecoverPanics(apiserver.ReadOnly(apiserver.RateLimit(rl, m.InsecureHandler))),
            ReadTimeout:    5 * time.Minute,
            WriteTimeout:   5 * time.Minute,
            MaxHeaderBytes: 1 << 20,
        }
        glog.Infof("Serving read-only insecurely on %s", roLocation)
        go func() {
            defer util.HandleCrash()
            for {
                if err := readOnlyServer.ListenAndServe(); err != nil {
                    glog.Errorf("Unable to listen for read-only traffic (%v); will try again.", err)
                }
                time.Sleep(15 * time.Second)
            }
        }()
    }

    if secureLocation != "" {
        secureServer := &http.Server{
            Addr:           secureLocation,
            Handler:        apiserver.RecoverPanics(m.Handler),
            ReadTimeout:    5 * time.Minute,
            WriteTimeout:   5 * time.Minute,
            MaxHeaderBytes: 1 << 20,
        }
        glog.Infof("Serving securely on %s", secureLocation)
        go func() {
            defer util.HandleCrash()
            for {
                if err := secureServer.ListenAndServeTLS(*tlsCertFile, *tlsPrivateKeyFile); err != nil {
                    glog.Errorf("Unable to listen for secure traffic (%v); will try again.", err)
                }
                time.Sleep(15 * time.Second)
            }
        }()
    }

    s := &http.Server{
        Addr:           rwLocation,
        Handler:        apiserver.RecoverPanics(m.InsecureHandler),
        ReadTimeout:    5 * time.Minute,
        WriteTimeout:   5 * time.Minute,
        MaxHeaderBytes: 1 << 20,
    }
    glog.Infof("Serving insecurely on %s", rwLocation)
    glog.Fatal(s.ListenAndServe())
}
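Each of the three listeners above follows the same shape: an http.Server with conservative timeouts and a 1 MB header cap, restarted in a loop whenever ListenAndServe returns. The stdlib-only sketch below isolates that retry pattern; the bind address and handler are illustrative, and only the timeouts, header limit, and 15-second back-off mirror the code above.

package main

import (
    "log"
    "net/http"
    "time"
)

// serveWithRetry keeps restarting the server after a failure, matching the
// "log, sleep 15s, try again" loops used for the read-only and secure ports.
func serveWithRetry(srv *http.Server) {
    for {
        if err := srv.ListenAndServe(); err != nil {
            log.Printf("server on %s stopped (%v); retrying in 15s", srv.Addr, err)
        }
        time.Sleep(15 * time.Second)
    }
}

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("ok"))
    })

    srv := &http.Server{
        Addr:           "127.0.0.1:8080", // illustrative bind address
        Handler:        mux,
        ReadTimeout:    5 * time.Minute,
        WriteTimeout:   5 * time.Minute,
        MaxHeaderBytes: 1 << 20,
    }

    go serveWithRetry(srv)
    select {} // block forever, as the real main does by ending in ListenAndServe
}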