// NewAPIServerCommand provides a CLI handler for the 'apiserver' command
func NewAPIServerCommand(name, fullName string, out io.Writer) *cobra.Command {
	s := app.NewAPIServer()
	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch Kubernetes apiserver (kube-apiserver)",
		Long:  apiserverLong,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()
			util.InitLogs()
			defer util.FlushLogs()
			if err := s.Run(pflag.CommandLine.Args()); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)
	flags := cmd.Flags()
	// TODO: uncomment after picking up a newer cobra
	//pflag.AddFlagSetToPFlagSet(flag, flags)
	s.AddFlags(flags)
	return cmd
}
// NewAPIServerCommand provides a CLI handler for the 'apiserver' command
func NewAPIServerCommand(name, fullName string, out io.Writer) *cobra.Command {
	s := app.NewAPIServer()
	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch Kubernetes apiserver (kube-apiserver)",
		Long:  apiserverLong,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()
			util.InitLogs()
			defer util.FlushLogs()
			if err := s.Run(pflag.CommandLine.Args()); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)
	flags := cmd.Flags()
	flags.SetNormalizeFunc(util.WordSepNormalizeFunc)
	flags.AddGoFlagSet(flag.CommandLine)
	s.AddFlags(flags)
	return cmd
}
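A minimal sketch of wiring this command into a main package and executing it. The import path and binary names below are assumptions for illustration, not taken from the source; only NewAPIServerCommand's signature and cobra's Execute are grounded in the snippets above.

// Hypothetical usage of NewAPIServerCommand; import path is assumed.
package main

import (
	"os"

	"github.com/example/project/cmd/apiserver" // assumed import path
)

func main() {
	cmd := apiserver.NewAPIServerCommand("apiserver", "example apiserver", os.Stdout)
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}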
// NewKubeAPIServer creates a new hyperkube Server object that includes the
// description and flags.
func NewKubeAPIServer() *Server {
	s := kubeapiserver.NewAPIServer()

	hks := Server{
		SimpleUsage: hyperkube.CommandApiserver,
		Long:        "The main API entrypoint and interface to the storage system. The API server is also the focal point for all authorization decisions.",
		Run: func(_ *Server, args []string) error {
			return s.Run(args)
		},
	}
	s.AddFlags(hks.Flags())
	return &hks
}
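A hedged sketch of how such a server is typically registered, following the upstream hyperkube pattern; the HyperKube type and its AddServer/RunToExit methods are assumptions here, not shown in this source.

// Hypothetical registration, modeled on the upstream hyperkube pattern;
// HyperKube, AddServer, and RunToExit are assumed, not defined above.
func main() {
	hk := HyperKube{
		Name: "hyperkube",
		Long: "An all-in-one binary for the Kubernetes server components.",
	}
	hk.AddServer(NewKubeAPIServer())
	hk.RunToExit(os.Args)
}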
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	rand.Seed(time.Now().UTC().UnixNano())

	s := app.NewAPIServer()
	s.AddFlags(pflag.CommandLine)

	util.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := s.Run(pflag.CommandLine.Args()); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	// Connect and setup etcd interfaces
	etcdClient, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	server := app.NewAPIServer()
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.AdmissionControl = strings.Join(AdmissionPlugins, ",")

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cmserver := cmapp.NewCMServer()
	cmserver.PodEvictionTimeout = podEvictionTimeout
	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		// log the controller-manager's cloud settings, since those are what the provider was initialized from
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q", cmserver.CloudProvider, cmserver.CloudConfigFile)
	}

	plugins := []admission.Interface{}
	for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
		switch pluginName {
		case saadmit.PluginName:
			// we need to set some custom parameters on the service account admission controller, so create that one by hand
			saAdmitter := saadmit.NewServiceAccount(kubeClient)
			saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
			saAdmitter.Run()
			plugins = append(plugins, saAdmitter)

		default:
			plugin := admission.InitPlugin(pluginName, kubeClient, server.AdmissionControlConfigFile)
			if plugin != nil {
				plugins = append(plugins, plugin)
			}
		}
	}
	admissionController := admission.NewChainHandler(plugins...)

	var proxyClientCerts []tls.Certificate
	if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
		clientCert, err := tls.LoadX509KeyPair(
			options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
			options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
		)
		if err != nil {
			return nil, err
		}
		proxyClientCerts = append(proxyClientCerts, clientCert)
	}

	// TODO: you have to know every API group you're enabling, or upstream will panic (its
	// alternative to panicking is Fatal-ing). It needs a refactor to return errors instead.
	storageDestinations := master.NewStorageDestinations()

	// storageVersions is a map from API group to allowed versions; each entry must be a version
	// exposed by the REST API or it breaks. We need to fix the upstream to stop using the
	// storage version as a preferred API version.
	storageVersions := map[string]string{}

	enabledKubeVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupKube)
	enabledKubeVersionSet := sets.NewString(enabledKubeVersions...)
	if len(enabledKubeVersions) > 0 {
		databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesForLegacyGroup, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("error setting up Kubernetes server storage: %v", err)
		}
		storageDestinations.AddAPIGroup(configapi.APIGroupKube, databaseStorage)
		storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion
	}

	enabledExtensionsVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions)
	if len(enabledExtensionsVersions) > 0 {
		groupMeta, err := kapilatest.Group(configapi.APIGroupExtensions)
		if err != nil {
			return nil, fmt.Errorf("error setting up Kubernetes extensions server storage: %v", err)
		}
		// TODO: expose storage version options for API groups
		databaseStorage, err := master.NewEtcdStorage(etcdClient, groupMeta.InterfacesFor, groupMeta.GroupVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("error setting up Kubernetes extensions server storage: %v", err)
		}
		storageDestinations.AddAPIGroup(configapi.APIGroupExtensions, databaseStorage)
		storageVersions[configapi.APIGroupExtensions] = enabledExtensionsVersions[0]
	}

	m := &master.Config{
		PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
		ReadWritePort: port,

		StorageDestinations: storageDestinations,
		StorageVersions:     storageVersions,

		EventTTL: server.EventTTL,
		//MinRequestTimeout: server.MinRequestTimeout,

		ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
		ServiceNodePortRange:  server.ServiceNodePortRange,

		RequestContextMapper: requestContextMapper,

		KubeletClient:  kubeletClient,
		APIPrefix:      KubeAPIPrefix,
		APIGroupPrefix: KubeAPIGroupPrefix,

		EnableCoreControllers: true,

		MasterCount: options.KubernetesMasterConfig.MasterCount,

		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admissionController,

		EnableExp: len(enabledExtensionsVersions) > 0,
		DisableV1: !enabledKubeVersionSet.Has("v1"),

		// Set the TLS options for proxying to pods and services
		// Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
		ProxyTLSClientConfig: &tls.Config{
			// Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
			InsecureSkipVerify: true,
			Certificates:       proxyClientCerts,
		},
	}

	// set for consistency -- Origin only uses m.EnableExp
	cmserver.EnableExperimental = m.EnableExp

	if options.DNSConfig != nil {
		_, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
		if err != nil {
			return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
		}
		dnsPort, err := strconv.Atoi(dnsPortStr)
		if err != nil {
			return nil, fmt.Errorf("invalid DNS port: %v", err)
		}
		m.ExtraServicePorts = append(m.ExtraServicePorts,
			kapi.ServicePort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP, TargetPort: util.NewIntOrStringFromInt(dnsPort)},
			kapi.ServicePort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP, TargetPort: util.NewIntOrStringFromInt(dnsPort)},
		)
		m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
			kapi.EndpointPort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP},
			kapi.EndpointPort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP},
		)
	}

	kmaster := &MasterConfig{
		Options:           *options.KubernetesMasterConfig,
		KubeClient:        kubeClient,
		Master:            m,
		ControllerManager: cmserver,
		CloudProvider:     cloud,
	}

	return kmaster, nil
}
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	// Connect and setup etcd interfaces
	etcdClient, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesFor, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("error setting up Kubernetes server storage: %v", err)
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	server := app.NewAPIServer()
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.AdmissionControl = strings.Join(AdmissionPlugins, ",")

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cmserver := cmapp.NewCMServer()
	cmserver.PodEvictionTimeout = podEvictionTimeout
	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}

	plugins := []admission.Interface{}
	for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
		switch pluginName {
		case saadmit.PluginName:
			// we need to set some custom parameters on the service account admission controller, so create that one by hand
			saAdmitter := saadmit.NewServiceAccount(kubeClient)
			saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
			saAdmitter.Run()
			plugins = append(plugins, saAdmitter)

		default:
			plugin := admission.InitPlugin(pluginName, kubeClient, server.AdmissionControlConfigFile)
			if plugin != nil {
				plugins = append(plugins, plugin)
			}
		}
	}
	admissionController := admission.NewChainHandler(plugins...)

	var proxyClientCerts []tls.Certificate
	if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
		clientCert, err := tls.LoadX509KeyPair(
			options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
			options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
		)
		if err != nil {
			return nil, err
		}
		proxyClientCerts = append(proxyClientCerts, clientCert)
	}

	m := &master.Config{
		PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
		ReadWritePort: port,

		DatabaseStorage:    databaseStorage,
		ExpDatabaseStorage: databaseStorage,

		EventTTL: server.EventTTL,
		//MinRequestTimeout: server.MinRequestTimeout,

		ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
		ServiceNodePortRange:  server.ServiceNodePortRange,

		RequestContextMapper: requestContextMapper,

		KubeletClient: kubeletClient,
		APIPrefix:     KubeAPIPrefix,

		EnableCoreControllers: true,

		MasterCount: options.KubernetesMasterConfig.MasterCount,

		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admissionController,

		EnableV1Beta3: configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1beta3"),
		DisableV1:     !configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1"),

		// Set the TLS options for proxying to pods and services
		// Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
		ProxyTLSClientConfig: &tls.Config{
			// Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
			InsecureSkipVerify: true,
			Certificates:       proxyClientCerts,
		},
	}

	kmaster := &MasterConfig{
		Options:           *options.KubernetesMasterConfig,
		KubeClient:        kubeClient,
		Master:            m,
		ControllerManager: cmserver,
		CloudProvider:     cloud,
	}

	return kmaster, nil
}
func ValidateAPIServerExtendedArguments(config api.ExtendedArguments) fielderrors.ValidationErrorList {
	return ValidateExtendedArguments(config, kapp.NewAPIServer().AddFlags)
}
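A brief, hypothetical illustration of invoking this validator. It assumes ExtendedArguments is a map from flag names to string values (as the AddFlags-based validation implies); the specific flag name used is an assumption for illustration.

// Hypothetical usage; the flag name and map literal are assumptions.
args := api.ExtendedArguments{
	"max-requests-inflight": []string{"400"},
}
if errs := ValidateAPIServerExtendedArguments(args); len(errs) > 0 {
	for _, err := range errs {
		fmt.Printf("invalid apiserver argument: %v\n", err)
	}
}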
func main() {
	// embed kubectl
	if filepath.Base(os.Args[0]) == "kubectl" {
		cmd := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
		if err := cmd.Execute(); err != nil {
			os.Exit(1)
		}
		return
	}

	usr, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}

	mfs := pflag.NewFlagSet("main", pflag.ExitOnError)
	nodes := mfs.StringSlice("nodes", []string{}, "list of nodes to make part of cluster")
	sshKeyfile := mfs.String("ssh-keyfile", usr.HomeDir+"/.vagrant.d/insecure_private_key", "private ssh key to use for tunnels")
	sshUser := mfs.String("ssh-user", "core", "ssh user to use for tunnels")
	clusterIPRange := mfs.String("service-cluster-ip-range", "10.1.30.0/24", "A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.")
	mfs.Parse(os.Args[1:]) // pflag expects the argument list without the program name

	config := &ssh.ClientConfig{
		User: *sshUser,
		Auth: []ssh.AuthMethod{
			PublicKeyFile(*sshKeyfile),
		},
	}

	for _, remoteHost := range *nodes {
		// Dial your ssh server.
		go func(host string) {
			// Serve HTTP with your SSH server acting as a reverse proxy.
			go func() {
				b := &backoff.Backoff{
					// These are the defaults
					Min:    100 * time.Millisecond,
					Max:    10 * time.Second,
					Factor: 2,
					Jitter: false,
				}
				for {
					conn, err := ssh.Dial("tcp", host, config)
					if err != nil {
						log.Println("unable to connect, retrying:", err)
						time.Sleep(b.Duration())
						continue
					}
					defer conn.Close()

					// Request the remote side to open port 8080 on all interfaces.
					l, err := conn.Listen("tcp", remoteListen)
					if err != nil {
						log.Println("unable to register tcp forward, retrying:", err)
						time.Sleep(b.Duration())
						continue
					}
					defer l.Close()

					fwd, _ := forward.New()
					http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
						req.URL = testutils.ParseURI("http://localhost:8080")
						fwd.ServeHTTP(resp, req)
					}))
					log.Println("proxy connection broken, reconnecting....")
					time.Sleep(b.Duration())
				}
			}()

			go func() {
				// this will block, and the kubelet will stop once the connection is broken;
				// loop for reconnection
				b := &backoff.Backoff{
					// These are the defaults
					Min:    100 * time.Millisecond,
					Max:    10 * time.Second,
					Factor: 2,
					Jitter: false,
				}
				for {
					ip, _, err := net.SplitHostPort(host)
					if err != nil {
						log.Fatalf("unable to split host:port: %v", err)
						return
					}
					cmd := fmt.Sprintf("sudo /usr/bin/kubelet --hostname-override=%s --api-servers=http://localhost:8080", ip)
					_, err = executeCmd(cmd, host, config)
					if err != nil {
						log.Println("unable to execute kubelet, retrying:", err)
					}
					// if we got here something went wrong
					dur := b.Duration()
					log.Println("kubelet connection broken, reconnecting in", dur)
					time.Sleep(dur)
				}
			}()

			<-make(chan interface{})
		}(remoteHost)
	}

	go func() {
		// etcd reads os.Args, so we have to mess with them
		os.Args = []string{"etcd"}
		etcdmain.Main()
	}()

	go func() {
		s := kubeapiserver.NewAPIServer()
		fs := pflag.NewFlagSet("apiserver", pflag.ContinueOnError)
		s.AddFlags(fs)
		fs.Parse([]string{
			"--service-cluster-ip-range=" + *clusterIPRange,
			"--etcd-servers=http://127.0.0.1:2379",
			"--ssh-keyfile=" + *sshKeyfile,
			// NOTE: from here through the start of the controller-manager goroutine,
			// the source was masked ("******"); the lines below are a reconstruction
			// from the surrounding pattern, not verbatim source.
			"--ssh-user=" + *sshUser,
		})
		s.Run([]string{})
	}()

	go func() {
		s := controllermanager.NewCMServer() // package alias assumed; the constructor call was masked in the source
		fs := pflag.NewFlagSet("controller", pflag.ContinueOnError)
		s.AddFlags(fs)
		fs.Parse([]string{})
		s.Run([]string{})
	}()

	go func() {
		s := scheduler.NewSchedulerServer()
		fs := pflag.NewFlagSet("scheduler", pflag.ContinueOnError)
		s.AddFlags(fs)
		fs.Parse([]string{})
		s.Run([]string{})
	}()

	<-make(chan interface{})
}
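The main above calls a PublicKeyFile helper that is not shown in the source. A minimal sketch of such a helper using golang.org/x/crypto/ssh follows; this is an assumed implementation of the common pattern, not the original.

// Hypothetical sketch of the PublicKeyFile helper referenced above.
func PublicKeyFile(file string) ssh.AuthMethod {
	buf, err := ioutil.ReadFile(file)
	if err != nil {
		log.Fatalf("unable to read ssh key %s: %v", file, err)
	}
	key, err := ssh.ParsePrivateKey(buf)
	if err != nil {
		log.Fatalf("unable to parse ssh key %s: %v", file, err)
	}
	return ssh.PublicKeys(key)
}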
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client, projectCache *projectcache.ProjectCache) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	// Connect and setup etcd interfaces
	etcdClient, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kubeletclient.NewStaticKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	server := app.NewAPIServer()
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = net.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	admissionPlugins := AdmissionPlugins
	server.AdmissionControl = strings.Join(admissionPlugins, ",")

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	if len(options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride) > 0 {
		admissionPlugins = options.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride
		server.AdmissionControl = strings.Join(admissionPlugins, ",")
	}

	cmserver := cmapp.NewCMServer()
	cmserver.PodEvictionTimeout = podEvictionTimeout
	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		// log the controller-manager's cloud settings, since those are what the provider was initialized from
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q", cmserver.CloudProvider, cmserver.CloudConfigFile)
	}

	// This is a placeholder to provide additional initialization
	// objects to plugins
	pluginInitializer := oadmission.PluginInitializer{
		ProjectCache: projectCache,
	}

	plugins := []admission.Interface{}
	for _, pluginName := range strings.Split(server.AdmissionControl, ",") {
		switch pluginName {
		case saadmit.PluginName:
			// we need to set some custom parameters on the service account admission controller, so create that one by hand
			saAdmitter := saadmit.NewServiceAccount(kubeClient)
			saAdmitter.LimitSecretReferences = options.ServiceAccountConfig.LimitSecretReferences
			saAdmitter.Run()
			plugins = append(plugins, saAdmitter)

		default:
			configFile := server.AdmissionControlConfigFile
			pluginConfig := options.KubernetesMasterConfig.AdmissionConfig.PluginConfig
			// Check whether a config is specified for this plugin. If not, default to the
			// global plugin config file specified in the server config.
			if cfg, hasConfig := pluginConfig[pluginName]; hasConfig {
				configFile, err = pluginconfig.GetPluginConfig(cfg)
				if err != nil {
					return nil, err
				}
			}
			plugin := admission.InitPlugin(pluginName, kubeClient, configFile)
			if plugin != nil {
				plugins = append(plugins, plugin)
			}
		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	var proxyClientCerts []tls.Certificate
	if len(options.KubernetesMasterConfig.ProxyClientInfo.CertFile) > 0 {
		clientCert, err := tls.LoadX509KeyPair(
			options.KubernetesMasterConfig.ProxyClientInfo.CertFile,
			options.KubernetesMasterConfig.ProxyClientInfo.KeyFile,
		)
		if err != nil {
			return nil, err
		}
		proxyClientCerts = append(proxyClientCerts, clientCert)
	}

	// TODO: you have to know every API group you're enabling, or upstream will panic (its
	// alternative to panicking is Fatal-ing). It needs a refactor to return errors instead.
	storageDestinations := master.NewStorageDestinations()

	// storageVersions is a map from API group to allowed versions; each entry must be a version
	// exposed by the REST API or it breaks. We need to fix the upstream to stop using the
	// storage version as a preferred API version.
	storageVersions := map[string]string{}

	enabledKubeVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupKube)
	if len(enabledKubeVersions) > 0 {
		kubeStorageVersion := unversioned.GroupVersion{Group: configapi.APIGroupKube, Version: options.EtcdStorageConfig.KubernetesStorageVersion}
		databaseStorage, err := NewEtcdStorage(etcdClient, kubeStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("error setting up Kubernetes server storage: %v", err)
		}
		storageDestinations.AddAPIGroup(configapi.APIGroupKube, databaseStorage)
		storageVersions[configapi.APIGroupKube] = options.EtcdStorageConfig.KubernetesStorageVersion
	}

	enabledExtensionsVersions := configapi.GetEnabledAPIVersionsForGroup(*options.KubernetesMasterConfig, configapi.APIGroupExtensions)
	if len(enabledExtensionsVersions) > 0 {
		groupMeta, err := kapilatest.Group(configapi.APIGroupExtensions)
		if err != nil {
			return nil, fmt.Errorf("error setting up Kubernetes extensions server storage: %v", err)
		}
		// TODO: expose storage version options for API groups
		databaseStorage, err := NewEtcdStorage(etcdClient, groupMeta.GroupVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("error setting up Kubernetes extensions server storage: %v", err)
		}
		storageDestinations.AddAPIGroup(configapi.APIGroupExtensions, databaseStorage)
		storageVersions[configapi.APIGroupExtensions] = enabledExtensionsVersions[0]
	}

	// Preserve previous behavior of using the first non-loopback address
	// TODO: Deprecate this behavior and just require a valid value to be passed in
	publicAddress := net.ParseIP(options.KubernetesMasterConfig.MasterIP)
	if publicAddress == nil || publicAddress.IsUnspecified() || publicAddress.IsLoopback() {
		hostIP, err := util.ChooseHostInterface()
		if err != nil {
			glog.Fatalf("unable to find suitable network address: %v; set masterIP directly to avoid this error", err)
		}
		publicAddress = hostIP
		glog.Infof("Will report %v as public IP address.", publicAddress)
	}

	m := &master.Config{
		PublicAddress: publicAddress,
		ReadWritePort: port,

		StorageDestinations: storageDestinations,
		StorageVersions:     storageVersions,

		EventTTL: server.EventTTL,
		//MinRequestTimeout: server.MinRequestTimeout,

		ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
		ServiceNodePortRange:  server.ServiceNodePortRange,

		RequestContextMapper: requestContextMapper,

		KubeletClient:  kubeletClient,
		APIPrefix:      KubeAPIPrefix,
		APIGroupPrefix: KubeAPIGroupPrefix,

		EnableCoreControllers: true,

		MasterCount: options.KubernetesMasterConfig.MasterCount,

		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admissionController,

		APIGroupVersionOverrides: getAPIGroupVersionOverrides(options),

		// Set the TLS options for proxying to pods and services
		// Proxying to nodes uses the kubeletClient TLS config (so can provide a different cert, and verify the node hostname)
		ProxyTLSClientConfig: &tls.Config{
			// Proxying to pods and services cannot verify hostnames, since they are contacted on randomly allocated IPs
			InsecureSkipVerify: true,
			Certificates:       proxyClientCerts,
		},
	}

	if options.DNSConfig != nil {
		_, dnsPortStr, err := net.SplitHostPort(options.DNSConfig.BindAddress)
		if err != nil {
			return nil, fmt.Errorf("unable to parse DNS bind address %s: %v", options.DNSConfig.BindAddress, err)
		}
		dnsPort, err := strconv.Atoi(dnsPortStr)
		if err != nil {
			return nil, fmt.Errorf("invalid DNS port: %v", err)
		}
		m.ExtraServicePorts = append(m.ExtraServicePorts,
			kapi.ServicePort{Name: "dns", Port: 53, Protocol: kapi.ProtocolUDP, TargetPort: intstr.FromInt(dnsPort)},
			kapi.ServicePort{Name: "dns-tcp", Port: 53, Protocol: kapi.ProtocolTCP, TargetPort: intstr.FromInt(dnsPort)},
		)
		m.ExtraEndpointPorts = append(m.ExtraEndpointPorts,
			kapi.EndpointPort{Name: "dns", Port: dnsPort, Protocol: kapi.ProtocolUDP},
			kapi.EndpointPort{Name: "dns-tcp", Port: dnsPort, Protocol: kapi.ProtocolTCP},
		)
	}

	kmaster := &MasterConfig{
		Options:           *options.KubernetesMasterConfig,
		KubeClient:        kubeClient,
		Master:            m,
		ControllerManager: cmserver,
		CloudProvider:     cloud,
	}

	return kmaster, nil
}
func ValidateAPIServerExtendedArguments(config api.ExtendedArguments, fldPath *field.Path) field.ErrorList {
	return ValidateExtendedArguments(config, kapp.NewAPIServer().AddFlags, fldPath)
}
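For the field.Path variant, a hypothetical invocation; field.NewPath is the standard constructor for field paths, while the config variable and path names used here are assumptions for illustration.

// Hypothetical usage; the path segments are assumptions.
errs := ValidateAPIServerExtendedArguments(
	masterConfig.KubernetesMasterConfig.APIServerArguments,
	field.NewPath("kubernetesMasterConfig", "apiServerArguments"),
)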
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
	if options.KubernetesMasterConfig == nil {
		return nil, errors.New("insufficient information to build KubernetesMasterConfig")
	}

	// Connect and setup etcd interfaces
	etcdClient, err := etcd.GetAndTestEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	databaseStorage, err := master.NewEtcdStorage(etcdClient, kapilatest.InterfacesFor, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("error setting up Kubernetes server storage: %v", err)
	}

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)
	kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
	if err != nil {
		return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
	}

	// in-order list of plug-ins that should intercept admission decisions
	// TODO: Push node environment support to upstream in future

	_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portString)
	if err != nil {
		return nil, err
	}

	portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
	if err != nil {
		return nil, err
	}

	podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
	if err != nil {
		return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
	}

	server := app.NewAPIServer()
	server.EventTTL = 2 * time.Hour
	server.ServiceClusterIPRange = util.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
	server.ServiceNodePortRange = *portRange
	server.AdmissionControl = strings.Join([]string{
		"NamespaceExists",
		"NamespaceLifecycle",
		"OriginPodNodeEnvironment",
		"LimitRanger",
		"ServiceAccount",
		"SecurityContextConstraint",
		"ResourceQuota",
	}, ",")

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cmserver := cmapp.NewCMServer()
	cmserver.PodEvictionTimeout = podEvictionTimeout
	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
	if err != nil {
		return nil, err
	}

	admissionController := admission.NewFromPlugins(kubeClient, strings.Split(server.AdmissionControl, ","), server.AdmissionControlConfigFile)

	m := &master.Config{
		PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
		ReadWritePort: port,

		DatabaseStorage:    databaseStorage,
		ExpDatabaseStorage: databaseStorage,

		EventTTL: server.EventTTL,
		//MinRequestTimeout: server.MinRequestTimeout,

		ServiceClusterIPRange: (*net.IPNet)(&server.ServiceClusterIPRange),
		ServiceNodePortRange:  server.ServiceNodePortRange,

		RequestContextMapper: requestContextMapper,

		KubeletClient: kubeletClient,
		APIPrefix:     KubeAPIPrefix,

		EnableCoreControllers: true,

		MasterCount: options.KubernetesMasterConfig.MasterCount,

		Authorizer:       apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admissionController,

		EnableV1Beta3: configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1beta3"),
		DisableV1:     !configapi.HasKubernetesAPILevel(*options.KubernetesMasterConfig, "v1"),
	}

	kmaster := &MasterConfig{
		Options:           *options.KubernetesMasterConfig,
		KubeClient:        kubeClient,
		Master:            m,
		ControllerManager: cmserver,
		CloudProvider:     cloud,
	}

	return kmaster, nil
}
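A hedged sketch of a call site for this three-argument builder variant; the surrounding variable names (masterOptions, requestContextMapper, kubeClient) are assumptions for illustration, while the signature and the MasterConfig fields are grounded in the function above.

// Hypothetical call site; variable names are assumptions.
kmaster, err := BuildKubernetesMasterConfig(masterOptions, requestContextMapper, kubeClient)
if err != nil {
	glog.Fatalf("failed to build Kubernetes master config: %v", err)
}
glog.Infof("Kubernetes master listening on port %d", kmaster.Master.ReadWritePort)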