func main() {
    config := HollowNodeConfig{}
    config.addFlags(pflag.CommandLine)
    flag.InitFlags()

    if !knownMorphs.Has(config.Morph) {
        glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
    }

    // create a client to communicate with API server.
    clientConfig, err := config.createClientConfigFromFile()
    if err != nil {
        glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err)
    }
    cl, err := client.New(clientConfig)
    if err != nil {
        glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
    }
    clientset, err := internalclientset.NewForConfig(clientConfig)
    if err != nil {
        glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err)
    }

    if config.Morph == "kubelet" {
        cadvisorInterface := new(cadvisortest.Fake)
        containerManager := cm.NewStubContainerManager()

        fakeDockerClient := dockertools.NewFakeDockerClient()
        fakeDockerClient.EnableSleep = true

        hollowKubelet := kubemark.NewHollowKubelet(
            config.NodeName,
            clientset,
            cadvisorInterface,
            fakeDockerClient,
            config.KubeletPort,
            config.KubeletReadOnlyPort,
            containerManager,
            maxPods,
            podsPerCore,
        )
        hollowKubelet.Run()
    }

    if config.Morph == "proxy" {
        eventBroadcaster := record.NewBroadcaster()
        recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})

        iptInterface := fakeiptables.NewFake()

        serviceConfig := proxyconfig.NewServiceConfig()
        serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

        endpointsConfig := proxyconfig.NewEndpointsConfig()
        endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

        hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
        hollowProxy.Run()
    }
}
// RunProxy starts the proxy
func (c *NodeConfig) RunProxy() {
    // initialize kube proxy
    serviceConfig := pconfig.NewServiceConfig()
    endpointsConfig := pconfig.NewEndpointsConfig()

    host, _, err := net.SplitHostPort(c.BindAddress)
    if err != nil {
        glog.Fatalf("The provided value to bind to must be an ip:port %q", c.BindAddress)
    }
    ip := net.ParseIP(host)
    if ip == nil {
        glog.Fatalf("The provided value to bind to must be an ip:port: %q", c.BindAddress)
    }

    protocol := iptables.ProtocolIpv4
    if ip.To4() == nil {
        protocol = iptables.ProtocolIpv6
    }

    syncPeriod, err := time.ParseDuration(c.IPTablesSyncPeriod)
    if err != nil {
        glog.Fatalf("Cannot parse the provided ip-tables sync period (%s) : %v", c.IPTablesSyncPeriod, err)
    }

    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(c.Client.Events(""))
    recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "kube-proxy", Host: c.KubeletConfig.NodeName})

    nodeRef := &kapi.ObjectReference{
        Kind: "Node",
        Name: c.KubeletConfig.NodeName,
    }

    exec := kexec.New()
    dbus := utildbus.New()
    iptables := iptables.New(exec, dbus, protocol)
    proxier, err := proxy.NewProxier(iptables, exec, syncPeriod, false)
    if err != nil {
        // This should be fatal, but that would break the integration tests
        glog.Warningf("WARNING: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
        return
    }
    iptables.AddReloadFunc(proxier.Sync)

    pconfig.NewSourceAPI(
        c.Client,
        10*time.Minute,
        serviceConfig.Channel("api"),
        endpointsConfig.Channel("api"))

    serviceConfig.RegisterHandler(proxier)
    if c.FilteringEndpointsHandler == nil {
        endpointsConfig.RegisterHandler(proxier)
    } else {
        c.FilteringEndpointsHandler.SetBaseEndpointsHandler(proxier)
        endpointsConfig.RegisterHandler(c.FilteringEndpointsHandler)
    }

    recorder.Eventf(nodeRef, kapi.EventTypeNormal, "Starting", "Starting kube-proxy.")

    glog.Infof("Started Kubernetes Proxy on %s", host)
}
// RunProxy starts the proxy
func (c *NodeConfig) RunProxy() {
    // initialize kube proxy
    serviceConfig := pconfig.NewServiceConfig()
    endpointsConfig := pconfig.NewEndpointsConfig()
    loadBalancer := proxy.NewLoadBalancerRR()
    endpointsConfig.RegisterHandler(loadBalancer)

    host, _, err := net.SplitHostPort(c.BindAddress)
    if err != nil {
        glog.Fatalf("The provided value to bind to must be an ip:port %q", c.BindAddress)
    }
    ip := net.ParseIP(host)
    if ip == nil {
        glog.Fatalf("The provided value to bind to must be an ip:port: %q", c.BindAddress)
    }

    protocol := iptables.ProtocolIpv4
    if ip.To4() == nil {
        protocol = iptables.ProtocolIpv6
    }

    syncPeriod, err := time.ParseDuration(c.IPTablesSyncPeriod)
    if err != nil {
        glog.Fatalf("Cannot parse the provided ip-tables sync period (%s) : %v", c.IPTablesSyncPeriod, err)
    }

    go util.Forever(func() {
        proxier, err := proxy.NewProxier(loadBalancer, ip, iptables.New(kexec.New(), protocol), util.PortRange{}, syncPeriod)
        if err != nil {
            switch {
            // conflicting use of iptables, retry
            case proxy.IsProxyLocked(err):
                glog.Errorf("Unable to start proxy, will retry: %v", err)
                return
            // on a system without iptables
            case strings.Contains(err.Error(), "executable file not found in path"):
                glog.V(4).Infof("kube-proxy initialization error: %v", err)
                glog.Warningf("WARNING: Could not find the iptables command. The service proxy requires iptables and will be disabled.")
            case err == proxy.ErrProxyOnLocalhost:
                glog.Warningf("WARNING: The service proxy cannot bind to localhost and will be disabled.")
            case strings.Contains(err.Error(), "you must be root"):
                glog.Warningf("WARNING: Could not modify iptables. You must run this process as root to use the service proxy.")
            default:
                glog.Warningf("WARNING: Could not modify iptables. You must run this process as root to use the service proxy: %v", err)
            }
            select {}
        }

        pconfig.NewSourceAPI(
            c.Client,
            30*time.Second,
            serviceConfig.Channel("api"),
            endpointsConfig.Channel("api"))

        serviceConfig.RegisterHandler(proxier)

        glog.Infof("Started Kubernetes Proxy on %s", host)
        select {}
    }, 5*time.Second)
}
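// Both RunProxy variants above validate the bind address and iptables sync
// period the same way before touching any proxy machinery. The following is a
// minimal, standalone sketch of that validation using only the Go standard
// library; the function name, signature, and example values are illustrative
// assumptions, not part of the original NodeConfig API.
package main

import (
    "fmt"
    "net"
    "time"
)

// parseProxyBind splits an "ip:port" bind address, decides between IPv4 and
// IPv6, and parses the iptables sync period. It returns errors instead of
// calling glog.Fatalf so it can be unit-tested in isolation.
func parseProxyBind(bindAddress, syncPeriodStr string) (ip net.IP, ipv6 bool, syncPeriod time.Duration, err error) {
    host, _, err := net.SplitHostPort(bindAddress)
    if err != nil {
        return nil, false, 0, fmt.Errorf("bind address must be ip:port, got %q: %v", bindAddress, err)
    }
    ip = net.ParseIP(host)
    if ip == nil {
        return nil, false, 0, fmt.Errorf("bind address must start with a valid IP, got %q", bindAddress)
    }
    // ip.To4() returns nil for addresses that cannot be represented as IPv4,
    // which is exactly the check RunProxy uses to pick ProtocolIpv6.
    ipv6 = ip.To4() == nil

    syncPeriod, err = time.ParseDuration(syncPeriodStr)
    if err != nil {
        return nil, false, 0, fmt.Errorf("cannot parse iptables sync period %q: %v", syncPeriodStr, err)
    }
    return ip, ipv6, syncPeriod, nil
}

func main() {
    ip, ipv6, period, err := parseProxyBind("0.0.0.0:10249", "5s")
    fmt.Println(ip, ipv6, period, err) // 0.0.0.0 false 5s <nil>
}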
func main() {
    runtime.GOMAXPROCS(runtime.NumCPU())

    config := HollowNodeConfig{}
    config.addFlags(pflag.CommandLine)
    util.InitFlags()

    if !knownMorphs.Has(config.Morph) {
        glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
    }

    // create a client to communicate with API server.
    cl, err := createClientFromFile(config.KubeconfigPath)
    clientset := clientset.FromUnversionedClient(cl)
    if err != nil {
        glog.Fatal("Failed to create a Client. Exiting.")
    }

    if config.Morph == "kubelet" {
        cadvisorInterface := new(cadvisortest.Fake)
        containerManager := cm.NewStubContainerManager()

        fakeDockerClient := dockertools.NewFakeDockerClient()
        fakeDockerClient.VersionInfo = docker.Env{"Version=1.1.3", "ApiVersion=1.18"}
        fakeDockerClient.EnableSleep = true

        hollowKubelet := kubemark.NewHollowKubelet(
            config.NodeName,
            clientset,
            cadvisorInterface,
            fakeDockerClient,
            config.KubeletPort,
            config.KubeletReadOnlyPort,
            containerManager,
            maxPods,
        )
        hollowKubelet.Run()
    }

    if config.Morph == "proxy" {
        eventBroadcaster := record.NewBroadcaster()
        recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})

        iptInterface := fakeiptables.NewFake()

        serviceConfig := proxyconfig.NewServiceConfig()
        serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

        endpointsConfig := proxyconfig.NewEndpointsConfig()
        endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

        hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
        hollowProxy.Run()
    }
}
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, error) {
    if c, err := configz.New("componentconfig"); err == nil {
        c.Set(config.KubeProxyConfiguration)
    } else {
        glog.Errorf("unable to register configz: %s", err)
    }
    protocol := utiliptables.ProtocolIpv4
    if net.ParseIP(config.BindAddress).To4() == nil {
        protocol = utiliptables.ProtocolIpv6
    }

    // Create an iptables util.
    execer := exec.New()
    dbus := utildbus.New()
    iptInterface := utiliptables.New(execer, dbus, protocol)

    // We omit creation of pretty much everything if we run in cleanup mode
    if config.CleanupAndExit {
        return &ProxyServer{
            Config:       config,
            IptInterface: iptInterface,
        }, nil
    }

    // TODO(vmarmol): Use container config for this.
    var oomAdjuster *oom.OOMAdjuster
    if config.OOMScoreAdj != nil {
        oomAdjuster = oom.NewOOMAdjuster()
        if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*config.OOMScoreAdj)); err != nil {
            glog.V(2).Info(err)
        }
    }

    if config.ResourceContainer != "" {
        // Run in its own container.
        if err := resourcecontainer.RunInResourceContainer(config.ResourceContainer); err != nil {
            glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
        } else {
            glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
        }
    }

    // Create a Kube Client
    // define api config source
    if config.Kubeconfig == "" && config.Master == "" {
        glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
    }
    // This creates a client, first loading any specified kubeconfig
    // file, and then overriding the Master flag, if non-empty.
    kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
        &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
    if err != nil {
        return nil, err
    }

    kubeconfig.ContentType = config.ContentType
    // Override kubeconfig qps/burst settings from flags
    kubeconfig.QPS = config.KubeAPIQPS
    kubeconfig.Burst = int(config.KubeAPIBurst)

    client, err := clientset.NewForConfig(kubeconfig)
    if err != nil {
        glog.Fatalf("Invalid API configuration: %v", err)
    }

    // Create event recorder
    hostname := nodeutil.GetHostname(config.HostnameOverride)
    eventBroadcaster := record.NewBroadcaster()
    recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})

    var proxier proxy.ProxyProvider
    var endpointsHandler proxyconfig.EndpointsConfigHandler

    proxyMode := getProxyMode(string(config.Mode), client.Core().Nodes(), hostname, iptInterface, iptables.LinuxKernelCompatTester{})
    if proxyMode == proxyModeIPTables {
        glog.V(0).Info("Using iptables Proxier.")
        if config.IPTablesMasqueradeBit == nil {
            // IPTablesMasqueradeBit must be specified or defaulted.
            return nil, fmt.Errorf("Unable to read IPTablesMasqueradeBit from config")
        }
        proxierIPTables, err := iptables.NewProxier(iptInterface, utilsysctl.New(), execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll, int(*config.IPTablesMasqueradeBit), config.ClusterCIDR, hostname, getNodeIP(client, hostname))
        if err != nil {
            glog.Fatalf("Unable to create proxier: %v", err)
        }
        proxier = proxierIPTables
        endpointsHandler = proxierIPTables
        // No turning back. Remove artifacts that might still exist from the userspace Proxier.
        glog.V(0).Info("Tearing down userspace rules.")
        userspace.CleanupLeftovers(iptInterface)
    } else {
        glog.V(0).Info("Using userspace Proxier.")
        // This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
        // our config.EndpointsConfigHandler.
        loadBalancer := userspace.NewLoadBalancerRR()
        // set EndpointsConfigHandler to our loadBalancer
        endpointsHandler = loadBalancer

        proxierUserspace, err := userspace.NewProxier(
            loadBalancer,
            net.ParseIP(config.BindAddress),
            iptInterface,
            *utilnet.ParsePortRangeOrDie(config.PortRange),
            config.IPTablesSyncPeriod.Duration,
            config.UDPIdleTimeout.Duration,
        )
        if err != nil {
            glog.Fatalf("Unable to create proxier: %v", err)
        }
        proxier = proxierUserspace
        // Remove artifacts from the pure-iptables Proxier.
        glog.V(0).Info("Tearing down pure-iptables proxy rules.")
        iptables.CleanupLeftovers(iptInterface)
    }
    iptInterface.AddReloadFunc(proxier.Sync)

    // Create configs (i.e. Watches for Services and Endpoints)
    // Note: RegisterHandler() calls need to happen before creation of Sources because sources
    // only notify on changes, and the initial update (on process start) may be lost if no handlers
    // are registered yet.
    serviceConfig := proxyconfig.NewServiceConfig()
    serviceConfig.RegisterHandler(proxier)

    endpointsConfig := proxyconfig.NewEndpointsConfig()
    endpointsConfig.RegisterHandler(endpointsHandler)

    proxyconfig.NewSourceAPI(
        client.Core().RESTClient(),
        config.ConfigSyncPeriod,
        serviceConfig.Channel("api"),
        endpointsConfig.Channel("api"),
    )

    config.NodeRef = &api.ObjectReference{
        Kind:      "Node",
        Name:      hostname,
        UID:       types.UID(hostname),
        Namespace: "",
    }

    conntracker := realConntracker{}

    return NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker, proxyMode)
}
// Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run(_ []string) error {
    protocol := utiliptables.ProtocolIpv4
    if s.BindAddress.To4() == nil {
        protocol = utiliptables.ProtocolIpv6
    }

    // remove iptables rules and exit
    if s.CleanupAndExit {
        execer := exec.New()
        ipt := utiliptables.New(execer, protocol)
        encounteredError := userspace.CleanupLeftovers(ipt)
        encounteredError = iptables.CleanupLeftovers(ipt) || encounteredError
        if encounteredError {
            return errors.New("Encountered an error while tearing down rules.")
        }
        return nil
    }

    // TODO(vmarmol): Use container config for this.
    oomAdjuster := oom.NewOomAdjuster()
    if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
        glog.V(2).Info(err)
    }

    // Run in its own container.
    if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
        glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
    } else {
        glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
    }

    // define api config source
    if s.Kubeconfig == "" && s.Master == "" {
        glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
    }

    // This creates a client, first loading any specified kubeconfig
    // file, and then overriding the Master flag, if non-empty.
    kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
        &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
    if err != nil {
        return err
    }

    client, err := client.New(kubeconfig)
    if err != nil {
        glog.Fatalf("Invalid API configuration: %v", err)
    }

    // Add event recorder
    Hostname := nodeutil.GetHostname(s.HostnameOverride)
    eventBroadcaster := record.NewBroadcaster()
    s.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: Hostname})
    eventBroadcaster.StartRecordingToSink(client.Events(""))

    s.nodeRef = &api.ObjectReference{
        Kind:      "Node",
        Name:      Hostname,
        UID:       types.UID(Hostname),
        Namespace: "",
    }

    // Birth Cry
    s.birthCry()

    serviceConfig := config.NewServiceConfig()
    endpointsConfig := config.NewEndpointsConfig()

    var proxier proxy.ProxyProvider
    var endpointsHandler config.EndpointsConfigHandler

    // guaranteed false on error, error only necessary for debugging
    shouldUseIptables, err := iptables.ShouldUseIptablesProxier()
    if err != nil {
        glog.Errorf("Can't determine whether to use iptables or userspace, using userspace proxier: %v", err)
    }
    if !s.ForceUserspaceProxy && shouldUseIptables {
        glog.V(2).Info("Using iptables Proxier.")

        execer := exec.New()
        ipt := utiliptables.New(execer, protocol)
        proxierIptables, err := iptables.NewProxier(ipt, execer, s.SyncPeriod, s.MasqueradeAll)
        if err != nil {
            glog.Fatalf("Unable to create proxier: %v", err)
        }
        proxier = proxierIptables
        endpointsHandler = proxierIptables
        // No turning back. Remove artifacts that might still exist from the userspace Proxier.
        glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
        userspace.CleanupLeftovers(ipt)
    } else {
        glog.V(2).Info("Using userspace Proxier.")
        // This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
        // our config.EndpointsConfigHandler.
        loadBalancer := userspace.NewLoadBalancerRR()
        // set EndpointsConfigHandler to our loadBalancer
        endpointsHandler = loadBalancer

        execer := exec.New()
        ipt := utiliptables.New(execer, protocol)
        proxierUserspace, err := userspace.NewProxier(loadBalancer, s.BindAddress, ipt, s.PortRange, s.SyncPeriod)
        if err != nil {
            glog.Fatalf("Unable to create proxier: %v", err)
        }
        proxier = proxierUserspace
        // Remove artifacts from the pure-iptables Proxier.
        glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
        iptables.CleanupLeftovers(ipt)
    }

    // Wire proxier to handle changes to services
    serviceConfig.RegisterHandler(proxier)
    // And wire endpointsHandler to handle changes to endpoints to services
    endpointsConfig.RegisterHandler(endpointsHandler)

    // Note: RegisterHandler() calls need to happen before creation of Sources because sources
    // only notify on changes, and the initial update (on process start) may be lost if no handlers
    // are registered yet.
    config.NewSourceAPI(
        client,
        30*time.Second,
        serviceConfig.Channel("api"),
        endpointsConfig.Channel("api"),
    )

    if s.HealthzPort > 0 {
        go util.Until(func() {
            err := http.ListenAndServe(s.HealthzBindAddress.String()+":"+strconv.Itoa(s.HealthzPort), nil)
            if err != nil {
                glog.Errorf("Starting health server failed: %v", err)
            }
        }, 5*time.Second, util.NeverStop)
    }

    // Just loop forever for now...
    proxier.SyncLoop()
    return nil
}
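// ProxyServer.Run above serves the healthz endpoint inside go util.Until(...),
// retrying every 5 seconds whenever the listener returns. Below is a minimal,
// standalone sketch of that retry pattern with the Kubernetes util helpers
// replaced by a plain goroutine loop; the address, handler path, and function
// name are illustrative assumptions, not the real kube-proxy wiring.
package main

import (
    "log"
    "net/http"
    "time"
)

// serveHealthzForever keeps trying to bind the health endpoint; if
// ListenAndServe returns (for example because the port is taken), it logs the
// error and retries after the given interval, mirroring
// util.Until(..., 5*time.Second, util.NeverStop).
func serveHealthzForever(addr string, retry time.Duration) {
    mux := http.NewServeMux()
    mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("ok"))
    })
    go func() {
        for {
            if err := http.ListenAndServe(addr, mux); err != nil {
                log.Printf("Starting health server failed: %v", err)
            }
            time.Sleep(retry)
        }
    }()
}

func main() {
    serveHealthzForever("127.0.0.1:10249", 5*time.Second)
    select {} // stand-in for proxier.SyncLoop(), which never returns
}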
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *ProxyServerConfig) (*ProxyServer, error) {
    protocol := utiliptables.ProtocolIpv4
    if config.BindAddress.To4() == nil {
        protocol = utiliptables.ProtocolIpv6
    }

    // Create an iptables util.
    execer := exec.New()
    dbus := utildbus.New()
    iptInterface := utiliptables.New(execer, dbus, protocol)

    // We omit creation of pretty much everything if we run in cleanup mode
    if config.CleanupAndExit {
        return &ProxyServer{
            Config:       config,
            IptInterface: iptInterface,
        }, nil
    }

    // TODO(vmarmol): Use container config for this.
    var oomAdjuster *oom.OOMAdjuster
    if config.OOMScoreAdj != 0 {
        oomAdjuster = oom.NewOOMAdjuster()
        if err := oomAdjuster.ApplyOOMScoreAdj(0, config.OOMScoreAdj); err != nil {
            glog.V(2).Info(err)
        }
    }

    if config.ResourceContainer != "" {
        // Run in its own container.
        if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
            glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
        } else {
            glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
        }
    }

    // Create a Kube Client
    // define api config source
    if config.Kubeconfig == "" && config.Master == "" {
        glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
    }
    // This creates a client, first loading any specified kubeconfig
    // file, and then overriding the Master flag, if non-empty.
    kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
        &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
    if err != nil {
        return nil, err
    }

    // Override kubeconfig qps/burst settings from flags
    kubeconfig.QPS = config.KubeApiQps
    kubeconfig.Burst = config.KubeApiBurst

    client, err := kubeclient.New(kubeconfig)
    if err != nil {
        glog.Fatalf("Invalid API configuration: %v", err)
    }

    // Create event recorder
    hostname := nodeutil.GetHostname(config.HostnameOverride)
    eventBroadcaster := record.NewBroadcaster()
    recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
    eventBroadcaster.StartRecordingToSink(client.Events(""))

    var proxier proxy.ProxyProvider
    var endpointsHandler proxyconfig.EndpointsConfigHandler

    useIptablesProxy := false
    if mayTryIptablesProxy(config.ProxyMode, client.Nodes(), hostname) {
        var err error
        // guaranteed false on error, error only necessary for debugging
        useIptablesProxy, err = iptables.ShouldUseIptablesProxier()
        if err != nil {
            glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
        }
    }

    if useIptablesProxy {
        glog.V(2).Info("Using iptables Proxier.")
        proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IptablesSyncPeriod, config.MasqueradeAll)
        if err != nil {
            glog.Fatalf("Unable to create proxier: %v", err)
        }
        proxier = proxierIptables
        endpointsHandler = proxierIptables
        // No turning back. Remove artifacts that might still exist from the userspace Proxier.
        glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
        userspace.CleanupLeftovers(iptInterface)
    } else {
        glog.V(2).Info("Using userspace Proxier.")
        // This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
        // our config.EndpointsConfigHandler.
        loadBalancer := userspace.NewLoadBalancerRR()
        // set EndpointsConfigHandler to our loadBalancer
        endpointsHandler = loadBalancer

        proxierUserspace, err := userspace.NewProxier(loadBalancer, config.BindAddress, iptInterface, config.PortRange, config.IptablesSyncPeriod)
        if err != nil {
            glog.Fatalf("Unable to create proxier: %v", err)
        }
        proxier = proxierUserspace
        // Remove artifacts from the pure-iptables Proxier.
        glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
        iptables.CleanupLeftovers(iptInterface)
    }
    iptInterface.AddReloadFunc(proxier.Sync)

    // Create configs (i.e. Watches for Services and Endpoints)
    // Note: RegisterHandler() calls need to happen before creation of Sources because sources
    // only notify on changes, and the initial update (on process start) may be lost if no handlers
    // are registered yet.
    serviceConfig := proxyconfig.NewServiceConfig()
    serviceConfig.RegisterHandler(proxier)

    endpointsConfig := proxyconfig.NewEndpointsConfig()
    endpointsConfig.RegisterHandler(endpointsHandler)

    proxyconfig.NewSourceAPI(
        client,
        config.ConfigSyncPeriod,
        serviceConfig.Channel("api"),
        endpointsConfig.Channel("api"),
    )

    config.nodeRef = &api.ObjectReference{
        Kind:      "Node",
        Name:      hostname,
        UID:       types.UID(hostname),
        Namespace: "",
    }

    return NewProxyServer(config, iptInterface, proxier, recorder)
}
// Run runs the specified ProxyServer. This should never exit.
func (s *ProxyServer) Run(_ []string) error {
    // TODO(vmarmol): Use container config for this.
    oomAdjuster := oom.NewOomAdjuster()
    if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
        glog.V(2).Info(err)
    }

    // Run in its own container.
    if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
        glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
    } else {
        glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
    }

    serviceConfig := config.NewServiceConfig()
    endpointsConfig := config.NewEndpointsConfig()

    protocol := iptables.ProtocolIpv4
    if s.BindAddress.To4() == nil {
        protocol = iptables.ProtocolIpv6
    }

    loadBalancer := userspace.NewLoadBalancerRR()
    proxier, err := userspace.NewProxier(loadBalancer, s.BindAddress, iptables.New(exec.New(), protocol), s.PortRange)
    if err != nil {
        glog.Fatalf("Unable to create proxier: %v", err)
    }

    // Wire proxier to handle changes to services
    serviceConfig.RegisterHandler(proxier)
    // And wire loadBalancer to handle changes to endpoints to services
    endpointsConfig.RegisterHandler(loadBalancer)

    // Note: RegisterHandler() calls need to happen before creation of Sources because sources
    // only notify on changes, and the initial update (on process start) may be lost if no handlers
    // are registered yet.

    // define api config source
    if s.Kubeconfig == "" && s.Master == "" {
        glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
    }

    // This creates a client, first loading any specified kubeconfig
    // file, and then overriding the Master flag, if non-empty.
    kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
        &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
    if err != nil {
        return err
    }

    client, err := client.New(kubeconfig)
    if err != nil {
        glog.Fatalf("Invalid API configuration: %v", err)
    }

    config.NewSourceAPI(
        client,
        30*time.Second,
        serviceConfig.Channel("api"),
        endpointsConfig.Channel("api"),
    )

    if s.HealthzPort > 0 {
        go util.Forever(func() {
            err := http.ListenAndServe(s.HealthzBindAddress.String()+":"+strconv.Itoa(s.HealthzPort), nil)
            if err != nil {
                glog.Errorf("Starting health server failed: %v", err)
            }
        }, 5*time.Second)
    }

    // Just loop forever for now...
    proxier.SyncLoop()
    return nil
}
// RunProxy starts the proxy
func (c *NodeConfig) RunProxy() {
    protocol := utiliptables.ProtocolIpv4
    bindAddr := net.ParseIP(c.ProxyConfig.BindAddress)
    if bindAddr.To4() == nil {
        protocol = utiliptables.ProtocolIpv6
    }

    portRange := utilnet.ParsePortRangeOrDie(c.ProxyConfig.PortRange)

    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(c.Client.Events(""))
    recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "kube-proxy", Host: c.KubeletConfig.NodeName})

    execer := kexec.New()
    dbus := utildbus.New()
    iptInterface := utiliptables.New(execer, dbus, protocol)

    var proxier proxy.ProxyProvider
    var endpointsHandler pconfig.EndpointsConfigHandler

    switch c.ProxyConfig.Mode {
    case componentconfig.ProxyModeIPTables:
        glog.V(0).Info("Using iptables Proxier.")
        if c.ProxyConfig.IPTablesMasqueradeBit == nil {
            // IPTablesMasqueradeBit must be specified or defaulted.
            glog.Fatalf("Unable to read IPTablesMasqueradeBit from config")
        }
        proxierIptables, err := iptables.NewProxier(iptInterface, execer, c.ProxyConfig.IPTablesSyncPeriod.Duration, c.ProxyConfig.MasqueradeAll, int(*c.ProxyConfig.IPTablesMasqueradeBit), c.ProxyConfig.ClusterCIDR)
        if err != nil {
            if c.Containerized {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy: %v\n When running in a container, you must run the container in the host network namespace with --net=host and with --privileged", err)
            } else {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
            }
        }
        proxier = proxierIptables
        endpointsHandler = proxierIptables
        // No turning back. Remove artifacts that might still exist from the userspace Proxier.
        glog.V(0).Info("Tearing down userspace rules.")
        userspace.CleanupLeftovers(iptInterface)
    case componentconfig.ProxyModeUserspace:
        glog.V(0).Info("Using userspace Proxier.")
        // This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
        // our config.EndpointsConfigHandler.
        loadBalancer := userspace.NewLoadBalancerRR()
        // set EndpointsConfigHandler to our loadBalancer
        endpointsHandler = loadBalancer

        proxierUserspace, err := userspace.NewProxier(
            loadBalancer,
            bindAddr,
            iptInterface,
            *portRange,
            c.ProxyConfig.IPTablesSyncPeriod.Duration,
            c.ProxyConfig.UDPIdleTimeout.Duration,
        )
        if err != nil {
            if c.Containerized {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy: %v\n When running in a container, you must run the container in the host network namespace with --net=host and with --privileged", err)
            } else {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
            }
        }
        proxier = proxierUserspace
        // Remove artifacts from the pure-iptables Proxier.
        glog.V(0).Info("Tearing down pure-iptables proxy rules.")
        iptables.CleanupLeftovers(iptInterface)
    default:
        glog.Fatalf("Unknown proxy mode %q", c.ProxyConfig.Mode)
    }

    // Create configs (i.e. Watches for Services and Endpoints)
    // Note: RegisterHandler() calls need to happen before creation of Sources because sources
    // only notify on changes, and the initial update (on process start) may be lost if no handlers
    // are registered yet.
    serviceConfig := pconfig.NewServiceConfig()

    if c.EnableUnidling {
        unidlingLoadBalancer := ouserspace.NewLoadBalancerRR()
        signaler := unidler.NewEventSignaler(recorder)
        unidlingUserspaceProxy, err := unidler.NewUnidlerProxier(unidlingLoadBalancer, bindAddr, iptInterface, execer, *portRange, c.ProxyConfig.IPTablesSyncPeriod.Duration, c.ProxyConfig.UDPIdleTimeout.Duration, signaler)
        if err != nil {
            if c.Containerized {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy: %v\n When running in a container, you must run the container in the host network namespace with --net=host and with --privileged", err)
            } else {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
            }
        }
        hybridProxier, err := hybrid.NewHybridProxier(unidlingLoadBalancer, unidlingUserspaceProxy, endpointsHandler, proxier, c.ProxyConfig.IPTablesSyncPeriod.Duration, serviceConfig)
        if err != nil {
            if c.Containerized {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy: %v\n When running in a container, you must run the container in the host network namespace with --net=host and with --privileged", err)
            } else {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
            }
        }
        endpointsHandler = hybridProxier
        iptInterface.AddReloadFunc(hybridProxier.Sync)
        serviceConfig.RegisterHandler(hybridProxier)
    }

    endpointsConfig := pconfig.NewEndpointsConfig()
    // customized handling registration that inserts a filter if needed
    if c.FilteringEndpointsHandler != nil {
        if err := c.FilteringEndpointsHandler.Start(endpointsHandler); err != nil {
            glog.Fatalf("error: node proxy plugin startup failed: %v", err)
        }
        endpointsHandler = c.FilteringEndpointsHandler
    }
    endpointsConfig.RegisterHandler(endpointsHandler)

    c.ServiceStore = pconfig.NewServiceStore(c.ServiceStore, serviceConfig.Channel("api"))
    c.EndpointsStore = pconfig.NewEndpointsStore(c.EndpointsStore, endpointsConfig.Channel("api"))
    // will be started by RunServiceStores

    recorder.Eventf(c.ProxyConfig.NodeRef, kapi.EventTypeNormal, "Starting", "Starting kube-proxy.")

    // periodically sync k8s iptables rules
    go utilwait.Forever(proxier.SyncLoop, 0)
    glog.Infof("Started Kubernetes Proxy on %s", c.ProxyConfig.BindAddress)
}
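// The mode switch in RunProxy above always produces two things: a proxier that
// drives the sync loop and an endpoints handler. The iptables proxier plays
// both roles, while the userspace proxier splits the endpoints role out to a
// round-robin load balancer. Below is a reduced sketch of that shape using
// stand-in interfaces and types; proxyProvider, endpointsHandler, and the
// concrete structs are illustrative, not the real kube-proxy or OpenShift APIs.
package main

import "fmt"

type proxyProvider interface {
    SyncLoop()
}

type endpointsHandler interface {
    OnEndpointsUpdate(endpoints []string)
}

// iptablesProxier satisfies both interfaces, like iptables.Proxier.
type iptablesProxier struct{}

func (p *iptablesProxier) SyncLoop() { fmt.Println("iptables: sync loop") }
func (p *iptablesProxier) OnEndpointsUpdate(endpoints []string) {
    fmt.Println("iptables: endpoints update", endpoints)
}

// userspaceProxier only drives the proxy loop; endpoints go to a separate
// round-robin load balancer, like userspace.LoadBalancerRR.
type userspaceProxier struct{}

func (p *userspaceProxier) SyncLoop() { fmt.Println("userspace: sync loop") }

type loadBalancerRR struct{}

func (lb *loadBalancerRR) OnEndpointsUpdate(endpoints []string) {
    fmt.Println("round-robin LB: endpoints update", endpoints)
}

func buildProxier(mode string) (proxyProvider, endpointsHandler, error) {
    switch mode {
    case "iptables":
        p := &iptablesProxier{}
        return p, p, nil // one object handles both roles
    case "userspace":
        return &userspaceProxier{}, &loadBalancerRR{}, nil // roles are split
    default:
        return nil, nil, fmt.Errorf("unknown proxy mode %q", mode)
    }
}

func main() {
    proxier, handler, err := buildProxier("iptables")
    if err != nil {
        panic(err)
    }
    handler.OnEndpointsUpdate([]string{"10.0.0.1:8080"})
    proxier.SyncLoop()
}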
// RunProxy starts the proxy
func (c *NodeConfig) RunProxy() {
    protocol := utiliptables.ProtocolIpv4
    bindAddr := net.ParseIP(c.ProxyConfig.BindAddress)
    if bindAddr.To4() == nil {
        protocol = utiliptables.ProtocolIpv6
    }

    portRange := utilnet.ParsePortRangeOrDie(c.ProxyConfig.PortRange)

    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(c.Client.Events(""))
    recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "kube-proxy", Host: c.KubeletConfig.NodeName})

    exec := kexec.New()
    dbus := utildbus.New()
    iptInterface := utiliptables.New(exec, dbus, protocol)

    var proxier proxy.ProxyProvider
    var endpointsHandler pconfig.EndpointsConfigHandler

    switch c.ProxyConfig.Mode {
    case "iptables":
        glog.V(0).Info("Using iptables Proxier.")
        proxierIptables, err := iptables.NewProxier(iptInterface, exec, c.ProxyConfig.IPTablesSyncPeriod.Duration, c.ProxyConfig.MasqueradeAll, *c.ProxyConfig.IPTablesMasqueradeBit)
        if err != nil {
            if c.Containerized {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy: %v\n When running in a container, you must run the container in the host network namespace with --net=host and with --privileged", err)
            } else {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
            }
        }
        proxier = proxierIptables
        endpointsHandler = proxierIptables
        // No turning back. Remove artifacts that might still exist from the userspace Proxier.
        glog.V(0).Info("Tearing down userspace rules. Errors here are acceptable.")
        userspace.CleanupLeftovers(iptInterface)
    case "userspace":
        glog.V(0).Info("Using userspace Proxier.")
        loadBalancer := userspace.NewLoadBalancerRR()
        endpointsHandler = loadBalancer

        proxierUserspace, err := userspace.NewProxier(loadBalancer, bindAddr, iptInterface, *portRange, c.ProxyConfig.IPTablesSyncPeriod.Duration, c.ProxyConfig.UDPIdleTimeout.Duration)
        if err != nil {
            if c.Containerized {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy: %v\n When running in a container, you must run the container in the host network namespace with --net=host and with --privileged", err)
            } else {
                glog.Fatalf("error: Could not initialize Kubernetes Proxy. You must run this process as root to use the service proxy: %v", err)
            }
        }
        proxier = proxierUserspace
        // Remove artifacts from the pure-iptables Proxier.
        glog.V(0).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
        iptables.CleanupLeftovers(iptInterface)
    default:
        glog.Fatalf("Unknown proxy mode %q", c.ProxyConfig.Mode)
    }

    iptInterface.AddReloadFunc(proxier.Sync)

    // Create configs (i.e. Watches for Services and Endpoints)
    // Note: RegisterHandler() calls need to happen before creation of Sources because sources
    // only notify on changes, and the initial update (on process start) may be lost if no handlers
    // are registered yet.
    serviceConfig := pconfig.NewServiceConfig()
    serviceConfig.RegisterHandler(proxier)

    endpointsConfig := pconfig.NewEndpointsConfig()
    if c.FilteringEndpointsHandler == nil {
        endpointsConfig.RegisterHandler(endpointsHandler)
    } else {
        c.FilteringEndpointsHandler.SetBaseEndpointsHandler(endpointsHandler)
        endpointsConfig.RegisterHandler(c.FilteringEndpointsHandler)
    }

    pconfig.NewSourceAPI(
        c.Client,
        c.ProxyConfig.ConfigSyncPeriod,
        serviceConfig.Channel("api"),
        endpointsConfig.Channel("api"))

    recorder.Eventf(c.ProxyConfig.NodeRef, kapi.EventTypeNormal, "Starting", "Starting kube-proxy.")

    glog.Infof("Started Kubernetes Proxy on %s", c.ProxyConfig.BindAddress)
}
// RunProxy starts the proxy
func (c *NodeConfig) RunProxy(endpointsFilterer FilteringEndpointsConfigHandler) {
    // initialize kube proxy
    serviceConfig := pconfig.NewServiceConfig()
    endpointsConfig := pconfig.NewEndpointsConfig()
    loadBalancer := proxy.NewLoadBalancerRR()
    if endpointsFilterer == nil {
        endpointsConfig.RegisterHandler(loadBalancer)
    } else {
        endpointsFilterer.SetBaseEndpointsHandler(loadBalancer)
        endpointsConfig.RegisterHandler(endpointsFilterer)
    }

    host, _, err := net.SplitHostPort(c.BindAddress)
    if err != nil {
        glog.Fatalf("The provided value to bind to must be an ip:port %q", c.BindAddress)
    }
    ip := net.ParseIP(host)
    if ip == nil {
        glog.Fatalf("The provided value to bind to must be an ip:port: %q", c.BindAddress)
    }

    protocol := iptables.ProtocolIpv4
    if ip.To4() == nil {
        protocol = iptables.ProtocolIpv6
    }

    syncPeriod, err := time.ParseDuration(c.IPTablesSyncPeriod)
    if err != nil {
        glog.Fatalf("Cannot parse the provided ip-tables sync period (%s) : %v", c.IPTablesSyncPeriod, err)
    }

    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartRecordingToSink(c.Client.Events(""))
    recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "kube-proxy", Host: c.KubeletConfig.NodeName})

    nodeRef := &kapi.ObjectReference{
        Kind: "Node",
        Name: c.KubeletConfig.NodeName,
    }

    go util.Forever(func() {
        dbus := utildbus.New()
        iptables := iptables.New(kexec.New(), dbus, protocol)
        proxier, err := proxy.NewProxier(loadBalancer, ip, iptables, util.PortRange{}, syncPeriod)
        if err != nil {
            switch {
            // conflicting use of iptables, retry
            case proxy.IsProxyLocked(err):
                glog.Errorf("Unable to start proxy, will retry: %v", err)
                return
            // on a system without iptables
            case strings.Contains(err.Error(), "executable file not found in path"):
                glog.V(4).Infof("kube-proxy initialization error: %v", err)
                glog.Warningf("WARNING: Could not find the iptables command. The service proxy requires iptables and will be disabled.")
            case err == proxy.ErrProxyOnLocalhost:
                glog.Warningf("WARNING: The service proxy cannot bind to localhost and will be disabled.")
            case strings.Contains(err.Error(), "you must be root"):
                glog.Warningf("WARNING: Could not modify iptables. You must run this process as root to use the service proxy.")
            default:
                glog.Warningf("WARNING: Could not modify iptables. You must run this process as root to use the service proxy: %v", err)
            }
            select {}
        }
        // register the reload hook only once the proxier was created successfully
        iptables.AddReloadFunc(proxier.Sync)

        pconfig.NewSourceAPI(
            c.Client,
            10*time.Minute,
            serviceConfig.Channel("api"),
            endpointsConfig.Channel("api"))

        serviceConfig.RegisterHandler(proxier)

        recorder.Eventf(nodeRef, "Starting", "Starting kube-proxy.")
        glog.Infof("Started Kubernetes Proxy on %s", host)
        select {}
    }, 5*time.Second)
}