func ensureProcessInContainer(pid int, oomScoreAdj int, manager *fs.Manager) error {
	if runningInHost, err := isProcessRunningInHost(pid); err != nil {
		// Err on the side of caution. Avoid moving the process unless we are able to identify its context.
		return err
	} else if !runningInHost {
		// Process is running inside a container. Don't touch that.
		return nil
	}

	var errs []error
	cont, err := getContainer(pid)
	if err != nil {
		errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
	}

	if cont != manager.Cgroups.Name {
		err = manager.Apply(pid)
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q", pid, cont, manager.Cgroups.Name))
		}
	}

	// Also apply oom-score-adj to the process.
	oomAdjuster := oom.NewOOMAdjuster()
	if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
		errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
	}
	return utilerrors.NewAggregate(errs)
}
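
The isProcessRunningInHost helper is defined elsewhere; a minimal sketch of one way to implement it, assuming the standard Linux /proc layout (compare the PID namespace of the target process against that of init), could look like this, using only "fmt" and "os":

// isProcessRunningInHost reports whether pid runs in the host's PID
// namespace by comparing /proc/<pid>/ns/pid against /proc/1/ns/pid.
// Sketch only; the real helper's error handling may differ.
func isProcessRunningInHost(pid int) (bool, error) {
	initPidNs, err := os.Readlink("/proc/1/ns/pid")
	if err != nil {
		return false, fmt.Errorf("failed to find pid namespace of init process: %v", err)
	}
	processPidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", pid))
	if err != nil {
		return false, fmt.Errorf("failed to find pid namespace of process %d: %v", pid, err)
	}
	return initPidNs == processPidNs, nil
}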
// ensureDockerInContainer ensures that the Docker daemon is in the desired container.
func ensureDockerInContainer(cadvisor cadvisor.Interface, oomScoreAdj int, manager *fs.Manager) error {
	// What container is Docker in?
	out, err := exec.Command("pidof", "docker").Output()
	if err != nil {
		return fmt.Errorf("failed to find pid of Docker container: %v", err)
	}

	// The output of pidof is a space-separated list of pids.
	// Docker may fork, so there can be more than one result.
	pids := []int{}
	for _, pidStr := range strings.Split(strings.TrimSpace(string(out)), " ") {
		pid, err := strconv.Atoi(pidStr)
		if err != nil {
			continue
		}
		pids = append(pids, pid)
	}

	// Move if the pid is not already in the desired container.
	errs := []error{}
	for _, pid := range pids {
		if runningInHost, err := isProcessRunningInHost(pid); err != nil {
			errs = append(errs, err)
			// Err on the side of caution. Avoid moving the docker daemon unless we are able to identify its context.
			continue
		} else if !runningInHost {
			// Docker daemon is running inside a container. Don't touch that.
			continue
		}

		cont, err := getContainer(pid)
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
		}

		if cont != manager.Cgroups.Name {
			err = manager.Apply(pid)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q", pid, cont, manager.Cgroups.Name))
			}
		}

		// Also apply oom-score-adj to processes
		oomAdjuster := oom.NewOOMAdjuster()
		if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
			errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
		}
	}

	return utilerrors.NewAggregate(errs)
}
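
A later revision of this function (shown in example #3 below) replaces the inline pidof handling with a getPidsForProcess helper; a sketch consistent with the loop above, using "fmt", "os/exec", "strconv", and "strings":

// getPidsForProcess returns the pids that pidof reports for the named
// process; pidof prints a space-separated list, and non-numeric tokens
// are skipped. Reconstructed from the inline loop above.
func getPidsForProcess(name string) ([]int, error) {
	out, err := exec.Command("pidof", name).Output()
	if err != nil {
		return nil, fmt.Errorf("failed to find pid of %q: %v", name, err)
	}
	pids := []int{}
	for _, pidStr := range strings.Fields(string(out)) {
		pid, err := strconv.Atoi(pidStr)
		if err != nil {
			continue
		}
		pids = append(pids, pid)
	}
	return pids, nil
}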
Example #3
// UnsecuredKubeletDeps returns a KubeletDeps suitable for being run, or an error if the server setup
// is not valid.  It will not start any background processes, and does not include authentication/authorization
func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error) {

	// Initialize the TLS Options
	tlsOptions, err := InitializeTLS(&s.KubeletConfiguration)
	if err != nil {
		return nil, err
	}

	mounter := mount.New(s.ExperimentalMounterPath)
	var writer kubeio.Writer = &kubeio.StdWriter{}
	if s.Containerized {
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
		writer = &kubeio.NsenterWriter{}
	}

	var dockerClient dockertools.DockerInterface
	if s.ContainerRuntime == "docker" {
		dockerClient = dockertools.ConnectToDockerOrDie(s.DockerEndpoint, s.RuntimeRequestTimeout.Duration,
			s.ImagePullProgressDeadline.Duration)
	} else {
		dockerClient = nil
	}

	return &kubelet.KubeletDeps{
		Auth:               nil, // default does not enforce auth[nz]
		CAdvisorInterface:  nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here
		Cloud:              nil, // cloud provider might start background processes
		ContainerManager:   nil,
		DockerClient:       dockerClient,
		KubeClient:         nil,
		ExternalKubeClient: nil,
		Mounter:            mounter,
		NetworkPlugins:     ProbeNetworkPlugins(s.NetworkPluginDir, s.CNIConfDir, s.CNIBinDir),
		OOMAdjuster:        oom.NewOOMAdjuster(),
		OSInterface:        kubecontainer.RealOS{},
		Writer:             writer,
		VolumePlugins:      ProbeVolumePlugins(s.VolumePluginDir),
		TLSOptions:         tlsOptions,
	}, nil
}
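
A hedged usage sketch: a caller builds the deps and later fills in the fields left nil above. options.NewKubeletServer is an assumed constructor for the flag-backed server options; the real entry point wires in more than this.

func newUnsecuredDeps() (*kubelet.KubeletDeps, error) {
	s := options.NewKubeletServer() // assumed constructor with defaults
	deps, err := UnsecuredKubeletDeps(s)
	if err != nil {
		return nil, err
	}
	// The nil fields (CAdvisorInterface, Cloud, ContainerManager, ...) are
	// populated later, once starting background processes is acceptable.
	return deps, nil
}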
// ensureDockerInContainer ensures that the Docker daemon is in the desired container.
func ensureDockerInContainer(cadvisor cadvisor.Interface, oomScoreAdj int, manager *fs.Manager) error {
	pids, err := getPidsForProcess("docker")
	if err != nil {
		return err
	}
	// Move if the pid is not already in the desired container.
	errs := []error{}
	for _, pid := range pids {
		if runningInHost, err := isProcessRunningInHost(pid); err != nil {
			errs = append(errs, err)
			// Err on the side of caution. Avoid moving the docker daemon unless we are able to identify its context.
			continue
		} else if !runningInHost {
			// Docker daemon is running inside a container. Don't touch that.
			continue
		}

		cont, err := getContainer(pid)
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
		}

		if cont != manager.Cgroups.Name {
			err = manager.Apply(pid)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q", pid, cont, manager.Cgroups.Name))
			}
		}

		// Also apply oom-score-adj to processes
		oomAdjuster := oom.NewOOMAdjuster()
		if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
			errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
		}
	}

	return utilerrors.NewAggregate(errs)
}
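
The getContainer helper is also defined elsewhere; one way to recover a process's cgroup path from /proc, sketched with only the standard library ("bufio", "fmt", "os", "strings"). The real helper goes through libcontainer's cgroup parsing, and picking the cpu subsystem is an assumption here.

// getContainer returns the cgroup path of the given process by scanning
// /proc/<pid>/cgroup for the cpu subsystem. Sketch only.
func getContainer(pid int) (string, error) {
	f, err := os.Open(fmt.Sprintf("/proc/%d/cgroup", pid))
	if err != nil {
		return "", err
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// Each line has the form "hierarchy-ID:subsystem[,subsystem...]:path".
		parts := strings.SplitN(scanner.Text(), ":", 3)
		if len(parts) != 3 {
			continue
		}
		for _, subsys := range strings.Split(parts[1], ",") {
			if subsys == "cpu" {
				return parts[2], nil
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return "", err
	}
	return "", fmt.Errorf("cpu cgroup not found for process %d", pid)
}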
Example #5
// Run runs the specified KubeletExecutorServer.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
	rand.Seed(time.Now().UTC().UnixNano())

	oomAdjuster := oom.NewOOMAdjuster()
	if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil {
		log.Info(err)
	}

	// Use the empty string for the docker and system containers (i.e. cgroup
	// paths). This stops the kubelet from taking any control over other
	// system processes.
	s.SystemContainer = ""
	s.DockerDaemonContainer = ""

	// create apiserver client
	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil {
		// required for k8sm since we need to send api.Binding information
		// back to the apiserver
		log.Fatalf("No API client: %v", err)
	}

	log.Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	cAdvisorInterface, err := cadvisor.New(s.CAdvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	//TODO(jdef) intentionally NOT initializing a cloud provider here since:
	//(a) the kubelet doesn't actually use it
	//(b) we don't need to create N-kubelet connections to zookeeper for no good reason
	//cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	//log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	hostPIDSources, err := kubelet.GetValidatedSources(strings.Split(s.HostPIDSources, ","))
	if err != nil {
		return err
	}

	hostIPCSources, err := kubelet.GetValidatedSources(strings.Split(s.HostIPCSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}
	mounter := mount.New()
	if s.Containerized {
		log.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = &mount.NsenterMounter{}
	}

	var writer utilio.Writer = &utilio.StdWriter{}
	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		writer = &utilio.NsenterWriter{}
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		log.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	kcfg := app.KubeletConfig{
		Address:           s.Address,
		AllowPrivileged:   s.AllowPrivileged,
		CAdvisorInterface: cAdvisorInterface,
		CgroupRoot:        s.CgroupRoot,
		Cloud:             nil, // TODO(jdef) Cloud, specifying null here because we don't want all kubelets polling mesos-master; need to account for this in the cloudprovider impl
		ClusterDNS:        s.ClusterDNS,
		ClusterDomain:     s.ClusterDomain,
		// ConfigFile: ""
		ConfigureCBR0:           s.ConfigureCBR0,
		ContainerRuntime:        s.ContainerRuntime,
		CPUCFSQuota:             s.CPUCFSQuota,
		DiskSpacePolicy:         diskSpacePolicy,
		DockerClient:            dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		DockerDaemonContainer:   s.DockerDaemonContainer,
		DockerExecHandler:       dockerExecHandler,
		EnableDebuggingHandlers: s.EnableDebuggingHandlers,
		EnableServer:            s.EnableServer,
		EventBurst:              s.EventBurst,
		EventRecordQPS:          s.EventRecordQPS,
		FileCheckFrequency:      s.FileCheckFrequency,
		HostnameOverride:        s.HostnameOverride,
		HostNetworkSources:      hostNetworkSources,
		HostPIDSources:          hostPIDSources,
		HostIPCSources:          hostIPCSources,
		// HTTPCheckFrequency
		ImageGCPolicy: imageGCPolicy,
		KubeClient:    apiclient,
		// ManifestURL: ""
		ManifestURLHeader:         manifestURLHeader,
		MasterServiceNamespace:    s.MasterServiceNamespace,
		MaxContainerCount:         s.MaxContainerCount,
		MaxOpenFiles:              s.MaxOpenFiles,
		MaxPerPodContainerCount:   s.MaxPerPodContainerCount,
		MaxPods:                   s.MaxPods,
		MinimumGCAge:              s.MinimumGCAge,
		Mounter:                   mounter,
		NetworkPluginName:         s.NetworkPluginName,
		NetworkPlugins:            app.ProbeNetworkPlugins(s.NetworkPluginDir),
		NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
		OOMAdjuster:               oomAdjuster,
		OSInterface:               kubecontainer.RealOS{},
		PodCIDR:                   s.PodCIDR,
		PodInfraContainerImage:    s.PodInfraContainerImage,
		Port:              s.Port,
		ReadOnlyPort:      s.ReadOnlyPort,
		RegisterNode:      s.RegisterNode,
		RegistryBurst:     s.RegistryBurst,
		RegistryPullQPS:   s.RegistryPullQPS,
		ResolverConfig:    s.ResolverConfig,
		ResourceContainer: s.ResourceContainer,
		RootDirectory:     s.RootDirectory,
		Runonce:           s.RunOnce,
		// StandaloneMode: false
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		SyncFrequency:                  s.SyncFrequency,
		SystemContainer:                s.SystemContainer,
		TLSOptions:                     tlsOptions,
		VolumePlugins:                  app.ProbeVolumePlugins(),
		Writer:                         writer,
	}

	kcfg.NodeName = kcfg.Hostname

	kcfg.Builder = app.KubeletBuilder(func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
		return s.createAndInitKubelet(kc, hks, clientConfig)
	})

	err = app.RunKubelet(&kcfg)
	if err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				log.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}

	// Block until the executor is shut down or commits to shutting down.
	select {}
}
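
Note that the manifest-url-header parsing in Run above accepts exactly one colon, so a header whose value itself contains a colon is rejected. Illustrative values (not from the original source):

// How the strings.Split in Run treats --manifest-url-header values:
//
//	strings.Split("X-Experiment:on", ":")          // ["X-Experiment", "on"] -> accepted
//	strings.Split("Authorization:Bearer a:b", ":") // three pieces -> rejected with an error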
Example #6
// UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
// is not valid.  It will not start any background processes, and does not include authentication/authorization
func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
	hostNetworkSources, err := kubetypes.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return nil, err
	}

	hostPIDSources, err := kubetypes.GetValidatedSources(strings.Split(s.HostPIDSources, ","))
	if err != nil {
		return nil, err
	}

	hostIPCSources, err := kubetypes.GetValidatedSources(strings.Split(s.HostIPCSources, ","))
	if err != nil {
		return nil, err
	}

	mounter := mount.New()
	var writer io.Writer = &io.StdWriter{}
	if s.Containerized {
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
		writer = &io.NsenterWriter{}
	}

	chmodRunner := chmod.New()
	chownRunner := chown.New()

	tlsOptions, err := InitializeTLS(s)
	if err != nil {
		return nil, err
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return nil, fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	reservation, err := parseReservation(s.KubeReserved, s.SystemReserved)
	if err != nil {
		return nil, err
	}

	return &KubeletConfig{
		Address:                   s.Address,
		AllowPrivileged:           s.AllowPrivileged,
		Auth:                      nil, // default does not enforce auth[nz]
		CAdvisorInterface:         nil, // launches background processes, not set here
		CgroupRoot:                s.CgroupRoot,
		Cloud:                     nil, // cloud provider might start background processes
		ClusterDNS:                s.ClusterDNS,
		ClusterDomain:             s.ClusterDomain,
		ConfigFile:                s.Config,
		ConfigureCBR0:             s.ConfigureCBR0,
		ContainerManager:          nil,
		ContainerRuntime:          s.ContainerRuntime,
		CPUCFSQuota:               s.CPUCFSQuota,
		DiskSpacePolicy:           diskSpacePolicy,
		DockerClient:              dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		DockerDaemonContainer:     s.DockerDaemonContainer,
		DockerExecHandler:         dockerExecHandler,
		EnableDebuggingHandlers:   s.EnableDebuggingHandlers,
		EnableServer:              s.EnableServer,
		EventBurst:                s.EventBurst,
		EventRecordQPS:            s.EventRecordQPS,
		FileCheckFrequency:        s.FileCheckFrequency,
		HostnameOverride:          s.HostnameOverride,
		HostNetworkSources:        hostNetworkSources,
		HostPIDSources:            hostPIDSources,
		HostIPCSources:            hostIPCSources,
		HTTPCheckFrequency:        s.HTTPCheckFrequency,
		ImageGCPolicy:             imageGCPolicy,
		KubeClient:                nil,
		ManifestURL:               s.ManifestURL,
		ManifestURLHeader:         manifestURLHeader,
		MasterServiceNamespace:    s.MasterServiceNamespace,
		MaxContainerCount:         s.MaxContainerCount,
		MaxOpenFiles:              s.MaxOpenFiles,
		MaxPerPodContainerCount:   s.MaxPerPodContainerCount,
		MaxPods:                   s.MaxPods,
		MinimumGCAge:              s.MinimumGCAge,
		Mounter:                   mounter,
		ChownRunner:               chownRunner,
		ChmodRunner:               chmodRunner,
		NetworkPluginName:         s.NetworkPluginName,
		NetworkPlugins:            ProbeNetworkPlugins(s.NetworkPluginDir),
		NodeLabels:                s.NodeLabels,
		NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
		OOMAdjuster:               oom.NewOOMAdjuster(),
		OSInterface:               kubecontainer.RealOS{},
		PodCIDR:                   s.PodCIDR,
		ReconcileCIDR:             s.ReconcileCIDR,
		PodInfraContainerImage:    s.PodInfraContainerImage,
		Port:                           s.Port,
		ReadOnlyPort:                   s.ReadOnlyPort,
		RegisterNode:                   s.RegisterNode,
		RegisterSchedulable:            s.RegisterSchedulable,
		RegistryBurst:                  s.RegistryBurst,
		RegistryPullQPS:                s.RegistryPullQPS,
		ResolverConfig:                 s.ResolverConfig,
		Reservation:                    *reservation,
		ResourceContainer:              s.ResourceContainer,
		RktPath:                        s.RktPath,
		RktStage1Image:                 s.RktStage1Image,
		RootDirectory:                  s.RootDirectory,
		Runonce:                        s.RunOnce,
		SerializeImagePulls:            s.SerializeImagePulls,
		StandaloneMode:                 (len(s.APIServerList) == 0),
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		SyncFrequency:                  s.SyncFrequency,
		SystemContainer:                s.SystemContainer,
		TLSOptions:                     tlsOptions,
		Writer:                         writer,
		VolumePlugins:                  ProbeVolumePlugins(s.VolumePluginDir),
		OutOfDiskTransitionFrequency:   s.OutOfDiskTransitionFrequency,

		ExperimentalFlannelOverlay: s.ExperimentalFlannelOverlay,
		NodeIP: s.NodeIP,
	}, nil
}
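
parseReservation is not shown in these examples; a hedged sketch of the per-list parsing it must perform, turning a flag map such as "cpu=200m,memory=512Mi" into an api.ResourceList. resource.ParseQuantity's signature has varied across releases; the value-returning form is assumed here.

// parseResourceList converts a map of resource names to quantity strings
// into an api.ResourceList. Sketch only; names and signatures are assumptions.
func parseResourceList(m map[string]string) (api.ResourceList, error) {
	rl := make(api.ResourceList)
	for k, v := range m {
		q, err := resource.ParseQuantity(v)
		if err != nil {
			return nil, fmt.Errorf("failed to parse quantity %q for resource %q: %v", v, k, err)
		}
		rl[api.ResourceName(k)] = q
	}
	return rl, nil
}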
Example #7
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, error) {
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(config.KubeProxyConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}
	protocol := utiliptables.ProtocolIpv4
	if net.ParseIP(config.BindAddress).To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}

	// Create an iptables utility.
	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)

	// We omit creation of pretty much everything if we run in cleanup mode
	if config.CleanupAndExit {
		return &ProxyServer{
			Config:       config,
			IptInterface: iptInterface,
		}, nil
	}

	// TODO(vmarmol): Use container config for this.
	var oomAdjuster *oom.OOMAdjuster
	if config.OOMScoreAdj != nil {
		oomAdjuster = oom.NewOOMAdjuster()
		if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*config.OOMScoreAdj)); err != nil {
			glog.V(2).Info(err)
		}
	}

	if config.ResourceContainer != "" {
		// Run in its own container.
		if err := resourcecontainer.RunInResourceContainer(config.ResourceContainer); err != nil {
			glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
		} else {
			glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
		}
	}

	// Create a Kube client (this defines the API config source).
	if config.Kubeconfig == "" && config.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
	if err != nil {
		return nil, err
	}

	kubeconfig.ContentType = config.ContentType
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = config.KubeAPIQPS
	kubeconfig.Burst = int(config.KubeAPIBurst)

	client, err := clientset.NewForConfig(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	// Create event recorder
	hostname := nodeutil.GetHostname(config.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})

	var proxier proxy.ProxyProvider
	var endpointsHandler proxyconfig.EndpointsConfigHandler

	proxyMode := getProxyMode(string(config.Mode), client.Core().Nodes(), hostname, iptInterface, iptables.LinuxKernelCompatTester{})
	if proxyMode == proxyModeIPTables {
		glog.V(0).Info("Using iptables Proxier.")
		if config.IPTablesMasqueradeBit == nil {
			// IPTablesMasqueradeBit must be specified or defaulted.
			return nil, fmt.Errorf("Unable to read IPTablesMasqueradeBit from config")
		}
		proxierIPTables, err := iptables.NewProxier(iptInterface, utilsysctl.New(), execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll, int(*config.IPTablesMasqueradeBit), config.ClusterCIDR, hostname, getNodeIP(client, hostname))
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIPTables
		endpointsHandler = proxierIPTables
		// No turning back. Remove artifacts that might still exist from the userspace Proxier.
		glog.V(0).Info("Tearing down userspace rules.")
		userspace.CleanupLeftovers(iptInterface)
	} else {
		glog.V(0).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer, which NewProxier needs; it also has
		// methods beyond what our config.EndpointsConfigHandler requires.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer

		proxierUserspace, err := userspace.NewProxier(
			loadBalancer,
			net.ParseIP(config.BindAddress),
			iptInterface,
			*utilnet.ParsePortRangeOrDie(config.PortRange),
			config.IPTablesSyncPeriod.Duration,
			config.UDPIdleTimeout.Duration,
		)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierUserspace
		// Remove artifacts from the pure-iptables Proxier.
		glog.V(0).Info("Tearing down pure-iptables proxy rules.")
		iptables.CleanupLeftovers(iptInterface)
	}
	iptInterface.AddReloadFunc(proxier.Sync)

	// Create configs (i.e. Watches for Services and Endpoints)
	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.
	serviceConfig := proxyconfig.NewServiceConfig()
	serviceConfig.RegisterHandler(proxier)

	endpointsConfig := proxyconfig.NewEndpointsConfig()
	endpointsConfig.RegisterHandler(endpointsHandler)

	proxyconfig.NewSourceAPI(
		client.Core().RESTClient(),
		config.ConfigSyncPeriod,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	config.NodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      hostname,
		UID:       types.UID(hostname),
		Namespace: "",
	}

	conntracker := realConntracker{}

	return NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker, proxyMode)
}
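
getNodeIP, referenced when constructing the iptables proxier above, is defined elsewhere; a plausible sketch that looks the node up through the client and falls back to nil on failure. The Get signature varies across client versions (the single-argument form is assumed), and nodeutil.GetNodeHostIP is assumed to pick the preferred address from the node's status.

// getNodeIP returns the IP of the named node, or nil if it cannot be
// determined. Sketch only.
func getNodeIP(client clientset.Interface, hostname string) net.IP {
	node, err := client.Core().Nodes().Get(hostname)
	if err != nil {
		glog.Warningf("Failed to retrieve node info: %v", err)
		return nil
	}
	nodeIP, err := nodeutil.GetNodeHostIP(node)
	if err != nil {
		glog.Warningf("Failed to retrieve node IP: %v", err)
		return nil
	}
	return nodeIP
}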
Example #8
// UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
// is not valid.  It will not start any background processes, and does not include authentication/authorization
func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
	hostNetworkSources, err := kubetypes.GetValidatedSources(s.HostNetworkSources)
	if err != nil {
		return nil, err
	}

	hostPIDSources, err := kubetypes.GetValidatedSources(s.HostPIDSources)
	if err != nil {
		return nil, err
	}

	hostIPCSources, err := kubetypes.GetValidatedSources(s.HostIPCSources)
	if err != nil {
		return nil, err
	}

	mounter := mount.New()
	var writer io.Writer = &io.StdWriter{}
	if s.Containerized {
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
		writer = &io.NsenterWriter{}
	}

	tlsOptions, err := InitializeTLS(s)
	if err != nil {
		return nil, err
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	imageGCPolicy := images.ImageGCPolicy{
		MinAge:               s.ImageMinimumGCAge.Duration,
		HighThresholdPercent: int(s.ImageGCHighThresholdPercent),
		LowThresholdPercent:  int(s.ImageGCLowThresholdPercent),
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: int(s.LowDiskSpaceThresholdMB),
		RootFreeDiskMB:   int(s.LowDiskSpaceThresholdMB),
	}

	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return nil, fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	reservation, err := parseReservation(s.KubeReserved, s.SystemReserved)
	if err != nil {
		return nil, err
	}

	thresholds, err := eviction.ParseThresholdConfig(s.EvictionHard, s.EvictionSoft, s.EvictionSoftGracePeriod, s.EvictionMinimumReclaim)
	if err != nil {
		return nil, err
	}
	evictionConfig := eviction.Config{
		PressureTransitionPeriod: s.EvictionPressureTransitionPeriod.Duration,
		MaxPodGracePeriodSeconds: int64(s.EvictionMaxPodGracePeriod),
		Thresholds:               thresholds,
	}

	return &KubeletConfig{
		Address:                      net.ParseIP(s.Address),
		AllowPrivileged:              s.AllowPrivileged,
		Auth:                         nil, // default does not enforce auth[nz]
		CAdvisorInterface:            nil, // launches background processes, not set here
		VolumeStatsAggPeriod:         s.VolumeStatsAggPeriod.Duration,
		CgroupRoot:                   s.CgroupRoot,
		Cloud:                        nil, // cloud provider might start background processes
		ClusterDNS:                   net.ParseIP(s.ClusterDNS),
		ClusterDomain:                s.ClusterDomain,
		ConfigFile:                   s.Config,
		ConfigureCBR0:                s.ConfigureCBR0,
		ContainerManager:             nil,
		ContainerRuntime:             s.ContainerRuntime,
		RuntimeRequestTimeout:        s.RuntimeRequestTimeout.Duration,
		CPUCFSQuota:                  s.CPUCFSQuota,
		DiskSpacePolicy:              diskSpacePolicy,
		DockerClient:                 dockertools.ConnectToDockerOrDie(s.DockerEndpoint, s.RuntimeRequestTimeout.Duration), // TODO(random-liu): Set RuntimeRequestTimeout for rkt.
		RuntimeCgroups:               s.RuntimeCgroups,
		DockerExecHandler:            dockerExecHandler,
		EnableControllerAttachDetach: s.EnableControllerAttachDetach,
		EnableCustomMetrics:          s.EnableCustomMetrics,
		EnableDebuggingHandlers:      s.EnableDebuggingHandlers,
		CgroupsPerQOS:                s.CgroupsPerQOS,
		EnableServer:                 s.EnableServer,
		EventBurst:                   int(s.EventBurst),
		EventRecordQPS:               float32(s.EventRecordQPS),
		FileCheckFrequency:           s.FileCheckFrequency.Duration,
		HostnameOverride:             s.HostnameOverride,
		HostNetworkSources:           hostNetworkSources,
		HostPIDSources:               hostPIDSources,
		HostIPCSources:               hostIPCSources,
		HTTPCheckFrequency:           s.HTTPCheckFrequency.Duration,
		ImageGCPolicy:                imageGCPolicy,
		KubeClient:                   nil,
		ManifestURL:                  s.ManifestURL,
		ManifestURLHeader:            manifestURLHeader,
		MasterServiceNamespace:       s.MasterServiceNamespace,
		MaxContainerCount:            int(s.MaxContainerCount),
		MaxOpenFiles:                 uint64(s.MaxOpenFiles),
		MaxPerPodContainerCount:      int(s.MaxPerPodContainerCount),
		MaxPods:                      int(s.MaxPods),
		NvidiaGPUs:                   int(s.NvidiaGPUs),
		MinimumGCAge:                 s.MinimumGCAge.Duration,
		Mounter:                      mounter,
		NetworkPluginName:            s.NetworkPluginName,
		NetworkPlugins:               ProbeNetworkPlugins(s.NetworkPluginDir),
		NodeLabels:                   s.NodeLabels,
		NodeStatusUpdateFrequency:    s.NodeStatusUpdateFrequency.Duration,
		NonMasqueradeCIDR:            s.NonMasqueradeCIDR,
		OOMAdjuster:                  oom.NewOOMAdjuster(),
		OSInterface:                  kubecontainer.RealOS{},
		PodCIDR:                      s.PodCIDR,
		ReconcileCIDR:                s.ReconcileCIDR,
		PodInfraContainerImage:       s.PodInfraContainerImage,
		Port:                           uint(s.Port),
		ReadOnlyPort:                   uint(s.ReadOnlyPort),
		RegisterNode:                   s.RegisterNode,
		RegisterSchedulable:            s.RegisterSchedulable,
		RegistryBurst:                  int(s.RegistryBurst),
		RegistryPullQPS:                float64(s.RegistryPullQPS),
		ResolverConfig:                 s.ResolverConfig,
		Reservation:                    *reservation,
		KubeletCgroups:                 s.KubeletCgroups,
		RktPath:                        s.RktPath,
		RktAPIEndpoint:                 s.RktAPIEndpoint,
		RktStage1Image:                 s.RktStage1Image,
		RootDirectory:                  s.RootDirectory,
		SeccompProfileRoot:             s.SeccompProfileRoot,
		Runonce:                        s.RunOnce,
		SerializeImagePulls:            s.SerializeImagePulls,
		StandaloneMode:                 (len(s.APIServerList) == 0),
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout.Duration,
		SyncFrequency:                  s.SyncFrequency.Duration,
		SystemCgroups:                  s.SystemCgroups,
		TLSOptions:                     tlsOptions,
		Writer:                         writer,
		VolumePlugins:                  ProbeVolumePlugins(s.VolumePluginDir),
		OutOfDiskTransitionFrequency:   s.OutOfDiskTransitionFrequency.Duration,
		HairpinMode:                    s.HairpinMode,
		BabysitDaemons:                 s.BabysitDaemons,
		ExperimentalFlannelOverlay:     s.ExperimentalFlannelOverlay,
		NodeIP:         net.ParseIP(s.NodeIP),
		EvictionConfig: evictionConfig,
		PodsPerCore:    int(s.PodsPerCore),
	}, nil
}
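
The eviction flags consumed by eviction.ParseThresholdConfig above are comma-separated threshold expressions. Illustrative command-line values (examples, not defaults):

//	--eviction-hard="memory.available<100Mi"              // hard threshold: evict as soon as it is crossed
//	--eviction-soft="memory.available<300Mi"              // soft threshold: evict after the grace period
//	--eviction-soft-grace-period="memory.available=1m30s" // grace period per soft signal
//	--eviction-minimum-reclaim="memory.available=0Mi"     // minimum amount to reclaim per eviction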
Example #9
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *ProxyServerConfig) (*ProxyServer, error) {
	protocol := utiliptables.ProtocolIpv4
	if config.BindAddress.To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}

	// Create an iptables utility.
	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)

	// We omit creation of pretty much everything if we run in cleanup mode
	if config.CleanupAndExit {
		return &ProxyServer{
			Config:       config,
			IptInterface: iptInterface,
		}, nil
	}

	// TODO(vmarmol): Use container config for this.
	var oomAdjuster *oom.OOMAdjuster
	if config.OOMScoreAdj != 0 {
		oomAdjuster = oom.NewOOMAdjuster()
		if err := oomAdjuster.ApplyOOMScoreAdj(0, config.OOMScoreAdj); err != nil {
			glog.V(2).Info(err)
		}
	}

	if config.ResourceContainer != "" {
		// Run in its own container.
		if err := util.RunInResourceContainer(config.ResourceContainer); err != nil {
			glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
		} else {
			glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
		}
	}

	// Create a Kube client (this defines the API config source).
	if config.Kubeconfig == "" && config.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
	if err != nil {
		return nil, err
	}

	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = config.KubeApiQps
	kubeconfig.Burst = config.KubeApiBurst

	client, err := kubeclient.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	// Create event recorder
	hostname := nodeutil.GetHostname(config.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})
	eventBroadcaster.StartRecordingToSink(client.Events(""))

	var proxier proxy.ProxyProvider
	var endpointsHandler proxyconfig.EndpointsConfigHandler

	useIptablesProxy := false
	if mayTryIptablesProxy(config.ProxyMode, client.Nodes(), hostname) {
		var err error
		// guaranteed false on error, error only necessary for debugging
		useIptablesProxy, err = iptables.ShouldUseIptablesProxier()
		if err != nil {
			glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
		}
	}

	if useIptablesProxy {
		glog.V(2).Info("Using iptables Proxier.")
		proxierIptables, err := iptables.NewProxier(iptInterface, execer, config.IptablesSyncPeriod, config.MasqueradeAll)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIptables
		endpointsHandler = proxierIptables
		// No turning back. Remove artifacts that might still exist from the userspace Proxier.
		glog.V(2).Info("Tearing down userspace rules. Errors here are acceptable.")
		userspace.CleanupLeftovers(iptInterface)
	} else {
		glog.V(2).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer, which NewProxier needs; it also has
		// methods beyond what our config.EndpointsConfigHandler requires.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer

		proxierUserspace, err := userspace.NewProxier(loadBalancer, config.BindAddress, iptInterface, config.PortRange, config.IptablesSyncPeriod)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierUserspace
		// Remove artifacts from the pure-iptables Proxier.
		glog.V(2).Info("Tearing down pure-iptables proxy rules. Errors here are acceptable.")
		iptables.CleanupLeftovers(iptInterface)
	}
	iptInterface.AddReloadFunc(proxier.Sync)

	// Create configs (i.e. Watches for Services and Endpoints)
	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.
	serviceConfig := proxyconfig.NewServiceConfig()
	serviceConfig.RegisterHandler(proxier)

	endpointsConfig := proxyconfig.NewEndpointsConfig()
	endpointsConfig.RegisterHandler(endpointsHandler)

	proxyconfig.NewSourceAPI(
		client,
		config.ConfigSyncPeriod,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	config.nodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      hostname,
		UID:       types.UID(hostname),
		Namespace: "",
	}
	return NewProxyServer(config, iptInterface, proxier, recorder)
}
Example #10
File: server.go Project: F21/kubernetes
// Run runs the specified KubeletServer for the given KubeletConfig.  This should never exit.
// The kcfg argument may be nil - if so, it is initialized from the settings on KubeletServer.
// Otherwise, the caller is assumed to have set up the KubeletConfig object and all defaults
// will be ignored.
func (s *KubeletServer) Run(kcfg *KubeletConfig) error {
	if kcfg == nil {
		cfg, err := s.KubeletConfig()
		if err != nil {
			return err
		}
		kcfg = cfg

		clientConfig, err := s.CreateAPIServerClientConfig()
		if err == nil {
			kcfg.KubeClient, err = client.New(clientConfig)
		}
		if err != nil && len(s.APIServerList) > 0 {
			glog.Warningf("No API client: %v", err)
		}

		cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
		if err != nil {
			return err
		}
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
		kcfg.Cloud = cloud
	}

	if kcfg.CAdvisorInterface == nil {
		ca, err := cadvisor.New(s.CAdvisorPort)
		if err != nil {
			return err
		}
		kcfg.CAdvisorInterface = ca
	}

	util.ReallyCrash = s.ReallyCrashForTesting
	rand.Seed(time.Now().UTC().UnixNano())

	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	glog.V(2).Infof("Using root directory: %v", s.RootDirectory)

	// TODO(vmarmol): Do this through container config.
	oomAdjuster := oom.NewOOMAdjuster()
	if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.Warning(err)
	}

	if err := RunKubelet(kcfg, nil); err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}

	if s.RunOnce {
		return nil
	}

	// run forever
	select {}
}
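
A typical invocation passes nil so that the config is built from the server's own settings; a hedged sketch of the surrounding entry point (NewKubeletServer and AddFlags are assumptions about this package's era):

func main() {
	s := NewKubeletServer() // assumed constructor with defaults
	s.AddFlags(pflag.CommandLine)
	pflag.Parse()
	if err := s.Run(nil); err != nil {
		glog.Fatalf("kubelet failed: %v", err)
	}
}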