Example #1
// NewKubeletCommand provides a CLI handler for the 'kubelet' command
func NewKubeletCommand(name, fullName string, out io.Writer) *cobra.Command {
	kubeletOptions := kubeletoptions.NewKubeletServer()

	cmd := &cobra.Command{
		Use:   name,
		Short: "Launch the Kubelet (kubelet)",
		Long:  kubeletLog,
		Run: func(c *cobra.Command, args []string) {
			startProfiler()

			util.InitLogs()
			defer util.FlushLogs()

			if err := kubeletapp.Run(kubeletOptions, nil); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	flags.SetNormalizeFunc(util.WordSepNormalizeFunc)
	flags.AddGoFlagSet(flag.CommandLine)
	kubeletOptions.AddFlags(flags)

	return cmd
}
Example #2
// NewKubeletCommand creates a *cobra.Command object with default parameters
func NewKubeletCommand() *cobra.Command {
	s := options.NewKubeletServer()
	s.AddFlags(pflag.CommandLine)
	cmd := &cobra.Command{
		Use: "kubelet",
		Long: `The kubelet is the primary "node agent" that runs on each
node. The kubelet works in terms of a PodSpec. A PodSpec is a YAML or JSON object
that describes a pod. The kubelet takes a set of PodSpecs that are provided through
various mechanisms (primarily through the apiserver) and ensures that the containers
described in those PodSpecs are running and healthy.

Other than from a PodSpec from the apiserver, there are three ways that a container
manifest can be provided to the Kubelet.

File: Path passed as a flag on the command line. This file is rechecked every 20
seconds (configurable with a flag).

HTTP endpoint: HTTP endpoint passed as a parameter on the command line. This endpoint
is checked every 20 seconds (also configurable with a flag).

HTTP server: The kubelet can also listen for HTTP and respond to a simple API
(underspec'd currently) to submit a new manifest.`,
		Run: func(cmd *cobra.Command, args []string) {
		},
	}

	return cmd
}
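
The command built here is returned rather than executed; below is a minimal sketch of driving it from a main function. The import path is hypothetical, and only cobra's documented Execute behavior is assumed.

package main

import (
	"fmt"
	"os"

	"example.com/kubelet/app" // hypothetical import path for the package that defines NewKubeletCommand
)

func main() {
	cmd := app.NewKubeletCommand()
	// cobra parses os.Args, binds the registered flags, and invokes the command's Run func.
	if err := cmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}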
Example #3
func StartKubeletServer(lk LocalkubeServer) func() error {
	config := options.NewKubeletServer()

	// Master details
	config.APIServerList = []string{lk.GetAPIServerInsecureURL()}

	// Set containerized based on the flag
	config.Containerized = lk.Containerized

	config.AllowPrivileged = true
	config.Config = "/etc/kubernetes/manifests"

	// Networking
	config.ClusterDomain = lk.DNSDomain
	config.ClusterDNS = lk.DNSIP.String()

	config.NodeIP = lk.NodeIP.String()

	// Use the host's resolver config
	if lk.Containerized {
		config.ResolverConfig = "/rootfs/etc/resolv.conf"
	} else {
		config.ResolverConfig = "/etc/resolv.conf"
	}

	return func() error {
		return kubelet.Run(config, nil)
	}
}
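
Because StartKubeletServer only prepares the configuration and returns a closure, the kubelet does not run until that closure is called. A minimal sketch of a call site follows; the LocalkubeServer value and the use of the standard library log package are assumptions.

func runKubelet(lk LocalkubeServer) {
	start := StartKubeletServer(lk)
	// The returned closure blocks while the kubelet runs, so it is usually
	// launched in its own goroutine; "log" here is the standard library logger.
	go func() {
		if err := start(); err != nil {
			log.Printf("kubelet exited: %v", err)
		}
	}()
}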
Example #4
func NewStatusUpdater(client *clientset.Clientset, relistPeriod time.Duration, nowFunc func() time.Time) *StatusUpdater {
	kubecfg := options.NewKubeletServer() // created only to read the default config; this has no side effects
	return &StatusUpdater{
		client:          client,
		relistPeriod:    relistPeriod,
		heartBeatPeriod: kubecfg.NodeStatusUpdateFrequency.Duration,
		nowFunc:         nowFunc,
	}
}
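
A short sketch of constructing a StatusUpdater outside of tests; the relist period and the use of time.Now as the clock are illustrative (the test in Example #9 injects a fixed time instead), and the client is assumed to be built elsewhere.

// Sketch: build a StatusUpdater that relists every 10 seconds and uses the real clock.
func newDefaultStatusUpdater(client *clientset.Clientset) *StatusUpdater {
	return NewStatusUpdater(client, 10*time.Second, time.Now)
}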
Example #5
func TestBuildCloudProviderNone(t *testing.T) {
	server := kubeletoptions.NewKubeletServer()
	server.CloudProvider = ""
	cloud, err := buildCloudProvider(server)
	if err != nil {
		t.Errorf("buildCloudProvider failed: %v", err)
	}
	if cloud != nil {
		t.Errorf("buildCloudProvider returned cloud provider %q where nil was expected", cloud.ProviderName())
	}
}
Example #6
func TestBuildCloudProviderError(t *testing.T) {
	server := kubeletoptions.NewKubeletServer()
	server.CloudProvider = "unknown-provider-name"
	cloud, err := buildCloudProvider(server)
	if err == nil {
		t.Errorf("buildCloudProvider returned no error when one was expected")
	}
	if cloud != nil {
		t.Errorf("buildCloudProvider returned cloud provider %q where nil was expected", cloud.ProviderName())
	}
}
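
Examples #5 and #6 exercise the same helper with different provider names, so they can also be written as one table-driven test. The sketch below assumes only what those examples already show about buildCloudProvider: both cases should return a nil cloud provider, and only the unknown name should produce an error.

func TestBuildCloudProviderTable(t *testing.T) {
	cases := []struct {
		name          string
		cloudProvider string
		expectErr     bool
	}{
		{"none", "", false},
		{"unknown", "unknown-provider-name", true},
	}
	for _, tc := range cases {
		server := kubeletoptions.NewKubeletServer()
		server.CloudProvider = tc.cloudProvider
		cloud, err := buildCloudProvider(server)
		if tc.expectErr && err == nil {
			t.Errorf("%s: expected an error from buildCloudProvider, got none", tc.name)
		}
		if !tc.expectErr && err != nil {
			t.Errorf("%s: buildCloudProvider failed: %v", tc.name, err)
		}
		// In both cases no cloud provider instance should be returned.
		if cloud != nil {
			t.Errorf("%s: expected nil cloud provider, got %q", tc.name, cloud.ProviderName())
		}
	}
}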
Example #7
func NewKubeletExecutorServer() *KubeletExecutorServer {
	k := &KubeletExecutorServer{
		KubeletServer:     options.NewKubeletServer(),
		SuicideTimeout:    config.DefaultSuicideTimeout,
		LaunchGracePeriod: config.DefaultLaunchGracePeriod,
	}
	if pwd, err := os.Getwd(); err != nil {
		log.Warningf("failed to determine current directory: %v", err)
	} else {
		k.RootDirectory = pwd // mesos sandbox dir
	}
	k.Address = defaultBindingAddress()

	return k
}
Example #8
func main() {
	s := options.NewKubeletServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s, nil); err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
		os.Exit(1)
	}
}
Example #9
func Test_nodeWithUpdatedStatus(t *testing.T) {
	now := time.Now()
	testNode := func(d time.Duration, s api.ConditionStatus, r string) *api.Node {
		return &api.Node{
			Status: api.NodeStatus{
				Conditions: []api.NodeCondition{{
					Type: api.NodeOutOfDisk,
				}, {
					Type:               api.NodeReady,
					Status:             s,
					Reason:             r,
					Message:            "some message we don't care about here",
					LastTransitionTime: unversioned.Time{now.Add(-time.Minute)},
					LastHeartbeatTime:  unversioned.Time{now.Add(d)},
				}},
			},
		}
	}

	cm := cmoptions.NewCMServer()
	kubecfg := kubeletoptions.NewKubeletServer()
	assert.True(t, kubecfg.NodeStatusUpdateFrequency.Duration*3 < cm.NodeControllerOptions.NodeMonitorGracePeriod) // sanity check for defaults

	n := testNode(0, api.ConditionTrue, "KubeletReady")
	su := NewStatusUpdater(nil, cm.NodeControllerOptions.NodeMonitorPeriod, func() time.Time { return now })
	_, updated, err := su.nodeWithUpdatedStatus(n)
	assert.NoError(t, err)
	assert.False(t, updated, "no update expected b/c kubelet updated heartbeat just now")

	n = testNode(-cm.NodeControllerOptions.NodeMonitorGracePeriod, api.ConditionTrue, "KubeletReady")
	n2, updated, err := su.nodeWithUpdatedStatus(n)
	assert.NoError(t, err)
	assert.True(t, updated, "update expected b/c kubelet's update is older than DefaultNodeMonitorGracePeriod")
	assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Reason, slaveReadyReason)
	assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Message, slaveReadyMessage)

	n = testNode(-kubecfg.NodeStatusUpdateFrequency.Duration, api.ConditionTrue, "KubeletReady")
	n2, updated, err = su.nodeWithUpdatedStatus(n)
	assert.NoError(t, err)
	assert.False(t, updated, "no update expected b/c kubelet's update was missed only once")

	n = testNode(-kubecfg.NodeStatusUpdateFrequency.Duration*3, api.ConditionTrue, "KubeletReady")
	n2, updated, err = su.nodeWithUpdatedStatus(n)
	assert.NoError(t, err)
	assert.True(t, updated, "update expected b/c kubelet's update is older than 3*DefaultNodeStatusUpdateFrequency")
	assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Reason, slaveReadyReason)
	assert.Equal(t, getCondition(&n2.Status, api.NodeReady).Message, slaveReadyMessage)
}
Example #10
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	s := options.NewKubeletServer()
	s.AddFlags(pflag.CommandLine)

	flag.InitFlags()
	util.InitLogs()
	defer util.FlushLogs()

	verflag.PrintAndExitIfRequested()

	if err := app.Run(s, nil); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
Example #11
// NewKubelet creates a new hyperkube Server object that includes the
// description and flags.
func NewKubelet() *Server {
	s := options.NewKubeletServer()
	hks := Server{
		SimpleUsage: "kubelet",
		Long: `The kubelet binary is responsible for maintaining a set of containers on a
		particular node. It syncs data from a variety of sources including a
		Kubernetes API server, an etcd cluster, HTTP endpoint or local file. It then
		queries Docker to see what is currently running.  It synchronizes the
		configuration data with the running set of containers by starting or stopping
		Docker containers.`,
		Run: func(_ *Server, _ []string) error {
			return app.Run(s, nil)
		},
	}
	s.AddFlags(hks.Flags())
	return &hks
}
Example #12
func TestBuildCloudProviderFake(t *testing.T) {
	providerName := "fake"
	cloudprovider.RegisterCloudProvider(providerName, func(config io.Reader) (cloudprovider.Interface, error) {
		return &fake.FakeCloud{}, nil
	})

	server := kubeletoptions.NewKubeletServer()
	server.CloudProvider = providerName

	cloud, err := buildCloudProvider(server)
	if err != nil {
		t.Errorf("buildCloudProvider failed: %v", err)
	}
	if cloud == nil {
		t.Errorf("buildCloudProvider returned nil cloud provider")
	} else {
		if cloud.ProviderName() != providerName {
			t.Errorf("Invalid cloud provider returned, expected %q, got %q", providerName, cloud.ProviderName())
		}
	}
}
Example #13
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	originClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	clientCAs, err := kcrypto.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	// Defaults are tested in TestKubeletDefaults
	server := kubeletoptions.NewKubeletServer()
	// Adjust defaults
	server.Config = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0        // no read only access
	server.CAdvisorPort = 0        // no unsecured cadvisor access
	server.HealthzPort = 0         // no unsecured healthz access
	server.HealthzBindAddress = "" // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 110
	server.SerializeImagePulls = false // disable serial image pulls by default

	switch server.NetworkPluginName {
	case ovs.SingleTenantPluginName, ovs.MultiTenantPluginName:
		// set defaults for openshift-sdn
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	cfg, err := kubeletapp.UnsecuredKubeletConfig(server)
	if err != nil {
		return nil, err
	}

	// provide any config overrides
	cfg.NodeName = options.NodeName
	cfg.KubeClient = clientadapter.FromUnversionedClient(kubeClient)
	cfg.EventClient = clientadapter.FromUnversionedClient(eventClient)
	cfg.DockerExecHandler = dockerExecHandler

	// Setup auth
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}

	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(originClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	cfg.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// Make sure the node doesn't think it is in standalone mode
	// This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
	cfg.StandaloneMode = false

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		cfg.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allows requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	// Prepare cloud provider
	cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}
	cfg.Cloud = cloud

	sdnPlugin, err := factory.NewNodePlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		cfg.NetworkPlugins = append(cfg.NetworkPlugins, sdnPlugin)
	}

	endpointFilter, err := factory.NewProxyPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient)
	if err != nil {
		return nil, fmt.Errorf("SDN proxy initialization failed: %v", err)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,

		ProxyConfig: proxyconfig,

		MTU: options.NetworkConfig.MTU,

		SDNPlugin:                 sdnPlugin,
		FilteringEndpointsHandler: endpointFilter,
	}

	return config, nil
}
Example #14
func ValidateKubeletExtendedArguments(config api.ExtendedArguments, fldPath *field.Path) field.ErrorList {
	return ValidateExtendedArguments(config, kubeletoptions.NewKubeletServer().AddFlags, fldPath)
}
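
A minimal sketch of calling this validator; the argument map, the flag name, and the field path label are illustrative, assuming ExtendedArguments is the usual map of flag names to string values.

// Sketch: validate a hypothetical kubeletArguments section from a node config.
func validateExampleKubeletArguments() field.ErrorList {
	args := api.ExtendedArguments{
		"max-pods": {"250"}, // illustrative flag name and value
	}
	return ValidateKubeletExtendedArguments(args, field.NewPath("kubeletArguments"))
}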
Example #15
func TestKubeletDefaults(t *testing.T) {
	defaults := kubeletoptions.NewKubeletServer()

	// This is a snapshot of the default config
	// If the default changes (new fields are added, or default values change), we want to know
	// Once we've reacted to the changes appropriately in BuildKubernetesNodeConfig(), update this expected default to match the new upstream defaults
	expectedDefaults := &kubeletoptions.KubeletServer{
		AuthPath:   util.NewStringFlag(""),
		KubeConfig: util.NewStringFlag("/var/lib/kubelet/kubeconfig"),

		KubeletConfiguration: componentconfig.KubeletConfiguration{
			Address:                     "0.0.0.0", // overridden
			AllowPrivileged:             false,     // overridden
			CAdvisorPort:                4194,      // disabled
			VolumeStatsAggPeriod:        unversioned.Duration{Duration: time.Minute},
			CertDirectory:               "/var/run/kubernetes",
			CgroupRoot:                  "",
			ClusterDNS:                  "", // overridden
			ClusterDomain:               "", // overridden
			ConfigureCBR0:               false,
			ContainerRuntime:            "docker",
			Containerized:               false, // overridden based on OPENSHIFT_CONTAINERIZED
			CPUCFSQuota:                 true,  // forced to true
			DockerExecHandlerName:       "native",
			DockerEndpoint:              "unix:///var/run/docker.sock",
			EventBurst:                  10,
			EventRecordQPS:              5.0,
			EnableCustomMetrics:         false,
			EnableDebuggingHandlers:     true,
			EnableServer:                true,
			EvictionHard:                "memory.available<100Mi",
			FileCheckFrequency:          unversioned.Duration{Duration: 20 * time.Second}, // overridden
			HealthzBindAddress:          "127.0.0.1",                                      // disabled
			HealthzPort:                 10248,                                            // disabled
			HostNetworkSources:          []string{"*"},                                    // overridden
			HostPIDSources:              []string{"*"},                                    // overridden
			HostIPCSources:              []string{"*"},                                    // overridden
			HTTPCheckFrequency:          unversioned.Duration{Duration: 20 * time.Second}, // disabled
			ImageMinimumGCAge:           unversioned.Duration{Duration: 120 * time.Second},
			ImageGCHighThresholdPercent: 90,
			ImageGCLowThresholdPercent:  80,
			IPTablesMasqueradeBit:       14,
			IPTablesDropBit:             15,
			LowDiskSpaceThresholdMB:     256,
			MakeIPTablesUtilChains:      true,
			MasterServiceNamespace:      "default",
			MaxContainerCount:           -1,
			MaxPerPodContainerCount:     1,
			MaxOpenFiles:                1000000,
			MaxPods:                     110, // overridden
			MinimumGCAge:                unversioned.Duration{},
			NetworkPluginDir:            "",
			NetworkPluginName:           "", // overridden
			NonMasqueradeCIDR:           "10.0.0.0/8",
			VolumePluginDir:             "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
			NodeStatusUpdateFrequency:   unversioned.Duration{Duration: 10 * time.Second},
			NodeLabels:                  nil,
			OOMScoreAdj:                 -999,
			LockFilePath:                "",
			PodInfraContainerImage:      "gcr.io/google_containers/pause-amd64:3.0", // overridden
			Port:                           10250, // overridden
			ReadOnlyPort:                   10255, // disabled
			RegisterNode:                   true,
			RegisterSchedulable:            true,
			RegistryBurst:                  10,
			RegistryPullQPS:                5.0,
			ResolverConfig:                 kubetypes.ResolvConfDefault,
			KubeletCgroups:                 "",
			RktAPIEndpoint:                 rkt.DefaultRktAPIServiceEndpoint,
			RktPath:                        "",
			RktStage1Image:                 "",
			RootDirectory:                  "/var/lib/kubelet", // overridden
			RuntimeCgroups:                 "",
			SerializeImagePulls:            true,
			StreamingConnectionIdleTimeout: unversioned.Duration{Duration: 4 * time.Hour},
			SyncFrequency:                  unversioned.Duration{Duration: 1 * time.Minute},
			SystemCgroups:                  "",
			TLSCertFile:                    "", // overridden to prevent cert generation
			TLSPrivateKeyFile:              "", // overridden to prevent cert generation
			ReconcileCIDR:                  true,
			KubeAPIQPS:                     5.0,
			KubeAPIBurst:                   10,
			ExperimentalFlannelOverlay:     false,
			OutOfDiskTransitionFrequency:   unversioned.Duration{Duration: 5 * time.Minute},
			HairpinMode:                    "promiscuous-bridge",
			BabysitDaemons:                 false,
			SeccompProfileRoot:             "",
			CloudProvider:                  "auto-detect",
			RuntimeRequestTimeout:          unversioned.Duration{Duration: 2 * time.Minute},
			ContentType:                    "application/vnd.kubernetes.protobuf",
			EnableControllerAttachDetach:   true,

			EvictionPressureTransitionPeriod: unversioned.Duration{Duration: 5 * time.Minute},

			SystemReserved: utilconfig.ConfigurationMap{},
			KubeReserved:   utilconfig.ConfigurationMap{},
		},
	}

	if !reflect.DeepEqual(defaults, expectedDefaults) {
		t.Logf("expected defaults, actual defaults: \n%s", diff.ObjectReflectDiff(expectedDefaults, defaults))
		t.Errorf("Got different defaults than expected, adjust in BuildKubernetesNodeConfig and update expectedDefaults")
	}
}
Example #16
func BuildKubernetesNodeConfig(options configapi.NodeConfig, enableProxy, enableDNS bool) (*NodeConfig, error) {
	originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	_, kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	_, eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	clientCAs, err := kcrypto.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	if err = validateNetworkPluginName(originClient, options.NetworkConfig.NetworkPluginName); err != nil {
		return nil, err
	}

	// Defaults are tested in TestKubeletDefaults
	server := kubeletoptions.NewKubeletServer()
	// Adjust defaults
	server.RequireKubeConfig = true
	server.PodManifestPath = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = int32(kubePort)
	server.ReadOnlyPort = 0        // no read only access
	server.CAdvisorPort = 0        // no unsecured cadvisor access
	server.HealthzPort = 0         // no unsecured healthz access
	server.HealthzBindAddress = "" // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HostPIDSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HostIPCSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 250
	server.PodsPerCore = 10
	server.SerializeImagePulls = false          // disable serialized image pulls by default
	server.EnableControllerAttachDetach = false // stay consistent with existing config, but admins should enable it
	if enableDNS {
		// if we are running local DNS, skydns will load the default recursive nameservers for us
		server.ResolverConfig = ""
	}
	server.DockerExecHandlerName = string(options.DockerConfig.ExecHandlerName)

	if sdnapi.IsOpenShiftNetworkPlugin(server.NetworkPluginName) {
		// set defaults for openshift-sdn
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	// Initialize SDN before building kubelet config so it can modify options
	iptablesSyncPeriod, err := time.ParseDuration(options.IPTablesSyncPeriod)
	if err != nil {
		return nil, fmt.Errorf("Cannot parse the provided ip-tables sync period (%s) : %v", options.IPTablesSyncPeriod, err)
	}
	sdnPlugin, err := sdnplugin.NewNodePlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP, iptablesSyncPeriod, options.NetworkConfig.MTU)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		// SDN plugin pod setup/teardown is implemented as a CNI plugin
		server.NetworkPluginName = kubeletcni.CNIPluginName
		server.NetworkPluginDir = kubeletcni.DefaultNetDir
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	deps, err := kubeletapp.UnsecuredKubeletDeps(server)
	if err != nil {
		return nil, err
	}

	// Initialize cloud provider
	cloud, err := buildCloudProvider(server)
	if err != nil {
		return nil, err
	}
	deps.Cloud = cloud

	// Replace the kubelet-created CNI plugin with the SDN plugin
	// Kubelet must be initialized with NetworkPluginName="cni" but
	// the SDN plugin (if available) needs to be the only one used
	if sdnPlugin != nil {
		deps.NetworkPlugins = []kubeletnetwork.NetworkPlugin{sdnPlugin}
	}

	// provide any config overrides
	//deps.NodeName = options.NodeName
	deps.KubeClient = kubeClient
	deps.EventClient = eventClient

	// Setup auth
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(kubeClient.Authentication(), clientCAs, authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}

	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(originClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	deps.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		deps.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allows requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		deps.TLSOptions = nil
	}

	sdnProxy, err := sdnplugin.NewProxyPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient)
	if err != nil {
		return nil, fmt.Errorf("SDN proxy initialization failed: %v", err)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletDeps:   deps,

		ServicesReady: make(chan struct{}),

		ProxyConfig:    proxyconfig,
		EnableUnidling: options.EnableUnidling,

		SDNPlugin: sdnPlugin,
		SDNProxy:  sdnProxy,
	}

	if enableDNS {
		dnsConfig, err := dns.NewServerDefaults()
		if err != nil {
			return nil, fmt.Errorf("DNS configuration was not possible: %v", err)
		}
		if len(options.DNSIP) > 0 {
			dnsConfig.DnsAddr = options.DNSIP + ":53"
		}
		dnsConfig.Domain = server.ClusterDomain + "."
		dnsConfig.Local = "openshift.default.svc." + dnsConfig.Domain

		services, serviceStore := dns.NewCachedServiceAccessorAndStore()
		endpoints, endpointsStore := dns.NewCachedEndpointsAccessorAndStore()
		if !enableProxy {
			endpoints = deps.KubeClient
			endpointsStore = nil
		}

		// TODO: use kubeletConfig.ResolverConfig as an argument to etcd in the event the
		//   user sets it, instead of passing it to the kubelet.

		config.ServiceStore = serviceStore
		config.EndpointsStore = endpointsStore
		config.DNSServer = &dns.Server{
			Config:      dnsConfig,
			Services:    services,
			Endpoints:   endpoints,
			MetricsName: "node",
		}
	}

	return config, nil
}
Example #17
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	// declare the OpenShift defaults from config
	server := kubeletoptions.NewKubeletServer()
	server.Config = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0 // no read only access
	server.CAdvisorPort = 0 // no unsecured cadvisor access
	server.HealthzPort = 0  // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 110

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	cfg, err := kubeletapp.UnsecuredKubeletConfig(server)
	if err != nil {
		return nil, err
	}

	// Replace the standard k8s emptyDir volume plugin with a wrapper version
	// which offers XFS quota functionality, but only if the node config
	// specifies an empty dir quota to apply to projects:
	if options.VolumeConfig.LocalQuota.PerFSGroup != nil {
		glog.V(2).Info("Replacing empty-dir volume plugin with quota wrapper")
		wrappedEmptyDirPlugin := false

		quotaApplicator, err := empty_dir.NewQuotaApplicator(options.VolumeDirectory)
		if err != nil {
			return nil, err
		}

		// Create a volume spec with emptyDir we can use to search for the
		// emptyDir plugin with CanSupport:
		emptyDirSpec := &volume.Spec{
			Volume: &kapi.Volume{
				VolumeSource: kapi.VolumeSource{
					EmptyDir: &kapi.EmptyDirVolumeSource{},
				},
			},
		}

		for idx, plugin := range cfg.VolumePlugins {
			// Can't really do type checking or use a constant here as they are not exported:
			if plugin.CanSupport(emptyDirSpec) {
				wrapper := empty_dir.EmptyDirQuotaPlugin{
					Wrapped:         plugin,
					Quota:           *options.VolumeConfig.LocalQuota.PerFSGroup,
					QuotaApplicator: quotaApplicator,
				}
				cfg.VolumePlugins[idx] = &wrapper
				wrappedEmptyDirPlugin = true
			}
		}
		// Because we can't look for the k8s emptyDir plugin by any means that would
		// survive a refactor, error out if we couldn't find it:
		if !wrappedEmptyDirPlugin {
			return nil, errors.New("unable to wrap emptyDir volume plugin for quota support")
		}
	} else {
		glog.V(2).Info("Skipping replacement of empty-dir volume plugin with quota wrapper, no local fsGroup quota specified")
	}

	// provide any config overrides
	cfg.NodeName = options.NodeName
	cfg.KubeClient = internalclientset.FromUnversionedClient(kubeClient)
	cfg.EventClient = internalclientset.FromUnversionedClient(eventClient)
	cfg.DockerExecHandler = dockerExecHandler

	// docker-in-docker (dind) deployments are used for testing
	// networking plugins.  Running openshift under dind won't work
	// with the real oom adjuster due to the state of the cgroups path
	// in a dind container that uses systemd for init.  Similarly,
	// cgroup manipulation of the nested docker daemon doesn't work
	// properly under centos/rhel and should be disabled by setting
	// the name of the container to an empty string.
	//
	// This workaround should become unnecessary once user namespaces are in use.
	if value := cmdutil.Env("OPENSHIFT_DIND", ""); value == "true" {
		glog.Warningf("Using FakeOOMAdjuster for docker-in-docker compatibility")
		cfg.OOMAdjuster = oom.NewFakeOOMAdjuster()
	}

	// Setup auth
	osClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}

	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(osClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	cfg.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// Make sure the node doesn't think it is in standalone mode
	// This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
	cfg.StandaloneMode = false

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		cfg.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allows requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	// Prepare cloud provider
	cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}
	cfg.Cloud = cloud

	sdnPlugin, endpointFilter, err := factory.NewPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		cfg.NetworkPlugins = append(cfg.NetworkPlugins, sdnPlugin)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,

		ProxyConfig: proxyconfig,

		MTU: options.NetworkConfig.MTU,

		SDNPlugin:                 sdnPlugin,
		FilteringEndpointsHandler: endpointFilter,
	}

	return config, nil
}