func (e *MountDiskTask) Find(c *fi.Context) (*MountDiskTask, error) { mounter := mount.New() mps, err := mounter.List() if err != nil { return nil, fmt.Errorf("error finding existing mounts: %v", err) } // If device is a symlink, it will show up by its final name targetDevice, err := filepath.EvalSymlinks(e.Device) if err != nil { return nil, fmt.Errorf("error resolving device symlinks for %q: %v", e.Device, err) } for i := range mps { mp := &mps[i] if mp.Device == targetDevice { actual := &MountDiskTask{ Name: e.Name, Mountpoint: mp.Path, Device: e.Device, // Use our alias, to keep change detection happy } return actual, nil } } return nil, nil }
// SimpleKubelet builds a KubeletConfig with hard-coded defaults suitable for
// a simple, locally-run Kubelet talking to dockerClient, using an API Client.
// Under the hood the returned config is consumed by RunKubelet (below).
func SimpleKubelet(client *client.Client, dockerClient dockertools.DockerInterface, hostname, rootDir, manifestURL, address string, port uint, masterServiceNamespace string, volumePlugins []volume.VolumePlugin, tlsOptions *kubelet.TLSOptions, cadvisorInterface cadvisor.Interface, configFilePath string, cloud cloudprovider.Interface, osInterface kubecontainer.OSInterface) *KubeletConfig {
	// Fixed GC / disk policies; these are not configurable through this helper.
	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: 90,
		LowThresholdPercent:  80,
	}
	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: 256,
		RootFreeDiskMB:   256,
	}
	kcfg := KubeletConfig{
		KubeClient:              client,
		DockerClient:            dockerClient,
		HostnameOverride:        hostname,
		RootDirectory:           rootDir,
		ManifestURL:             manifestURL,
		PodInfraContainerImage:  dockertools.PodInfraContainerImage,
		Port:                    port,
		Address:                 net.ParseIP(address),
		EnableServer:            true,
		EnableDebuggingHandlers: true,
		HTTPCheckFrequency:      1 * time.Second,
		FileCheckFrequency:      1 * time.Second,
		SyncFrequency:           3 * time.Second,
		MinimumGCAge:            10 * time.Second,
		MaxPerPodContainerCount: 2,
		MaxContainerCount:       100,
		RegisterNode:            true,
		MasterServiceNamespace:  masterServiceNamespace,
		VolumePlugins:           volumePlugins,
		TLSOptions:              tlsOptions,
		CadvisorInterface:       cadvisorInterface,
		ConfigFile:              configFilePath,
		ImageGCPolicy:           imageGCPolicy,
		DiskSpacePolicy:         diskSpacePolicy,
		Cloud:                   cloud,
		NodeStatusUpdateFrequency: 10 * time.Second,
		ResourceContainer:         "/kubelet",
		OSInterface:               osInterface,
		// Empty cgroup root / system container: the kubelet takes no
		// control over other system processes in this simple setup.
		CgroupRoot:            "",
		ContainerRuntime:      "docker",
		Mounter:               mount.New(),
		DockerDaemonContainer: "/docker-daemon",
		SystemContainer:       "",
		MaxPods:               32,
		DockerExecHandler:     &dockertools.NativeExecHandler{},
		ResolverConfig:        kubelet.ResolvConfDefault,
	}
	return &kcfg
}
// isSysFSWritable checks /proc/mounts to see whether sysfs is 'rw' or not. func isSysFSWritable() (bool, error) { const permWritable = "rw" const sysfsDevice = "sysfs" m := mount.New() mountPoints, err := m.List() if err != nil { glog.Errorf("failed to list mount points: %v", err) return false, err } for _, mountPoint := range mountPoints { const sysfsDevice = "sysfs" if mountPoint.Device != sysfsDevice { continue } // Check whether sysfs is 'rw' const permWritable = "rw" if len(mountPoint.Opts) > 0 && mountPoint.Opts[0] == permWritable { return true, nil } glog.Errorf("sysfs is not writable: %+v (mount options are %v)", mountPoint, mountPoint.Opts) return false, readOnlySysFSError } return false, errors.New("No sysfs mounted") }
func TestPlugin(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t)) plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo") if err != nil { t.Errorf("Can't find the plugin by name") } spec := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{ GitRepo: &api.GitRepoVolumeSource{ Repository: "https://github.com/GoogleCloudPlatform/kubernetes.git", Revision: "2a30ce65c5ab586b98916d83385c5983edd353a1", }, }, } pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{""}, mount.New()) if err != nil { t.Errorf("Failed to make a new Builder: %v", err) } if builder == nil { t.Errorf("Got a nil Builder") } path := builder.GetPath() if !strings.HasSuffix(path, "pods/poduid/volumes/kubernetes.io~git-repo/vol1") { t.Errorf("Got unexpected path: %s", path) } testSetUp(plug, builder, t) if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"), mount.New()) if err != nil { t.Errorf("Failed to make a new Cleaner: %v", err) } if cleaner == nil { t.Errorf("Got a nil Cleaner") } if err := cleaner.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", path) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } }
// getMetadataFromConfigDrive locates the config drive by filesystem label,
// mounts it read-only on a temporary directory, and parses the metadata file
// found on it. The mount is unmounted and the temp dir removed on return.
func getMetadataFromConfigDrive() (*Metadata, error) {
	// Try to read instance UUID from config drive.
	dev := "/dev/disk/by-label/" + configDriveLabel
	if _, err := os.Stat(dev); os.IsNotExist(err) {
		// No by-label symlink present; fall back to blkid to locate the
		// device carrying the config-drive label.
		out, err := exec.New().Command(
			"blkid", "-l",
			"-t", "LABEL="+configDriveLabel,
			"-o", "device",
		).CombinedOutput()
		if err != nil {
			glog.V(2).Infof("Unable to run blkid: %v", err)
			return nil, err
		}
		dev = strings.TrimSpace(string(out))
	}

	mntdir, err := ioutil.TempDir("", "configdrive")
	if err != nil {
		return nil, err
	}
	// Registered first, so it runs LAST (defers are LIFO): the directory is
	// removed only after the Unmount defer below has run.
	defer os.Remove(mntdir)

	glog.V(4).Infof("Attempting to mount configdrive %s on %s", dev, mntdir)

	mounter := mount.New()
	// Config drives come formatted as either iso9660 or vfat; try both.
	err = mounter.Mount(dev, mntdir, "iso9660", []string{"ro"})
	if err != nil {
		err = mounter.Mount(dev, mntdir, "vfat", []string{"ro"})
	}
	if err != nil {
		glog.Errorf("Error mounting configdrive %s: %v", dev, err)
		return nil, err
	}
	// Runs before the os.Remove defer above. Unmount errors are ignored
	// here (best-effort cleanup).
	defer mounter.Unmount(mntdir)

	glog.V(4).Infof("Configdrive mounted on %s", mntdir)

	f, err := os.Open(
		filepath.Join(mntdir, configDrivePath))
	if err != nil {
		glog.Errorf("Error reading %s on config drive: %v", configDrivePath, err)
		return nil, err
	}
	defer f.Close()

	return parseMetadata(f)
}
func CleanEverything(plugin volume.VolumePlugin, testVolumeName, volumePath string, testPodUID types.UID, t *testing.T) { cleaner, err := plugin.NewCleaner(testVolumeName, testPodUID, mount.New()) if err != nil { t.Errorf("Failed to make a new Cleaner: %v", err) } if cleaner == nil { t.Errorf("Got a nil Cleaner") } if err := cleaner.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", volumePath) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } }
func (_ *MountDiskTask) RenderLocal(t *local.LocalTarget, a, e, changes *MountDiskTask) error { dirMode := os.FileMode(0755) // Create the mountpoint err := os.MkdirAll(e.Mountpoint, dirMode) if err != nil { return fmt.Errorf("error creating mountpoint %q: %v", e.Mountpoint, err) } // Wait for the device to show up for { _, err := os.Stat(e.Device) if err == nil { break } if !os.IsNotExist(err) { return fmt.Errorf("error checking for device %q: %v", e.Device, err) } glog.Infof("Waiting for device %q to be attached", e.Device) time.Sleep(1 * time.Second) } glog.Infof("Found device %q", e.Device) // Mount the device if changes.Mountpoint != "" { glog.Infof("Mounting device %q on %q", e.Device, e.Mountpoint) mounter := &mount.SafeFormatAndMount{Interface: mount.New(), Runner: exec.New()} fstype := "" options := []string{} err := mounter.FormatAndMount(e.Device, e.Mountpoint, fstype, options) if err != nil { return fmt.Errorf("error formatting and mounting disk %q on %q: %v", e.Device, e.Mountpoint, err) } } // TODO: Should we add to /etc/fstab? // Mount the master PD as early as possible // echo "/dev/xvdb /mnt/master-pd ext4 noatime 0 0" >> /etc/fstab return nil }
// UnsecuredKubeletDeps returns a KubeletDeps suitable for being run, or an error if the server setup
// is not valid. It will not start any background processes, and does not include authentication/authorization.
func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error) {
	// Initialize the TLS Options
	tlsOptions, err := InitializeTLS(&s.KubeletConfiguration)
	if err != nil {
		return nil, err
	}

	mounter := mount.New(s.ExperimentalMounterPath)
	var writer kubeio.Writer = &kubeio.StdWriter{}
	if s.Containerized {
		// In containerized mode, mount/write operations must re-enter the
		// host namespace via nsenter.
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
		writer = &kubeio.NsenterWriter{}
	}

	// Only connect to docker when it is the selected runtime.
	var dockerClient dockertools.DockerInterface
	if s.ContainerRuntime == "docker" {
		dockerClient = dockertools.ConnectToDockerOrDie(s.DockerEndpoint, s.RuntimeRequestTimeout.Duration, s.ImagePullProgressDeadline.Duration)
	} else {
		dockerClient = nil
	}

	return &kubelet.KubeletDeps{
		Auth:               nil, // default does not enforce auth[nz]
		CAdvisorInterface:  nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here
		Cloud:              nil, // cloud provider might start background processes
		ContainerManager:   nil,
		DockerClient:       dockerClient,
		KubeClient:         nil,
		ExternalKubeClient: nil,
		Mounter:            mounter,
		NetworkPlugins:     ProbeNetworkPlugins(s.NetworkPluginDir, s.CNIConfDir, s.CNIBinDir),
		OOMAdjuster:        oom.NewOOMAdjuster(),
		OSInterface:        kubecontainer.RealOS{},
		Writer:             writer,
		VolumePlugins:      ProbeVolumePlugins(s.VolumePluginDir),
		TLSOptions:         tlsOptions,
	}, nil
}
// isSysFSWritable checks /proc/mounts to see whether sysfs is 'rw' or not. func isSysFSWritable() (bool, error) { const permWritable = "rw" const sysfsDevice = "sysfs" m := mount.New("" /* default mount path */) mountPoints, err := m.List() if err != nil { glog.Errorf("failed to list mount points: %v", err) return false, err } for _, mountPoint := range mountPoints { if mountPoint.Device != sysfsDevice { continue } // Check whether sysfs is 'rw' if len(mountPoint.Opts) > 0 && mountPoint.Opts[0] == permWritable { return true, nil } glog.Errorf("sysfs is not writable: %+v", mountPoint) break } return false, nil }
// NewHollowKubelet assembles a HollowKubelet for scalability testing: a real
// kubelet configuration wired to fake/injected dependencies (fake OS, fake
// OOM adjuster) so it can run without a real node underneath.
func NewHollowKubelet(
	nodeName string,
	client *clientset.Clientset,
	cadvisorInterface cadvisor.Interface,
	dockerClient dockertools.DockerInterface,
	kubeletPort, kubeletReadOnlyPort int,
	containerManager cm.ContainerManager,
	maxPods int, podsPerCore int,
) *HollowKubelet {
	// -----------------
	// Static config
	// -----------------
	c := GetHollowKubeletConfig(nodeName, kubeletPort, kubeletReadOnlyPort, maxPods, podsPerCore)

	// -----------------
	// Injected objects
	// -----------------
	// Only the empty-dir and secret volume plugins are enabled.
	volumePlugins := empty_dir.ProbeVolumePlugins()
	volumePlugins = append(volumePlugins, secret.ProbeVolumePlugins()...)
	d := &kubelet.KubeletDeps{
		KubeClient:        client,
		DockerClient:      dockerClient,
		CAdvisorInterface: cadvisorInterface,
		Cloud:             nil,
		OSInterface:       &containertest.FakeOS{},
		ContainerManager:  containerManager,
		VolumePlugins:     volumePlugins,
		TLSOptions:        nil,
		OOMAdjuster:       oom.NewFakeOOMAdjuster(),
		Writer:            &kubeio.StdWriter{},
		Mounter:           mount.New("" /* default mount path */),
	}

	return &HollowKubelet{
		KubeletConfiguration: c,
		KubeletDeps:          d,
	}
}
// SimpleKubelet builds a KubeletConfig with mostly hard-coded defaults for a
// simple Kubelet talking to dockerClient, using an API Client.
// Under the hood the returned config is consumed by RunKubelet (below).
func SimpleKubelet(client *client.Client,
	dockerClient dockertools.DockerInterface,
	hostname, rootDir, manifestURL, address string,
	port uint,
	readOnlyPort uint,
	masterServiceNamespace string,
	volumePlugins []volume.VolumePlugin,
	tlsOptions *server.TLSOptions,
	cadvisorInterface cadvisor.Interface,
	configFilePath string,
	cloud cloudprovider.Interface,
	osInterface kubecontainer.OSInterface,
	fileCheckFrequency, httpCheckFrequency, minimumGCAge, nodeStatusUpdateFrequency, syncFrequency, outOfDiskTransitionFrequency time.Duration,
	maxPods int,
	containerManager cm.ContainerManager, clusterDNS net.IP) *KubeletConfig {
	// Fixed GC / disk policies; these are not configurable through this helper.
	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: 90,
		LowThresholdPercent:  80,
	}
	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: 256,
		RootFreeDiskMB:   256,
	}
	kcfg := KubeletConfig{
		Address:                 net.ParseIP(address),
		CAdvisorInterface:       cadvisorInterface,
		CgroupRoot:              "",
		Cloud:                   cloud,
		ClusterDNS:              clusterDNS,
		ConfigFile:              configFilePath,
		ContainerManager:        containerManager,
		ContainerRuntime:        "docker",
		CPUCFSQuota:             false,
		DiskSpacePolicy:         diskSpacePolicy,
		DockerClient:            dockerClient,
		DockerDaemonContainer:   "/docker-daemon",
		DockerExecHandler:       &dockertools.NativeExecHandler{},
		EnableDebuggingHandlers: true,
		EnableServer:            true,
		FileCheckFrequency:      fileCheckFrequency,
		HostnameOverride:        hostname,
		HTTPCheckFrequency:      httpCheckFrequency,
		ImageGCPolicy:           imageGCPolicy,
		KubeClient:              client,
		ManifestURL:             manifestURL,
		MasterServiceNamespace:  masterServiceNamespace,
		MaxContainerCount:       100,
		MaxOpenFiles:            1024,
		MaxPerPodContainerCount: 2,
		MaxPods:                 maxPods,
		MinimumGCAge:            minimumGCAge,
		Mounter:                 mount.New(),
		ChownRunner:             chown.New(),
		ChmodRunner:             chmod.New(),
		NodeStatusUpdateFrequency: nodeStatusUpdateFrequency,
		// Fake OOM adjuster: this helper targets local/test runs.
		OOMAdjuster:                  oom.NewFakeOOMAdjuster(),
		OSInterface:                  osInterface,
		PodInfraContainerImage:       kubetypes.PodInfraContainerImage,
		Port:                         port,
		ReadOnlyPort:                 readOnlyPort,
		RegisterNode:                 true,
		RegisterSchedulable:          true,
		RegistryBurst:                10,
		RegistryPullQPS:              5.0,
		ResolverConfig:               kubetypes.ResolvConfDefault,
		ResourceContainer:            "/kubelet",
		RootDirectory:                rootDir,
		SerializeImagePulls:          true,
		SyncFrequency:                syncFrequency,
		SystemContainer:              "",
		TLSOptions:                   tlsOptions,
		VolumePlugins:                volumePlugins,
		Writer:                       &io.StdWriter{},
		OutOfDiskTransitionFrequency: outOfDiskTransitionFrequency,
	}
	return &kcfg
}
// SimpleKubelet builds a KubeletConfig with mostly hard-coded defaults for a
// simple Kubelet talking to dockerClient, using an API Client.
// Under the hood the returned config is consumed by RunKubelet (below).
func SimpleKubelet(client *clientset.Clientset,
	dockerClient dockertools.DockerInterface,
	hostname, rootDir, manifestURL, address string,
	port uint,
	readOnlyPort uint,
	masterServiceNamespace string,
	volumePlugins []volume.VolumePlugin,
	tlsOptions *server.TLSOptions,
	cadvisorInterface cadvisor.Interface,
	configFilePath string,
	cloud cloudprovider.Interface,
	osInterface kubecontainer.OSInterface,
	fileCheckFrequency, httpCheckFrequency, minimumGCAge, nodeStatusUpdateFrequency, syncFrequency, outOfDiskTransitionFrequency, evictionPressureTransitionPeriod time.Duration,
	maxPods int, podsPerCore int,
	containerManager cm.ContainerManager, clusterDNS net.IP) *KubeletConfig {
	// Fixed GC / disk / eviction policies; not configurable via this helper
	// except for the eviction pressure transition period.
	imageGCPolicy := images.ImageGCPolicy{
		HighThresholdPercent: 90,
		LowThresholdPercent:  80,
	}
	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: 256,
		RootFreeDiskMB:   256,
	}
	evictionConfig := eviction.Config{
		PressureTransitionPeriod: evictionPressureTransitionPeriod,
	}

	// Zero-value KubeletConfiguration; only used below to pick up the
	// default PodInfraContainerImage.
	c := componentconfig.KubeletConfiguration{}
	kcfg := KubeletConfig{
		Address:                      net.ParseIP(address),
		CAdvisorInterface:            cadvisorInterface,
		VolumeStatsAggPeriod:         time.Minute,
		CgroupRoot:                   "",
		Cloud:                        cloud,
		ClusterDNS:                   clusterDNS,
		ConfigFile:                   configFilePath,
		ContainerManager:             containerManager,
		ContainerRuntime:             "docker",
		CPUCFSQuota:                  true,
		DiskSpacePolicy:              diskSpacePolicy,
		DockerClient:                 dockerClient,
		RuntimeCgroups:               "",
		DockerExecHandler:            &dockertools.NativeExecHandler{},
		EnableControllerAttachDetach: false,
		EnableCustomMetrics:          false,
		EnableDebuggingHandlers:      true,
		EnableServer:                 true,
		CgroupsPerQOS:                false,
		FileCheckFrequency:           fileCheckFrequency,
		// Since this kubelet runs with --configure-cbr0=false, it needs to use
		// hairpin-veth to allow hairpin packets. Note that this deviates from
		// what the "real" kubelet currently does, because there's no way to
		// set promiscuous mode on docker0.
		HairpinMode:               componentconfig.HairpinVeth,
		HostnameOverride:          hostname,
		HTTPCheckFrequency:        httpCheckFrequency,
		ImageGCPolicy:             imageGCPolicy,
		KubeClient:                client,
		ManifestURL:               manifestURL,
		MasterServiceNamespace:    masterServiceNamespace,
		MaxContainerCount:         100,
		MaxOpenFiles:              1024,
		MaxPerPodContainerCount:   2,
		MaxPods:                   maxPods,
		NvidiaGPUs:                0,
		MinimumGCAge:              minimumGCAge,
		Mounter:                   mount.New(),
		NodeStatusUpdateFrequency: nodeStatusUpdateFrequency,
		// Fake OOM adjuster: this helper targets local/test runs.
		OOMAdjuster:                  oom.NewFakeOOMAdjuster(),
		OSInterface:                  osInterface,
		PodInfraContainerImage:       c.PodInfraContainerImage,
		Port:                         port,
		ReadOnlyPort:                 readOnlyPort,
		RegisterNode:                 true,
		RegisterSchedulable:          true,
		RegistryBurst:                10,
		RegistryPullQPS:              5.0,
		ResolverConfig:               kubetypes.ResolvConfDefault,
		KubeletCgroups:               "/kubelet",
		RootDirectory:                rootDir,
		SerializeImagePulls:          true,
		SyncFrequency:                syncFrequency,
		SystemCgroups:                "",
		TLSOptions:                   tlsOptions,
		VolumePlugins:                volumePlugins,
		Writer:                       &io.StdWriter{},
		OutOfDiskTransitionFrequency: outOfDiskTransitionFrequency,
		EvictionConfig:               evictionConfig,
		PodsPerCore:                  podsPerCore,
	}
	return &kcfg
}
// UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
// is not valid. It will not start any background processes, and does not include authentication/authorization.
func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
	// Validate the host-namespace source lists (which pod sources may use
	// host network / PID / IPC).
	hostNetworkSources, err := kubetypes.GetValidatedSources(s.HostNetworkSources)
	if err != nil {
		return nil, err
	}

	hostPIDSources, err := kubetypes.GetValidatedSources(s.HostPIDSources)
	if err != nil {
		return nil, err
	}

	hostIPCSources, err := kubetypes.GetValidatedSources(s.HostIPCSources)
	if err != nil {
		return nil, err
	}

	mounter := mount.New()
	var writer io.Writer = &io.StdWriter{}
	if s.Containerized {
		// In containerized mode, mount/write operations must re-enter the
		// host namespace via nsenter.
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
		writer = &io.NsenterWriter{}
	}

	tlsOptions, err := InitializeTLS(s)
	if err != nil {
		return nil, err
	}

	// Pick the docker exec handler; anything unrecognized falls back to native.
	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	imageGCPolicy := images.ImageGCPolicy{
		MinAge:               s.ImageMinimumGCAge.Duration,
		HighThresholdPercent: int(s.ImageGCHighThresholdPercent),
		LowThresholdPercent:  int(s.ImageGCLowThresholdPercent),
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: int(s.LowDiskSpaceThresholdMB),
		RootFreeDiskMB:   int(s.LowDiskSpaceThresholdMB),
	}

	// A single optional "Key: Value" header may be attached to manifest-URL
	// fetches.
	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return nil, fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	reservation, err := parseReservation(s.KubeReserved, s.SystemReserved)
	if err != nil {
		return nil, err
	}

	thresholds, err := eviction.ParseThresholdConfig(s.EvictionHard, s.EvictionSoft, s.EvictionSoftGracePeriod, s.EvictionMinimumReclaim)
	if err != nil {
		return nil, err
	}
	evictionConfig := eviction.Config{
		PressureTransitionPeriod: s.EvictionPressureTransitionPeriod.Duration,
		MaxPodGracePeriodSeconds: int64(s.EvictionMaxPodGracePeriod),
		Thresholds:               thresholds,
	}

	return &KubeletConfig{
		Address:                net.ParseIP(s.Address),
		AllowPrivileged:        s.AllowPrivileged,
		Auth:                   nil, // default does not enforce auth[nz]
		CAdvisorInterface:      nil, // launches background processes, not set here
		VolumeStatsAggPeriod:   s.VolumeStatsAggPeriod.Duration,
		CgroupRoot:             s.CgroupRoot,
		Cloud:                  nil, // cloud provider might start background processes
		ClusterDNS:             net.ParseIP(s.ClusterDNS),
		ClusterDomain:          s.ClusterDomain,
		ConfigFile:             s.Config,
		ConfigureCBR0:          s.ConfigureCBR0,
		ContainerManager:       nil,
		ContainerRuntime:       s.ContainerRuntime,
		RuntimeRequestTimeout:  s.RuntimeRequestTimeout.Duration,
		CPUCFSQuota:            s.CPUCFSQuota,
		DiskSpacePolicy:        diskSpacePolicy,
		DockerClient:           dockertools.ConnectToDockerOrDie(s.DockerEndpoint, s.RuntimeRequestTimeout.Duration), // TODO(random-liu): Set RuntimeRequestTimeout for rkt.
		RuntimeCgroups:         s.RuntimeCgroups,
		DockerExecHandler:      dockerExecHandler,
		EnableControllerAttachDetach: s.EnableControllerAttachDetach,
		EnableCustomMetrics:          s.EnableCustomMetrics,
		EnableDebuggingHandlers:      s.EnableDebuggingHandlers,
		CgroupsPerQOS:                s.CgroupsPerQOS,
		EnableServer:                 s.EnableServer,
		EventBurst:                   int(s.EventBurst),
		EventRecordQPS:               float32(s.EventRecordQPS),
		FileCheckFrequency:           s.FileCheckFrequency.Duration,
		HostnameOverride:             s.HostnameOverride,
		HostNetworkSources:           hostNetworkSources,
		HostPIDSources:               hostPIDSources,
		HostIPCSources:               hostIPCSources,
		HTTPCheckFrequency:           s.HTTPCheckFrequency.Duration,
		ImageGCPolicy:                imageGCPolicy,
		KubeClient:                   nil,
		ManifestURL:                  s.ManifestURL,
		ManifestURLHeader:            manifestURLHeader,
		MasterServiceNamespace:       s.MasterServiceNamespace,
		MaxContainerCount:            int(s.MaxContainerCount),
		MaxOpenFiles:                 uint64(s.MaxOpenFiles),
		MaxPerPodContainerCount:      int(s.MaxPerPodContainerCount),
		MaxPods:                      int(s.MaxPods),
		NvidiaGPUs:                   int(s.NvidiaGPUs),
		MinimumGCAge:                 s.MinimumGCAge.Duration,
		Mounter:                      mounter,
		NetworkPluginName:            s.NetworkPluginName,
		NetworkPlugins:               ProbeNetworkPlugins(s.NetworkPluginDir),
		NodeLabels:                   s.NodeLabels,
		NodeStatusUpdateFrequency:    s.NodeStatusUpdateFrequency.Duration,
		NonMasqueradeCIDR:            s.NonMasqueradeCIDR,
		OOMAdjuster:                  oom.NewOOMAdjuster(),
		OSInterface:                  kubecontainer.RealOS{},
		PodCIDR:                      s.PodCIDR,
		ReconcileCIDR:                s.ReconcileCIDR,
		PodInfraContainerImage:       s.PodInfraContainerImage,
		Port:                         uint(s.Port),
		ReadOnlyPort:                 uint(s.ReadOnlyPort),
		RegisterNode:                 s.RegisterNode,
		RegisterSchedulable:          s.RegisterSchedulable,
		RegistryBurst:                int(s.RegistryBurst),
		RegistryPullQPS:              float64(s.RegistryPullQPS),
		ResolverConfig:               s.ResolverConfig,
		Reservation:                  *reservation,
		KubeletCgroups:               s.KubeletCgroups,
		RktPath:                      s.RktPath,
		RktAPIEndpoint:               s.RktAPIEndpoint,
		RktStage1Image:               s.RktStage1Image,
		RootDirectory:                s.RootDirectory,
		SeccompProfileRoot:           s.SeccompProfileRoot,
		Runonce:                      s.RunOnce,
		SerializeImagePulls:          s.SerializeImagePulls,
		// Standalone mode means no API servers were configured.
		StandaloneMode:                 (len(s.APIServerList) == 0),
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout.Duration,
		SyncFrequency:                  s.SyncFrequency.Duration,
		SystemCgroups:                  s.SystemCgroups,
		TLSOptions:                     tlsOptions,
		Writer:                         writer,
		VolumePlugins:                  ProbeVolumePlugins(s.VolumePluginDir),
		OutOfDiskTransitionFrequency:   s.OutOfDiskTransitionFrequency.Duration,
		HairpinMode:                    s.HairpinMode,
		BabysitDaemons:                 s.BabysitDaemons,
		ExperimentalFlannelOverlay:     s.ExperimentalFlannelOverlay,
		NodeIP:                         net.ParseIP(s.NodeIP),
		EvictionConfig:                 evictionConfig,
		PodsPerCore:                    int(s.PodsPerCore),
	}, nil
}
// Run runs the specified KubeletExecutorServer: it wires up the kubelet
// config from the executor's flags, starts the kubelet, optionally serves a
// healthz endpoint, and then blocks until the executor shuts down.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
	rand.Seed(time.Now().UTC().UnixNano())

	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		log.Info(err)
	}

	// derive the executor cgroup and use it as docker container cgroup root
	mesosCgroup := findMesosCgroup(s.cgroupPrefix)
	s.cgroupRoot = mesosCgroup
	log.V(2).Infof("passing cgroup %q to the kubelet as cgroup root", s.CgroupRoot)

	// empty string for the docker and system containers (= cgroup paths). This
	// stops the kubelet taking any control over other system processes.
	s.SystemContainer = ""
	s.DockerDaemonContainer = ""

	// We set kubelet container to its own cgroup below the executor cgroup.
	// In contrast to the docker and system container, this has no other
	// undesired side-effects.
	s.ResourceContainer = mesosCgroup + "/kubelet"

	// create apiserver client
	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil {
		// required for k8sm since we need to send api.Binding information
		// back to the apiserver
		log.Fatalf("No API client: %v", err)
	}

	log.Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	shutdownCloser, err := s.syncExternalShutdownWatcher()
	if err != nil {
		return err
	}

	cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	//TODO(jdef) intentionally NOT initializing a cloud provider here since:
	//(a) the kubelet doesn't actually use it
	//(b) we don't need to create N-kubelet connections to zookeeper for no good reason
	//cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	//log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}
	mounter := mount.New()
	if s.Containerized {
		// In containerized mode, mounts must re-enter the host namespace.
		log.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = &mount.NsenterMounter{}
	}

	// Pick the docker exec handler; unrecognized names fall back to native.
	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		log.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	kcfg := app.KubeletConfig{
		Address:            s.Address,
		AllowPrivileged:    s.AllowPrivileged,
		HostNetworkSources: hostNetworkSources,
		HostnameOverride:   s.HostnameOverride,
		RootDirectory:      s.RootDirectory,
		// ConfigFile: ""
		// ManifestURL: ""
		FileCheckFrequency: s.FileCheckFrequency,
		// HTTPCheckFrequency
		PodInfraContainerImage:  s.PodInfraContainerImage,
		SyncFrequency:           s.SyncFrequency,
		RegistryPullQPS:         s.RegistryPullQPS,
		RegistryBurst:           s.RegistryBurst,
		MinimumGCAge:            s.MinimumGCAge,
		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
		MaxContainerCount:       s.MaxContainerCount,
		RegisterNode:            s.RegisterNode,
		// StandaloneMode: false
		ClusterDomain:           s.ClusterDomain,
		ClusterDNS:              s.ClusterDNS,
		Runonce:                 s.RunOnce,
		Port:                    s.Port,
		ReadOnlyPort:            s.ReadOnlyPort,
		CadvisorInterface:       cadvisorInterface,
		EnableServer:            s.EnableServer,
		EnableDebuggingHandlers: s.EnableDebuggingHandlers,
		DockerClient:            dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		KubeClient:              apiclient,
		MasterServiceNamespace:  s.MasterServiceNamespace,
		VolumePlugins:           app.ProbeVolumePlugins(),
		NetworkPlugins:          app.ProbeNetworkPlugins(s.NetworkPluginDir),
		NetworkPluginName:       s.NetworkPluginName,
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		TLSOptions:                     tlsOptions,
		ImageGCPolicy:                  imageGCPolicy,
		DiskSpacePolicy:                diskSpacePolicy,
		Cloud:                          nil, // TODO(jdef) Cloud, specifying null here because we don't want all kubelets polling mesos-master; need to account for this in the cloudprovider impl
		NodeStatusUpdateFrequency:      s.NodeStatusUpdateFrequency,
		ResourceContainer:              s.ResourceContainer,
		CgroupRoot:                     s.cgroupRoot,
		ContainerRuntime:               s.ContainerRuntime,
		Mounter:                        mounter,
		DockerDaemonContainer:          s.DockerDaemonContainer,
		SystemContainer:                s.SystemContainer,
		ConfigureCBR0:                  s.ConfigureCBR0,
		MaxPods:                        s.MaxPods,
		DockerExecHandler:              dockerExecHandler,
	}

	kcfg.NodeName = kcfg.Hostname

	err = app.RunKubelet(&kcfg, app.KubeletBuilder(func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
		return s.createAndInitKubelet(kc, hks, clientConfig, shutdownCloser)
	}))
	if err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		// Best-effort healthz server; failures are logged, not fatal.
		go util.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				log.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}

	// block until executor is shut down or commits shutdown
	select {}
}
// safeFormatAndMount waits for the device to appear, then formats and mounts
// it at mountpoint unless something is already mounted there. All paths are
// translated through PathFor so the operation works from inside a container.
func (k *VolumeMountController) safeFormatAndMount(device string, mountpoint string, fstype string) error {
	// Wait for the device to show up; attachment may still be in flight.
	for {
		_, err := os.Stat(PathFor(device))
		if err == nil {
			break
		}
		if !os.IsNotExist(err) {
			return fmt.Errorf("error checking for device %q: %v", device, err)
		}
		glog.Infof("Waiting for device %q to be attached", device)
		time.Sleep(1 * time.Second)
	}
	glog.Infof("Found device %q", device)

	//// Mount the device
	//var mounter mount.Interface
	//runner := exec.New()
	//if k.Containerized {
	//	mounter = mount.NewNsenterMounter()
	//	runner = NewChrootRunner(runner, "/rootfs")
	//} else {
	//	mounter = mount.New()
	//}

	// If we are containerized, we still first SafeFormatAndMount in our namespace
	// This is because SafeFormatAndMount doesn't seem to work in a container
	safeFormatAndMount := &mount.SafeFormatAndMount{Interface: mount.New(), Runner: exec.New()}

	// Check if it is already mounted
	mounts, err := safeFormatAndMount.List()
	if err != nil {
		return fmt.Errorf("error listing existing mounts: %v", err)
	}

	// Note: IsLikelyNotMountPoint is not containerized
	findMountpoint := PathFor(mountpoint)
	// Collect every existing mount whose path matches our target mountpoint.
	var existing []*mount.MountPoint
	for i := range mounts {
		m := &mounts[i]
		glog.V(8).Infof("found existing mount: %v", m)
		if m.Path == findMountpoint {
			existing = append(existing, m)
		}
	}

	options := []string{}
	//if readOnly {
	//	options = append(options, "ro")
	//}

	if len(existing) == 0 {
		// Nothing mounted there yet: create the directory, then format+mount.
		glog.Infof("Creating mount directory %q", PathFor(mountpoint))
		if err := os.MkdirAll(PathFor(mountpoint), 0750); err != nil {
			return err
		}

		glog.Infof("Mounting device %q on %q", PathFor(device), PathFor(mountpoint))

		err = safeFormatAndMount.FormatAndMount(PathFor(device), PathFor(mountpoint), fstype, options)
		if err != nil {
			//os.Remove(mountpoint)
			return fmt.Errorf("error formatting and mounting disk %q on %q: %v", PathFor(device), PathFor(mountpoint), err)
		}

		// If we are containerized, we then also mount it into the host
		if Containerized {
			hostMounter := mount.NewNsenterMounter()
			err = hostMounter.Mount(device, mountpoint, fstype, options)
			if err != nil {
				//os.Remove(mountpoint)
				return fmt.Errorf("error formatting and mounting disk %q on %q in host: %v", device, mountpoint, err)
			}
		}
	} else {
		// NOTE(review): the log claims we verify "it is our device", but the
		// code below only checks that exactly one mount exists at the path —
		// the mounted device is never compared against `device`. Confirm
		// whether a device-identity check is intended here.
		glog.Infof("Device already mounted on %q, verifying it is our device", mountpoint)

		if len(existing) != 1 {
			glog.Infof("Existing mounts unexpected")

			for i := range mounts {
				m := &mounts[i]
				glog.Infof("%s\t%s", m.Device, m.Path)
			}

			return fmt.Errorf("Found multiple existing mounts of %q at %q", device, mountpoint)
		} else {
			glog.Infof("Found existing mount of %q at %q", device, mountpoint)
		}
	}
	return nil
}
// Run runs the specified KubeletExecutorServer.
//
// It seeds math/rand, applies the OOM score adjustment (best-effort),
// builds an apiserver client (fatal on failure — k8sm must be able to send
// api.Binding information back), constructs cadvisor, GC/disk policies,
// mounter, writer and Docker exec handler, assembles an app.KubeletConfig,
// starts the kubelet (plus an optional healthz endpoint), and then blocks
// forever. It never returns nil on the happy path.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
	rand.Seed(time.Now().UTC().UnixNano())

	oomAdjuster := oom.NewOOMAdjuster()
	if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil {
		// Best-effort: failure to adjust OOM score is logged, not fatal.
		log.Info(err)
	}

	// empty string for the docker and system containers (= cgroup paths). This
	// stops the kubelet taking any control over other system processes.
	s.SystemContainer = ""
	s.DockerDaemonContainer = ""

	// create apiserver client
	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil {
		// required for k8sm since we need to send api.Binding information
		// back to the apiserver
		log.Fatalf("No API client: %v", err)
	}

	log.Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	cAdvisorInterface, err := cadvisor.New(s.CAdvisorPort)
	if err != nil {
		return err
	}

	// Image GC thresholds come straight from server flags (percent of disk).
	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	// The same low-disk threshold is applied to both docker and root
	// filesystems.
	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	//TODO(jdef) intentionally NOT initializing a cloud provider here since:
	//(a) the kubelet doesn't actually use it
	//(b) we don't need to create N-kubelet connections to zookeeper for no good reason
	//cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	//log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	// Validate the comma-separated pod-source lists before wiring them in.
	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	hostPIDSources, err := kubelet.GetValidatedSources(strings.Split(s.HostPIDSources, ","))
	if err != nil {
		return err
	}

	hostIPCSources, err := kubelet.GetValidatedSources(strings.Split(s.HostIPCSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}

	// In containerized mode, mount operations must happen in the host's
	// namespace, so swap in the nsenter-based mounter.
	mounter := mount.New()
	if s.Containerized {
		log.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = &mount.NsenterMounter{}
	}

	var writer utilio.Writer = &utilio.StdWriter{}
	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		// nsenter exec also implies an nsenter-based writer.
		writer = &utilio.NsenterWriter{}
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		log.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	// Optional single "Key: Value" header sent with manifest-URL fetches.
	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	kcfg := app.KubeletConfig{
		Address:           s.Address,
		AllowPrivileged:   s.AllowPrivileged,
		CAdvisorInterface: cAdvisorInterface,
		CgroupRoot:        s.CgroupRoot,
		Cloud:             nil, // TODO(jdef) Cloud, specifying null here because we don't want all kubelets polling mesos-master; need to account for this in the cloudprovider impl
		ClusterDNS:        s.ClusterDNS,
		ClusterDomain:     s.ClusterDomain,
		// ConfigFile: ""
		ConfigureCBR0:           s.ConfigureCBR0,
		ContainerRuntime:        s.ContainerRuntime,
		CPUCFSQuota:             s.CPUCFSQuota,
		DiskSpacePolicy:         diskSpacePolicy,
		DockerClient:            dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		DockerDaemonContainer:   s.DockerDaemonContainer,
		DockerExecHandler:       dockerExecHandler,
		EnableDebuggingHandlers: s.EnableDebuggingHandlers,
		EnableServer:            s.EnableServer,
		EventBurst:              s.EventBurst,
		EventRecordQPS:          s.EventRecordQPS,
		FileCheckFrequency:      s.FileCheckFrequency,
		HostnameOverride:        s.HostnameOverride,
		HostNetworkSources:      hostNetworkSources,
		HostPIDSources:          hostPIDSources,
		HostIPCSources:          hostIPCSources,
		// HTTPCheckFrequency
		ImageGCPolicy: imageGCPolicy,
		KubeClient:    apiclient,
		// ManifestURL: ""
		ManifestURLHeader:         manifestURLHeader,
		MasterServiceNamespace:    s.MasterServiceNamespace,
		MaxContainerCount:         s.MaxContainerCount,
		MaxOpenFiles:              s.MaxOpenFiles,
		MaxPerPodContainerCount:   s.MaxPerPodContainerCount,
		MaxPods:                   s.MaxPods,
		MinimumGCAge:              s.MinimumGCAge,
		Mounter:                   mounter,
		NetworkPluginName:         s.NetworkPluginName,
		NetworkPlugins:            app.ProbeNetworkPlugins(s.NetworkPluginDir),
		NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
		OOMAdjuster:               oomAdjuster,
		OSInterface:               kubecontainer.RealOS{},
		PodCIDR:                   s.PodCIDR,
		PodInfraContainerImage:    s.PodInfraContainerImage,
		Port:                      s.Port,
		ReadOnlyPort:              s.ReadOnlyPort,
		RegisterNode:              s.RegisterNode,
		RegistryBurst:             s.RegistryBurst,
		RegistryPullQPS:           s.RegistryPullQPS,
		ResolverConfig:            s.ResolverConfig,
		ResourceContainer:         s.ResourceContainer,
		RootDirectory:             s.RootDirectory,
		Runonce:                   s.RunOnce,
		// StandaloneMode: false
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		SyncFrequency:                  s.SyncFrequency,
		SystemContainer:                s.SystemContainer,
		TLSOptions:                     tlsOptions,
		VolumePlugins:                  app.ProbeVolumePlugins(),
		Writer:                         writer,
	}

	// NOTE(review): kcfg.Hostname is presumably populated by KubeletConfig
	// defaulting before this point — TODO confirm; NodeName mirrors it here.
	kcfg.NodeName = kcfg.Hostname

	// Defer kubelet construction to createAndInitKubelet via the Builder hook.
	kcfg.Builder = app.KubeletBuilder(func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
		return s.createAndInitKubelet(kc, hks, clientConfig)
	})

	err = app.RunKubelet(&kcfg)
	if err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		// Keep retrying the healthz listener every 5s until process exit.
		go util.Until(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				log.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}

	// block until executor is shut down or commits shutdown
	select {}
}
// UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
// is not valid. It will not start any background processes, and does not include authentication/authorization.
//
// Fields that would require background processes (CadvisorInterface, Cloud)
// or auth enforcement (Auth) are deliberately left nil; callers are expected
// to fill them in before running the kubelet. Returns an error if any of the
// pod-source lists, TLS setup, or the manifest-url-header flag fail to parse.
func (s *KubeletServer) UnsecuredKubeletConfig() (*KubeletConfig, error) {
	// Validate the comma-separated pod-source lists up front.
	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return nil, err
	}

	hostPIDSources, err := kubelet.GetValidatedSources(strings.Split(s.HostPIDSources, ","))
	if err != nil {
		return nil, err
	}

	hostIPCSources, err := kubelet.GetValidatedSources(strings.Split(s.HostIPCSources, ","))
	if err != nil {
		return nil, err
	}

	// In containerized mode, mounts must be performed in the host's
	// namespace via nsenter.
	mounter := mount.New()
	if s.Containerized {
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return nil, err
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		// Unknown handler names degrade gracefully to the native handler.
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	// Image GC thresholds are percentages of disk usage.
	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	// The same low-disk threshold applies to both docker and root filesystems.
	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	// Optional single "Key: Value" header for manifest-URL fetches.
	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return nil, fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	return &KubeletConfig{
		Address:                 s.Address,
		AllowPrivileged:         s.AllowPrivileged,
		CadvisorInterface:       nil, // launches background processes, not set here
		Auth:                    nil, // default does not enforce auth[nz]
		CgroupRoot:              s.CgroupRoot,
		Cloud:                   nil, // cloud provider might start background processes
		ClusterDNS:              s.ClusterDNS,
		ClusterDomain:           s.ClusterDomain,
		ConfigFile:              s.Config,
		ConfigureCBR0:           s.ConfigureCBR0,
		ContainerRuntime:        s.ContainerRuntime,
		CPUCFSQuota:             s.CPUCFSQuota,
		DiskSpacePolicy:         diskSpacePolicy,
		DockerClient:            dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		DockerDaemonContainer:   s.DockerDaemonContainer,
		DockerExecHandler:       dockerExecHandler,
		EnableDebuggingHandlers: s.EnableDebuggingHandlers,
		EnableServer:            s.EnableServer,
		EventBurst:              s.EventBurst,
		EventRecordQPS:          s.EventRecordQPS,
		FileCheckFrequency:      s.FileCheckFrequency,
		HostnameOverride:        s.HostnameOverride,
		HostNetworkSources:      hostNetworkSources,
		HostPIDSources:          hostPIDSources,
		HostIPCSources:          hostIPCSources,
		HTTPCheckFrequency:      s.HTTPCheckFrequency,
		ImageGCPolicy:           imageGCPolicy,
		KubeClient:              nil,
		ManifestURL:             s.ManifestURL,
		ManifestURLHeader:       manifestURLHeader,
		MasterServiceNamespace:  s.MasterServiceNamespace,
		MaxContainerCount:       s.MaxContainerCount,
		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
		MaxPods:                 s.MaxPods,
		MinimumGCAge:            s.MinimumGCAge,
		Mounter:                 mounter,
		NetworkPluginName:       s.NetworkPluginName,
		NetworkPlugins:          ProbeNetworkPlugins(s.NetworkPluginDir),
		NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
		OSInterface:               kubecontainer.RealOS{},
		PodCIDR:                   s.PodCIDR,
		PodInfraContainerImage:    s.PodInfraContainerImage,
		Port:                      s.Port,
		ReadOnlyPort:              s.ReadOnlyPort,
		RegisterNode:              s.RegisterNode,
		RegistryBurst:             s.RegistryBurst,
		RegistryPullQPS:           s.RegistryPullQPS,
		ResolverConfig:            s.ResolverConfig,
		ResourceContainer:         s.ResourceContainer,
		RktPath:                   s.RktPath,
		RktStage1Image:            s.RktStage1Image,
		RootDirectory:             s.RootDirectory,
		Runonce:                   s.RunOnce,
		// Standalone when no apiservers are configured.
		StandaloneMode:                 (len(s.APIServerList) == 0),
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		SyncFrequency:                  s.SyncFrequency,
		SystemContainer:                s.SystemContainer,
		TLSOptions:                     tlsOptions,
		VolumePlugins:                  ProbeVolumePlugins(),
	}, nil
}
// Run runs the specified KubeletServer. This should never exit.
//
// It seeds math/rand, adjusts the OOM score (best-effort), builds an
// apiserver client (warning only if none is configured), initializes
// cadvisor, the cloud provider, TLS, mounter and Docker exec handler,
// assembles a KubeletConfig, starts the kubelet and optional healthz
// endpoint, then blocks forever — except in RunOnce mode, where it
// returns nil after the kubelet has been started.
func (s *KubeletServer) Run(_ []string) error {
	util.ReallyCrash = s.ReallyCrashForTesting
	rand.Seed(time.Now().UTC().UnixNano())

	// TODO(vmarmol): Do this through container config.
	if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		// Best-effort: failure to set the OOM score is only a warning.
		glog.Warning(err)
	}

	// A missing API client is tolerated (standalone mode); only warn when
	// apiservers were explicitly configured.
	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil && len(s.APIServerList) > 0 {
		glog.Warningf("No API client: %v", err)
	}

	glog.V(2).Infof("Using root directory: %v", s.RootDirectory)

	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
	if err != nil {
		return err
	}

	// Image GC thresholds are percentages of disk usage.
	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	// The same low-disk threshold applies to both docker and root filesystems.
	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		return err
	}
	glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	// Optional single "Key: Value" header for manifest-URL fetches.
	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}

	// In containerized mode, mounts must happen in the host's namespace.
	mounter := mount.New()
	if s.Containerized {
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		// Unknown handler names degrade gracefully to the native handler.
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	kcfg := KubeletConfig{
		Address:                 s.Address,
		AllowPrivileged:         s.AllowPrivileged,
		HostNetworkSources:      hostNetworkSources,
		HostnameOverride:        s.HostnameOverride,
		RootDirectory:           s.RootDirectory,
		ConfigFile:              s.Config,
		ManifestURL:             s.ManifestURL,
		ManifestURLHeader:       manifestURLHeader,
		FileCheckFrequency:      s.FileCheckFrequency,
		HTTPCheckFrequency:      s.HTTPCheckFrequency,
		PodInfraContainerImage:  s.PodInfraContainerImage,
		SyncFrequency:           s.SyncFrequency,
		RegistryPullQPS:         s.RegistryPullQPS,
		RegistryBurst:           s.RegistryBurst,
		MinimumGCAge:            s.MinimumGCAge,
		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
		MaxContainerCount:       s.MaxContainerCount,
		RegisterNode:            s.RegisterNode,
		// Standalone when no apiservers are configured.
		StandaloneMode:          (len(s.APIServerList) == 0),
		ClusterDomain:           s.ClusterDomain,
		ClusterDNS:              s.ClusterDNS,
		Runonce:                 s.RunOnce,
		Port:                    s.Port,
		ReadOnlyPort:            s.ReadOnlyPort,
		CadvisorInterface:       cadvisorInterface,
		EnableServer:            s.EnableServer,
		EnableDebuggingHandlers: s.EnableDebuggingHandlers,
		DockerClient:            dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		KubeClient:              apiclient,
		MasterServiceNamespace:  s.MasterServiceNamespace,
		VolumePlugins:           ProbeVolumePlugins(),
		NetworkPlugins:          ProbeNetworkPlugins(s.NetworkPluginDir),
		NetworkPluginName:       s.NetworkPluginName,
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		TLSOptions:                     tlsOptions,
		ImageGCPolicy:                  imageGCPolicy,
		DiskSpacePolicy:                diskSpacePolicy,
		Cloud:                          cloud,
		NodeStatusUpdateFrequency:      s.NodeStatusUpdateFrequency,
		ResourceContainer:              s.ResourceContainer,
		CgroupRoot:                     s.CgroupRoot,
		ContainerRuntime:               s.ContainerRuntime,
		Mounter:                        mounter,
		DockerDaemonContainer:          s.DockerDaemonContainer,
		SystemContainer:                s.SystemContainer,
		ConfigureCBR0:                  s.ConfigureCBR0,
		PodCIDR:                        s.PodCIDR,
		MaxPods:                        s.MaxPods,
		DockerExecHandler:              dockerExecHandler,
	}

	if err := RunKubelet(&kcfg, nil); err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		// Keep retrying the healthz listener every 5s until process exit.
		go util.Forever(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	if s.RunOnce {
		return nil
	}

	// run forever
	select {}
}