Example #1
func (self *dockerContainerHandler) readLibcontainerState() (state *libcontainer.State, err error) {
	statePath := path.Join(dockerRootDir, self.id, "state.json")
	if !utils.FileExists(statePath) {
		// TODO(vmarmol): Remove this once we can depend on a newer Docker.
		// Libcontainer changed how its state is stored; try the old way of a "pid" file.
		if utils.FileExists(path.Join(dockerRootDir, self.id, "pid")) {
			// We don't need the old state; return an empty state and we'll gracefully degrade.
			state = new(libcontainer.State)
			return
		}

		// TODO(vishh): Return file name as well once we have a better error interface.
		err = fileNotFound
		return
	}
	f, err := os.Open(statePath)
	if err != nil {
		return nil, fmt.Errorf("failed to open %s - %s\n", statePath, err)
	}
	defer f.Close()
	d := json.NewDecoder(f)
	retState := new(libcontainer.State)
	err = d.Decode(retState)
	if err != nil {
		return
	}
	state = retState

	return
}
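
Every example on this page demonstrates utils.FileExists, but the helper itself never appears. As a minimal sketch, assuming it is a thin wrapper around os.Stat (the actual cadvisor implementation may differ):

package utils

import "os"

// FileExists reports whether the given path can be stat'd. This sketch
// treats any error, including permission errors, as "does not exist".
func FileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}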
Example #2
func (self *dockerContainerHandler) readLibcontainerState() (state *libcontainer.State, err error) {
	// TODO(vmarmol): Remove this once we can depend on a newer Docker.
	// Libcontainer changed how its state is stored; try the old way of a "pid" file.
	if !utils.FileExists(self.libcontainerStatePath) {
		if utils.FileExists(self.libcontainerPidPath) {
			// We don't need the old state; return an empty state and we'll gracefully degrade.
			return &libcontainer.State{}, nil
		}
	}
	f, err := os.Open(self.libcontainerStatePath)
	if err != nil {
		return nil, fmt.Errorf("failed to open %s - %s\n", self.libcontainerStatePath, err)
	}
	defer f.Close()
	d := json.NewDecoder(f)
	retState := new(libcontainer.State)
	err = d.Decode(retState)
	if err != nil {
		return nil, fmt.Errorf("failed to parse libcontainer state at %q: %v", self.libcontainerStatePath, err)
	}
	state = retState

	// Fill in the cgroup paths if they are missing, since older Docker clients don't write them.
	if len(state.CgroupPaths) == 0 {
		state.CgroupPaths = self.cgroupPaths
	}

	return
}
Example #3
func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
	var spec info.ContainerSpec

	// The raw driver assumes unified hierarchy containers.

	// Get machine info.
	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return spec, err
	}

	// CPU.
	cpuRoot, ok := self.cgroupSubsystems.mountPoints["cpu"]
	if ok {
		cpuRoot = path.Join(cpuRoot, self.name)
		if utils.FileExists(cpuRoot) {
			spec.HasCpu = true
			spec.Cpu.Limit = readInt64(cpuRoot, "cpu.shares")
		}
	}

	// Cpu Mask.
	// This will fail for non-unified hierarchies. We'll return the whole machine mask in that case.
	cpusetRoot, ok := self.cgroupSubsystems.mountPoints["cpuset"]
	if ok {
		cpusetRoot = path.Join(cpusetRoot, self.name)
		if utils.FileExists(cpusetRoot) {
			spec.HasCpu = true
			spec.Cpu.Mask = readString(cpusetRoot, "cpuset.cpus")
			if spec.Cpu.Mask == "" {
				spec.Cpu.Mask = fmt.Sprintf("0-%d", mi.NumCores-1)
			}
		}
	}

	// Memory.
	memoryRoot, ok := self.cgroupSubsystems.mountPoints["memory"]
	if ok {
		memoryRoot = path.Join(memoryRoot, self.name)
		if utils.FileExists(memoryRoot) {
			spec.HasMemory = true
			spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes")
			spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes")
		}
	}

	// Fs.
	if self.name == "/" || self.externalMounts != nil {
		spec.HasFilesystem = true
	}

	// Network.
	if self.networkInterface != nil {
		spec.HasNetwork = true
	}
	return spec, nil
}
Example #4
// getSystemFile looks for system log files that contain kernel messages and
// returns the path of the first one found.
func getSystemFile() (string, error) {
	const varLogMessages = "/var/log/messages"
	const varLogSyslog = "/var/log/syslog"
	if utils.FileExists(varLogMessages) {
		return varLogMessages, nil
	} else if utils.FileExists(varLogSyslog) {
		return varLogSyslog, nil
	}
	return "", fmt.Errorf("neither %s nor %s exists from which to read kernel errors", varLogMessages, varLogSyslog)
}
Example #5
func (self *rawContainerHandler) GetSpec() (*info.ContainerSpec, error) {
	spec := new(info.ContainerSpec)

	// The raw driver assumes unified hierarchy containers.

	// Get machine info.
	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return nil, err
	}

	// CPU.
	cpuRoot, ok := self.cgroupSubsystems.mountPoints["cpu"]
	if ok {
		cpuRoot = path.Join(cpuRoot, self.name)
		if utils.FileExists(cpuRoot) {
			spec.Cpu = new(info.CpuSpec)
			spec.Cpu.Limit = readInt64(cpuRoot, "cpu.shares")
		}
	}

	// Cpu Mask.
	// This will fail for non-unified hierarchies. We'll return the whole machine mask in that case.
	cpusetRoot, ok := self.cgroupSubsystems.mountPoints["cpuset"]
	if ok {
		if spec.Cpu == nil {
			spec.Cpu = new(info.CpuSpec)
		}
		cpusetRoot = path.Join(cpusetRoot, self.name)
		if utils.FileExists(cpusetRoot) {
			spec.Cpu.Mask = readString(cpusetRoot, "cpuset.cpus")
			if spec.Cpu.Mask == "" {
				spec.Cpu.Mask = fmt.Sprintf("0-%d", mi.NumCores-1)
			}
		}
	}

	// Memory.
	memoryRoot, ok := self.cgroupSubsystems.mountPoints["memory"]
	if ok {
		memoryRoot = path.Join(memoryRoot, self.name)
		if utils.FileExists(memoryRoot) {
			spec.Memory = new(info.MemorySpec)
			spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes")
			spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes")
		}
	}

	return spec, nil
}
Example #6
// GetClockSpeed returns the CPU clock speed in kHz, given a []byte formatted as the /proc/cpuinfo file.
func GetClockSpeed(procInfo []byte) (uint64, error) {
	// Clock speed is not available on s390/s390x and aarch64.
	if isSystemZ() || isAArch64() {
		return 0, nil
	}

	// First look through sys to find a max supported cpu frequency.
	if utils.FileExists(maxFreqFile) {
		val, err := ioutil.ReadFile(maxFreqFile)
		if err != nil {
			return 0, err
		}
		var maxFreq uint64
		n, err := fmt.Sscanf(string(val), "%d", &maxFreq)
		if err != nil || n != 1 {
			return 0, fmt.Errorf("could not parse frequency %q", val)
		}
		return maxFreq, nil
	}
	// Fall back to /proc/cpuinfo
	matches := cpuClockSpeedMHz.FindSubmatch(procInfo)
	if len(matches) != 2 {
		return 0, fmt.Errorf("could not detect clock speed from output: %q", string(procInfo))
	}

	speed, err := strconv.ParseFloat(string(matches[1]), 64)
	if err != nil {
		return 0, err
	}
	// Convert to kHz
	return uint64(speed * 1000), nil
}
Example #7
func validateDockerInfo() (string, string) {
	client, err := docker.Client()
	if err == nil {
		info, err := client.Info()
		if err == nil {
			execDriver := info.Get("ExecutionDriver")
			storageDriver := info.Get("Driver")
			desc := fmt.Sprintf("Docker exec driver is %s. Storage driver is %s.\n", execDriver, storageDriver)
			if docker.UseSystemd() {
				desc += "\tsystemd is being used to create cgroups.\n"
			} else {
				desc += "\tCgroups are being created through cgroup filesystem.\n"
			}
			if strings.Contains(execDriver, "native") {
				stateFile := docker.DockerStateDir()
				if !utils.FileExists(stateFile) {
					desc += fmt.Sprintf("\tDocker container state directory %q is not accessible.\n", stateFile)
					return Unsupported, desc
				}
				desc += fmt.Sprintf("\tDocker container state directory is at %q and is accessible.\n", stateFile)
				return Recommended, desc
			} else if strings.Contains(execDriver, "lxc") {
				return Supported, desc
			}
			return Unknown, desc
		}
	}
	return Unknown, "Docker remote API not reachable\n\t"
}
Example #8
// TODO(vmarmol): Switch to getting this from libcontainer once we have a solid API.
func (self *dockerContainerHandler) readLibcontainerConfig() (config *libcontainer.Config, err error) {
	configPath := path.Join(dockerRootDir, self.id, "container.json")
	if !utils.FileExists(configPath) {
		// TODO(vishh): Return file name as well once we have a better error interface.
		err = fileNotFound
		return
	}
	f, err := os.Open(configPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open %s - %s\n", configPath, err)
	}
	defer f.Close()
	d := json.NewDecoder(f)
	retConfig := new(libcontainer.Config)
	err = d.Decode(retConfig)
	if err != nil {
		return
	}
	config = retConfig

	// Replace cgroup parent and name with our own since we may be running in a different context.
	config.Cgroups.Parent = self.parent
	config.Cgroups.Name = self.id

	return
}
Example #9
// Lists all directories under "dirpath" and outputs the results as children of "parent".
func listDirectories(dirpath string, parent string, recursive bool, output map[string]struct{}) error {
	// Ignore if this hierarchy does not exist.
	if !utils.FileExists(dirpath) {
		return nil
	}

	entries, err := ioutil.ReadDir(dirpath)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		// We only grab directories.
		if entry.IsDir() {
			name := path.Join(parent, entry.Name())
			output[name] = struct{}{}

			// List subcontainers if asked to.
			if recursive {
				err := listDirectories(path.Join(dirpath, entry.Name()), name, true, output)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}
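
A short usage sketch for listDirectories: the function name printCpuSubcontainers and the mount point "/sys/fs/cgroup/cpu" are hypothetical, and the snippet assumes listDirectories from the example above is in scope. The output map doubles as a set of discovered container names:

func printCpuSubcontainers() error {
	containers := make(map[string]struct{})
	// "/sys/fs/cgroup/cpu" is an assumed mount point; in practice this would
	// come from the detected cgroup mounts.
	err := listDirectories("/sys/fs/cgroup/cpu", "/", true, containers)
	if err != nil {
		return err
	}
	for name := range containers {
		fmt.Println(name)
	}
	return nil
}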
Example #10
func getClockSpeed(procInfo []byte) (uint64, error) {
	// First look through sys to find a max supported cpu frequency.
	const maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
	if utils.FileExists(maxFreqFile) {
		val, err := ioutil.ReadFile(maxFreqFile)
		if err != nil {
			return 0, err
		}
		var maxFreq uint64
		n, err := fmt.Sscanf(string(val), "%d", &maxFreq)
		if err != nil || n != 1 {
			return 0, fmt.Errorf("could not parse frequency %q", val)
		}
		return maxFreq, nil
	}
	// Fall back to /proc/cpuinfo
	matches := CpuClockSpeedMHz.FindSubmatch(procInfo)
	if len(matches) != 2 {
		// Check if we are running on Power systems, which use a different format.
		powerClockSpeedMHz := regexp.MustCompile(`clock\t*: +([0-9]+\.[0-9]+)MHz`)
		matches = powerClockSpeedMHz.FindSubmatch(procInfo)
		if len(matches) != 2 {
			return 0, fmt.Errorf("could not detect clock speed from output: %q", string(procInfo))
		}
	}
	speed, err := strconv.ParseFloat(string(matches[1]), 64)
	if err != nil {
		return 0, err
	}
	// Convert to kHz
	return uint64(speed * 1000), nil
}
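
Examples #6 and #10 both match procInfo against a package-level regular expression (cpuClockSpeedMHz / CpuClockSpeedMHz) whose definition is not shown. A plausible definition, assuming the common "cpu MHz" line format of /proc/cpuinfo (the exact pattern in the original source may differ):

// Matches lines such as "cpu MHz : 2593.994" in /proc/cpuinfo.
var cpuClockSpeedMHz = regexp.MustCompile(`cpu MHz\s*:\s*([0-9]+\.[0-9]+)`)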
Example #11
func CgroupExists(cgroupPaths map[string]string) bool {
	// If any cgroup exists, the container is still alive.
	for _, cgroupPath := range cgroupPaths {
		if utils.FileExists(cgroupPath) {
			return true
		}
	}
	return false
}
Example #12
// getSystemFile looks for system log files that contain kernel messages and
// returns the path of the first one found.
func getSystemFile() (string, error) {
	for _, logFile := range kernelLogFiles {
		if utils.FileExists(logFile) {
			glog.Infof("OOM parser using kernel log file: %q", logFile)
			return logFile, nil
		}
	}
	return "", fmt.Errorf("unable to find any kernel log file available from our set: %v", kernelLogFiles)
}
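
kernelLogFiles is defined elsewhere in the package. A sketch consistent with the paths probed in Example #4; the real list may contain additional entries:

// Candidate kernel log files, checked in order of preference.
var kernelLogFiles = []string{"/var/log/messages", "/var/log/syslog"}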
Example #13
func (self *rawContainerHandler) Exists() bool {
	// If any cgroup exists, the container is still alive.
	for _, cgroupPath := range self.cgroupPaths {
		if utils.FileExists(cgroupPath) {
			return true
		}
	}
	return false
}
Example #14
func New() (*SchedReader, error) {
	if !utils.FileExists(schedDebugPath) {
		return nil, fmt.Errorf("sched debug file %q not accessible", schedDebugPath)
	}
	selfCgroup, err := getSelfCgroup()
	if err != nil {
		glog.Infof("failed to get cgroup for cadvisor: %v", err)
	}
	return &SchedReader{selfCgroup: selfCgroup}, nil
}
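
schedDebugPath is a package-level constant not shown above. Presumably it points at the kernel's scheduler debug interface; a sketch:

// Scheduler debug information exposed by the kernel (requires CONFIG_SCHED_DEBUG).
const schedDebugPath = "/proc/sched_debug"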
Example #15
func (self *rawContainerHandler) GetSpec() (*info.ContainerSpec, error) {
	spec := new(info.ContainerSpec)

	// The raw driver assumes unified hierarchy containers.

	// CPU.
	cpuRoot, ok := self.cgroupSubsystems.mountPoints["cpu"]
	if ok {
		cpuRoot = filepath.Join(cpuRoot, self.name)
		if utils.FileExists(cpuRoot) {
			// Get machine info.
			mi, err := self.machineInfoFactory.GetMachineInfo()
			if err != nil {
				return nil, err
			}

			spec.Cpu = new(info.CpuSpec)
			spec.Cpu.Limit = readInt64(cpuRoot, "cpu.shares")

			// TODO(vmarmol): Get CPUs from config.Cgroups.CpusetCpus
			n := (mi.NumCores + 63) / 64
			spec.Cpu.Mask.Data = make([]uint64, n)
			for i := 0; i < n; i++ {
				spec.Cpu.Mask.Data[i] = math.MaxUint64
			}
		}
	}

	// Memory.
	memoryRoot, ok := self.cgroupSubsystems.mountPoints["memory"]
	if ok {
		memoryRoot = filepath.Join(memoryRoot, self.name)
		if utils.FileExists(memoryRoot) {
			spec.Memory = new(info.MemorySpec)
			spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes")
			spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes")
		}
	}

	return spec, nil
}
Example #16
func init() {
	useSystemd = systemd.UseSystemd()
	if !useSystemd {
		// Second attempt at checking for systemd, check for a "name=systemd" cgroup.
		mnt, err := cgroups.FindCgroupMountpoint("cpu")
		if err == nil {
			// systemd presence does not mean systemd controls cgroups.
			// If system.slice cgroup exists, then systemd is taking control.
			// This breaks if the user creates system.slice manually :)
			useSystemd = utils.FileExists(mnt + "/system.slice")
		}
	}
}
Example #17
func readState(dockerRoot, containerID string) (preAPIState, error) {
	// Pre-API libcontainer changed how its state is stored; try the old way of a "pid" file.
	statePath := path.Join(dockerRoot, libcontainerExecDriverPath, containerID, "state.json")
	if !utils.FileExists(statePath) {
		pidPath := path.Join(dockerRoot, libcontainerExecDriverPath, containerID, "pid")
		if utils.FileExists(pidPath) {
			// We don't need the old state; return an empty state and we'll gracefully degrade.
			return preAPIState{}, nil
		}
	}
	out, err := ioutil.ReadFile(statePath)
	if err != nil {
		return preAPIState{}, err
	}

	// Parse the state.
	var state preAPIState
	err = json.Unmarshal(out, &state)
	if err != nil {
		return preAPIState{}, err
	}

	return state, nil
}
Example #18
func validateDockerInfo() (string, string) {
	info, err := docker.ValidateInfo()
	if err != nil {
		return Unsupported, fmt.Sprintf("Docker setup is invalid: %v", err)
	}

	desc := fmt.Sprintf("Docker exec driver is %s. Storage driver is %s.\n", info.ExecutionDriver, info.Driver)
	stateFile := docker.DockerStateDir()
	if !utils.FileExists(stateFile) {
		desc += fmt.Sprintf("\tDocker container state directory %q is not accessible.\n", stateFile)
		return Unsupported, desc
	}
	desc += fmt.Sprintf("\tDocker container state directory is at %q and is accessible.\n", stateFile)
	return Recommended, desc
}
Example #19
func readString(dirpath string, file string) string {
	cgroupFile := path.Join(dirpath, file)

	// Ignore non-existent files
	if !utils.FileExists(cgroupFile) {
		return ""
	}

	// Read
	out, err := ioutil.ReadFile(cgroupFile)
	if err != nil {
		glog.Errorf("raw driver: Failed to read %q: %s", cgroupFile, err)
		return ""
	}
	return string(out)
}
Example #20
func validateCgroupMounts() (string, string) {
	const recommendedMount = "/sys/fs/cgroup"
	desc := fmt.Sprintf("\tAny cgroup mount point that is detectable and accessible is supported. %s is recommended as a standard location.\n", recommendedMount)
	mnt, err := cgroups.FindCgroupMountpoint("cpu")
	if err != nil {
		out := "Could not locate cgroup mount point.\n"
		out += desc
		return Unknown, out
	}
	mnt = path.Dir(mnt)
	if !utils.FileExists(mnt) {
		out := fmt.Sprintf("Cgroup mount directory %s inaccessible.\n", mnt)
		out += desc
		return Unsupported, out
	}
	mounts, err := ioutil.ReadDir(mnt)
	if err != nil {
		out := fmt.Sprintf("Could not read cgroup mount directory %s.\n", mnt)
		out += desc
		return Unsupported, out
	}
	mountNames := "\tCgroup mount directories: "
	for _, mount := range mounts {
		mountNames += mount.Name() + " "
	}
	mountNames += "\n"
	out := fmt.Sprintf("Cgroups are mounted at %s.\n", mnt)
	out += mountNames
	out += desc
	info, err := ioutil.ReadFile("/proc/mounts")
	if err != nil {
		out := fmt.Sprintf("Could not read /proc/mounts.\n")
		out += desc
		return Unsupported, out
	}
	out += "\tCgroup mounts:\n"
	for _, line := range strings.Split(string(info), "\n") {
		if strings.Contains(line, " cgroup ") {
			out += "\t" + line + "\n"
		}
	}
	if mnt == recommendedMount {
		return Recommended, out
	}
	return Supported, out
}
Example #21
func AttributeFiles(path string) ([]string, error) {
	res := []string{}
	if !utils.FileExists(path) {
		return res, nil
	}

	in := attributes.NewInputs(path)
	// initialize input files list
	err := in.ListFiles()
	if err != nil {
		return nil, err
	}

	for _, file := range in.Files {
		if strings.HasSuffix(file, ".yml") || strings.HasSuffix(file, ".yaml") {
			res = append(res, in.Directory+file)
		}
	}
	return res, nil
}
Example #22
func UseSystemd() bool {
	check.Do(func() {
		useSystemd = false

		// Check for system.slice in systemd and cpu cgroup.
		for _, cgroupType := range []string{"name=systemd", "cpu"} {
			mnt, err := cgroups.FindCgroupMountpoint(cgroupType)
			if err == nil {
				// systemd presence does not mean systemd controls cgroups.
				// If system.slice cgroup exists, then systemd is taking control.
				// This breaks if the user creates system.slice manually :)
				if utils.FileExists(path.Join(mnt, "system.slice")) {
					useSystemd = true
					break
				}
			}
		}
	})
	return useSystemd
}
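
The check.Do call implies a sync.Once guard so detection runs at most once per process. The supporting declarations are not shown above; presumably:

var (
	check      sync.Once // guards the one-time systemd detection
	useSystemd bool
)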
Example #23
func readInt64(path string, file string) uint64 {
	cgroupFile := filepath.Join(path, file)

	// Ignore non-existent files
	if !utils.FileExists(cgroupFile) {
		return 0
	}

	// Read
	out, err := ioutil.ReadFile(cgroupFile)
	if err != nil {
		log.Printf("raw driver: Failed to read %q: %s", cgroupFile, err)
		return 0
	}
	val, err := strconv.ParseUint(strings.TrimSpace(string(out)), 10, 64)
	if err != nil {
		log.Printf("raw driver: Failed to parse in %q from file %q: %s", string(out), cgroupFile, err)
		return 0
	}

	return val
}
Example #24
func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
	var spec info.ContainerSpec

	// The raw driver assumes unified hierarchy containers.

	// Get machine info.
	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return spec, err
	}

	// CPU.
	cpuRoot, ok := self.cgroupPaths["cpu"]
	if ok {
		if utils.FileExists(cpuRoot) {
			spec.HasCpu = true
			spec.Cpu.Limit = readInt64(cpuRoot, "cpu.shares")
		}
	}

	// Cpu Mask.
	// This will fail for non-unified hierarchies. We'll return the whole machine mask in that case.
	cpusetRoot, ok := self.cgroupPaths["cpuset"]
	if ok {
		if utils.FileExists(cpusetRoot) {
			spec.HasCpu = true
			spec.Cpu.Mask = readString(cpusetRoot, "cpuset.cpus")
			if spec.Cpu.Mask == "" {
				spec.Cpu.Mask = fmt.Sprintf("0-%d", mi.NumCores-1)
			}
		}
	}

	// Memory.
	memoryRoot, ok := self.cgroupPaths["memory"]
	if ok {
		if utils.FileExists(memoryRoot) {
			spec.HasMemory = true
			spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes")
			spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes")
		}
	}

	// Fs.
	if self.name == "/" || self.externalMounts != nil {
		spec.HasFilesystem = true
	}

	// Network.
	spec.HasNetwork = self.hasNetwork

	// DiskIo.
	if blkioRoot, ok := self.cgroupPaths["blkio"]; ok && utils.FileExists(blkioRoot) {
		spec.HasDiskIo = true
	}

	// Check physical network devices for root container.
	nd, err := self.GetRootNetworkDevices()
	if err != nil {
		return spec, err
	}
	if len(nd) != 0 {
		spec.HasNetwork = true
	}
	return spec, nil
}
Example #25
func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
	var spec info.ContainerSpec

	// The raw driver assumes unified hierarchy containers.

	// Get the lowest creation time from all hierarchies as the container creation time.
	now := time.Now()
	lowestTime := now
	for _, cgroupPath := range self.cgroupPaths {
		// The modified time of the cgroup directory is when the container was created.
		fi, err := os.Stat(cgroupPath)
		if err == nil && fi.ModTime().Before(lowestTime) {
			lowestTime = fi.ModTime()
		}
	}
	if lowestTime != now {
		spec.CreationTime = lowestTime
	}

	// Get machine info.
	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return spec, err
	}

	// CPU.
	cpuRoot, ok := self.cgroupPaths["cpu"]
	if ok {
		if utils.FileExists(cpuRoot) {
			spec.HasCpu = true
			spec.Cpu.Limit = readInt64(cpuRoot, "cpu.shares")
		}
	}

	// Cpu Mask.
	// This will fail for non-unified hierarchies. We'll return the whole machine mask in that case.
	cpusetRoot, ok := self.cgroupPaths["cpuset"]
	if ok {
		if utils.FileExists(cpusetRoot) {
			spec.HasCpu = true
			mask := readString(cpusetRoot, "cpuset.cpus")
			spec.Cpu.Mask = utils.FixCpuMask(mask, mi.NumCores)
		}
	}

	// Memory.
	memoryRoot, ok := self.cgroupPaths["memory"]
	if ok {
		if utils.FileExists(memoryRoot) {
			spec.HasMemory = true
			spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes")
			spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes")
		}
	}

	// Fs.
	if self.name == "/" || self.externalMounts != nil {
		spec.HasFilesystem = true
	}

	// Network.
	spec.HasNetwork = self.hasNetwork

	// DiskIo.
	if blkioRoot, ok := self.cgroupPaths["blkio"]; ok && utils.FileExists(blkioRoot) {
		spec.HasDiskIo = true
	}

	// Check physical network devices for root container.
	nd, err := self.GetRootNetworkDevices()
	if err != nil {
		return spec, err
	}
	if len(nd) != 0 {
		spec.HasNetwork = true
	}
	return spec, nil
}
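
Several of the GetSpec examples (this one, plus #27 through #29) delegate mask cleanup to utils.FixCpuMask, which is not shown on this page. A hypothetical sketch that mirrors the fallback logic spelled out inline in Example #3; the real helper likely also canonicalizes the mask string:

// FixCpuMask is a hypothetical sketch: fall back to the full machine mask
// when the cgroup reports an empty cpuset.
func FixCpuMask(mask string, cores int) string {
	if mask == "" {
		mask = fmt.Sprintf("0-%d", cores-1)
	}
	return mask
}
Example #26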
// TODO(vmarmol): Deprecate over time as old Dockers are phased out.
func ReadConfig(dockerRoot, dockerRun, containerID string) (*configs.Config, error) {
	// Try using the new config if it is available.
	configPath := configPath(dockerRun, containerID)
	if utils.FileExists(configPath) {
		out, err := ioutil.ReadFile(configPath)
		if err != nil {
			return nil, err
		}

		var state libcontainer.State
		if err = json.Unmarshal(out, &state); err != nil {
			if _, ok := err.(*json.UnmarshalTypeError); ok {
				// Since some fields changed in the Cgroup struct, unmarshalling into the libcontainer.State struct will fail.
				// The failure is caused by a change in runc (https://github.com/opencontainers/runc/commit/c6e406af243fab0c9636539c1cb5f4d60fe0787f).
				// If we encounter an UnmarshalTypeError, try unmarshalling into the v1State struct instead and convert it.
				var state v1State
				err2 := json.Unmarshal(out, &state)
				if err2 != nil {
					return nil, err
				}
				return convertOldConfigToNew(state.Config), nil
			} else {
				return nil, err
			}
		}
		return &state.Config, nil
	}

	// Fallback to reading the old config which is comprised of the state and config files.
	oldConfigPath := oldConfigPath(dockerRoot, containerID)
	out, err := ioutil.ReadFile(oldConfigPath)
	if err != nil {
		return nil, err
	}

	// Try reading the preAPIConfig.
	var config preAPIConfig
	err = json.Unmarshal(out, &config)
	if err != nil {
		// Try to parse the old pre-API config. The main difference is that namespaces used to be a map; now they are a slice of structs.
		// The JSON marshaler will use the non-nested field before the nested one.
		type oldLibcontainerConfig struct {
			preAPIConfig
			OldNamespaces map[string]bool `json:"namespaces,omitempty"`
		}
		var oldConfig oldLibcontainerConfig
		err2 := json.Unmarshal(out, &oldConfig)
		if err2 != nil {
			// Use original error.
			return nil, err
		}

		// Translate the old pre-API config into the new config.
		config = oldConfig.preAPIConfig
		for ns := range oldConfig.OldNamespaces {
			config.Namespaces = append(config.Namespaces, configs.Namespace{
				Type: configs.NamespaceType(ns),
			})
		}
	}

	// Read the old state file as well.
	state, err := readState(dockerRoot, containerID)
	if err != nil {
		return nil, err
	}

	// Convert preAPIConfig + old state file to Config.
	// This only converts some of the fields, the ones we use.
	// You may need to add fields if the one you're interested in is not available.
	var result configs.Config
	result.Cgroups = new(configs.Cgroup)
	result.Rootfs = config.RootFs
	result.Hostname = config.Hostname
	result.Namespaces = config.Namespaces
	result.Capabilities = config.Capabilities
	for _, net := range config.Networks {
		n := &configs.Network{
			Name:              state.NetworkState.VethChild,
			Bridge:            net.Bridge,
			MacAddress:        net.MacAddress,
			Address:           net.Address,
			Gateway:           net.Gateway,
			IPv6Address:       net.IPv6Address,
			IPv6Gateway:       net.IPv6Gateway,
			HostInterfaceName: state.NetworkState.VethHost,
		}
		result.Networks = append(result.Networks, n)
	}
	result.Routes = config.Routes
	if config.Cgroups != nil {
		result.Cgroups = config.Cgroups
	}

	return &result, nil
}
Example #27
func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
	var spec info.ContainerSpec

	// The raw driver assumes unified hierarchy containers.

	// Get the lowest creation time from all hierarchies as the container creation time.
	now := time.Now()
	lowestTime := now
	for _, cgroupPath := range self.cgroupPaths {
		// The modified time of the cgroup directory changes whenever a subcontainer is created.
		// e.g. /docker will have a creation time matching the creation of the latest docker container.
		// Use clone_children as a workaround, since it isn't usually modified; it is typically only
		// changed immediately after a container is created.
		cgroupPath = path.Join(cgroupPath, "cgroup.clone_children")
		fi, err := os.Stat(cgroupPath)
		if err == nil && fi.ModTime().Before(lowestTime) {
			lowestTime = fi.ModTime()
		}
	}
	if lowestTime != now {
		spec.CreationTime = lowestTime
	}

	// Get machine info.
	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return spec, err
	}

	// CPU.
	cpuRoot, ok := self.cgroupPaths["cpu"]
	if ok {
		if utils.FileExists(cpuRoot) {
			spec.HasCpu = true
			spec.Cpu.Limit = readInt64(cpuRoot, "cpu.shares")
		}
	}

	// Cpu Mask.
	// This will fail for non-unified hierarchies. We'll return the whole machine mask in that case.
	cpusetRoot, ok := self.cgroupPaths["cpuset"]
	if ok {
		if utils.FileExists(cpusetRoot) {
			spec.HasCpu = true
			mask := readString(cpusetRoot, "cpuset.cpus")
			spec.Cpu.Mask = utils.FixCpuMask(mask, mi.NumCores)
		}
	}

	// Memory
	if self.name == "/" {
		// Get memory and swap limits of the running machine
		memLimit, err := machine.GetMachineMemoryCapacity()
		if err != nil {
			glog.Warningf("failed to obtain memory limit for machine container")
			spec.HasMemory = false
		} else {
			spec.Memory.Limit = uint64(memLimit)
			// Spec is marked to have memory only if the memory limit is set
			spec.HasMemory = true
		}

		swapLimit, err := machine.GetMachineSwapCapacity()
		if err != nil {
			glog.Warningf("failed to obtain swap limit for machine container")
		} else {
			spec.Memory.SwapLimit = uint64(swapLimit)
		}
	} else {
		memoryRoot, ok := self.cgroupPaths["memory"]
		if ok {
			if utils.FileExists(memoryRoot) {
				spec.HasMemory = true
				spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes")
				spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes")
			}
		}
	}

	// Fs.
	if self.name == "/" || self.externalMounts != nil {
		spec.HasFilesystem = true
	}

	// Network.
	spec.HasNetwork = self.hasNetwork

	// DiskIo.
	if blkioRoot, ok := self.cgroupPaths["blkio"]; ok && utils.FileExists(blkioRoot) {
		spec.HasDiskIo = true
	}

	// Check physical network devices for root container.
	nd, err := self.GetRootNetworkDevices()
	if err != nil {
		return spec, err
	}
	if len(nd) != 0 {
		spec.HasNetwork = true
	}
	return spec, nil
}
Example #28
func GetSpec(handler AbstractContainerHandler) (info.ContainerSpec, error) {
	cgroupPaths := handler.GetCgroupPaths()
	machineInfoFactory := handler.GetMachineInfoFactory()
	name := handler.GetName()
	externalMounts := handler.GetExternalMounts()

	var spec info.ContainerSpec

	// The raw driver assumes unified hierarchy containers.

	// Get the lowest creation time from all hierarchies as the container creation time.
	now := time.Now()
	lowestTime := now
	for _, cgroupPath := range cgroupPaths {
		// The modified time of the cgroup directory changes whenever a subcontainer is created.
		// e.g. /docker will have a creation time matching the creation of the latest docker container.
		// Use clone_children as a workaround, since it isn't usually modified; it is typically only
		// changed immediately after a container is created.
		cgroupPath = path.Join(cgroupPath, "cgroup.clone_children")
		fi, err := os.Stat(cgroupPath)
		if err == nil && fi.ModTime().Before(lowestTime) {
			lowestTime = fi.ModTime()
		}
	}
	if lowestTime != now {
		spec.CreationTime = lowestTime
	}

	// Get machine info.
	mi, err := machineInfoFactory.GetMachineInfo()
	if err != nil {
		return spec, err
	}

	// CPU.
	cpuRoot, ok := cgroupPaths["cpu"]
	if ok {
		if utils.FileExists(cpuRoot) {
			spec.HasCpu = true
			spec.Cpu.Limit = readUInt64(cpuRoot, "cpu.shares")
			spec.Cpu.Period = readUInt64(cpuRoot, "cpu.cfs_period_us")
			quota := readString(cpuRoot, "cpu.cfs_quota_us")

			if quota != "" && quota != "-1" {
				val, err := strconv.ParseUint(quota, 10, 64)
				if err != nil {
					glog.Errorf("GetSpec: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err)
				}
				spec.Cpu.Quota = val
			}
		}
	}

	// Cpu Mask.
	// This will fail for non-unified hierarchies. We'll return the whole machine mask in that case.
	cpusetRoot, ok := cgroupPaths["cpuset"]
	if ok {
		if utils.FileExists(cpusetRoot) {
			spec.HasCpu = true
			mask := readString(cpusetRoot, "cpuset.cpus")
			spec.Cpu.Mask = utils.FixCpuMask(mask, mi.NumCores)
		}
	}

	// Memory
	if name == "/" {
		// Get memory and swap limits of the running machine
		memLimit, err := machine.GetMachineMemoryCapacity()
		if err != nil {
			glog.Warningf("failed to obtain memory limit for machine container")
			spec.HasMemory = false
		} else {
			spec.Memory.Limit = uint64(memLimit)
			// Spec is marked to have memory only if the memory limit is set
			spec.HasMemory = true
		}

		swapLimit, err := machine.GetMachineSwapCapacity()
		if err != nil {
			glog.Warningf("failed to obtain swap limit for machine container")
		} else {
			spec.Memory.SwapLimit = uint64(swapLimit)
		}
	} else {
		memoryRoot, ok := cgroupPaths["memory"]
		if ok {
			if utils.FileExists(memoryRoot) {
				spec.HasMemory = true
				spec.Memory.Limit = readUInt64(memoryRoot, "memory.limit_in_bytes")
				spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.memsw.limit_in_bytes")
			}
		}
	}

	spec.HasFilesystem = name == "/" || externalMounts != nil || handler.HasFilesystem()

	spec.HasNetwork = handler.HasNetwork()

	if blkioRoot, ok := cgroupPaths["blkio"]; ok && utils.FileExists(blkioRoot) {
		spec.HasDiskIo = true
	}

	// Check physical network devices for root container.
	nd, err := handler.GetRootNetworkDevices()
	if err != nil {
		return spec, err
	}

	spec.HasNetwork = spec.HasNetwork || len(nd) != 0

	return spec, nil
}
Example #29
func GetSpec(cgroupPaths map[string]string, machineInfoFactory info.MachineInfoFactory, hasNetwork, hasFilesystem bool) (info.ContainerSpec, error) {
	var spec info.ContainerSpec

	// Assume unified hierarchy containers.
	// Get the lowest creation time from all hierarchies as the container creation time.
	now := time.Now()
	lowestTime := now
	for _, cgroupPath := range cgroupPaths {
		// The modified time of the cgroup directory changes whenever a subcontainer is created.
		// e.g. /docker will have a creation time matching the creation of the latest docker container.
		// Use clone_children as a workaround, since it isn't usually modified; it is typically only
		// changed immediately after a container is created.
		cgroupPath = path.Join(cgroupPath, "cgroup.clone_children")
		fi, err := os.Stat(cgroupPath)
		if err == nil && fi.ModTime().Before(lowestTime) {
			lowestTime = fi.ModTime()
		}
	}
	if lowestTime != now {
		spec.CreationTime = lowestTime
	}

	// Get machine info.
	mi, err := machineInfoFactory.GetMachineInfo()
	if err != nil {
		return spec, err
	}

	// CPU.
	cpuRoot, ok := cgroupPaths["cpu"]
	if ok {
		if utils.FileExists(cpuRoot) {
			spec.HasCpu = true
			spec.Cpu.Limit = readUInt64(cpuRoot, "cpu.shares")
			spec.Cpu.Period = readUInt64(cpuRoot, "cpu.cfs_period_us")
			quota := readString(cpuRoot, "cpu.cfs_quota_us")

			if quota != "" && quota != "-1" {
				val, err := strconv.ParseUint(quota, 10, 64)
				if err != nil {
					glog.Errorf("GetSpec: Failed to parse CPUQuota from %q: %s", path.Join(cpuRoot, "cpu.cfs_quota_us"), err)
				}
				spec.Cpu.Quota = val
			}
		}
	}

	// Cpu Mask.
	// This will fail for non-unified hierarchies. We'll return the whole machine mask in that case.
	cpusetRoot, ok := cgroupPaths["cpuset"]
	if ok {
		if utils.FileExists(cpusetRoot) {
			spec.HasCpu = true
			mask := readString(cpusetRoot, "cpuset.cpus")
			spec.Cpu.Mask = utils.FixCpuMask(mask, mi.NumCores)
		}
	}

	// Memory
	memoryRoot, ok := cgroupPaths["memory"]
	if ok {
		if utils.FileExists(memoryRoot) {
			spec.HasMemory = true
			spec.Memory.Limit = readUInt64(memoryRoot, "memory.limit_in_bytes")
			spec.Memory.SwapLimit = readUInt64(memoryRoot, "memory.memsw.limit_in_bytes")
		}
	}

	spec.HasNetwork = hasNetwork
	spec.HasFilesystem = hasFilesystem

	if blkioRoot, ok := cgroupPaths["blkio"]; ok && utils.FileExists(blkioRoot) {
		spec.HasDiskIo = true
	}

	return spec, nil
}
Example #30
// Exists returns whether the specified container exists.
func Exists(dockerRoot, dockerRun, containerID string) bool {
	// New or old config must exist for the container to be considered alive.
	return utils.FileExists(configPath(dockerRun, containerID)) || utils.FileExists(oldConfigPath(dockerRoot, containerID))
}
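
Exists relies on two path helpers that do not appear on this page. Purely for illustration, hypothetical definitions consistent with the layouts used in Examples #8 and #17; the actual directory layout depends on the Docker/libcontainer version:

// Hypothetical helpers; paths vary across Docker/libcontainer versions.
func configPath(dockerRun, containerID string) string {
	return path.Join(dockerRun, "libcontainer", containerID, "state.json")
}

func oldConfigPath(dockerRoot, containerID string) string {
	return path.Join(dockerRoot, libcontainerExecDriverPath, containerID, "container.json")
}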