// New takes a memory storage and returns a new manager.
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool) (Manager, error) {
	if memoryCache == nil {
		return nil, fmt.Errorf("manager requires memory storage")
	}

	// Detect the container we are running on.
	selfContainer, err := cgroups.GetThisCgroupDir("cpu")
	if err != nil {
		return nil, err
	}
	glog.Infof("cAdvisor running in container: %q", selfContainer)

	dockerInfo, err := docker.DockerInfo()
	if err != nil {
		glog.Warningf("Unable to connect to Docker: %v", err)
	}
	context := fs.Context{DockerRoot: docker.RootDir(), DockerInfo: dockerInfo}
	fsInfo, err := fs.NewFsInfo(context)
	if err != nil {
		return nil, err
	}

	// If cAdvisor was started with the host's rootfs mounted, assume that it is
	// running in its own namespaces.
	inHostNamespace := false
	if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
		inHostNamespace = true
	}
	newManager := &manager{
		containers:               make(map[namespacedContainerName]*containerData),
		quitChannels:             make([]chan error, 0, 2),
		memoryCache:              memoryCache,
		fsInfo:                   fsInfo,
		cadvisorContainer:        selfContainer,
		inHostNamespace:          inHostNamespace,
		startupTime:              time.Now(),
		maxHousekeepingInterval:  maxHousekeepingInterval,
		allowDynamicHousekeeping: allowDynamicHousekeeping,
		ignoreMetrics:            ignoreMetrics.MetricSet,
	}

	machineInfo, err := getMachineInfo(sysfs, fsInfo, inHostNamespace)
	if err != nil {
		return nil, err
	}
	newManager.machineInfo = *machineInfo
	glog.Infof("Machine: %+v", newManager.machineInfo)

	versionInfo, err := getVersionInfo()
	if err != nil {
		return nil, err
	}
	glog.Infof("Version: %+v", *versionInfo)

	newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy())
	return newManager, nil
}
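// Usage sketch (not from the original source) for the four-argument New above:
// the manager fails fast on a nil cache, so callers build storage first and
// then hand it over. The 60-second housekeeping cap with dynamic housekeeping
// enabled mirrors cAdvisor's usual flag defaults; treat both values here as
// illustrative assumptions rather than required settings.
func exampleNewManager(cache *memory.InMemoryCache, sysFs sysfs.SysFs) (Manager, error) {
	return New(cache, sysFs, 60*time.Second, true)
}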
func (raw *data) parent(subsystem, mountpoint, src string) (string, error) {
	initPath, err := cgroups.GetThisCgroupDir(subsystem)
	if err != nil {
		return "", err
	}
	relDir, err := filepath.Rel(src, initPath)
	if err != nil {
		return "", err
	}
	return filepath.Join(mountpoint, relDir), nil
}
func findCgroupRootAndDir(subsystem string) (string, string, error) {
	cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem)
	if err != nil {
		return "", "", err
	}
	cgroupDir, err := cgroups.GetThisCgroupDir(subsystem)
	if err != nil {
		return "", "", err
	}
	return cgroupRoot, cgroupDir, nil
}
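// Usage sketch (not from the original source): the two values returned by
// findCgroupRootAndDir compose into the absolute filesystem path of the
// calling process's cgroup for a subsystem. "memory" is just an illustrative
// subsystem name; the snippet assumes the same path/filepath import as the
// surrounding code.
func exampleSelfCgroupPath() (string, error) {
	cgroupRoot, cgroupDir, err := findCgroupRootAndDir("memory")
	if err != nil {
		return "", err
	}
	// e.g. "/sys/fs/cgroup/memory" + "/docker/<id>" -> "/sys/fs/cgroup/memory/docker/<id>"
	return filepath.Join(cgroupRoot, cgroupDir), nil
}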
func createCgroupConfig(spec *LinuxSpec, devices []*configs.Device) (*configs.Cgroup, error) {
	myCgroupPath, err := cgroups.GetThisCgroupDir("devices")
	if err != nil {
		return nil, err
	}
	c := &configs.Cgroup{
		Name:             getDefaultID(),
		Parent:           myCgroupPath,
		AllowedDevices:   append(devices, allowedDevices...),
		CpuQuota:         getCPUQuota(spec.Cpus),
		Memory:           spec.Memory * 1024 * 1024,
		MemorySwap:       -1,
		MemorySwappiness: -1,
	}
	if r := spec.Resources; r != nil {
		c.MemoryReservation = r.MemoryReservation
		c.MemorySwap = r.MemorySwap
		c.KernelMemory = r.KernelMemory
		c.CpuShares = r.CpuShares
		c.CpuQuota = r.CpuQuota
		c.CpuPeriod = r.CpuPeriod
		c.CpuRtRuntime = r.CpuRtRuntime
		c.CpuRtPeriod = r.CpuRtPeriod
		c.CpusetCpus = r.CpusetCpus
		c.CpusetMems = r.CpusetMems
		c.BlkioThrottleReadBpsDevice = r.BlkioThrottleReadBpsDevice
		c.BlkioThrottleWriteBpsDevice = r.BlkioThrottleWriteBpsDevice
		c.BlkioThrottleReadIOpsDevice = r.BlkioThrottleReadIOpsDevice
		c.BlkioThrottleWriteIOpsDevice = r.BlkioThrottleWriteIOpsDevice
		c.BlkioWeight = r.BlkioWeight
		c.BlkioWeightDevice = r.BlkioWeightDevice
		for _, l := range r.HugetlbLimit {
			c.HugetlbLimit = append(c.HugetlbLimit, &configs.HugepageLimit{
				Pagesize: l.Pagesize,
				Limit:    l.Limit,
			})
		}
		c.OomKillDisable = r.DisableOOMKiller
		for _, m := range r.NetPrioIfpriomap {
			c.NetPrioIfpriomap = append(c.NetPrioIfpriomap, &configs.IfPrioMap{
				Interface: m.Interface,
				Priority:  m.Priority,
			})
		}
		c.NetClsClassid = r.NetClsClassid
	}
	return c, nil
}
func (raw *cgroupData) parentPath(subsystem, mountpoint, root string) (string, error) {
	// Use GetThisCgroupDir instead of GetInitCgroupDir, because the creating
	// process could be in a container that shares a pid namespace with the
	// host, and /proc/1/cgroup could point to a whole other world of cgroups.
	initPath, err := cgroups.GetThisCgroupDir(subsystem)
	if err != nil {
		return "", err
	}
	// This is needed for nested containers, because in /proc/self/cgroup we
	// see paths from the host, which don't exist in the container.
	relDir, err := filepath.Rel(root, initPath)
	if err != nil {
		return "", err
	}
	return filepath.Join(mountpoint, relDir), nil
}
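// Worked example (values are hypothetical, not from the original source) of
// the Rel/Join arithmetic in parentPath above: for a nested container whose
// own cgroup is "/docker/outer/inner" while the cgroup hierarchy was rooted
// at "/docker/outer", the parent resolves inside the mountpoint rather than
// at a host path the container cannot see.
func exampleParentPathArithmetic() {
	root := "/docker/outer"
	initPath := "/docker/outer/inner"
	mountpoint := "/sys/fs/cgroup/cpu"
	relDir, _ := filepath.Rel(root, initPath) // "inner"
	fmt.Println(filepath.Join(mountpoint, relDir)) // "/sys/fs/cgroup/cpu/inner"
}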
func createCgroupConfig(name string, spec *specs.LinuxRuntimeSpec, devices []*configs.Device) (*configs.Cgroup, error) {
	myCgroupPath, err := cgroups.GetThisCgroupDir("devices")
	if err != nil {
		return nil, err
	}
	c := &configs.Cgroup{
		Name:           name,
		Parent:         myCgroupPath,
		AllowedDevices: append(devices, allowedDevices...),
	}
	r := spec.Linux.Resources
	c.Memory = r.Memory.Limit
	c.MemoryReservation = r.Memory.Reservation
	c.MemorySwap = r.Memory.Swap
	c.KernelMemory = r.Memory.Kernel
	c.MemorySwappiness = r.Memory.Swappiness
	c.CpuShares = r.CPU.Shares
	c.CpuQuota = r.CPU.Quota
	c.CpuPeriod = r.CPU.Period
	c.CpuRtRuntime = r.CPU.RealtimeRuntime
	c.CpuRtPeriod = r.CPU.RealtimePeriod
	c.CpusetCpus = r.CPU.Cpus
	c.CpusetMems = r.CPU.Mems
	c.BlkioThrottleReadBpsDevice = r.BlockIO.ThrottleReadBpsDevice
	c.BlkioThrottleWriteBpsDevice = r.BlockIO.ThrottleWriteBpsDevice
	c.BlkioThrottleReadIOpsDevice = r.BlockIO.ThrottleReadIOpsDevice
	c.BlkioThrottleWriteIOpsDevice = r.BlockIO.ThrottleWriteIOpsDevice
	c.BlkioWeight = r.BlockIO.Weight
	c.BlkioWeightDevice = r.BlockIO.WeightDevice
	for _, l := range r.HugepageLimits {
		c.HugetlbLimit = append(c.HugetlbLimit, &configs.HugepageLimit{
			Pagesize: l.Pagesize,
			Limit:    l.Limit,
		})
	}
	c.OomKillDisable = r.DisableOOMKiller
	c.NetClsClassid = r.Network.ClassID
	for _, m := range r.Network.Priorities {
		c.NetPrioIfpriomap = append(c.NetPrioIfpriomap, &configs.IfPrioMap{
			Interface: m.Name,
			Priority:  m.Priority,
		})
	}
	return c, nil
}
func createCgroupConfig(name string, useSystemdCgroup bool, spec *specs.Spec) (*configs.Cgroup, error) {
	var (
		err          error
		myCgroupPath string
	)

	c := &configs.Cgroup{
		Resources: &configs.Resources{},
	}

	if spec.Linux.CgroupsPath != nil {
		myCgroupPath = libcontainerUtils.CleanPath(*spec.Linux.CgroupsPath)
		if useSystemdCgroup {
			myCgroupPath = *spec.Linux.CgroupsPath
		}
	}

	if useSystemdCgroup {
		if myCgroupPath == "" {
			c.Parent = "system.slice"
			c.ScopePrefix = "runc"
			c.Name = name
		} else {
			// Parse the path from the expected "slice:prefix:name" format,
			// e.g. "system.slice:docker:1234".
			parts := strings.Split(myCgroupPath, ":")
			if len(parts) != 3 {
				return nil, fmt.Errorf("expected cgroupsPath to be of format \"slice:prefix:name\" for systemd cgroups")
			}
			c.Parent = parts[0]
			c.ScopePrefix = parts[1]
			c.Name = parts[2]
		}
	} else {
		if myCgroupPath == "" {
			myCgroupPath, err = cgroups.GetThisCgroupDir("devices")
			if err != nil {
				return nil, err
			}
			myCgroupPath = filepath.Join(myCgroupPath, name)
		}
		c.Path = myCgroupPath
	}

	c.Resources.AllowedDevices = allowedDevices
	r := spec.Linux.Resources
	if r == nil {
		return c, nil
	}
	for i, d := range spec.Linux.Resources.Devices {
		var (
			t     = "a"
			major = int64(-1)
			minor = int64(-1)
		)
		if d.Type != nil {
			t = *d.Type
		}
		if d.Major != nil {
			major = *d.Major
		}
		if d.Minor != nil {
			minor = *d.Minor
		}
		if d.Access == nil || *d.Access == "" {
			return nil, fmt.Errorf("device access at %d field cannot be empty", i)
		}
		dt, err := stringToDeviceRune(t)
		if err != nil {
			return nil, err
		}
		dd := &configs.Device{
			Type:        dt,
			Major:       major,
			Minor:       minor,
			Permissions: *d.Access,
			Allow:       d.Allow,
		}
		c.Resources.Devices = append(c.Resources.Devices, dd)
	}
	// Append the default allowed devices to the end of the list.
	c.Resources.Devices = append(c.Resources.Devices, allowedDevices...)
	if r.Memory != nil {
		if r.Memory.Limit != nil {
			c.Resources.Memory = int64(*r.Memory.Limit)
		}
		if r.Memory.Reservation != nil {
			c.Resources.MemoryReservation = int64(*r.Memory.Reservation)
		}
		if r.Memory.Swap != nil {
			c.Resources.MemorySwap = int64(*r.Memory.Swap)
		}
		if r.Memory.Kernel != nil {
			c.Resources.KernelMemory = int64(*r.Memory.Kernel)
		}
		if r.Memory.KernelTCP != nil {
			c.Resources.KernelMemoryTCP = int64(*r.Memory.KernelTCP)
		}
		if r.Memory.Swappiness != nil {
			swappiness := int64(*r.Memory.Swappiness)
			c.Resources.MemorySwappiness = &swappiness
		}
	}
	if r.CPU != nil {
		if r.CPU.Shares != nil {
			c.Resources.CpuShares = int64(*r.CPU.Shares)
		}
		if r.CPU.Quota != nil {
			c.Resources.CpuQuota = int64(*r.CPU.Quota)
		}
		if r.CPU.Period != nil {
			c.Resources.CpuPeriod = int64(*r.CPU.Period)
		}
		if r.CPU.RealtimeRuntime != nil {
			c.Resources.CpuRtRuntime = int64(*r.CPU.RealtimeRuntime)
		}
		if r.CPU.RealtimePeriod != nil {
			c.Resources.CpuRtPeriod = int64(*r.CPU.RealtimePeriod)
		}
		if r.CPU.Cpus != nil {
			c.Resources.CpusetCpus = *r.CPU.Cpus
		}
		if r.CPU.Mems != nil {
			c.Resources.CpusetMems = *r.CPU.Mems
		}
	}
	if r.Pids != nil {
		c.Resources.PidsLimit = *r.Pids.Limit
	}
	if r.BlockIO != nil {
		if r.BlockIO.Weight != nil {
			c.Resources.BlkioWeight = *r.BlockIO.Weight
		}
		if r.BlockIO.LeafWeight != nil {
			c.Resources.BlkioLeafWeight = *r.BlockIO.LeafWeight
		}
		if r.BlockIO.WeightDevice != nil {
			for _, wd := range r.BlockIO.WeightDevice {
				var weight, leafWeight uint16
				if wd.Weight != nil {
					weight = *wd.Weight
				}
				if wd.LeafWeight != nil {
					leafWeight = *wd.LeafWeight
				}
				weightDevice := configs.NewWeightDevice(wd.Major, wd.Minor, weight, leafWeight)
				c.Resources.BlkioWeightDevice = append(c.Resources.BlkioWeightDevice, weightDevice)
			}
		}
		if r.BlockIO.ThrottleReadBpsDevice != nil {
			for _, td := range r.BlockIO.ThrottleReadBpsDevice {
				throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
				c.Resources.BlkioThrottleReadBpsDevice = append(c.Resources.BlkioThrottleReadBpsDevice, throttleDevice)
			}
		}
		if r.BlockIO.ThrottleWriteBpsDevice != nil {
			for _, td := range r.BlockIO.ThrottleWriteBpsDevice {
				throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
				c.Resources.BlkioThrottleWriteBpsDevice = append(c.Resources.BlkioThrottleWriteBpsDevice, throttleDevice)
			}
		}
		if r.BlockIO.ThrottleReadIOPSDevice != nil {
			for _, td := range r.BlockIO.ThrottleReadIOPSDevice {
				throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
				c.Resources.BlkioThrottleReadIOPSDevice = append(c.Resources.BlkioThrottleReadIOPSDevice, throttleDevice)
			}
		}
		if r.BlockIO.ThrottleWriteIOPSDevice != nil {
			for _, td := range r.BlockIO.ThrottleWriteIOPSDevice {
				throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
				c.Resources.BlkioThrottleWriteIOPSDevice = append(c.Resources.BlkioThrottleWriteIOPSDevice, throttleDevice)
			}
		}
	}
	for _, l := range r.HugepageLimits {
		c.Resources.HugetlbLimit = append(c.Resources.HugetlbLimit, &configs.HugepageLimit{
			Pagesize: *l.Pagesize,
			Limit:    *l.Limit,
		})
	}
	if r.DisableOOMKiller != nil {
		c.Resources.OomKillDisable = *r.DisableOOMKiller
	}
	if r.Network != nil {
		if r.Network.ClassID != nil {
			c.Resources.NetClsClassid = string(*r.Network.ClassID)
		}
		for _, m := range r.Network.Priorities {
			c.Resources.NetPrioIfpriomap = append(c.Resources.NetPrioIfpriomap, &configs.IfPrioMap{
				Interface: m.Name,
				Priority:  int64(m.Priority),
			})
		}
	}
	return c, nil
}
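// Standalone illustration (not part of the original code) of the systemd
// cgroupsPath convention parsed above: "slice:prefix:name" splits into the
// parent slice, a scope prefix, and the unit name. The example value is
// hypothetical; only strings and fmt from the standard library are used.
func exampleParseSystemdCgroupsPath() {
	parts := strings.Split("system.slice:docker:1234", ":")
	// parts[0] == "system.slice" (Parent)
	// parts[1] == "docker"       (ScopePrefix)
	// parts[2] == "1234"         (Name)
	fmt.Println(len(parts) == 3, parts[0], parts[1], parts[2])
}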
func createCgroupConfig(name string, spec *specs.LinuxRuntimeSpec, devices []*configs.Device) (*configs.Cgroup, error) {
	myCgroupPath, err := cgroups.GetThisCgroupDir("devices")
	if err != nil {
		return nil, err
	}
	c := &configs.Cgroup{
		Name:      name,
		Parent:    myCgroupPath,
		Resources: &configs.Resources{},
	}
	c.Resources.AllowedDevices = append(devices, allowedDevices...)
	r := spec.Linux.Resources
	if r != nil {
		if r.Memory != nil {
			if r.Memory.Limit != nil {
				c.Resources.Memory = int64(*r.Memory.Limit)
			}
			if r.Memory.Reservation != nil {
				c.Resources.MemoryReservation = int64(*r.Memory.Reservation)
			}
			if r.Memory.Swap != nil {
				c.Resources.MemorySwap = int64(*r.Memory.Swap)
			}
			if r.Memory.Kernel != nil {
				c.Resources.KernelMemory = int64(*r.Memory.Kernel)
			}
			if r.Memory.Swappiness != nil {
				c.Resources.MemorySwappiness = int64(*r.Memory.Swappiness)
			}
		}
		if r.CPU != nil {
			if r.CPU.Shares != nil {
				c.Resources.CpuShares = int64(*r.CPU.Shares)
			}
			if r.CPU.Quota != nil {
				c.Resources.CpuQuota = int64(*r.CPU.Quota)
			}
			if r.CPU.Period != nil {
				c.Resources.CpuPeriod = int64(*r.CPU.Period)
			}
			if r.CPU.RealtimeRuntime != nil {
				c.Resources.CpuRtRuntime = int64(*r.CPU.RealtimeRuntime)
			}
			if r.CPU.RealtimePeriod != nil {
				c.Resources.CpuRtPeriod = int64(*r.CPU.RealtimePeriod)
			}
			if r.CPU.Cpus != nil {
				c.Resources.CpusetCpus = *r.CPU.Cpus
			}
			if r.CPU.Mems != nil {
				c.Resources.CpusetMems = *r.CPU.Mems
			}
		}
		if r.Pids != nil {
			c.Resources.PidsLimit = *r.Pids.Limit
		}
		if r.BlockIO != nil {
			if r.BlockIO.Weight != nil {
				c.Resources.BlkioWeight = *r.BlockIO.Weight
			}
			if r.BlockIO.LeafWeight != nil {
				c.Resources.BlkioLeafWeight = *r.BlockIO.LeafWeight
			}
			if r.BlockIO.WeightDevice != nil {
				for _, wd := range r.BlockIO.WeightDevice {
					weightDevice := configs.NewWeightDevice(wd.Major, wd.Minor, *wd.Weight, *wd.LeafWeight)
					c.Resources.BlkioWeightDevice = append(c.Resources.BlkioWeightDevice, weightDevice)
				}
			}
			if r.BlockIO.ThrottleReadBpsDevice != nil {
				for _, td := range r.BlockIO.ThrottleReadBpsDevice {
					throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
					c.Resources.BlkioThrottleReadBpsDevice = append(c.Resources.BlkioThrottleReadBpsDevice, throttleDevice)
				}
			}
			if r.BlockIO.ThrottleWriteBpsDevice != nil {
				for _, td := range r.BlockIO.ThrottleWriteBpsDevice {
					throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
					c.Resources.BlkioThrottleWriteBpsDevice = append(c.Resources.BlkioThrottleWriteBpsDevice, throttleDevice)
				}
			}
			if r.BlockIO.ThrottleReadIOPSDevice != nil {
				for _, td := range r.BlockIO.ThrottleReadIOPSDevice {
					throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
					c.Resources.BlkioThrottleReadIOPSDevice = append(c.Resources.BlkioThrottleReadIOPSDevice, throttleDevice)
				}
			}
			if r.BlockIO.ThrottleWriteIOPSDevice != nil {
				for _, td := range r.BlockIO.ThrottleWriteIOPSDevice {
					throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
					c.Resources.BlkioThrottleWriteIOPSDevice = append(c.Resources.BlkioThrottleWriteIOPSDevice, throttleDevice)
				}
			}
		}
		for _, l := range r.HugepageLimits {
			c.Resources.HugetlbLimit = append(c.Resources.HugetlbLimit, &configs.HugepageLimit{
				Pagesize: *l.Pagesize,
				Limit:    *l.Limit,
			})
		}
		if r.DisableOOMKiller != nil {
			c.Resources.OomKillDisable = *r.DisableOOMKiller
		}
		if r.Network != nil {
			if r.Network.ClassID != nil {
				c.Resources.NetClsClassid = string(*r.Network.ClassID)
			}
			for _, m := range r.Network.Priorities {
				c.Resources.NetPrioIfpriomap = append(c.Resources.NetPrioIfpriomap, &configs.IfPrioMap{
					Interface: m.Name,
					Priority:  int64(m.Priority),
				})
			}
		}
	}
	return c, nil
}
func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
	s := oci.DefaultSpec()
	if err := daemon.populateCommonSpec(&s, c); err != nil {
		return nil, err
	}

	var cgroupsPath string
	scopePrefix := "docker"
	parent := "/docker"
	useSystemd := UsingSystemd(daemon.configStore)
	if useSystemd {
		parent = "system.slice"
	}

	if c.HostConfig.CgroupParent != "" {
		parent = c.HostConfig.CgroupParent
	} else if daemon.configStore.CgroupParent != "" {
		parent = daemon.configStore.CgroupParent
	}

	if useSystemd {
		cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID
		logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath)
	} else {
		cgroupsPath = filepath.Join(parent, c.ID)
	}
	s.Linux.CgroupsPath = &cgroupsPath

	if err := setResources(&s, c.HostConfig.Resources); err != nil {
		return nil, fmt.Errorf("linux runtime spec resources: %v", err)
	}
	s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj
	s.Linux.Sysctl = c.HostConfig.Sysctls

	p := *s.Linux.CgroupsPath
	if useSystemd {
		initPath, err := cgroups.GetInitCgroupDir("cpu")
		if err != nil {
			return nil, err
		}
		p, err = cgroups.GetThisCgroupDir("cpu")
		if err != nil {
			return nil, err
		}
		p = filepath.Join(initPath, p)
	}

	// Clean path to guard against things like ../../../BAD
	parentPath := filepath.Dir(p)
	if !filepath.IsAbs(parentPath) {
		parentPath = filepath.Clean("/" + parentPath)
	}

	if err := daemon.initCgroupsPath(parentPath); err != nil {
		return nil, fmt.Errorf("linux init cgroups path: %v", err)
	}
	if err := setDevices(&s, c); err != nil {
		return nil, fmt.Errorf("linux runtime spec devices: %v", err)
	}
	if err := setRlimits(daemon, &s, c); err != nil {
		return nil, fmt.Errorf("linux runtime spec rlimits: %v", err)
	}
	if err := setUser(&s, c); err != nil {
		return nil, fmt.Errorf("linux spec user: %v", err)
	}
	if err := setNamespaces(daemon, &s, c); err != nil {
		return nil, fmt.Errorf("linux spec namespaces: %v", err)
	}
	if err := setCapabilities(&s, c); err != nil {
		return nil, fmt.Errorf("linux spec capabilities: %v", err)
	}
	if err := setSeccomp(daemon, &s, c); err != nil {
		return nil, fmt.Errorf("linux seccomp: %v", err)
	}

	if err := daemon.setupIpcDirs(c); err != nil {
		return nil, err
	}
	if err := daemon.setupSecretDir(c); err != nil {
		return nil, err
	}

	ms, err := daemon.setupMounts(c)
	if err != nil {
		return nil, err
	}
	ms = append(ms, c.IpcMounts()...)
	tmpfsMounts, err := c.TmpfsMounts()
	if err != nil {
		return nil, err
	}
	ms = append(ms, tmpfsMounts...)
	if m := c.SecretMount(); m != nil {
		ms = append(ms, *m)
	}
	sort.Sort(mounts(ms))
	if err := setMounts(daemon, &s, c, ms); err != nil {
		return nil, fmt.Errorf("linux mounts: %v", err)
	}

	for _, ns := range s.Linux.Namespaces {
		if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled {
			target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"))
			if err != nil {
				return nil, err
			}
			s.Hooks = specs.Hooks{
				Prestart: []specs.Hook{{
					Path: target, // FIXME: cross-platform
					Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()},
				}},
			}
		}
	}

	if apparmor.IsEnabled() {
		var appArmorProfile string
		if c.AppArmorProfile != "" {
			appArmorProfile = c.AppArmorProfile
		} else if c.HostConfig.Privileged {
			appArmorProfile = "unconfined"
		} else {
			appArmorProfile = "docker-default"
		}

		if appArmorProfile == "docker-default" {
			// Unattended upgrades and other fun services can unload AppArmor
			// profiles inadvertently. Since we cannot store our profile in
			// /etc/apparmor.d, nor can we practically add other ways of
			// telling the system to keep our profile loaded, in order to make
			// sure that we keep the default profile enabled we dynamically
			// reload it if necessary.
			if err := ensureDefaultAppArmorProfile(); err != nil {
				return nil, err
			}
		}

		s.Process.ApparmorProfile = appArmorProfile
	}
	s.Process.SelinuxLabel = c.GetProcessLabel()
	s.Process.NoNewPrivileges = c.NoNewPrivileges
	s.Linux.MountLabel = c.MountLabel

	return (*specs.Spec)(&s), nil
}
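// Illustrative sketch (all values are hypothetical) of how createSpec derives
// the cgroup parent under systemd: the daemon's own cgroup dir is joined onto
// the init cgroup dir, and the result is cleaned so relative paths cannot
// escape upward. Only path/filepath from the standard library is used.
func exampleSystemdCgroupParent() string {
	initPath := "/"                            // assumed result of cgroups.GetInitCgroupDir("cpu")
	selfPath := "/system.slice/docker.service" // assumed result of cgroups.GetThisCgroupDir("cpu")
	p := filepath.Join(initPath, selfPath)     // "/system.slice/docker.service"
	parentPath := filepath.Dir(p)              // "/system.slice"
	if !filepath.IsAbs(parentPath) {
		parentPath = filepath.Clean("/" + parentPath)
	}
	return parentPath
}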
func createCgroupConfig(name string, spec *specs.LinuxSpec) (*configs.Cgroup, error) {
	var (
		err          error
		myCgroupPath string
	)

	if spec.Linux.CgroupsPath != nil {
		myCgroupPath = libcontainerUtils.CleanPath(*spec.Linux.CgroupsPath)
	} else {
		myCgroupPath, err = cgroups.GetThisCgroupDir("devices")
		if err != nil {
			return nil, err
		}
	}

	c := &configs.Cgroup{
		Path:      filepath.Join(myCgroupPath, name),
		Resources: &configs.Resources{},
	}

	c.Resources.AllowedDevices = allowedDevices
	r := spec.Linux.Resources
	if r == nil {
		return c, nil
	}
	for i, d := range spec.Linux.Resources.Devices {
		var (
			t     = 'a'
			major = int64(-1)
			minor = int64(-1)
		)
		if d.Type != nil {
			t = *d.Type
		}
		if d.Major != nil {
			major = *d.Major
		}
		if d.Minor != nil {
			minor = *d.Minor
		}
		if d.Access == nil || *d.Access == "" {
			return nil, fmt.Errorf("device access at %d field cannot be empty", i)
		}
		dd := &configs.Device{
			Type:        t,
			Major:       major,
			Minor:       minor,
			Permissions: *d.Access,
			Allow:       d.Allow,
		}
		c.Resources.Devices = append(c.Resources.Devices, dd)
	}
	// Append the default allowed devices to the end of the list.
	c.Resources.Devices = append(c.Resources.Devices, allowedDevices...)
	if r.Memory != nil {
		if r.Memory.Limit != nil {
			c.Resources.Memory = int64(*r.Memory.Limit)
		}
		if r.Memory.Reservation != nil {
			c.Resources.MemoryReservation = int64(*r.Memory.Reservation)
		}
		if r.Memory.Swap != nil {
			c.Resources.MemorySwap = int64(*r.Memory.Swap)
		}
		if r.Memory.Kernel != nil {
			c.Resources.KernelMemory = int64(*r.Memory.Kernel)
		}
		if r.Memory.Swappiness != nil {
			c.Resources.MemorySwappiness = int64(*r.Memory.Swappiness)
		}
	}
	if r.CPU != nil {
		if r.CPU.Shares != nil {
			c.Resources.CpuShares = int64(*r.CPU.Shares)
		}
		if r.CPU.Quota != nil {
			c.Resources.CpuQuota = int64(*r.CPU.Quota)
		}
		if r.CPU.Period != nil {
			c.Resources.CpuPeriod = int64(*r.CPU.Period)
		}
		if r.CPU.RealtimeRuntime != nil {
			c.Resources.CpuRtRuntime = int64(*r.CPU.RealtimeRuntime)
		}
		if r.CPU.RealtimePeriod != nil {
			c.Resources.CpuRtPeriod = int64(*r.CPU.RealtimePeriod)
		}
		if r.CPU.Cpus != nil {
			c.Resources.CpusetCpus = *r.CPU.Cpus
		}
		if r.CPU.Mems != nil {
			c.Resources.CpusetMems = *r.CPU.Mems
		}
	}
	if r.Pids != nil {
		c.Resources.PidsLimit = *r.Pids.Limit
	}
	if r.BlockIO != nil {
		if r.BlockIO.Weight != nil {
			c.Resources.BlkioWeight = *r.BlockIO.Weight
		}
		if r.BlockIO.LeafWeight != nil {
			c.Resources.BlkioLeafWeight = *r.BlockIO.LeafWeight
		}
		if r.BlockIO.WeightDevice != nil {
			for _, wd := range r.BlockIO.WeightDevice {
				weightDevice := configs.NewWeightDevice(wd.Major, wd.Minor, *wd.Weight, *wd.LeafWeight)
				c.Resources.BlkioWeightDevice = append(c.Resources.BlkioWeightDevice, weightDevice)
			}
		}
		if r.BlockIO.ThrottleReadBpsDevice != nil {
			for _, td := range r.BlockIO.ThrottleReadBpsDevice {
				throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
				c.Resources.BlkioThrottleReadBpsDevice = append(c.Resources.BlkioThrottleReadBpsDevice, throttleDevice)
			}
		}
		if r.BlockIO.ThrottleWriteBpsDevice != nil {
			for _, td := range r.BlockIO.ThrottleWriteBpsDevice {
				throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
				c.Resources.BlkioThrottleWriteBpsDevice = append(c.Resources.BlkioThrottleWriteBpsDevice, throttleDevice)
			}
		}
		if r.BlockIO.ThrottleReadIOPSDevice != nil {
			for _, td := range r.BlockIO.ThrottleReadIOPSDevice {
				throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
				c.Resources.BlkioThrottleReadIOPSDevice = append(c.Resources.BlkioThrottleReadIOPSDevice, throttleDevice)
			}
		}
		if r.BlockIO.ThrottleWriteIOPSDevice != nil {
			for _, td := range r.BlockIO.ThrottleWriteIOPSDevice {
				throttleDevice := configs.NewThrottleDevice(td.Major, td.Minor, *td.Rate)
				c.Resources.BlkioThrottleWriteIOPSDevice = append(c.Resources.BlkioThrottleWriteIOPSDevice, throttleDevice)
			}
		}
	}
	for _, l := range r.HugepageLimits {
		c.Resources.HugetlbLimit = append(c.Resources.HugetlbLimit, &configs.HugepageLimit{
			Pagesize: *l.Pagesize,
			Limit:    *l.Limit,
		})
	}
	if r.DisableOOMKiller != nil {
		c.Resources.OomKillDisable = *r.DisableOOMKiller
	}
	if r.Network != nil {
		if r.Network.ClassID != nil {
			c.Resources.NetClsClassid = string(*r.Network.ClassID)
		}
		for _, m := range r.Network.Priorities {
			c.Resources.NetPrioIfpriomap = append(c.Resources.NetPrioIfpriomap, &configs.IfPrioMap{
				Interface: m.Name,
				Priority:  int64(m.Priority),
			})
		}
	}
	return c, nil
}
func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
	s := oci.DefaultSpec()
	if err := daemon.populateCommonSpec(&s, c); err != nil {
		return nil, err
	}

	var cgroupsPath string
	scopePrefix := "docker"
	parent := "/docker"
	useSystemd := UsingSystemd(daemon.configStore)
	if useSystemd {
		parent = "system.slice"
	}

	if c.HostConfig.CgroupParent != "" {
		parent = c.HostConfig.CgroupParent
	} else if daemon.configStore.CgroupParent != "" {
		parent = daemon.configStore.CgroupParent
	}

	if useSystemd {
		cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID
		logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath)
	} else {
		cgroupsPath = filepath.Join(parent, c.ID)
	}
	s.Linux.CgroupsPath = &cgroupsPath

	if err := setResources(&s, c.HostConfig.Resources); err != nil {
		return nil, fmt.Errorf("linux runtime spec resources: %v", err)
	}
	s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj
	s.Linux.Sysctl = c.HostConfig.Sysctls

	p := *s.Linux.CgroupsPath
	if useSystemd {
		initPath, err := cgroups.GetInitCgroupDir("cpu")
		if err != nil {
			return nil, err
		}
		p, err = cgroups.GetThisCgroupDir("cpu")
		if err != nil {
			return nil, err
		}
		p = filepath.Join(initPath, p)
	}

	// Clean path to guard against things like ../../../BAD
	parentPath := filepath.Dir(p)
	if !filepath.IsAbs(parentPath) {
		parentPath = filepath.Clean("/" + parentPath)
	}

	if err := daemon.initCgroupsPath(parentPath); err != nil {
		return nil, fmt.Errorf("linux init cgroups path: %v", err)
	}
	if err := setDevices(&s, c); err != nil {
		return nil, fmt.Errorf("linux runtime spec devices: %v", err)
	}
	if err := setRlimits(daemon, &s, c); err != nil {
		return nil, fmt.Errorf("linux runtime spec rlimits: %v", err)
	}
	if err := setUser(&s, c); err != nil {
		return nil, fmt.Errorf("linux spec user: %v", err)
	}
	if err := setNamespaces(daemon, &s, c); err != nil {
		return nil, fmt.Errorf("linux spec namespaces: %v", err)
	}
	if err := setCapabilities(&s, c); err != nil {
		return nil, fmt.Errorf("linux spec capabilities: %v", err)
	}
	if err := setSeccomp(daemon, &s, c); err != nil {
		return nil, fmt.Errorf("linux seccomp: %v", err)
	}

	if err := daemon.setupIpcDirs(c); err != nil {
		return nil, err
	}

	ms, err := daemon.setupMounts(c)
	if err != nil {
		return nil, err
	}
	ms = append(ms, c.IpcMounts()...)
	tmpfsMounts, err := c.TmpfsMounts()
	if err != nil {
		return nil, err
	}
	ms = append(ms, tmpfsMounts...)
	sort.Sort(mounts(ms))
	if err := setMounts(daemon, &s, c, ms); err != nil {
		return nil, fmt.Errorf("linux mounts: %v", err)
	}

	for _, ns := range s.Linux.Namespaces {
		if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled {
			target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"))
			if err != nil {
				return nil, err
			}
			s.Hooks = specs.Hooks{
				Prestart: []specs.Hook{{
					Path: target, // FIXME: cross-platform
					Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()},
				}},
			}
		}
	}

	if apparmor.IsEnabled() {
		appArmorProfile := "docker-default"
		if len(c.AppArmorProfile) > 0 {
			appArmorProfile = c.AppArmorProfile
		} else if c.HostConfig.Privileged {
			appArmorProfile = "unconfined"
		}
		s.Process.ApparmorProfile = appArmorProfile
	}
	s.Process.SelinuxLabel = c.GetProcessLabel()
	s.Process.NoNewPrivileges = c.NoNewPrivileges
	s.Linux.MountLabel = c.MountLabel

	return (*specs.Spec)(&s), nil
}
// New takes a memory storage and returns a new manager.
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool, ignoreMetricsSet container.MetricSet) (Manager, error) {
	if memoryCache == nil {
		return nil, fmt.Errorf("manager requires memory storage")
	}

	// Detect the container we are running on.
	selfContainer, err := cgroups.GetThisCgroupDir("cpu")
	if err != nil {
		return nil, err
	}
	glog.Infof("cAdvisor running in container: %q", selfContainer)

	dockerStatus, err := docker.Status()
	if err != nil {
		glog.Warningf("Unable to connect to Docker: %v", err)
	}
	newManager := &manager{
		containers:               make(map[namespacedContainerName]*containerData),
		quitChannels:             make([]chan error, 0, 2),
		memoryCache:              memoryCache,
		cadvisorContainer:        selfContainer,
		startupTime:              time.Now(),
		maxHousekeepingInterval:  maxHousekeepingInterval,
		allowDynamicHousekeeping: allowDynamicHousekeeping,
		ignoreMetrics:            ignoreMetricsSet,
		containerWatchers:        []watcher.ContainerWatcher{},
	}
	hyperStatus, err := newManager.HyperInfo()
	if err != nil {
		glog.Warningf("Unable to connect to Hyper: %v", err)
	}
	rktPath, err := rkt.RktPath()
	if err != nil {
		glog.Warningf("unable to connect to Rkt api service: %v", err)
	}

	context := fs.Context{
		Docker: fs.DockerContext{
			Root:         docker.RootDir(),
			Driver:       dockerStatus.Driver,
			DriverStatus: dockerStatus.DriverStatus,
		},
		Hyper: fs.HyperContext{
			Root:         hyper.RootDir(hyperStatus),
			Driver:       hyperStatus.Driver,
			DriverStatus: hyperStatus.DriverStatus,
		},
		RktPath: rktPath,
	}
	fsInfo, err := fs.NewFsInfo(context)
	if err != nil {
		return nil, err
	}

	// If cAdvisor was started with the host's rootfs mounted, assume that it is
	// running in its own namespaces.
	inHostNamespace := false
	if _, err := os.Stat("/rootfs/proc"); os.IsNotExist(err) {
		inHostNamespace = true
	}

	// Register for new subcontainers.
	eventsChannel := make(chan watcher.ContainerEvent, 16)

	newManager.fsInfo = fsInfo
	newManager.eventsChannel = eventsChannel
	newManager.inHostNamespace = inHostNamespace

	machineInfo, err := machine.Info(sysfs, fsInfo, inHostNamespace)
	if err != nil {
		return nil, err
	}
	newManager.machineInfo = *machineInfo
	glog.Infof("Machine: %+v", newManager.machineInfo)

	versionInfo, err := getVersionInfo()
	if err != nil {
		return nil, err
	}
	glog.Infof("Version: %+v", *versionInfo)

	newManager.eventHandler = events.NewEventManager(parseEventsStoragePolicy())
	return newManager, nil
}