func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// Get Filesystem information only for the root cgroup.
	if self.name == "/" {
		filesystems, err := self.fsInfo.GetGlobalFsInfo()
		if err != nil {
			return err
		}
		for _, fs := range filesystems {
			stats.Filesystem = append(stats.Filesystem, info.FsStats{
				Device:          fs.Device,
				Limit:           fs.Capacity,
				Usage:           fs.Capacity - fs.Free,
				ReadsCompleted:  fs.DiskStats.ReadsCompleted,
				ReadsMerged:     fs.DiskStats.ReadsMerged,
				SectorsRead:     fs.DiskStats.SectorsRead,
				ReadTime:        fs.DiskStats.ReadTime,
				WritesCompleted: fs.DiskStats.WritesCompleted,
				WritesMerged:    fs.DiskStats.WritesMerged,
				SectorsWritten:  fs.DiskStats.SectorsWritten,
				WriteTime:       fs.DiskStats.WriteTime,
				IoInProgress:    fs.DiskStats.IoInProgress,
				IoTime:          fs.DiskStats.IoTime,
				WeightedIoTime:  fs.DiskStats.WeightedIoTime,
			})
		}
	} else if len(self.externalMounts) > 0 {
		mountSet := make(map[string]struct{})
		for _, mount := range self.externalMounts {
			mountSet[mount.HostDir] = struct{}{}
		}
		filesystems, err := self.fsInfo.GetFsInfoForPath(mountSet)
		if err != nil {
			return err
		}
		for _, fs := range filesystems {
			stats.Filesystem = append(stats.Filesystem, info.FsStats{
				Device:          fs.Device,
				Limit:           fs.Capacity,
				Usage:           fs.Capacity - fs.Free,
				ReadsCompleted:  fs.DiskStats.ReadsCompleted,
				ReadsMerged:     fs.DiskStats.ReadsMerged,
				SectorsRead:     fs.DiskStats.SectorsRead,
				ReadTime:        fs.DiskStats.ReadTime,
				WritesCompleted: fs.DiskStats.WritesCompleted,
				WritesMerged:    fs.DiskStats.WritesMerged,
				SectorsWritten:  fs.DiskStats.SectorsWritten,
				WriteTime:       fs.DiskStats.WriteTime,
				IoInProgress:    fs.DiskStats.IoInProgress,
				IoTime:          fs.DiskStats.IoTime,
				WeightedIoTime:  fs.DiskStats.WeightedIoTime,
			})
		}
	}
	return nil
}
// buildTrace generates a synthetic sequence of container stats for testing.
// cpu holds per-interval CPU usage deltas (accumulated into a monotonically
// increasing total), mem holds absolute memory usage samples, and duration is
// the spacing between consecutive samples. cpu and mem must have equal length.
func buildTrace(cpu, mem []uint64, duration time.Duration) []*info.ContainerStats {
	if len(cpu) != len(mem) {
		panic("len(cpu) != len(mem)")
	}
	ret := make([]*info.ContainerStats, len(cpu))
	currentTime := time.Now()

	var cpuTotalUsage uint64 = 0
	for i, cpuUsage := range cpu {
		cpuTotalUsage += cpuUsage
		stats := new(info.ContainerStats)
		stats.Timestamp = currentTime
		currentTime = currentTime.Add(duration)

		stats.Cpu.Usage.Total = cpuTotalUsage
		stats.Cpu.Usage.User = stats.Cpu.Usage.Total
		stats.Cpu.Usage.System = 0
		stats.Cpu.Usage.PerCpu = []uint64{cpuTotalUsage}

		stats.Memory.Usage = mem[i]

		// Network and filesystem values are arbitrary placeholders; they only
		// need to be present, not meaningful.
		stats.Network.RxBytes = uint64(rand.Intn(10000))
		stats.Network.RxErrors = uint64(rand.Intn(1000))
		stats.Network.TxBytes = uint64(rand.Intn(100000))
		stats.Network.TxErrors = uint64(rand.Intn(1000))

		stats.Filesystem = make([]info.FsStats, 1)
		stats.Filesystem[0].Device = "/dev/sda1"
		stats.Filesystem[0].Limit = 1024000000
		stats.Filesystem[0].Usage = 1024000
		ret[i] = stats
	}
	return ret
}
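// A minimal usage sketch (not part of the original sources): buildTrace can be
// used to produce a fixed-size trace for storage tests. The function name
// exampleTrace and the sample values below are assumptions for illustration.
func exampleTrace() []*info.ContainerStats {
	// Five samples spaced one second apart: CPU usage grows by 100 units per
	// interval, memory stays constant at 1 MiB.
	cpu := []uint64{100, 100, 100, 100, 100}
	mem := []uint64{1 << 20, 1 << 20, 1 << 20, 1 << 20, 1 << 20}
	return buildTrace(cpu, mem, 1*time.Second)
}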
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// No support for non-aufs storage drivers.
	if !self.usesAufsDriver {
		return nil
	}

	// As of now we assume that all the storage dirs are on the same device.
	// The first storage dir will be that of the image layers.
	deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])
	if err != nil {
		return err
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}

	var limit uint64 = 0
	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}

	var usage uint64 = 0
	for _, dir := range self.storageDirs {
		// TODO(Vishh): Add support for external mounts.
		dirUsage, err := self.fsInfo.GetDirUsage(dir)
		if err != nil {
			return err
		}
		usage += dirUsage
	}
	fsStat.Usage = usage
	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}