func buildTrace(cpu, mem []uint64, duration time.Duration) []*info.ContainerStats {
	if len(cpu) != len(mem) {
		panic("len(cpu) != len(mem)")
	}
	ret := make([]*info.ContainerStats, len(cpu))
	currentTime := time.Now()
	var cpuTotalUsage uint64 = 0
	for i, cpuUsage := range cpu {
		cpuTotalUsage += cpuUsage
		stats := new(info.ContainerStats)
		stats.Timestamp = currentTime
		currentTime = currentTime.Add(duration)

		stats.Cpu.Usage.Total = cpuTotalUsage
		stats.Cpu.Usage.User = stats.Cpu.Usage.Total
		stats.Cpu.Usage.System = 0
		stats.Cpu.Usage.PerCpu = []uint64{cpuTotalUsage}

		stats.Memory.Usage = mem[i]

		stats.Network.RxBytes = uint64(rand.Intn(10000))
		stats.Network.RxErrors = uint64(rand.Intn(1000))
		stats.Network.TxBytes = uint64(rand.Intn(100000))
		stats.Network.TxErrors = uint64(rand.Intn(1000))

		stats.Filesystem = make([]info.FsStats, 1)
		stats.Filesystem[0].Device = "/dev/sda1"
		stats.Filesystem[0].Limit = 1024000000
		stats.Filesystem[0].Usage = 1024000
		ret[i] = stats
	}
	return ret
}
func GenerateRandomStats(numStats, numCores int, duration time.Duration) []*info.ContainerStats {
	ret := make([]*info.ContainerStats, numStats)
	perCoreUsages := make([]uint64, numCores)
	currentTime := time.Now()
	for i := range perCoreUsages {
		perCoreUsages[i] = uint64(rand.Int63n(1000))
	}
	for i := 0; i < numStats; i++ {
		stats := new(info.ContainerStats)
		stats.Timestamp = currentTime
		currentTime = currentTime.Add(duration)

		percore := make([]uint64, numCores)
		for i := range perCoreUsages {
			perCoreUsages[i] += uint64(rand.Int63n(1000))
			percore[i] = perCoreUsages[i]
			stats.Cpu.Usage.Total += percore[i]
		}
		stats.Cpu.Usage.PerCpu = percore
		stats.Cpu.Usage.User = stats.Cpu.Usage.Total
		stats.Cpu.Usage.System = 0
		stats.Memory.Usage = uint64(rand.Int63n(4096))
		stats.Memory.Cache = uint64(rand.Int63n(4096))
		stats.Memory.RSS = uint64(rand.Int63n(4096))
		ret[i] = stats
	}
	return ret
}
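// A minimal usage sketch (hypothetical test, not part of the original source):
// because GenerateRandomStats accumulates per-core usage across samples, the
// reported cumulative CPU total should never decrease between consecutive stats.
func TestGeneratedCpuTotalsAreMonotonic(t *testing.T) {
	stats := GenerateRandomStats(10, 4, time.Second)
	for i := 1; i < len(stats); i++ {
		if stats[i].Cpu.Usage.Total < stats[i-1].Cpu.Usage.Total {
			t.Errorf("cumulative CPU usage decreased at sample %d: %d < %d",
				i, stats[i].Cpu.Usage.Total, stats[i-1].Cpu.Usage.Total)
		}
	}
}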
func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// Get Filesystem information only for the root cgroup.
	if isRootCgroup(self.name) {
		filesystems, err := self.fsInfo.GetGlobalFsInfo()
		if err != nil {
			return err
		}
		for i := range filesystems {
			fs := filesystems[i]
			stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
		}
	} else if len(self.externalMounts) > 0 {
		mountSet := make(map[string]struct{})
		for _, mount := range self.externalMounts {
			mountSet[mount.HostDir] = struct{}{}
		}
		filesystems, err := self.fsInfo.GetFsInfoForPath(mountSet)
		if err != nil {
			return err
		}
		for i := range filesystems {
			fs := filesystems[i]
			stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
		}
	}
	return nil
}
func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// Get Filesystem information only for the root cgroup.
	if self.name == "/" {
		filesystems, err := self.fsInfo.GetGlobalFsInfo()
		if err != nil {
			return err
		}
		for _, fs := range filesystems {
			stats.Filesystem = append(stats.Filesystem, info.FsStats{
				Device:          fs.Device,
				Limit:           fs.Capacity,
				Usage:           fs.Capacity - fs.Free,
				Available:       fs.Available,
				ReadsCompleted:  fs.DiskStats.ReadsCompleted,
				ReadsMerged:     fs.DiskStats.ReadsMerged,
				SectorsRead:     fs.DiskStats.SectorsRead,
				ReadTime:        fs.DiskStats.ReadTime,
				WritesCompleted: fs.DiskStats.WritesCompleted,
				WritesMerged:    fs.DiskStats.WritesMerged,
				SectorsWritten:  fs.DiskStats.SectorsWritten,
				WriteTime:       fs.DiskStats.WriteTime,
				IoInProgress:    fs.DiskStats.IoInProgress,
				IoTime:          fs.DiskStats.IoTime,
				WeightedIoTime:  fs.DiskStats.WeightedIoTime,
			})
		}
	} else if len(self.externalMounts) > 0 {
		mountSet := make(map[string]struct{})
		for _, mount := range self.externalMounts {
			mountSet[mount.HostDir] = struct{}{}
		}
		filesystems, err := self.fsInfo.GetFsInfoForPath(mountSet)
		if err != nil {
			return err
		}
		for _, fs := range filesystems {
			stats.Filesystem = append(stats.Filesystem, info.FsStats{
				Device:          fs.Device,
				Limit:           fs.Capacity,
				Usage:           fs.Capacity - fs.Free,
				ReadsCompleted:  fs.DiskStats.ReadsCompleted,
				ReadsMerged:     fs.DiskStats.ReadsMerged,
				SectorsRead:     fs.DiskStats.SectorsRead,
				ReadTime:        fs.DiskStats.ReadTime,
				WritesCompleted: fs.DiskStats.WritesCompleted,
				WritesMerged:    fs.DiskStats.WritesMerged,
				SectorsWritten:  fs.DiskStats.SectorsWritten,
				WriteTime:       fs.DiskStats.WriteTime,
				IoInProgress:    fs.DiskStats.IoInProgress,
				IoTime:          fs.DiskStats.IoTime,
				WeightedIoTime:  fs.DiskStats.WeightedIoTime,
			})
		}
	}
	return nil
}
// Convert libcontainer stats to info.ContainerStats.
func toContainerStats(libcontainerStats *libcontainer.Stats) *info.ContainerStats {
	s := libcontainerStats.CgroupStats
	ret := new(info.ContainerStats)
	ret.Timestamp = time.Now()

	if s != nil {
		ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
		ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
		n := len(s.CpuStats.CpuUsage.PercpuUsage)
		ret.Cpu.Usage.PerCpu = make([]uint64, n)

		ret.Cpu.Usage.Total = 0
		for i := 0; i < n; i++ {
			ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
			ret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i]
		}

		ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
		ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
		ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
		ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
		ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
		ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
		ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
		ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)

		ret.Memory.Usage = s.MemoryStats.Usage
		if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
			ret.Memory.ContainerData.Pgfault = v
			ret.Memory.HierarchicalData.Pgfault = v
		}
		if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
			ret.Memory.ContainerData.Pgmajfault = v
			ret.Memory.HierarchicalData.Pgmajfault = v
		}
		if v, ok := s.MemoryStats.Stats["total_inactive_anon"]; ok {
			ret.Memory.WorkingSet = ret.Memory.Usage - v
			if v, ok := s.MemoryStats.Stats["total_active_file"]; ok {
				ret.Memory.WorkingSet -= v
			}
		}
	}
	if len(libcontainerStats.Interfaces) > 0 {
		// TODO(vmarmol): Handle multiple interfaces.
		ret.Network.RxBytes = libcontainerStats.Interfaces[0].RxBytes
		ret.Network.RxPackets = libcontainerStats.Interfaces[0].RxPackets
		ret.Network.RxErrors = libcontainerStats.Interfaces[0].RxErrors
		ret.Network.RxDropped = libcontainerStats.Interfaces[0].RxDropped
		ret.Network.TxBytes = libcontainerStats.Interfaces[0].TxBytes
		ret.Network.TxPackets = libcontainerStats.Interfaces[0].TxPackets
		ret.Network.TxErrors = libcontainerStats.Interfaces[0].TxErrors
		ret.Network.TxDropped = libcontainerStats.Interfaces[0].TxDropped
	}
	return ret
}
// Convert libcontainer stats to info.ContainerStats.
func toContainerStats(libcontainerStats *libcontainer.ContainerStats) *info.ContainerStats {
	s := libcontainerStats.CgroupStats
	ret := new(info.ContainerStats)
	ret.Timestamp = time.Now()

	if s != nil {
		ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
		ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
		n := len(s.CpuStats.CpuUsage.PercpuUsage)
		ret.Cpu.Usage.PerCpu = make([]uint64, n)

		ret.Cpu.Usage.Total = 0
		for i := 0; i < n; i++ {
			ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
			ret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i]
		}

		ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
		ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
		ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
		ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
		ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
		ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
		ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
		ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)

		ret.Memory.Usage = s.MemoryStats.Usage
		if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
			ret.Memory.ContainerData.Pgfault = v
			ret.Memory.HierarchicalData.Pgfault = v
		}
		if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
			ret.Memory.ContainerData.Pgmajfault = v
			ret.Memory.HierarchicalData.Pgmajfault = v
		}
		if v, ok := s.MemoryStats.Stats["total_inactive_anon"]; ok {
			ret.Memory.WorkingSet = ret.Memory.Usage - v
			if v, ok := s.MemoryStats.Stats["total_active_file"]; ok {
				ret.Memory.WorkingSet -= v
			}
		}
	}
	// TODO(vishh): Perform a deep copy or alias libcontainer network stats.
	if libcontainerStats.NetworkStats != nil {
		ret.Network = *(*info.NetworkStats)(libcontainerStats.NetworkStats)
	}
	return ret
}
func toContainerStats(libcontainerStats *libcontainer.Stats) *info.ContainerStats {
	s := libcontainerStats.CgroupStats
	ret := new(info.ContainerStats)
	ret.Timestamp = time.Now()

	if s != nil {
		toContainerStats0(s, ret)
		toContainerStats1(s, ret)
		toContainerStats2(s, ret)
	}
	if len(libcontainerStats.Interfaces) > 0 {
		toContainerStats3(libcontainerStats, ret)
	}
	return ret
}
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	switch self.storageDriver {
	case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
	default:
		return nil
	}

	// As of now we assume that all the storage dirs are on the same device.
	// The first storage dir will be that of the image layers.
	deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])
	if err != nil {
		return err
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}
	var limit uint64 = 0
	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}
	fsStat.Usage = self.fsHandler.usage()
	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	switch self.storageDriver {
	case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
	default:
		return nil
	}

	deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
	if err != nil {
		return err
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}
	var limit uint64 = 0
	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}
	fsStat.BaseUsage, fsStat.Usage = self.fsHandler.usage()
	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error {
	if handler.ignoreMetrics.Has(container.DiskUsageMetrics) {
		return nil
	}

	deviceInfo, err := handler.fsInfo.GetDirFsDevice(handler.rootfsStorageDir)
	if err != nil {
		return err
	}

	mi, err := handler.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}
	var limit uint64 = 0
	// Use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}
	fsStat.BaseUsage, fsStat.Usage = handler.fsHandler.Usage()
	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
// cmeFactory generates a complete ContainerMetricElement with fuzzed data.
// CMEs created by cmeFactory contain partially fuzzed stats, aside from hardcoded values for memory usage.
// The timestamp of the CME is rounded to the current minute and offset by a random, non-zero number of
// five-minute intervals.
func cmeFactory() *cache.ContainerMetricElement {
	f := fuzz.New().NilChance(0).NumElements(1, 1)
	containerSpec := cadvisor.ContainerSpec{
		CreationTime:  time.Now(),
		HasCpu:        true,
		HasMemory:     true,
		HasNetwork:    true,
		HasFilesystem: true,
		HasDiskIo:     true,
	}
	containerSpec.Cpu.Limit = 1024
	containerSpec.Memory.Limit = 10000000

	// Create a fuzzed ContainerStats struct.
	var containerStats cadvisor.ContainerStats
	f.Fuzz(&containerStats)

	// Standardize the timestamp to the current minute plus a random, non-zero offset of 5 to 45 minutes.
	now_time := time.Now().Round(time.Minute)
	new_time := now_time
	for new_time == now_time {
		new_time = now_time.Add(time.Duration(rand.Intn(10)) * 5 * time.Minute)
	}
	containerStats.Timestamp = new_time
	containerSpec.CreationTime = new_time.Add(-time.Hour)

	// Standardize memory usage and limit to test aggregation.
	containerStats.Memory.Usage = uint64(5000)
	containerStats.Memory.WorkingSet = uint64(602)

	// Standardize the device name, usage and limit.
	new_fs := cadvisor.FsStats{}
	f.Fuzz(&new_fs)
	new_fs.Device = "/dev/device1"
	new_fs.Usage = 50000
	new_fs.Limit = 100000
	containerStats.Filesystem = []cadvisor.FsStats{new_fs}

	return &cache.ContainerMetricElement{
		Spec:  &containerSpec,
		Stats: &containerStats,
	}
}
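// A minimal sketch of how the factory might be exercised (hypothetical test, not part of the
// original source): every generated CME should carry the standardized memory usage and device
// name, which is what makes aggregation over fuzzed CMEs deterministic.
func TestCmeFactoryStandardizedFields(t *testing.T) {
	for i := 0; i < 5; i++ {
		cme := cmeFactory()
		if cme.Stats.Memory.Usage != 5000 {
			t.Errorf("unexpected memory usage: %d", cme.Stats.Memory.Usage)
		}
		if len(cme.Stats.Filesystem) != 1 || cme.Stats.Filesystem[0].Device != "/dev/device1" {
			t.Errorf("unexpected filesystem stats: %+v", cme.Stats.Filesystem)
		}
	}
}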
// emptyCMEFactory generates an empty ContainerMetricElement.
func emptyCMEFactory() *cache.ContainerMetricElement {
	f := fuzz.New().NilChance(0).NumElements(1, 1)
	containerSpec := cadvisor.ContainerSpec{
		CreationTime:  time.Now(),
		HasCpu:        false,
		HasMemory:     false,
		HasNetwork:    false,
		HasFilesystem: false,
		HasDiskIo:     false,
	}
	var containerStats cadvisor.ContainerStats
	f.Fuzz(&containerStats)
	containerStats.Timestamp = time.Now()

	return &cache.ContainerMetricElement{
		Spec:  &containerSpec,
		Stats: &containerStats,
	}
}
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
		return nil
	}
	var device string
	switch self.storageDriver {
	case devicemapperStorageDriver:
		// Device has to be the pool name to correlate with the device name as
		// set in the machine info filesystems.
		device = self.poolName
	case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
		deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
		if err != nil {
			return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
		}
		device = deviceInfo.Device
	default:
		return nil
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}

	var (
		limit  uint64
		fsType string
	)

	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == device {
			limit = fs.Capacity
			fsType = fs.Type
			break
		}
	}

	fsStat := info.FsStats{Device: device, Type: fsType, Limit: limit}

	usage := self.fsHandler.Usage()
	fsStat.BaseUsage = usage.BaseUsageBytes
	fsStat.Usage = usage.TotalUsageBytes
	fsStat.Inodes = usage.InodeUsage

	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
// cmeFactory generates a complete ContainerMetricElement with fuzzed data.
func cmeFactory() *cache.ContainerMetricElement {
	f := fuzz.New().NilChance(0).NumElements(1, 1)
	containerSpec := cadvisor.ContainerSpec{
		CreationTime:  time.Now(),
		HasCpu:        true,
		HasMemory:     true,
		HasNetwork:    true,
		HasFilesystem: true,
		HasDiskIo:     true,
	}
	var containerStats cadvisor.ContainerStats
	f.Fuzz(&containerStats)
	containerStats.Timestamp = time.Now()

	new_fs := cadvisor.FsStats{}
	f.Fuzz(&new_fs)
	new_fs.Device = "/dev/device1"
	containerStats.Filesystem = []cadvisor.FsStats{new_fs}

	return &cache.ContainerMetricElement{
		Spec:  &containerSpec,
		Stats: &containerStats,
	}
}
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// No support for non-aufs storage drivers.
	if !self.usesAufsDriver {
		return nil
	}

	// As of now we assume that all the storage dirs are on the same device.
	// The first storage dir will be that of the image layers.
	deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])
	if err != nil {
		return err
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}
	var limit uint64 = 0
	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}

	var usage uint64 = 0
	for _, dir := range self.storageDirs {
		// TODO(Vishh): Add support for external mounts.
		dirUsage, err := self.fsInfo.GetDirUsage(dir)
		if err != nil {
			return err
		}
		usage += dirUsage
	}
	fsStat.Usage = usage
	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
func (self *hyperContainerHandler) GetStats() (*info.ContainerStats, error) {
	stats := info.ContainerStats{
		Timestamp: time.Now(),
		DiskIo: info.DiskIoStats{
			IoServiceBytes: make([]info.PerDiskStats, 0, 1),
			IoServiced:     make([]info.PerDiskStats, 0, 1),
		},
		Network: info.NetworkStats{
			Interfaces: make([]info.InterfaceStats, 0, 1),
		},
	}

	// TODO: container-level stats are not supported yet.
	if !self.isPod {
		return self.fakeStats()
	}

	podStats, err := self.client.GetPodStats(self.podID)
	if err != nil {
		return nil, fmt.Errorf("Failed to get hyper pod stats: %v", err)
	}

	stats.Cpu = info.CpuStats{
		Usage: info.CpuUsage{
			Total:  podStats.Cpu.Usage.Total,
			PerCpu: podStats.Cpu.Usage.PerCpu,
			User:   podStats.Cpu.Usage.User,
			System: podStats.Cpu.Usage.System,
		},
	}

	for _, stat := range podStats.Block.IoServiceBytesRecursive {
		stats.DiskIo.IoServiceBytes = append(stats.DiskIo.IoServiceBytes, info.PerDiskStats{
			Major: stat.Major,
			Minor: stat.Minor,
			Stats: stat.Stat,
		})
	}
	for _, stat := range podStats.Block.IoServicedRecursive {
		stats.DiskIo.IoServiced = append(stats.DiskIo.IoServiced, info.PerDiskStats{
			Major: stat.Major,
			Minor: stat.Minor,
			Stats: stat.Stat,
		})
	}

	stats.Memory = info.MemoryStats{
		Usage: podStats.Memory.Usage,
	}

	for _, stat := range podStats.Network.Interfaces {
		stats.Network.Interfaces = append(stats.Network.Interfaces, info.InterfaceStats{
			Name:      stat.Name,
			RxBytes:   stat.RxBytes,
			RxDropped: stat.RxDropped,
			RxErrors:  stat.RxErrors,
			RxPackets: stat.RxPackets,
			TxBytes:   stat.TxBytes,
			TxPackets: stat.TxPackets,
			TxErrors:  stat.TxErrors,
			TxDropped: stat.TxDropped,
		})

		stats.Network.RxBytes += stat.RxBytes
		stats.Network.RxPackets += stat.RxPackets
		stats.Network.RxErrors += stat.RxErrors
		stats.Network.RxDropped += stat.RxDropped
		stats.Network.TxBytes += stat.TxBytes
		stats.Network.TxPackets += stat.TxPackets
		stats.Network.TxErrors += stat.TxErrors
		stats.Network.TxDropped += stat.TxDropped
	}

	// TODO: filesystem stats and task stats are not supported yet.
	stats.Filesystem = []info.FsStats{}
	stats.TaskStats = info.LoadStats{}

	return &stats, nil
}
func (self *hyperContainerHandler) fakeStats() (*info.ContainerStats, error) {
	stats := info.ContainerStats{Timestamp: time.Now()}

	stats.Cpu = info.CpuStats{
		Usage: info.CpuUsage{
			Total:  24750780,
			PerCpu: []uint64{18354559, 6396221},
			User:   0,
			System: 10000000,
		},
		LoadAverage: 0,
	}

	stats.DiskIo = info.DiskIoStats{
		IoServiceBytes: []info.PerDiskStats{
			{
				Major: 253,
				Minor: 8,
				Stats: map[string]uint64{"Async": 5353472, "Read": 5353472, "Sync": 0, "Total": 5353472, "Write": 0},
			},
		},
	}

	stats.Memory = info.MemoryStats{
		Usage:      5763072,
		WorkingSet: 1871872,
		ContainerData: info.MemoryStatsMemoryData{
			Pgfault:    3174,
			Pgmajfault: 12,
		},
		HierarchicalData: info.MemoryStatsMemoryData{
			Pgfault:    3174,
			Pgmajfault: 12,
		},
	}

	stats.Network = info.NetworkStats{
		InterfaceStats: info.InterfaceStats{
			Name:      "eth0",
			RxBytes:   123223,
			RxPackets: 128,
			TxBytes:   10240,
			TxPackets: 10,
		},
		Interfaces: []info.InterfaceStats{
			{
				Name:      "eth0",
				RxBytes:   123223,
				RxPackets: 128,
				TxBytes:   10240,
				TxPackets: 10,
			},
		},
	}

	stats.Filesystem = []info.FsStats{}
	stats.TaskStats = info.LoadStats{}

	return &stats, nil
}