func buildTrace(cpu, mem []uint64, duration time.Duration) []*info.ContainerStats {
	if len(cpu) != len(mem) {
		panic("len(cpu) != len(mem)")
	}
	ret := make([]*info.ContainerStats, len(cpu))
	currentTime := time.Now()

	var cpuTotalUsage uint64 = 0
	for i, cpuUsage := range cpu {
		cpuTotalUsage += cpuUsage
		stats := new(info.ContainerStats)
		stats.Cpu = new(info.CpuStats)
		stats.Memory = new(info.MemoryStats)
		stats.Timestamp = currentTime
		currentTime = currentTime.Add(duration)

		stats.Cpu.Usage.Total = cpuTotalUsage
		stats.Cpu.Usage.User = stats.Cpu.Usage.Total
		stats.Cpu.Usage.System = 0
		stats.Cpu.Usage.PerCpu = []uint64{cpuUsage}

		stats.Memory.Usage = mem[i]
		ret[i] = stats
	}
	return ret
}
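// A hypothetical usage sketch (not from the source): exercising buildTrace in a
// storage-driver test. It assumes buildTrace is reachable from the test and
// relies only on the behavior shown above: CPU totals accumulate and
// timestamps advance by `duration` per sample.
func TestBuildTraceIsCumulative(t *testing.T) {
	cpuTrace := []uint64{10, 20, 30}       // per-interval CPU usage
	memTrace := []uint64{1024, 2048, 4096} // absolute memory usage
	trace := buildTrace(cpuTrace, memTrace, time.Second)

	// Cpu.Usage.Total is a running sum of the per-interval values: 10+20+30.
	if got, want := trace[2].Cpu.Usage.Total, uint64(60); got != want {
		t.Errorf("Total = %d, want %d", got, want)
	}
	// Consecutive samples are `duration` apart.
	if !trace[1].Timestamp.Equal(trace[0].Timestamp.Add(time.Second)) {
		t.Error("timestamps do not advance by the expected duration")
	}
}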
func (self *influxdbStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
	if stats == nil || stats.Cpu == nil || stats.Memory == nil {
		return nil
	}
	// AddStats may be invoked concurrently from multiple goroutines; buffering
	// under the lock ensures only one of them performs the actual write.
	var seriesToFlush []*influxdb.Series
	func() {
		self.lock.Lock()
		defer self.lock.Unlock()
		series := self.newSeries(self.containerStatsToValues(ref, stats))
		self.series = append(self.series, series)
		self.prevStats = stats.Copy(self.prevStats)
		if self.readyToFlush() {
			seriesToFlush = self.series
			self.series = make([]*influxdb.Series, 0)
			self.lastWrite = time.Now()
		}
	}()
	if len(seriesToFlush) > 0 {
		err := self.client.WriteSeries(seriesToFlush)
		if err != nil {
			return fmt.Errorf("failed to write stats to influxDb - %s", err)
		}
	}
	return nil
}
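// readyToFlush is referenced above but not shown. A minimal sketch of what it
// plausibly does, assuming the struct carries a bufferDuration field alongside
// the lastWrite timestamp used above: buffer points until enough time has
// passed since the last write.
func (self *influxdbStorage) readyToFlush() bool {
	return time.Since(self.lastWrite) >= self.bufferDuration // bufferDuration is an assumed field
}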
func GenerateRandomStats(numStats, numCores int, duration time.Duration) []*info.ContainerStats {
	ret := make([]*info.ContainerStats, numStats)
	perCoreUsages := make([]uint64, numCores)
	currentTime := time.Now()
	for i := range perCoreUsages {
		perCoreUsages[i] = uint64(rand.Int63n(1000))
	}
	for i := 0; i < numStats; i++ {
		stats := new(info.ContainerStats)
		stats.Cpu = new(info.CpuStats)
		stats.Memory = new(info.MemoryStats)
		stats.Timestamp = currentTime
		currentTime = currentTime.Add(duration)

		percore := make([]uint64, numCores)
		// Use a distinct loop variable so the outer stats index is not shadowed.
		for c := range perCoreUsages {
			// Per-core usage is cumulative, so grow each counter by a random increment.
			perCoreUsages[c] += uint64(rand.Int63n(1000))
			percore[c] = perCoreUsages[c]
			stats.Cpu.Usage.Total += percore[c]
		}
		stats.Cpu.Usage.PerCpu = percore
		stats.Cpu.Usage.User = stats.Cpu.Usage.Total
		stats.Cpu.Usage.System = 0
		stats.Memory.Usage = uint64(rand.Int63n(4096))
		ret[i] = stats
	}
	return ret
}
func libcontainerToContainerStats(s *cgroups.Stats, mi *info.MachineInfo) *info.ContainerStats {
	ret := new(info.ContainerStats)
	ret.Timestamp = time.Now()
	ret.Cpu = new(info.CpuStats)
	ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
	ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
	n := len(s.CpuStats.CpuUsage.PercpuUsage)
	ret.Cpu.Usage.PerCpu = make([]uint64, n)

	ret.Cpu.Usage.Total = 0
	for i := 0; i < n; i++ {
		ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
		ret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i]
	}

	ret.Memory = new(info.MemoryStats)
	ret.Memory.Usage = s.MemoryStats.Usage
	if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
		ret.Memory.ContainerData.Pgfault = v
		ret.Memory.HierarchicalData.Pgfault = v
	}
	if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
		ret.Memory.ContainerData.Pgmajfault = v
		ret.Memory.HierarchicalData.Pgmajfault = v
	}
	return ret
}
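// A hypothetical helper (not part of the source) showing how the cumulative
// counters produced above are typically consumed: cgroup cpuacct usage is
// reported in nanoseconds of CPU time, so the average number of cores in use
// over an interval is the counter delta divided by the elapsed wall time.
func cpuUsageRate(prev, cur *info.ContainerStats) float64 {
	if prev.Cpu == nil || cur.Cpu == nil {
		return 0
	}
	elapsed := cur.Timestamp.Sub(prev.Timestamp)
	if elapsed <= 0 || cur.Cpu.Usage.Total < prev.Cpu.Usage.Total {
		return 0 // clock skew or counter reset; no meaningful rate
	}
	return float64(cur.Cpu.Usage.Total-prev.Cpu.Usage.Total) / float64(elapsed.Nanoseconds())
}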
func (self *randomStatsContainer) GetStats() (*info.ContainerStats, error) {
	stats := new(info.ContainerStats)
	stats.Cpu = new(info.CpuStats)
	stats.Memory = new(info.MemoryStats)
	stats.Memory.Usage = uint64(rand.Intn(2048))
	return stats, nil
}
// This variant of buildTrace additionally fills in random network counters and
// a fixed filesystem entry for each sample.
func buildTrace(cpu, mem []uint64, duration time.Duration) []*info.ContainerStats {
	if len(cpu) != len(mem) {
		panic("len(cpu) != len(mem)")
	}
	ret := make([]*info.ContainerStats, len(cpu))
	currentTime := time.Now()

	var cpuTotalUsage uint64 = 0
	for i, cpuUsage := range cpu {
		cpuTotalUsage += cpuUsage
		stats := new(info.ContainerStats)
		stats.Timestamp = currentTime
		currentTime = currentTime.Add(duration)

		stats.Cpu.Usage.Total = cpuTotalUsage
		stats.Cpu.Usage.User = stats.Cpu.Usage.Total
		stats.Cpu.Usage.System = 0
		stats.Cpu.Usage.PerCpu = []uint64{cpuTotalUsage}

		stats.Memory.Usage = mem[i]

		stats.Network.RxBytes = uint64(rand.Intn(10000))
		stats.Network.RxErrors = uint64(rand.Intn(1000))
		stats.Network.TxBytes = uint64(rand.Intn(100000))
		stats.Network.TxErrors = uint64(rand.Intn(1000))

		stats.Filesystem = make([]info.FsStats, 1)
		stats.Filesystem[0].Device = "/dev/sda1"
		stats.Filesystem[0].Limit = 1024000000
		stats.Filesystem[0].Usage = 1024000
		ret[i] = stats
	}
	return ret
}
func (self *replayTrace) GetStats() (*info.ContainerStats, error) {
	stats := new(info.ContainerStats)
	stats.Cpu = new(info.CpuStats)
	stats.Memory = new(info.MemoryStats)

	// Both traces are mutated below, so hold the lock for the whole
	// read-and-advance, not just the CPU portion.
	self.lock.Lock()
	defer self.lock.Unlock()

	if len(self.memTrace) > 0 {
		stats.Memory.Usage = self.memTrace[0]
		self.memTrace = self.memTrace[1:]
	}

	cpuTrace := self.totalUsage
	if len(self.cpuTrace) > 0 {
		cpuTrace += self.cpuTrace[0]
		self.cpuTrace = self.cpuTrace[1:]
	}
	self.totalUsage = cpuTrace
	stats.Timestamp = self.currenttime
	self.currenttime = self.currenttime.Add(self.duration)

	stats.Cpu.Usage.Total = cpuTrace
	stats.Cpu.Usage.PerCpu = []uint64{cpuTrace}
	stats.Cpu.Usage.User = cpuTrace
	stats.Cpu.Usage.System = 0
	return stats, nil
}
func protobufToContainerStats(pstats *ContainerStats) *info.ContainerStats {
	ret := new(info.ContainerStats)
	if pstats.GetCpu() != nil {
		pcpu := pstats.GetCpu()
		cpustats := new(info.CpuStats)
		cpustats.Usage.Total = pcpu.GetUsage().GetTotal()
		percpu := pcpu.GetUsage().GetPerCpu()
		if len(percpu) > 0 {
			cpustats.Usage.PerCpu = make([]uint64, len(percpu))
			for i, p := range percpu {
				cpustats.Usage.PerCpu[i] = uint64(p)
			}
		}
		cpustats.Usage.User = uint64(pcpu.GetUsage().GetUser())
		cpustats.Usage.System = uint64(pcpu.GetUsage().GetSystem())
		cpustats.Load = pcpu.GetLoad()
		ret.Cpu = cpustats
	}
	if pstats.GetMemory() != nil {
		pmem := pstats.GetMemory()
		memstats := new(info.MemoryStats)
		memstats.Limit = uint64(pmem.GetLimit())
		memstats.Usage = uint64(pmem.GetUsage())
		protobufToMemoryData(pmem.GetContainerData(), &memstats.ContainerData)
		protobufToMemoryData(pmem.GetHierarchicalData(), &memstats.HierarchicalData)
		ret.Memory = memstats
	}
	return ret
}
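// protobufToMemoryData is called above but not defined in this section. A
// plausible shape for it, assuming (as a labeled guess) that the protobuf
// MemoryStatsMemoryData message mirrors the page-fault counters of
// info.MemoryStatsMemoryData:
func protobufToMemoryData(pdata *MemoryStatsMemoryData, data *info.MemoryStatsMemoryData) {
	if pdata == nil || data == nil {
		return
	}
	data.Pgfault = uint64(pdata.GetPgfault())
	data.Pgmajfault = uint64(pdata.GetPgmajfault())
}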
func (self *containerStorage) updatePrevStats(stats *info.ContainerStats) {
	if stats == nil || stats.Cpu == nil || stats.Memory == nil {
		// Discard incomplete stats.
		self.prevStats = nil
		return
	}
	self.prevStats = stats.Copy(self.prevStats)
}
func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// Get filesystem information only for the root cgroup.
	if self.name == "/" {
		filesystems, err := self.fsInfo.GetGlobalFsInfo()
		if err != nil {
			return err
		}
		for _, fs := range filesystems {
			stats.Filesystem = append(stats.Filesystem, info.FsStats{
				Device:          fs.Device,
				Limit:           fs.Capacity,
				Usage:           fs.Capacity - fs.Free,
				ReadsCompleted:  fs.DiskStats.ReadsCompleted,
				ReadsMerged:     fs.DiskStats.ReadsMerged,
				SectorsRead:     fs.DiskStats.SectorsRead,
				ReadTime:        fs.DiskStats.ReadTime,
				WritesCompleted: fs.DiskStats.WritesCompleted,
				WritesMerged:    fs.DiskStats.WritesMerged,
				SectorsWritten:  fs.DiskStats.SectorsWritten,
				WriteTime:       fs.DiskStats.WriteTime,
				IoInProgress:    fs.DiskStats.IoInProgress,
				IoTime:          fs.DiskStats.IoTime,
				WeightedIoTime:  fs.DiskStats.WeightedIoTime,
			})
		}
	} else if len(self.externalMounts) > 0 {
		mountSet := make(map[string]struct{})
		for _, mount := range self.externalMounts {
			mountSet[mount.HostDir] = struct{}{}
		}
		filesystems, err := self.fsInfo.GetFsInfoForPath(mountSet)
		if err != nil {
			return err
		}
		for _, fs := range filesystems {
			stats.Filesystem = append(stats.Filesystem, info.FsStats{
				Device:          fs.Device,
				Limit:           fs.Capacity,
				Usage:           fs.Capacity - fs.Free,
				ReadsCompleted:  fs.DiskStats.ReadsCompleted,
				ReadsMerged:     fs.DiskStats.ReadsMerged,
				SectorsRead:     fs.DiskStats.SectorsRead,
				ReadTime:        fs.DiskStats.ReadTime,
				WritesCompleted: fs.DiskStats.WritesCompleted,
				WritesMerged:    fs.DiskStats.WritesMerged,
				SectorsWritten:  fs.DiskStats.SectorsWritten,
				WriteTime:       fs.DiskStats.WriteTime,
				IoInProgress:    fs.DiskStats.IoInProgress,
				IoTime:          fs.DiskStats.IoTime,
				WeightedIoTime:  fs.DiskStats.WeightedIoTime,
			})
		}
	}
	return nil
}
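// Both branches above build an identical info.FsStats from the same fields. A
// hypothetical refactoring (not in the source) that factors out the
// conversion; it assumes the fs-info element type is fs.Fs, exposing Device,
// Capacity, Free, and an embedded DiskStats, as used above.
func fsToFsStats(f fs.Fs) info.FsStats {
	return info.FsStats{
		Device:          f.Device,
		Limit:           f.Capacity,
		Usage:           f.Capacity - f.Free, // used bytes derived from capacity minus free space
		ReadsCompleted:  f.DiskStats.ReadsCompleted,
		ReadsMerged:     f.DiskStats.ReadsMerged,
		SectorsRead:     f.DiskStats.SectorsRead,
		ReadTime:        f.DiskStats.ReadTime,
		WritesCompleted: f.DiskStats.WritesCompleted,
		WritesMerged:    f.DiskStats.WritesMerged,
		SectorsWritten:  f.DiskStats.SectorsWritten,
		WriteTime:       f.DiskStats.WriteTime,
		IoInProgress:    f.DiskStats.IoInProgress,
		IoTime:          f.DiskStats.IoTime,
		WeightedIoTime:  f.DiskStats.WeightedIoTime,
	}
}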
func (self *bigqueryStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
	if stats == nil || stats.Cpu == nil || stats.Memory == nil {
		return nil
	}
	row := self.containerStatsToValues(ref, stats)
	self.prevStats = stats.Copy(self.prevStats)
	return self.client.InsertRow(row)
}
// Convert libcontainer stats to info.ContainerStats.
func toContainerStats(libcontainerStats *libcontainer.ContainerStats) *info.ContainerStats {
	s := libcontainerStats.CgroupStats
	ret := new(info.ContainerStats)
	ret.Timestamp = time.Now()

	if s != nil {
		ret.Cpu.Usage.User = s.CpuStats.CpuUsage.UsageInUsermode
		ret.Cpu.Usage.System = s.CpuStats.CpuUsage.UsageInKernelmode
		n := len(s.CpuStats.CpuUsage.PercpuUsage)
		ret.Cpu.Usage.PerCpu = make([]uint64, n)

		ret.Cpu.Usage.Total = 0
		for i := 0; i < n; i++ {
			ret.Cpu.Usage.PerCpu[i] = s.CpuStats.CpuUsage.PercpuUsage[i]
			ret.Cpu.Usage.Total += s.CpuStats.CpuUsage.PercpuUsage[i]
		}

		ret.DiskIo.IoServiceBytes = DiskStatsCopy(s.BlkioStats.IoServiceBytesRecursive)
		ret.DiskIo.IoServiced = DiskStatsCopy(s.BlkioStats.IoServicedRecursive)
		ret.DiskIo.IoQueued = DiskStatsCopy(s.BlkioStats.IoQueuedRecursive)
		ret.DiskIo.Sectors = DiskStatsCopy(s.BlkioStats.SectorsRecursive)
		ret.DiskIo.IoServiceTime = DiskStatsCopy(s.BlkioStats.IoServiceTimeRecursive)
		ret.DiskIo.IoWaitTime = DiskStatsCopy(s.BlkioStats.IoWaitTimeRecursive)
		ret.DiskIo.IoMerged = DiskStatsCopy(s.BlkioStats.IoMergedRecursive)
		ret.DiskIo.IoTime = DiskStatsCopy(s.BlkioStats.IoTimeRecursive)

		ret.Memory.Usage = s.MemoryStats.Usage
		if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
			ret.Memory.ContainerData.Pgfault = v
			ret.Memory.HierarchicalData.Pgfault = v
		}
		if v, ok := s.MemoryStats.Stats["pgmajfault"]; ok {
			ret.Memory.ContainerData.Pgmajfault = v
			ret.Memory.HierarchicalData.Pgmajfault = v
		}
		if v, ok := s.MemoryStats.Stats["total_inactive_anon"]; ok {
			ret.Memory.WorkingSet = ret.Memory.Usage - v
			if v, ok := s.MemoryStats.Stats["total_active_file"]; ok {
				ret.Memory.WorkingSet -= v
			}
		}
	}

	// TODO(vishh): Perform a deep copy or alias libcontainer network stats.
	if libcontainerStats.NetworkStats != nil {
		ret.Network = *(*info.NetworkStats)(libcontainerStats.NetworkStats)
	}
	return ret
}
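// The working-set computation above nests the second lookup, so
// "total_active_file" is only subtracted when "total_inactive_anon" is
// present. A hypothetical extraction of that arithmetic (not in the source),
// with illustrative numbers: usage 600 MiB, inactive anon 100 MiB, active
// file 50 MiB gives a working set of 600 - 100 - 50 = 450 MiB.
func workingSet(usage uint64, memStats map[string]uint64) uint64 {
	ws := usage
	if v, ok := memStats["total_inactive_anon"]; ok {
		ws -= v
		if v, ok := memStats["total_active_file"]; ok {
			ws -= v
		}
	}
	return ws
}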
func (self *influxdbStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
	if stats == nil || stats.Cpu == nil || stats.Memory == nil {
		return nil
	}
	series := &influxdb.Series{
		Name: self.tableName,
		// There is only one point per stats entry.
		Points: make([][]interface{}, 1),
	}
	series.Columns, series.Points[0] = self.containerStatsToValues(ref, stats)
	self.prevStats = stats.Copy(self.prevStats)
	return self.client.WriteSeries([]*influxdb.Series{series})
}
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// No support for non-aufs storage drivers.
	if !self.usesAufsDriver {
		return nil
	}
	// As of now we assume that all the storage dirs are on the same device.
	// The first storage dir will be that of the image layers.
	deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])
	if err != nil {
		return err
	}
	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}
	// Docker does not impose any filesystem limits for containers,
	// so use the device capacity as the limit.
	var limit uint64
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}
	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}

	var usage uint64
	for _, dir := range self.storageDirs {
		// TODO(Vishh): Add support for external mounts.
		dirUsage, err := self.fsInfo.GetDirUsage(dir)
		if err != nil {
			return err
		}
		usage += dirUsage
	}
	fsStat.Usage = usage
	stats.Filesystem = append(stats.Filesystem, fsStat)
	return nil
}