func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	switch self.storageDriver {
	case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
	default:
		return nil
	}

	// As of now we assume that all the storage dirs are on the same device.
	// The first storage dir will be that of the image layers.
	deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])
	if err != nil {
		return err
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}

	var limit uint64 = 0
	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}
	fsStat.Usage = self.fsHandler.usage()
	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error {
	if handler.ignoreMetrics.Has(container.DiskUsageMetrics) {
		return nil
	}

	deviceInfo, err := handler.fsInfo.GetDirFsDevice(handler.rootfsStorageDir)
	if err != nil {
		return err
	}

	mi, err := handler.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}

	var limit uint64 = 0
	// Use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}

	usage := handler.fsHandler.Usage()
	fsStat.BaseUsage = usage.BaseUsageBytes
	fsStat.Usage = usage.TotalUsageBytes
	fsStat.Inodes = usage.InodeUsage

	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
		return nil
	}

	var device string
	switch self.storageDriver {
	case devicemapperStorageDriver:
		// Device has to be the pool name to correlate with the device name as
		// set in the machine info filesystems.
		device = self.poolName
	case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
		deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
		if err != nil {
			return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
		}
		device = deviceInfo.Device
	default:
		return nil
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}

	var (
		limit  uint64
		fsType string
	)

	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == device {
			limit = fs.Capacity
			fsType = fs.Type
			break
		}
	}

	fsStat := info.FsStats{Device: device, Type: fsType, Limit: limit}

	usage := self.fsHandler.Usage()
	fsStat.BaseUsage = usage.BaseUsageBytes
	fsStat.Usage = usage.TotalUsageBytes
	fsStat.Inodes = usage.InodeUsage

	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
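// Note: each variant above derives the filesystem "limit" the same way: Docker imposes no
// per-container filesystem quota, so the reported limit is the capacity of the device that
// backs the container's storage. Below is a minimal, self-contained sketch of that lookup
// pattern; Filesystem and limitForDevice are simplified stand-ins for illustration only,
// not the actual cAdvisor types or API.

// Filesystem is a simplified stand-in for an entry in the machine info's filesystem list.
type Filesystem struct {
	Device   string
	Type     string
	Capacity uint64
}

// limitForDevice mirrors the loop in getFsStats: report the capacity (and type) of the
// machine filesystem whose device matches the container's storage device.
func limitForDevice(filesystems []Filesystem, device string) (limit uint64, fsType string) {
	for _, fs := range filesystems {
		if fs.Device == device {
			return fs.Capacity, fs.Type
		}
	}
	// Unknown device: limit stays zero, matching the behavior of the handlers above.
	return 0, ""
}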
// cmeFactory generates a complete ContainerMetricElement with fuzzed data.
// CMEs created by cmeFactory contain partially fuzzed stats, aside from hardcoded values
// for memory and filesystem usage.
// The timestamp of the CME is rounded to the current minute and offset by a random,
// nonzero multiple of 5 minutes.
func cmeFactory() *cache.ContainerMetricElement {
	f := fuzz.New().NilChance(0).NumElements(1, 1)
	containerSpec := source_api.ContainerSpec{
		ContainerSpec: cadvisor.ContainerSpec{
			CreationTime:  time.Now(),
			HasCpu:        true,
			HasMemory:     true,
			HasNetwork:    true,
			HasFilesystem: true,
			HasDiskIo:     true,
		},
	}
	containerSpec.Cpu.Limit = 1024
	containerSpec.Memory.Limit = 10000000

	// Create a fuzzed ContainerStats struct.
	var containerStats source_api.ContainerStats
	f.Fuzz(&containerStats)

	// Standardize the timestamp: round to the current minute, then offset by a random,
	// nonzero multiple of 5 minutes.
	now_time := time.Now().Round(time.Minute)
	new_time := now_time
	for new_time == now_time {
		new_time = now_time.Add(time.Duration(rand.Intn(10)) * 5 * time.Minute)
	}
	containerStats.Timestamp = new_time
	containerSpec.CreationTime = new_time.Add(-time.Hour)

	// Standardize memory usage and limit to test aggregation.
	containerStats.Memory.Usage = uint64(5000)
	containerStats.Memory.WorkingSet = uint64(602)

	// Standardize the device name, usage and limit.
	new_fs := cadvisor.FsStats{}
	f.Fuzz(&new_fs)
	new_fs.Device = "/dev/device1"
	new_fs.Usage = 50000
	new_fs.Limit = 100000
	containerStats.Filesystem = []cadvisor.FsStats{new_fs}

	return &cache.ContainerMetricElement{
		Spec:  &containerSpec,
		Stats: &containerStats,
	}
}
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// No support for non-aufs storage drivers.
	if !self.usesAufsDriver {
		return nil
	}

	// As of now we assume that all the storage dirs are on the same device.
	// The first storage dir will be that of the image layers.
	deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])
	if err != nil {
		return err
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}

	var limit uint64 = 0
	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}

	var usage uint64 = 0
	for _, dir := range self.storageDirs {
		// TODO(Vishh): Add support for external mounts.
		dirUsage, err := self.fsInfo.GetDirUsage(dir)
		if err != nil {
			return err
		}
		usage += dirUsage
	}
	fsStat.Usage = usage
	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
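// The earliest variant above computes usage by summing fsInfo.GetDirUsage over each storage
// directory. As an illustration only, per-directory usage could be approximated by walking
// the tree and summing regular-file sizes, as in the hypothetical helper below. This is a
// sketch, not necessarily how cAdvisor's fsInfo.GetDirUsage is implemented. Requires the
// "os" and "path/filepath" imports.

// dirUsageBytes approximates the disk usage of a directory tree by summing the sizes of
// regular files beneath root.
func dirUsageBytes(root string) (uint64, error) {
	var total uint64
	err := filepath.Walk(root, func(path string, fi os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if fi.Mode().IsRegular() {
			total += uint64(fi.Size())
		}
		return nil
	})
	return total, err
}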