Example #1
func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// Get Filesystem information only for the root cgroup.
	if isRootCgroup(self.name) {
		filesystems, err := self.fsInfo.GetGlobalFsInfo()
		if err != nil {
			return err
		}
		for i := range filesystems {
			fs := filesystems[i]
			stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
		}
	} else if len(self.externalMounts) > 0 {
		mountSet := make(map[string]struct{})
		for _, mount := range self.externalMounts {
			mountSet[mount.HostDir] = struct{}{}
		}
		filesystems, err := self.fsInfo.GetFsInfoForPath(mountSet)
		if err != nil {
			return err
		}
		for i := range filesystems {
			fs := filesystems[i]
			stats.Filesystem = append(stats.Filesystem, fsToFsStats(&fs))
		}
	}
	return nil
}
Example #2
func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// Get Filesystem information only for the root cgroup.
	if self.name == "/" {
		filesystems, err := self.fsInfo.GetGlobalFsInfo()
		if err != nil {
			return err
		}
		for _, fs := range filesystems {
			stats.Filesystem = append(stats.Filesystem,
				info.FsStats{
					Device:          fs.Device,
					Limit:           fs.Capacity,
					Usage:           fs.Capacity - fs.Free,
					Available:       fs.Available,
					ReadsCompleted:  fs.DiskStats.ReadsCompleted,
					ReadsMerged:     fs.DiskStats.ReadsMerged,
					SectorsRead:     fs.DiskStats.SectorsRead,
					ReadTime:        fs.DiskStats.ReadTime,
					WritesCompleted: fs.DiskStats.WritesCompleted,
					WritesMerged:    fs.DiskStats.WritesMerged,
					SectorsWritten:  fs.DiskStats.SectorsWritten,
					WriteTime:       fs.DiskStats.WriteTime,
					IoInProgress:    fs.DiskStats.IoInProgress,
					IoTime:          fs.DiskStats.IoTime,
					WeightedIoTime:  fs.DiskStats.WeightedIoTime,
				})
		}
	} else if len(self.externalMounts) > 0 {
		mountSet := make(map[string]struct{})
		for _, mount := range self.externalMounts {
			mountSet[mount.HostDir] = struct{}{}
		}
		filesystems, err := self.fsInfo.GetFsInfoForPath(mountSet)
		if err != nil {
			return err
		}
		for _, fs := range filesystems {
			stats.Filesystem = append(stats.Filesystem,
				info.FsStats{
					Device:          fs.Device,
					Limit:           fs.Capacity,
					Usage:           fs.Capacity - fs.Free,
					ReadsCompleted:  fs.DiskStats.ReadsCompleted,
					ReadsMerged:     fs.DiskStats.ReadsMerged,
					SectorsRead:     fs.DiskStats.SectorsRead,
					ReadTime:        fs.DiskStats.ReadTime,
					WritesCompleted: fs.DiskStats.WritesCompleted,
					WritesMerged:    fs.DiskStats.WritesMerged,
					SectorsWritten:  fs.DiskStats.SectorsWritten,
					WriteTime:       fs.DiskStats.WriteTime,
					IoInProgress:    fs.DiskStats.IoInProgress,
					IoTime:          fs.DiskStats.IoTime,
					WeightedIoTime:  fs.DiskStats.WeightedIoTime,
				})
		}
	}
	return nil
}
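
Examples #1 and #2 are two revisions of the same function: #1 factors the root-cgroup check and the stats conversion into isRootCgroup and fsToFsStats helpers, while #2 writes both inline. The helpers themselves are not part of the snippet; a minimal sketch, reconstructed from the inline code in Example #2 and assuming the filesystems slice holds fs.Fs values, might look like this:

// Hypothetical sketches of the helpers called in Example #1, reconstructed
// from the inline logic in Example #2; the real definitions live elsewhere.

// isRootCgroup reports whether the container name refers to the root cgroup.
func isRootCgroup(name string) bool {
	return name == "/"
}

// fsToFsStats converts one fs.Fs entry into info.FsStats, mirroring the
// field mapping written out inline in Example #2.
func fsToFsStats(fs *fs.Fs) info.FsStats {
	return info.FsStats{
		Device:          fs.Device,
		Limit:           fs.Capacity,
		Usage:           fs.Capacity - fs.Free,
		Available:       fs.Available,
		ReadsCompleted:  fs.DiskStats.ReadsCompleted,
		ReadsMerged:     fs.DiskStats.ReadsMerged,
		SectorsRead:     fs.DiskStats.SectorsRead,
		ReadTime:        fs.DiskStats.ReadTime,
		WritesCompleted: fs.DiskStats.WritesCompleted,
		WritesMerged:    fs.DiskStats.WritesMerged,
		SectorsWritten:  fs.DiskStats.SectorsWritten,
		WriteTime:       fs.DiskStats.WriteTime,
		IoInProgress:    fs.DiskStats.IoInProgress,
		IoTime:          fs.DiskStats.IoTime,
		WeightedIoTime:  fs.DiskStats.WeightedIoTime,
	}
}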
Example #3
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	switch self.storageDriver {
	case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
	default:
		return nil
	}

	// As of now we assume that all the storage dirs are on the same device.
	// The first storage dir will be that of the image layers.
	deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])
	if err != nil {
		return err
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}
	var limit uint64
	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}

	fsStat.Usage = self.fsHandler.usage()
	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
Example #4
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	switch self.storageDriver {
	case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
	default:
		return nil
	}

	deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
	if err != nil {
		return err
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}
	var limit uint64
	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}

	fsStat.BaseUsage, fsStat.Usage = self.fsHandler.usage()
	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
Example #5
func (handler *rktContainerHandler) getFsStats(stats *info.ContainerStats) error {
	if handler.ignoreMetrics.Has(container.DiskUsageMetrics) {
		return nil
	}

	deviceInfo, err := handler.fsInfo.GetDirFsDevice(handler.rootfsStorageDir)
	if err != nil {
		return err
	}

	mi, err := handler.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}
	var limit uint64

	// Use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}

	fsStat.BaseUsage, fsStat.Usage = handler.fsHandler.Usage()

	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
Example #6
func buildTrace(cpu, mem []uint64, duration time.Duration) []*info.ContainerStats {
	if len(cpu) != len(mem) {
		panic("len(cpu) != len(mem)")
	}

	ret := make([]*info.ContainerStats, len(cpu))
	currentTime := time.Now()

	var cpuTotalUsage uint64
	for i, cpuUsage := range cpu {
		cpuTotalUsage += cpuUsage
		stats := new(info.ContainerStats)
		stats.Timestamp = currentTime
		currentTime = currentTime.Add(duration)

		stats.Cpu.Usage.Total = cpuTotalUsage
		stats.Cpu.Usage.User = stats.Cpu.Usage.Total
		stats.Cpu.Usage.System = 0
		stats.Cpu.Usage.PerCpu = []uint64{cpuTotalUsage}

		stats.Memory.Usage = mem[i]

		stats.Network.RxBytes = uint64(rand.Intn(10000))
		stats.Network.RxErrors = uint64(rand.Intn(1000))
		stats.Network.TxBytes = uint64(rand.Intn(100000))
		stats.Network.TxErrors = uint64(rand.Intn(1000))

		stats.Filesystem = []info.FsStats{{
			Device: "/dev/sda1",
			Limit:  1024000000,
			Usage:  1024000,
		}}
		ret[i] = stats
	}
	return ret
}
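
buildTrace pairs each CPU sample with a memory sample, spaces the resulting stats one duration apart, and accumulates the CPU values into a monotonically increasing Usage.Total. A small illustrative call (the sample values here are made up) could look like this:

// Illustrative only: three samples spaced ten seconds apart. The cpu slice
// holds per-interval deltas, which buildTrace accumulates into Usage.Total.
trace := buildTrace(
	[]uint64{100, 200, 300},             // per-interval CPU usage deltas
	[]uint64{1 << 20, 2 << 20, 3 << 20}, // memory usage in bytes
	10*time.Second,
)
// trace[2].Cpu.Usage.Total is 600 (100+200+300); trace[2].Memory.Usage is 3 MiB.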
Example #7
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	if self.ignoreMetrics.Has(container.DiskUsageMetrics) {
		return nil
	}
	var device string
	switch self.storageDriver {
	case devicemapperStorageDriver:
		// Device has to be the pool name to correlate with the device name as
		// set in the machine info filesystems.
		device = self.poolName
	case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
		deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
		if err != nil {
			return fmt.Errorf("unable to determine device info for dir: %v: %v", self.rootfsStorageDir, err)
		}
		device = deviceInfo.Device
	default:
		return nil
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}

	var (
		limit  uint64
		fsType string
	)

	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == device {
			limit = fs.Capacity
			fsType = fs.Type
			break
		}
	}

	fsStat := info.FsStats{Device: device, Type: fsType, Limit: limit}
	usage := self.fsHandler.Usage()
	fsStat.BaseUsage = usage.BaseUsageBytes
	fsStat.Usage = usage.TotalUsageBytes
	fsStat.Inodes = usage.InodeUsage

	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
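
Examples #3, #4 and #7 show the fsHandler call site evolving: usage() first returns a single value, then a (BaseUsage, Usage) pair, and finally Usage() returns a struct whose fields Example #7 reads. The interface definition is not included in these snippets; a minimal sketch consistent with the Example #7 call site (field and method names taken from that example, everything else assumed) would be:

// Hypothetical reconstruction from the call sites above; only Usage() is
// implied by these snippets, the real interface may carry more methods.
type FsUsage struct {
	BaseUsageBytes  uint64 // usage of the container's writable layer
	TotalUsageBytes uint64 // usage across all tracked directories
	InodeUsage      uint64 // number of inodes in use
}

type FsHandler interface {
	Usage() FsUsage
}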
Example #8
// cmeFactory generates a complete ContainerMetricElement with fuzzed data.
// CMEs created by cmeFactory contain partially fuzzed stats, aside from hardcoded values for memory and filesystem usage.
// The timestamp of the CME is rounded to the current minute and offset by a random multiple of five minutes.
func cmeFactory() *cache.ContainerMetricElement {
	f := fuzz.New().NilChance(0).NumElements(1, 1)
	containerSpec := cadvisor.ContainerSpec{
		CreationTime:  time.Now(),
		HasCpu:        true,
		HasMemory:     true,
		HasNetwork:    true,
		HasFilesystem: true,
		HasDiskIo:     true,
	}
	containerSpec.Cpu.Limit = 1024
	containerSpec.Memory.Limit = 10000000

	// Create a fuzzed ContainerStats struct
	var containerStats cadvisor.ContainerStats
	f.Fuzz(&containerStats)

	// Standardize the timestamp to the current minute plus a random multiple of five minutes ([5m, 45m])
	nowTime := time.Now().Round(time.Minute)
	newTime := nowTime
	for newTime == nowTime {
		newTime = nowTime.Add(time.Duration(rand.Intn(10)) * 5 * time.Minute)
	}
	containerStats.Timestamp = newTime
	containerSpec.CreationTime = newTime.Add(-time.Hour)

	// Standardize memory usage and working set to test aggregation
	containerStats.Memory.Usage = uint64(5000)
	containerStats.Memory.WorkingSet = uint64(602)

	// Standardize the device name, usage and limit
	newFs := cadvisor.FsStats{}
	f.Fuzz(&newFs)
	newFs.Device = "/dev/device1"
	newFs.Usage = 50000
	newFs.Limit = 100000
	containerStats.Filesystem = []cadvisor.FsStats{newFs}

	return &cache.ContainerMetricElement{
		Spec:  &containerSpec,
		Stats: &containerStats,
	}
}
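
Because cmeFactory pins the memory and filesystem fields while fuzzing everything else, tests can assert on the standardized values across many generated elements. A hypothetical test sketch:

// Hypothetical test sketch: the standardized fields are deterministic even
// though the remaining stats are fuzzed.
func TestCmeFactoryStandardizedFields(t *testing.T) {
	cme := cmeFactory()
	if cme.Stats.Memory.Usage != 5000 {
		t.Errorf("expected standardized memory usage 5000, got %d", cme.Stats.Memory.Usage)
	}
	if fs := cme.Stats.Filesystem[0]; fs.Device != "/dev/device1" || fs.Limit != 100000 {
		t.Errorf("unexpected filesystem stats: %+v", fs)
	}
}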
Example #9
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
	// No support for non-aufs storage drivers.
	if !self.usesAufsDriver {
		return nil
	}

	// As of now we assume that all the storage dirs are on the same device.
	// The first storage dir will be that of the image layers.
	deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])
	if err != nil {
		return err
	}

	mi, err := self.machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}
	var limit uint64
	// Docker does not impose any filesystem limits for containers. So use capacity as limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			limit = fs.Capacity
			break
		}
	}

	fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}

	var usage uint64
	for _, dir := range self.storageDirs {
		// TODO(Vishh): Add support for external mounts.
		dirUsage, err := self.fsInfo.GetDirUsage(dir)
		if err != nil {
			return err
		}
		usage += dirUsage
	}
	fsStat.Usage = usage
	stats.Filesystem = append(stats.Filesystem, fsStat)

	return nil
}
Example #10
// cmeFactory generates a complete ContainerMetricElement with fuzzed data.
func cmeFactory() *cache.ContainerMetricElement {
	f := fuzz.New().NilChance(0).NumElements(1, 1)
	containerSpec := cadvisor.ContainerSpec{
		CreationTime:  time.Now(),
		HasCpu:        true,
		HasMemory:     true,
		HasNetwork:    true,
		HasFilesystem: true,
		HasDiskIo:     true,
	}
	var containerStats cadvisor.ContainerStats
	f.Fuzz(&containerStats)
	containerStats.Timestamp = time.Now()

	newFs := cadvisor.FsStats{}
	f.Fuzz(&newFs)
	newFs.Device = "/dev/device1"
	containerStats.Filesystem = []cadvisor.FsStats{newFs}

	return &cache.ContainerMetricElement{
		Spec:  &containerSpec,
		Stats: &containerStats,
	}
}
Example #11
func (self *hyperContainerHandler) GetStats() (*info.ContainerStats, error) {
	stats := info.ContainerStats{
		Timestamp: time.Now(),
		DiskIo: info.DiskIoStats{
			IoServiceBytes: make([]info.PerDiskStats, 0, 1),
			IoServiced:     make([]info.PerDiskStats, 0, 1),
		},
		Network: info.NetworkStats{
			Interfaces: make([]info.InterfaceStats, 0, 1),
		},
	}

	// TODO: per-container stats are not supported yet
	if !self.isPod {
		return self.fakeStats()
	}

	podStats, err := self.client.GetPodStats(self.podID)
	if err != nil {
		return nil, fmt.Errorf("Failed to get hyper pod stats: %v", err)

	}

	stats.Cpu = info.CpuStats{
		Usage: info.CpuUsage{
			Total:  podStats.Cpu.Usage.Total,
			PerCpu: podStats.Cpu.Usage.PerCpu,
			User:   podStats.Cpu.Usage.User,
			System: podStats.Cpu.Usage.System,
		},
	}

	for _, stat := range podStats.Block.IoServiceBytesRecursive {
		stats.DiskIo.IoServiceBytes = append(stats.DiskIo.IoServiceBytes,
			info.PerDiskStats{
				Major: stat.Major,
				Minor: stat.Minor,
				Stats: stat.Stat,
			})
	}

	for _, stat := range podStats.Block.IoServicedRecursive {
		stats.DiskIo.IoServiced = append(stats.DiskIo.IoServiced,
			info.PerDiskStats{
				Major: stat.Major,
				Minor: stat.Minor,
				Stats: stat.Stat,
			})
	}

	stats.Memory = info.MemoryStats{
		Usage: podStats.Memory.Usage,
	}

	for _, stat := range podStats.Network.Interfaces {
		stats.Network.Interfaces = append(stats.Network.Interfaces,
			info.InterfaceStats{
				Name:      stat.Name,
				RxBytes:   stat.RxBytes,
				RxDropped: stat.RxDropped,
				RxErrors:  stat.RxErrors,
				RxPackets: stat.RxPackets,
				TxBytes:   stat.TxBytes,
				TxPackets: stat.TxPackets,
				TxErrors:  stat.TxErrors,
				TxDropped: stat.TxDropped,
			})

		stats.Network.RxBytes += stat.RxBytes
		stats.Network.RxPackets += stat.RxPackets
		stats.Network.RxErrors += stat.RxErrors
		stats.Network.RxDropped += stat.RxDropped
		stats.Network.TxBytes += stat.TxBytes
		stats.Network.TxPackets += stat.TxPackets
		stats.Network.TxErrors += stat.TxErrors
		stats.Network.TxDropped += stat.TxDropped
	}

	// TODO: fs stats and task stats are not supported yet
	stats.Filesystem = []info.FsStats{}
	stats.TaskStats = info.LoadStats{}

	return &stats, nil
}
Example #12
func (self *hyperContainerHandler) fakeStats() (*info.ContainerStats, error) {
	stats := info.ContainerStats{Timestamp: time.Now()}

	stats.Cpu = info.CpuStats{
		Usage: info.CpuUsage{
			Total:  24750780,
			PerCpu: []uint64{18354559, 6396221},
			User:   0,
			System: 10000000,
		},
		LoadAverage: 0,
	}

	stats.DiskIo = info.DiskIoStats{
		IoServiceBytes: []info.PerDiskStats{
			{
				Major: 253,
				Minor: 8,
				Stats: map[string]uint64{"Async": 5353472, "Read": 5353472, "Sync": 0, "Total": 5353472, "Write": 0},
			},
		},
	}

	stats.Memory = info.MemoryStats{
		Usage:      5763072,
		WorkingSet: 1871872,
		ContainerData: info.MemoryStatsMemoryData{
			Pgfault:    3174,
			Pgmajfault: 12,
		},
		HierarchicalData: info.MemoryStatsMemoryData{
			Pgfault:    3174,
			Pgmajfault: 12,
		},
	}

	stats.Network = info.NetworkStats{
		InterfaceStats: info.InterfaceStats{
			Name:      "eth0",
			RxBytes:   123223,
			RxPackets: 128,
			TxBytes:   10240,
			TxPackets: 10,
		},
		Interfaces: []info.InterfaceStats{
			{
				Name:      "eth0",
				RxBytes:   123223,
				RxPackets: 128,
				TxBytes:   10240,
				TxPackets: 10,
			},
		},
	}

	stats.Filesystem = []info.FsStats{}

	stats.TaskStats = info.LoadStats{}

	return &stats, nil
}