Example #1
func perCPUTimes() ([]CPUTimesStat, error) {
	var (
		count   C.mach_msg_type_number_t
		cpuload *C.processor_cpu_load_info_data_t
		ncpu    C.natural_t
	)

	status := C.host_processor_info(C.host_t(C.mach_host_self()),
		C.PROCESSOR_CPU_LOAD_INFO,
		&ncpu,
		(*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
		&count)

	if status != C.KERN_SUCCESS {
		return nil, fmt.Errorf("host_processor_info error=%d", status)
	}

	// jump through some cgo casting hoops and ensure we properly free
	// the memory that cpuload points to
	target := C.vm_map_t(C.mach_task_self_)
	address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
	defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))

	// the body of struct processor_cpu_load_info
	// aka processor_cpu_load_info_data_t
	var cpu_ticks [C.CPU_STATE_MAX]uint32

	// copy the cpuload array to a []byte buffer
	// where we can binary.Read the data
	size := int(ncpu) * binary.Size(cpu_ticks)
	buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size))

	bbuf := bytes.NewBuffer(buf)

	var ret []CPUTimesStat

	for i := 0; i < int(ncpu); i++ {
		err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
		if err != nil {
			return nil, err
		}

		c := CPUTimesStat{
			CPU:    fmt.Sprintf("cpu%d", i),
			User:   float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec,
			System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec,
			Nice:   float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec,
			Idle:   float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec,
		}

		ret = append(ret, c)
	}

	return ret, nil
}
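This snippet (and the three that follow) assumes a cgo preamble, imports, and a couple of package-level definitions that are not shown. Below is a minimal sketch of the surrounding file it might live in; the package name, CPUTimesStat, ClocksPerSec, and printPerCPUTimes are illustrative assumptions, not the original project's definitions.

//go:build darwin && cgo

package cpustats // hypothetical package name

/*
#include <mach/mach_init.h>
#include <mach/mach_host.h>
#include <mach/processor_info.h>
#include <mach/vm_map.h>
*/
import "C"

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"unsafe"
)

// Illustrative stand-ins for definitions the snippet assumes; the
// original project very likely declares these differently.
type CPUTimesStat struct {
	CPU    string
	User   float64
	System float64
	Nice   float64
	Idle   float64
}

// ClocksPerSec converts raw scheduler ticks to seconds; 100 matches
// CLK_TCK on darwin, but treat the value as an assumption.
var ClocksPerSec = float64(100)

// Example caller, assuming the perCPUTimes function from example #1
// is pasted into the same file.
func printPerCPUTimes() error {
	stats, err := perCPUTimes()
	if err != nil {
		return err
	}
	for _, s := range stats {
		fmt.Printf("%s user=%.2fs system=%.2fs nice=%.2fs idle=%.2fs\n",
			s.CPU, s.User, s.System, s.Nice, s.Idle)
	}
	return nil
}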
Example #2
func (self *CpuList) Get() error {
	var count C.mach_msg_type_number_t
	var cpuload *C.processor_cpu_load_info_data_t
	var ncpu C.natural_t

	status := C.host_processor_info(C.host_t(C.mach_host_self()),
		C.PROCESSOR_CPU_LOAD_INFO,
		&ncpu,
		(*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
		&count)

	if status != C.KERN_SUCCESS {
		return fmt.Errorf("host_processor_info error=%d", status)
	}

	// jump through some cgo casting hoops and ensure we properly free
	// the memory that cpuload points to
	target := C.vm_map_t(C.mach_task_self_)
	address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
	defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))

	// the body of struct processor_cpu_load_info
	// aka processor_cpu_load_info_data_t
	var cpu_ticks [C.CPU_STATE_MAX]uint32

	// copy the cpuload array to a []byte buffer
	// where we can binary.Read the data
	size := int(ncpu) * binary.Size(cpu_ticks)
	buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size))

	bbuf := bytes.NewBuffer(buf)

	self.List = make([]Cpu, 0, ncpu)

	for i := 0; i < int(ncpu); i++ {
		cpu := Cpu{}

		err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
		if err != nil {
			return err
		}

		cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER])
		cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM])
		cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE])
		cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE])

		self.List = append(self.List, cpu)
	}

	return nil
}
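Example #2 stores raw cumulative tick counters rather than converting to seconds. Its receiver types are not shown; here are minimal illustrative stand-ins (the real project likely defines more fields), plus a sketch of how a caller might turn two samples into a busy percentage. The type definitions and the busyPercent helper are assumptions, not code from the original project.

// Illustrative stand-ins for the types Get hangs off; not the
// project's actual definitions.
type Cpu struct {
	User, Nice, Sys, Idle uint64
}

type CpuList struct {
	List []Cpu
}

// Sketch: the counters are cumulative, so utilization comes from the
// delta between two samples of the same CPU entry.
func busyPercent(prev, cur Cpu) float64 {
	busy := float64((cur.User - prev.User) + (cur.Sys - prev.Sys) + (cur.Nice - prev.Nice))
	total := busy + float64(cur.Idle-prev.Idle)
	if total == 0 {
		return 0
	}
	return 100 * busy / total
}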
Example #3
// stolen from https://github.com/cloudfoundry/gosigar
func getCPUDetailedTimes() ([]CPUTimesStat, error) {
	var count C.mach_msg_type_number_t
	var cpuload *C.processor_cpu_load_info_data_t
	var ncpu C.natural_t

	status := C.host_processor_info(C.host_t(C.mach_host_self()),
		C.PROCESSOR_CPU_LOAD_INFO,
		&ncpu,
		(*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
		&count)

	if status != C.KERN_SUCCESS {
		return nil, fmt.Errorf("host_processor_info error=%d", status)
	}

	// jump through some cgo casting hoops and ensure we properly free
	// the memory that cpuload points to
	target := C.vm_map_t(C.mach_task_self_)
	address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
	defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))

	// the body of struct processor_cpu_load_info
	// aka processor_cpu_load_info_data_t
	var cpu_ticks = make([]uint32, C.CPU_STATE_MAX)

	// copy the cpuload array to a []byte buffer
	// where we can binary.Read the data
	size := int(ncpu) * binary.Size(cpu_ticks)
	buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size))

	bbuf := bytes.NewBuffer(buf)

	ret := make([]CPUTimesStat, 0, ncpu+1)

	for i := 0; i < int(ncpu); i++ {
		err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
		if err != nil {
			return nil, err
		}

		cpu := CPUTimesStat{CPU: strconv.Itoa(i)}

		fillCPUStats(cpu_ticks, &cpu)

		ret = append(ret, cpu)
	}

	return ret, nil
}
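The fillCPUStats helper referenced above is not part of this example. A hypothetical sketch, mirroring the field mapping visible in examples #1 and #4 (the fields written and the ClocksPerSec divisor are assumptions):

// Hypothetical fillCPUStats; the real helper is not shown in this
// example, so everything here is an assumption.
func fillCPUStats(ticks []uint32, c *CPUTimesStat) {
	c.User = float64(ticks[C.CPU_STATE_USER]) / ClocksPerSec
	c.System = float64(ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec
	c.Nice = float64(ticks[C.CPU_STATE_NICE]) / ClocksPerSec
	c.Idle = float64(ticks[C.CPU_STATE_IDLE]) / ClocksPerSec
}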
Example #4
func (c *statCollector) Update(ch chan<- prometheus.Metric) error {
	var (
		count   C.mach_msg_type_number_t
		cpuload *C.processor_cpu_load_info_data_t
		ncpu    C.natural_t
	)

	status := C.host_processor_info(C.host_t(C.mach_host_self()),
		C.PROCESSOR_CPU_LOAD_INFO,
		&ncpu,
		(*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
		&count)

	if status != C.KERN_SUCCESS {
		return fmt.Errorf("host_processor_info error=%d", status)
	}

	// jump through some cgo casting hoops and ensure we properly free
	// the memory that cpuload points to
	target := C.vm_map_t(C.mach_task_self_)
	address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
	defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))

	// the body of struct processor_cpu_load_info
	// aka processor_cpu_load_info_data_t
	var cpu_ticks [C.CPU_STATE_MAX]uint32

	// copy the cpuload array to a []byte buffer
	// where we can binary.Read the data
	size := int(ncpu) * binary.Size(cpu_ticks)
	buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size]

	bbuf := bytes.NewBuffer(buf)

	for i := 0; i < int(ncpu); i++ {
		err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
		if err != nil {
			return err
		}
		for k, v := range map[string]int{
			"user":   C.CPU_STATE_USER,
			"system": C.CPU_STATE_SYSTEM,
			"nice":   C.CPU_STATE_NICE,
			"idle":   C.CPU_STATE_IDLE,
		} {
			ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, float64(cpu_ticks[v])/ClocksPerSec, "cpu"+strconv.Itoa(i), k)
		}
	}
	return nil
}
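Unlike the earlier examples, example #4 skips the C.GoBytes copy and aliases the Mach-allocated buffer in place via a large fixed-size-array cast; the resulting slice is only valid until the deferred vm_deallocate runs. On Go 1.17 and later the same zero-copy view can be written more directly with unsafe.Slice. A sketch, not code from the original project:

// Sketch (Go 1.17+): zero-copy view over the cpuload buffer using
// unsafe.Slice instead of the (*[1 << 30]byte) cast. The same lifetime
// caveat applies: the slice must not be used after vm_deallocate.
buf := unsafe.Slice((*byte)(unsafe.Pointer(cpuload)), size)
bbuf := bytes.NewBuffer(buf)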