func (s *CPUStat) Collect() { // collect CPU stats for All cpus aggregated var cpuinfo C.host_cpu_load_info_data_t count := C.mach_msg_type_number_t(C.HOST_CPU_LOAD_INFO_COUNT) host := C.mach_host_self() ret := C.host_statistics(C.host_t(host), C.HOST_CPU_LOAD_INFO, C.host_info_t(unsafe.Pointer(&cpuinfo)), &count) if ret != C.KERN_SUCCESS { return } s.All.User.Set(uint64(cpuinfo.cpu_ticks[C.CPU_STATE_USER])) s.All.UserLowPrio.Set(uint64(cpuinfo.cpu_ticks[C.CPU_STATE_NICE])) s.All.System.Set(uint64(cpuinfo.cpu_ticks[C.CPU_STATE_SYSTEM])) s.All.Idle.Set(uint64(cpuinfo.cpu_ticks[C.CPU_STATE_IDLE])) s.All.Total.Set(uint64(cpuinfo.cpu_ticks[C.CPU_STATE_USER]) + uint64(cpuinfo.cpu_ticks[C.CPU_STATE_SYSTEM]) + uint64(cpuinfo.cpu_ticks[C.CPU_STATE_NICE]) + uint64(cpuinfo.cpu_ticks[C.CPU_STATE_IDLE])) }
func allCPUTimes() ([]CPUTimesStat, error) { var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT var cpuload C.host_cpu_load_info_data_t status := C.host_statistics(C.host_t(C.mach_host_self()), C.HOST_CPU_LOAD_INFO, C.host_info_t(unsafe.Pointer(&cpuload)), &count) if status != C.KERN_SUCCESS { return nil, fmt.Errorf("host_statistics error=%d", status) } c := CPUTimesStat{ CPU: "cpu-total", User: float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, Nice: float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, Idle: float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, Iowait: -1, Irq: -1, Softirq: -1, Steal: -1, Guest: -1, GuestNice: -1, Stolen: -1, } return []CPUTimesStat{c}, nil }
// stolen from https://github.com/cloudfoundry/gosigar func getCPUTotalTimes() (*CPUTimesStat, error) { var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT var cpuload C.host_cpu_load_info_data_t status := C.host_statistics(C.host_t(C.mach_host_self()), C.HOST_CPU_LOAD_INFO, C.host_info_t(unsafe.Pointer(&cpuload)), &count) if status != C.KERN_SUCCESS { return nil, fmt.Errorf("host_statistics error=%d", status) } cpu := CPUTimesStat{ CPU: "total"} var cpu_ticks = make([]uint32, len(cpuload.cpu_ticks)) for i := range cpuload.cpu_ticks { cpu_ticks[i] = uint32(cpuload.cpu_ticks[i]) } fillCPUStats(cpu_ticks, &cpu) return &cpu, nil }
// NewProcessStat creates a ProcessStat collector registered with the
// given MetricContext and starts a background goroutine that samples
// per-process metrics every Step.
//
// Collection is throttled by process count: with p = len(Processes)/1024,
// a collection runs on every tick while p < 1, otherwise only when
// n%p == 0 (n counts ticks). The p < 1 guard also short-circuits the
// n%p expression, avoiding a divide-by-zero when p == 0.
// NOTE(review): the inline comment says "first two samples" but the code
// only special-cases n == 0 (which triggers both Collect(true) and
// Collect(false) on the first tick) — confirm intended behavior.
// NOTE(review): the ticker goroutine has no stop mechanism; it runs for
// the life of the process.
func NewProcessStat(m *metrics.MetricContext, Step time.Duration) *ProcessStat {
	c := new(ProcessStat)
	c.m = m
	c.Processes = make(map[string]*PerProcessStat, 1024)
	// cache the Mach host port for use by later collections
	c.hport = C.host_t(C.mach_host_self())
	var n int
	ticker := time.NewTicker(Step)
	go func() {
		for _ = range ticker.C {
			p := int(len(c.Processes) / 1024)
			if n == 0 {
				c.Collect(true)
			}
			// always collect all metrics for first two samples
			// and if number of processes < 1024
			if p < 1 || n%p == 0 {
				c.Collect(false)
			}
			n++
		}
	}()
	return c
}
func perCPUTimes() ([]CPUTimesStat, error) { var ( count C.mach_msg_type_number_t cpuload *C.processor_cpu_load_info_data_t ncpu C.natural_t ) status := C.host_processor_info(C.host_t(C.mach_host_self()), C.PROCESSOR_CPU_LOAD_INFO, &ncpu, (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), &count) if status != C.KERN_SUCCESS { return nil, fmt.Errorf("host_processor_info error=%d", status) } // jump through some cgo casting hoops and ensure we properly free // the memory that cpuload points to target := C.vm_map_t(C.mach_task_self_) address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) // the body of struct processor_cpu_load_info // aka processor_cpu_load_info_data_t var cpu_ticks [C.CPU_STATE_MAX]uint32 // copy the cpuload array to a []byte buffer // where we can binary.Read the data size := int(ncpu) * binary.Size(cpu_ticks) buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size)) bbuf := bytes.NewBuffer(buf) var ret []CPUTimesStat for i := 0; i < int(ncpu); i++ { err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) if err != nil { return nil, err } c := CPUTimesStat{ CPU: fmt.Sprintf("cpu%d", i), User: float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, Nice: float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, Idle: float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, } ret = append(ret, c) } return ret, nil }
func (self *CpuList) Get() error { var count C.mach_msg_type_number_t var cpuload *C.processor_cpu_load_info_data_t var ncpu C.natural_t status := C.host_processor_info(C.host_t(C.mach_host_self()), C.PROCESSOR_CPU_LOAD_INFO, &ncpu, (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), &count) if status != C.KERN_SUCCESS { return fmt.Errorf("host_processor_info error=%d", status) } // jump through some cgo casting hoops and ensure we properly free // the memory that cpuload points to target := C.vm_map_t(C.mach_task_self_) address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) // the body of struct processor_cpu_load_info // aka processor_cpu_load_info_data_t var cpu_ticks [C.CPU_STATE_MAX]uint32 // copy the cpuload array to a []byte buffer // where we can binary.Read the data size := int(ncpu) * binary.Size(cpu_ticks) buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size)) bbuf := bytes.NewBuffer(buf) self.List = make([]Cpu, 0, ncpu) for i := 0; i < int(ncpu); i++ { cpu := Cpu{} err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) if err != nil { return err } cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER]) cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM]) cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE]) cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE]) self.List = append(self.List, cpu) } return nil }
func (c *statCollector) Update(ch chan<- prometheus.Metric) error { var ( count C.mach_msg_type_number_t cpuload *C.processor_cpu_load_info_data_t ncpu C.natural_t ) status := C.host_processor_info(C.host_t(C.mach_host_self()), C.PROCESSOR_CPU_LOAD_INFO, &ncpu, (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), &count) if status != C.KERN_SUCCESS { return fmt.Errorf("host_processor_info error=%d", status) } // jump through some cgo casting hoops and ensure we properly free // the memory that cpuload points to target := C.vm_map_t(C.mach_task_self_) address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) // the body of struct processor_cpu_load_info // aka processor_cpu_load_info_data_t var cpu_ticks [C.CPU_STATE_MAX]uint32 // copy the cpuload array to a []byte buffer // where we can binary.Read the data size := int(ncpu) * binary.Size(cpu_ticks) buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size] bbuf := bytes.NewBuffer(buf) for i := 0; i < int(ncpu); i++ { err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) if err != nil { return err } for k, v := range map[string]int{ "user": C.CPU_STATE_USER, "system": C.CPU_STATE_SYSTEM, "nice": C.CPU_STATE_NICE, "idle": C.CPU_STATE_IDLE, } { ch <- prometheus.MustNewConstMetric(c.cpu, prometheus.CounterValue, float64(cpu_ticks[v])/ClocksPerSec, "cpu"+strconv.Itoa(i), k) } } return nil }
// stolen from https://github.com/cloudfoundry/gosigar func getCPUDetailedTimes() ([]CPUTimesStat, error) { var count C.mach_msg_type_number_t var cpuload *C.processor_cpu_load_info_data_t var ncpu C.natural_t status := C.host_processor_info(C.host_t(C.mach_host_self()), C.PROCESSOR_CPU_LOAD_INFO, &ncpu, (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), &count) if status != C.KERN_SUCCESS { return nil, fmt.Errorf("host_processor_info error=%d", status) } // jump through some cgo casting hoops and ensure we properly free // the memory that cpuload points to target := C.vm_map_t(C.mach_task_self_) address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) // the body of struct processor_cpu_load_info // aka processor_cpu_load_info_data_t var cpu_ticks = make([]uint32, C.CPU_STATE_MAX) // copy the cpuload array to a []byte buffer // where we can binary.Read the data size := int(ncpu) * binary.Size(cpu_ticks) buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size)) bbuf := bytes.NewBuffer(buf) var ret []CPUTimesStat = make([]CPUTimesStat, 0, ncpu+1) for i := 0; i < int(ncpu); i++ { err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) if err != nil { return nil, err } cpu := CPUTimesStat{ CPU: strconv.Itoa(i)} fillCPUStats(cpu_ticks, &cpu) ret = append(ret, cpu) } return ret, nil }
func cpuTimeTotal() int { selfHost := C.mach_host_self() hostInfo := C.malloc(C.size_t(C.HOST_CPU_LOAD_INFO_COUNT)) count := C.mach_msg_type_number_t(C.HOST_CPU_LOAD_INFO_COUNT) err := C.host_statistics(C.host_t(selfHost), C.HOST_CPU_LOAD_INFO, C.host_info_t(hostInfo), &count) defer C.free(hostInfo) if err != C.kern_return_t(C.KERN_SUCCESS) { return 0 } return -1 }
func vm_info(vmstat *C.vm_statistics_data_t) error { var count C.mach_msg_type_number_t = C.HOST_VM_INFO_COUNT status := C.host_statistics( C.host_t(C.mach_host_self()), C.HOST_VM_INFO, C.host_info_t(unsafe.Pointer(vmstat)), &count) if status != C.KERN_SUCCESS { return fmt.Errorf("host_statistics=%d", status) } return nil }
func HostStatisticsCpuLoadInfo() (*HostCpuLoadInfoData, error) { var ( count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT cpuload C.host_cpu_load_info_data_t ) status := C.host_statistics( C.host_t(C.mach_host_self()), C.HOST_CPU_LOAD_INFO, C.host_info_t(unsafe.Pointer(&cpuload)), &count) if status != C.KERN_SUCCESS { return nil, fmt.Errorf("host_statistics error=%d", status) } return (*HostCpuLoadInfoData)(&cpuload), nil }
// New registers with metriccontext and starts metric collection // every Step func New(m *metrics.MetricContext, Step time.Duration) *MemStat { s := new(MemStat) s.m = m // initialize all gauges misc.InitializeMetrics(s, m, "memstat", true) host := C.mach_host_self() C.host_page_size(C.host_t(host), &s.Pagesize) // collect metrics every Step ticker := time.NewTicker(Step) go func() { for _ = range ticker.C { s.Collect() } }() return s }
// Collect uses mach interface to populate various memory usage // metrics func (s *MemStat) Collect() { var meminfo C.vm_statistics64_data_t count := C.mach_msg_type_number_t(C.HOST_VM_INFO64_COUNT) host := C.mach_host_self() ret := C.host_statistics64(C.host_t(host), C.HOST_VM_INFO64, C.host_info_t(unsafe.Pointer(&meminfo)), &count) if ret != C.KERN_SUCCESS { return } s.RawFree.Set(float64(meminfo.free_count) * float64(s.Pagesize)) s.Active.Set(float64(meminfo.active_count) * float64(s.Pagesize)) s.Inactive.Set(float64(meminfo.inactive_count) * float64(s.Pagesize)) s.Wired.Set(float64(meminfo.wire_count) * float64(s.Pagesize)) s.Purgeable.Set(float64(meminfo.purgeable_count) * float64(s.Pagesize)) s.RawTotal.Set(float64(C.get_phys_memory())) }
func MemStatMetricsNew(m *metrics.MetricContext, Step time.Duration) *MemStatMetrics { c := new(MemStatMetrics) // initialize all gauges misc.InitializeMetrics(c, m, "memstat") host := C.mach_host_self() C.host_page_size(C.host_t(host), &c.Pagesize) // collect metrics every Step ticker := time.NewTicker(Step) go func() { for _ = range ticker.C { c.Collect() } }() return c }
func (self *Cpu) Get() error { var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT var cpuload C.host_cpu_load_info_data_t status := C.host_statistics(C.host_t(C.mach_host_self()), C.HOST_CPU_LOAD_INFO, C.host_info_t(unsafe.Pointer(&cpuload)), &count) if status != C.KERN_SUCCESS { return fmt.Errorf("host_statistics error=%d", status) } self.User = uint64(cpuload.cpu_ticks[C.CPU_STATE_USER]) self.Sys = uint64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) self.Idle = uint64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) self.Nice = uint64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) return nil }
// Collect populates various cpu performance statistics - use MACH interface func (s *CPUStat) Collect() { // collect CPU stats for All cpus aggregated var cpuinfo C.host_cpu_load_info_data_t var hostinfo C.host_basic_info_data_t cpuloadnumber := C.mach_msg_type_number_t(C.HOST_CPU_LOAD_INFO_COUNT) hostnumber := C.mach_msg_type_number_t(C.HOST_BASIC_INFO_COUNT) host := C.mach_host_self() ret := C.host_statistics(C.host_t(host), C.HOST_CPU_LOAD_INFO, C.host_info_t(unsafe.Pointer(&cpuinfo)), &cpuloadnumber) if ret != C.KERN_SUCCESS { return } ret = C.host_info(C.host_t(host), C.HOST_BASIC_INFO, C.host_info_t(unsafe.Pointer(&hostinfo)), &hostnumber) if ret != C.KERN_SUCCESS { return } s.All.User.Set(uint64(cpuinfo.cpu_ticks[C.CPU_STATE_USER])) s.All.UserLowPrio.Set(uint64(cpuinfo.cpu_ticks[C.CPU_STATE_NICE])) s.All.System.Set(uint64(cpuinfo.cpu_ticks[C.CPU_STATE_SYSTEM])) s.All.Idle.Set(uint64(cpuinfo.cpu_ticks[C.CPU_STATE_IDLE])) s.All.Total.Set(uint64(cpuinfo.cpu_ticks[C.CPU_STATE_USER]) + uint64(cpuinfo.cpu_ticks[C.CPU_STATE_SYSTEM]) + uint64(cpuinfo.cpu_ticks[C.CPU_STATE_NICE]) + uint64(cpuinfo.cpu_ticks[C.CPU_STATE_IDLE])) s.All.UsageCount.Set(s.All.Usage()) s.All.UserSpaceCount.Set(s.All.UserSpace()) s.All.KernelCount.Set(s.All.Kernel()) s.All.TotalCount.Set(float64(hostinfo.logical_cpu_max)) }
// VirtualMemory returns VirtualmemoryStat. func VirtualMemory() (*VirtualMemoryStat, error) { count := C.mach_msg_type_number_t(C.HOST_VM_INFO_COUNT) var vmstat C.vm_statistics_data_t status := C.host_statistics(C.host_t(C.mach_host_self()), C.HOST_VM_INFO, C.host_info_t(unsafe.Pointer(&vmstat)), &count) if status != C.KERN_SUCCESS { return nil, fmt.Errorf("host_statistics error=%d", status) } pageSize := uint64(syscall.Getpagesize()) total, err := getHwMemsize() if err != nil { return nil, err } totalCount := C.natural_t(total / pageSize) availableCount := vmstat.inactive_count + vmstat.free_count usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount) usedCount := totalCount - availableCount return &VirtualMemoryStat{ Total: total, Available: pageSize * uint64(availableCount), Used: pageSize * uint64(usedCount), UsedPercent: usedPercent, Free: pageSize * uint64(vmstat.free_count), Active: pageSize * uint64(vmstat.active_count), Inactive: pageSize * uint64(vmstat.inactive_count), Wired: pageSize * uint64(vmstat.wire_count), }, nil }
func HostSelf() Host { host := C.mach_host_self() return Host(host) }