// cpuPercents samples CPU usage and returns the average utilisation across all CPUs.
func cpuPercents(percpu bool) float64 {
	numcpu := runtime.NumCPU()
	if runtime.GOOS != "windows" {
		v, err := cpu.CPUPercent(time.Millisecond, percpu)
		if err != nil {
			fmt.Printf("error %v\n", err)
		}
		if (percpu && len(v) != numcpu) || (!percpu && len(v) != 1) {
			fmt.Printf("wrong number of entries from CPUPercent: %v\n", v)
		}
	}
	duration := time.Duration(10) * time.Microsecond
	v, err := cpu.CPUPercent(duration, percpu)
	if err != nil {
		fmt.Printf("error %v\n", err)
	}
	res := 0.0
	for _, percent := range v {
		if percent < 0.0 || percent > 100.0*float64(numcpu) {
			fmt.Printf("CPUPercent value is invalid: %f\n", percent)
		}
		res += percent
	}
	fmt.Println(res)
	return res / float64(numcpu)
}
func initCPUInfo(monitorData *monitoringData) error {
	info, err := cpu.CPUInfo()
	if info != nil {
		monitorData.CPUInfo = info
		monitorData.CPUModelName = monitorData.CPUInfo[0].ModelName
		t, err := cpu.CPUTimes(true)
		if t != nil {
			monitorData.CPUTime = t
			globalTime, err := cpu.CPUTimes(false)
			if globalTime != nil {
				monitorData.GlobalCPUTime = globalTime
				// Sample per-CPU and global usage concurrently so the two
				// UpdatePeriod-long measurements overlap instead of running back to back.
				var percentWaitGroup sync.WaitGroup
				percentWaitGroup.Add(2)
				go func(monitorData *monitoringData, wg *sync.WaitGroup) {
					defer wg.Done()
					cpuPercent, _ := cpu.CPUPercent(monitorData.UpdatePeriod, true)
					monitorData.CPUPercent = cpuPercent
				}(monitorData, &percentWaitGroup)
				go func(monitorData *monitoringData, wg *sync.WaitGroup) {
					defer wg.Done()
					globalCpuPercent, _ := cpu.CPUPercent(monitorData.UpdatePeriod, false)
					monitorData.GlobalCPUPercent = globalCpuPercent[0]
				}(monitorData, &percentWaitGroup)
				percentWaitGroup.Wait()
				cpuCounts, err := cpu.CPUCounts(false) // physical CPU count
				monitorData.CPUCounts = cpuCounts
				logicalCpuCounts, err := cpu.CPUCounts(true) // logical CPU count
				monitorData.LogicalCPUCounts = logicalCpuCounts
				return err
			}
			return err
		}
		return err
	}
	return err
}
func (monitor Monitor) MonitorCPU() float32 {
	percent, err := cpu.CPUPercent(500*time.Millisecond, false)
	if err != nil {
		panic(err)
	}
	return float32(percent[0])
}
func (node mgmNode) collectHostStatistics(out chan mgm.HostStat) {
	for {
		// take a first network reading so sent/received can be calculated as a delta
		fInet, err := psnet.NetIOCounters(false)
		if err != nil {
			node.logger.Error("Error reading networking", err)
		}

		s := mgm.HostStat{}
		s.Running = true

		c, err := pscpu.CPUPercent(time.Second, true)
		if err != nil {
			node.logger.Error("Error reading CPU: ", err)
		}
		s.CPUPercent = c

		v, err := psmem.VirtualMemory()
		if err != nil {
			node.logger.Error("Error reading Memory", err)
		}
		s.MEMTotal = v.Total / 1000
		s.MEMUsed = (v.Total - v.Available) / 1000
		s.MEMPercent = v.UsedPercent

		lInet, err := psnet.NetIOCounters(false)
		if err != nil {
			node.logger.Error("Error reading networking", err)
		}
		s.NetSent = lInet[0].BytesSent - fInet[0].BytesSent
		s.NetRecv = lInet[0].BytesRecv - fInet[0].BytesRecv

		out <- s
	}
}
func main() {
	cloud, err := cloud.NewDbusForComDevicehiveCloud()
	if err != nil {
		log.Panic(err)
	}

	h, _ := os.Hostname()
	for {
		time.Sleep(time.Second)

		c, err := cpu.CPUPercent(time.Second, false)
		if err != nil {
			log.Panic(err)
		}
		v, err := mem.VirtualMemory()
		if err != nil {
			log.Panic(err)
		}

		if len(c) > 0 {
			cloud.SendNotification("stats", map[string]interface{}{
				"cpu-usage":    c[0],
				"memory-total": v.Total,
				"memory-free":  v.Free,
				"name":         h,
			}, 1)
		}
	}
}
func CPUPercent() float64 {
	percent, err := cpu.CPUPercent(time.Second*2, false)
	if err == nil && len(percent) > 0 {
		return math.Floor(percent[0])
	}
	return 0
}
// GetNodeResource reads this node's resource usage and writes it out as JSON.
func GetNodeResource(w http.ResponseWriter, r *http.Request) {
	// get this node's memory
	memory, _ := mem.VirtualMemory()
	// get this node's CPU percent usage
	cpu_percent, _ := cpu.CPUPercent(time.Duration(1)*time.Second, false)

	// disk mount points
	disk_partitions, _ := disk.DiskPartitions(true)

	// disk usage, queried by mount point path
	var disk_usages []*disk.DiskUsageStat
	for _, disk_partition := range disk_partitions {
		if disk_partition.Mountpoint == "/" || disk_partition.Mountpoint == "/home" {
			disk_stat, _ := disk.DiskUsage(disk_partition.Mountpoint)
			disk_usages = append(disk_usages, disk_stat)
		}
	}

	// network
	network, _ := net.NetIOCounters(false)

	// create a new node object with the resource usage information
	node_metric := thoth.NodeMetric{
		Cpu:       cpu_percent,
		Memory:    memory,
		DiskUsage: disk_usages,
		Network:   network,
	}

	node_json, err := json.MarshalIndent(node_metric, "", "\t")
	if err != nil {
		fmt.Println("error:", err)
	}
	fmt.Fprint(w, string(node_json))
}
func getCpuUsage() (float64, error) {
	res, err := cpu.CPUPercent(0, false)
	if err != nil {
		return 0.0, err
	}
	return res[0], nil
}
func main() {
	cpuPercent, err := cpu.CPUPercent(time.Second*20, true)
	if err != nil {
		log.Panicln(err)
	}
	for _, p := range cpuPercent {
		log.Println(p)
	}
}
func main() {
	// v, _ := mem.VirtualMemory()
	cpu_percent, _ := cpu.CPUPercent(time.Duration(1)*time.Second, false)

	// almost every return value is a struct
	// fmt.Printf("Total: %v, Free:%v, UsedPercent:%f%%\n", v.Total, v.Free, v.UsedPercent)
	fmt.Println(cpu_percent)

	// convert to JSON. String() is also implemented
	// fmt.Println(v)
}
func (e *Sysmon) MonCPU() {
	for {
		// TODO: shirou/gopsutil relies on WMI queries on Windows and does not
		// support per-core CPU load there ~ BUMMER
		cpuPerc, err := cpu.CPUPercent(1, false)
		if err == nil {
			e.CpuChannel <- cpuPerc
		}
		time.Sleep(time.Second)
	}
}
func logPoll(n time.Duration, done chan bool) {
	flag := os.O_CREATE | os.O_WRONLY | os.O_APPEND
	fileLog, err := os.OpenFile(FileLog, flag, 0666)
	if err != nil {
		FatalLog.Fatalln("Failed to open log file:", err)
	}
	fileStats, err := os.OpenFile(FileStats, flag, 0666)
	if err != nil {
		FatalLog.Fatalln("Failed to open stats file:", err)
	}
	outLog := io.MultiWriter(fileLog, os.Stdout)

	StatCollect := &statCollector{w: fileStats}
	InfoCounter := NewLogCounter(outLog)
	WarnCounter := NewLogCounter(outLog)
	ErrCounter := NewLogCounter(outLog)
	CritCounter := NewLogCounter(outLog)
	FatalCounter := NewLogCounter(outLog)
	makeLogHandlers(StatCollect, InfoCounter, WarnCounter, ErrCounter, CritCounter, FatalCounter)
	done <- true

	if !DevEnv {
		InfoLog.Println("Now logging stats with", PollCapacity, "records before flushing to disk, every", PollTime, "| Logging every", IntervalPoll)
	}

	var memStats runtime.MemStats
	var stat [PollCapacity]stats
	var pollCount int
	var cpuPercent Percent

	for range time.Tick(n) {
		s := &stat[pollCount]
		runtime.ReadMemStats(&memStats)
		vMem, _ := mem.VirtualMemory()
		if c, _ := cpu.CPUPercent(n, false); len(c) != 0 {
			cpuPercent = Percent(c[0] * 100)
		}
		s.CPUUsed = cpuPercent
		s.MemUsed = memStats.Alloc / KILOBYTE
		s.Memfree = 100 - Percent(vMem.UsedPercent)

		StatsLog.Println("Mem free:", s.Memfree, "% | Mem used:", s.MemUsed, "KB | CPU:", s.CPUUsed, "%")

		if pollCount == PollCapacity-1 {
			// fmt.Println(StatCollect.b.String())
			StatCollect.Flush()
			pollCount = -1
		}
		pollCount++
	}
}
func loop(input <-chan common.Command) {
	for c := range input {
		switch c.Cmd {
		case "get-usage-self":
			usage, err := cpu.CPUPercent(time.Duration(1)*time.Second, false)
			if err != nil {
				out <- Event{
					Type: Error,
					Data: fmt.Sprintf("unable to get CPU usage: %s", err.Error()),
				}
			} else {
				out <- Event{
					Type: SelfUsageReport,
					Data: usage[0],
				}
			}
		case "get-usage":
			usage, err := cpu.CPUPercent(time.Duration(1)*time.Second, false)
			if err != nil {
				out <- Event{
					Type: Error,
					Data: map[string]string{
						"peer":  c.Args["peer"],
						"error": fmt.Sprintf("unable to get CPU usage: %s", err.Error()),
					},
				}
			} else {
				out <- Event{
					Type: UsageReport,
					Data: map[string]string{
						"peer":  c.Args["peer"],
						"usage": fmt.Sprintf("%f", usage[0]),
					},
				}
			}
		default:
		}
	}
}
// handleSystem reports system status information.
func handleSystem(resp http.ResponseWriter, req *http.Request) {
	mem, _ := mem.VirtualMemory()
	cpuNum, _ := cpu.CPUCounts(true)
	cpuInfo, _ := cpu.CPUPercent(10*time.Microsecond, true)

	data := make(map[string]interface{})
	data["im.conn"] = len(ClientMaps)
	data["mem.total"] = fmt.Sprintf("%vMB", mem.Total/1024/1024)
	data["mem.free"] = fmt.Sprintf("%vMB", mem.Free/1024/1024)
	data["mem.used_percent"] = fmt.Sprintf("%s%%", strconv.FormatFloat(mem.UsedPercent, 'f', 2, 64))
	data["cpu.num"] = cpuNum
	data["cpu.info"] = cpuInfo

	resp.Write(common.NewIMResponseData(data, "").Encode())
}
func (s *Server) getLoad() *peerLoad {
	v, _ := mem.VirtualMemory()
	musage := v.UsedPercent

	c, _ := cpu.CPUPercent(time.Second, false)
	cusage := c[0] * 100

	s.RLock()
	clientNum := s.ClientNum
	s.RUnlock()

	return &peerLoad{
		ClientNum: clientNum,
		CpuUsage:  cusage,
		MemUsage:  musage,
	}
}
func stats(w http.ResponseWriter, r *http.Request) {
	memory, _ := mem.VirtualMemory()
	c, _ := cpu.CPUPercent(time.Second, true)
	cpuStats := CPUStats{UsedPercentPerCore: c}

	stats := SystemStats{
		MemoryStats: memory,
		CPUStats:    cpuStats,
	}

	js, err := json.Marshal(stats)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(js)
}
func GetCPUStat() MultipleStat {
	perCpu := true
	usagePercentagePerCore, _ := cpu.CPUPercent(400*time.Millisecond, perCpu)
	return BuildCPUMultipleStat(usagePercentagePerCore)
}
func init() {
	totime := func(msec int64) time.Duration {
		return time.Millisecond * time.Duration(msec)
	}
	vm.Set("press", func(call otto.FunctionCall) otto.Value {
		x, _ := call.Argument(0).ToInteger()
		y, _ := call.Argument(1).ToInteger()
		airinput.Press(int(x), int(y))
		return otto.UndefinedValue()
	})
	vm.Set("move", func(call otto.FunctionCall) otto.Value {
		x, _ := call.Argument(0).ToInteger()
		y, _ := call.Argument(1).ToInteger()
		airinput.Move(int(x), int(y))
		return otto.UndefinedValue()
	})
	vm.Set("release", func(call otto.FunctionCall) otto.Value {
		airinput.Release()
		return otto.UndefinedValue()
	})
	vm.Set("tap", func(call otto.FunctionCall) otto.Value {
		x, _ := call.Argument(0).ToInteger()
		y, _ := call.Argument(1).ToInteger()
		msec, _ := call.Argument(2).ToInteger()
		airinput.Tap(int(x), int(y), totime(msec))
		return otto.UndefinedValue()
	})
	vm.Set("drag", func(call otto.FunctionCall) otto.Value {
		x0, _ := call.Argument(0).ToInteger()
		y0, _ := call.Argument(1).ToInteger()
		x1, _ := call.Argument(2).ToInteger()
		y1, _ := call.Argument(3).ToInteger()
		steps, _ := call.Argument(4).ToInteger()
		msec, _ := call.Argument(5).ToInteger()
		airinput.Drag(int(x0), int(y0), int(x1), int(y1), int(steps), totime(msec))
		return otto.UndefinedValue()
	})
	vm.Set("sleep", func(call otto.FunctionCall) otto.Value {
		x0, _ := call.Argument(0).ToInteger()
		time.Sleep(time.Millisecond * time.Duration(x0))
		return otto.UndefinedValue()
	})
	vm.Set("pinch", func(call otto.FunctionCall) otto.Value {
		ax0, _ := call.Argument(0).ToInteger()
		ay0, _ := call.Argument(1).ToInteger()
		ax1, _ := call.Argument(2).ToInteger()
		ay1, _ := call.Argument(3).ToInteger()
		bx0, _ := call.Argument(4).ToInteger()
		by0, _ := call.Argument(5).ToInteger()
		bx1, _ := call.Argument(6).ToInteger()
		by1, _ := call.Argument(7).ToInteger()
		steps, _ := call.Argument(8).ToInteger()
		msec, _ := call.Argument(9).ToInteger()
		airinput.Pinch(
			int(ax0), int(ay0), int(ax1), int(ay1),
			int(bx0), int(by0), int(bx1), int(by1),
			int(steps), totime(msec))
		return otto.UndefinedValue()
	})
	vm.Set("exec", func(call otto.FunctionCall) otto.Value {
		if len(call.ArgumentList) == 0 {
			return otto.UndefinedValue()
		}
		params := []string{}
		for _, p := range call.ArgumentList[1:] {
			params = append(params, p.String())
		}
		cmd := exec.Command(call.Argument(0).String(), params...)
		data, err := cmd.CombinedOutput()
		if err != nil {
			log.Println("jsrun error", err)
		}
		result, _ := otto.ToValue(string(data))
		return result
	})
	vm.Set("cpuPercent", func(call otto.FunctionCall) otto.Value {
		msec, _ := call.Argument(0).ToInteger()
		percpu, _ := call.Argument(1).ToBoolean()
		cpup, _ := cpu.CPUPercent(time.Duration(msec)*time.Millisecond, percpu)
		result, _ := vm.ToValue(cpup)
		return result
	})
}
func (st *Stat) stat(t string) string {
	checkErr := func(err error) string {
		return "系统酱正在食用作死药丸中..."
	}
	switch t {
	case "free":
		m, err := mem.VirtualMemory()
		checkErr(err)
		s, err := mem.SwapMemory()
		checkErr(err)
		mem := new(runtime.MemStats)
		runtime.ReadMemStats(mem)
		return fmt.Sprintf(
			"全局:\n"+
				"Total: %s Free: %s\nUsed: %s %s%%\nCache: %s\n"+
				"Swap:\nTotal: %s Free: %s\n Used: %s %s%%\n"+
				"群组娘:\n"+
				"Allocated: %s\nTotal Allocated: %s\nSystem: %s\n",
			helper.HumanByte(m.Total, m.Free, m.Used, m.UsedPercent, m.Cached,
				s.Total, s.Free, s.Used, s.UsedPercent,
				mem.Alloc, mem.TotalAlloc, mem.Sys)...,
		)
	case "df":
		fs, err := disk.DiskPartitions(false)
		checkErr(err)
		var buf bytes.Buffer
		for k := range fs {
			du, err := disk.DiskUsage(fs[k].Mountpoint)
			switch {
			case err != nil, du.UsedPercent == 0, du.Free == 0:
				continue
			}
			f := fmt.Sprintf("Mountpoint: %s Type: %s \n"+
				"Total: %s Free: %s \nUsed: %s %s%%\n",
				helper.HumanByte(fs[k].Mountpoint, fs[k].Fstype, du.Total, du.Free, du.Used, du.UsedPercent)...,
			)
			buf.WriteString(f)
		}
		return buf.String()
	case "os":
		h, err := host.HostInfo()
		checkErr(err)
		uptime := time.Duration(time.Now().Unix()-int64(h.Uptime)) * time.Second
		l, err := load.LoadAvg()
		checkErr(err)
		c, err := cpu.CPUPercent(time.Second*3, false)
		checkErr(err)
		return fmt.Sprintf(
			"OSRelease: %s\nHostName: %s\nUptime: %s\nLoadAvg: %.2f %.2f %.2f\n"+
				"Goroutine: %d\nCPU: %.2f%%",
			h.Platform, h.Hostname, uptime.String(),
			l.Load1, l.Load5, l.Load15,
			runtime.NumGoroutine(), c[0],
		)
	case "redis":
		info := conf.Redis.Info().Val()
		if info != "" {
			infos := strings.Split(info, "\r\n")
			infoMap := make(map[string]string)
			for k := range infos {
				line := strings.Split(infos[k], ":")
				if len(line) > 1 {
					infoMap[line[0]] = line[1]
				}
			}
			DBSize := conf.Redis.DbSize().Val()
			return fmt.Sprintf("Redis Version: %s\nOS: %s\nUsed Memory: %s\n"+
				"Used Memory Peak: %s\nDB Size: %d\n",
				infoMap["redis_version"], infoMap["os"], infoMap["used_memory_human"],
				infoMap["used_memory_peak_human"], DBSize)
		}
		return ""
	default:
		return "欢迎来到未知领域(ゝ∀・)"
	}
}