func main() {
    defer profile.Start(&profile.Config{CPUProfile: true, ProfilePath: "/tmp/"}).Stop()

    // nCPU := runtime.NumCPU()
    nCPU := 2
    runtime.GOMAXPROCS(nCPU)
    log.Printf("running on %d CPUs", nCPU)

    server := network.NewServer("8081")
    server.Start()

    world = NewWorld(entityManager, server)
    go world.worldTick()
    go world.networkTick()

    // wait for signal
    signalChan := make(chan os.Signal, 1)
    cleanupDone := make(chan bool)
    signal.Notify(signalChan, os.Interrupt)
    go func() {
        for range signalChan {
            log.Printf("Received an interrupt, stopping services...\n")
            server.Stop()
            cleanupDone <- true
        }
    }()
    <-cleanupDone
}
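// A note on the snippet above, assuming the Config-based davecheney/profile
// package: with NoShutdownHook left unset, the library installs its own
// SIGINT handler that stops the profile and exits, which can race with the
// custom signal.Notify handler in main. A minimal sketch of the assumed fix:
//
//     defer profile.Start(&profile.Config{
//         CPUProfile:     true,
//         ProfilePath:    "/tmp/",
//         NoShutdownHook: true, // keep SIGINT handling in main
//     }).Stop()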
func main() {
    password := flag.String("password", "", "Password for all redis instances")
    db := flag.Int("db", 0, "DB number")
    socket := flag.String("socket", "/tmp/redis-monitor.sock", "Socket to provide metrics over")
    flag.Parse()

    if len(flag.Args()) < 1 {
        flag.Usage()
        os.Exit(1)
    }

    s := &http.Server{
        Handler: &Addresses{
            Addr:     flag.Args(),
            Password: *password,
            DB:       int64(*db),
        },
    }

    l, err := util.CreateSocket(*socket)
    if err != nil {
        panic(err)
    }

    go func() {
        log.Println(http.ListenAndServe("localhost:6060", nil))
    }()

    defer profile.Start(profile.MemProfile).Stop()

    if err := s.Serve(l); err != nil {
        panic(err)
    }
}
func start(ctx *cli.Context) {
    if ctx.Bool("profile") {
        pcfg := profile.Config{
            CPUProfile:   true,
            MemProfile:   true,
            BlockProfile: true,
            ProfilePath:  ".",
        }
        p := profile.Start(&pcfg)
        defer p.Stop()
    }

    initLogrus(ctx)
    log.Info("Starting fullerite...")

    c, err := config.ReadConfig(ctx.String("config"))
    if err != nil {
        return
    }

    collectors := startCollectors(c)
    handlers := startHandlers(c)

    metrics := make(chan metric.Metric)
    readFromCollectors(collectors, metrics)

    for m := range metrics {
        // Write to the handlers' channels; each handler consumes
        // metrics asynchronously in its Run function.
        writeToHandlers(handlers, m)
    }
}
func main() {
    cfg := profile.Config{
        MemProfile:     true,
        CPUProfile:     true,
        BlockProfile:   true,
        ProfilePath:    ".",
        NoShutdownHook: true, // do not hook SIGINT
    }
    defer profile.Start(&cfg).Stop()

    c := make(chan int)

    // Optionally serve net/http/pprof alongside the file-based profiles:
    // go func() { http.ListenAndServe("0.0.0.0:6060", nil) }()

    // Fan out 1000 goroutines, each running test() once.
    for i := 0; i < 1000; i++ {
        go func(i int) {
            test()
            c <- i
        }(i)
    }

    // Wait for all 1000 goroutines to finish.
    for i := 0; i < 1000; i++ {
        <-c
    }
}
func main() {
    defer profile.Start(profile.CPUProfile).Stop()
    flag.Parse()

    dm := docmap.NewDocMap()
    err := dm.DocReader()
    if err != nil {
        println(err)
        return
    }

    qm := querrymap.NewQuerryMap()
    qm.QuerryReader()

    im := invertmap.NewInvertMap()
    err = im.DocMToInM(dm)
    if err != nil {
        println(err)
    }

    if *startQuerry {
        qproc.QuerriesProc(dm, qm, im)
    } else {
        println("No webservice")
    }
}
func main() {
    // Set up a done channel shared by the whole pipeline.
    // Closing this channel would kill all pipeline goroutines.
    //done := make(chan struct{})
    //defer close(done)

    // Set up logging
    initializeLogging()
    // Flush the log before we shut down
    defer log.Flush()

    // Parse the command line flags
    config := parseCommandLineFlags()
    gamq.SetConfig(&config)

    if config.ProfilingEnabled {
        defer profile.Start(profile.CPUProfile).Stop()
    }

    log.Infof("Broker started on port: %d", gamq.Configuration.Port)
    log.Infof("Executing on: %d threads", runtime.GOMAXPROCS(-1))

    connectionManager := gamq.NewConnectionManager()
    connectionManager.Start()
}
func TestPerft(t *testing.T) {
    t.SkipNow() // skipped by default; remove this line to run the perft profile

    cfg := profile.Config{
        CPUProfile:     false,
        MemProfile:     false,
        ProfilePath:    ".",  // store profiles in current directory
        NoShutdownHook: true, // do not hook SIGINT
    }
    p := profile.Start(&cfg)

    loadPerft(t)
    for _, depth := range depths {
        setupBoard()
        res := perft(depth, t, g)
        expected := sharperPerftLookup(depth, t)
        if res == expected {
            t.Log("ok Perft("+strconv.Itoa(depth)+") Result:", res, " Expected:", expected)
        } else {
            t.Log("X Perft("+strconv.Itoa(depth)+") Result:", res, " Expected:", expected)
            t.Fail()
        }
    }
    p.Stop()
}
func start(ctx *cli.Context) {
    if ctx.Bool("profile") {
        pcfg := profile.Config{
            CPUProfile:   true,
            MemProfile:   true,
            BlockProfile: true,
            ProfilePath:  ".",
        }
        p := profile.Start(&pcfg)
        defer p.Stop()
    }

    initLogrus(ctx)
    log.Info("Starting fullerite...")

    c, err := config.ReadConfig(ctx.String("config"))
    if err != nil {
        return
    }

    collectors := startCollectors(c)
    handlers := startHandlers(c)

    internalServer := internalserver.New(c, &handlers)
    go internalServer.Run()

    metrics := make(chan metric.Metric)
    readFromCollectors(collectors, metrics)

    hook := NewLogErrorHook(metrics)
    log.Logger.Hooks.Add(hook)

    relayMetricsToHandlers(handlers, metrics)
}
func main() { port := flag.Int("port", 8080, "port") backends := flag.Int("workers", 3, "number of workers") strategy := flag.String("strategy", "majority", "balancing strategy ['one', 'two', 'majority', 'all']") flag.Parse() cfg := profile.Config{ CPUProfile: true, MemProfile: true, ProfilePath: ".", } p := profile.Start(&cfg) defer p.Stop() balancer := newBalancer(backends, strategy) a := gin.Default() a.GET("/", func(c *gin.Context) { timeouted := make(chan bool) result := processFirstResponse(timeouted, balancer) select { case data := <-result: c.JSON(200, data) case <-time.After(globalTimeout): c.JSON(500, nil) timeouted <- true } }) a.Run(fmt.Sprintf(":%d", *port)) }
func main() {
    // # # # # # ENABLING PROFILING # # # # #
    defer profile.Start(profile.CPUProfile).Stop()
    /*
        profileConfig := profile.Config{
            CPUProfile:     true,
            MemProfile:     true,
            ProfilePath:    ".",  // store profiles in current directory
            NoShutdownHook: true, // do not hook SIGINT
        }
        p := profile.Start(&profileConfig)
        defer p.Stop()
    */

    ts.Cache("index.html", "home.html", "404.html")

    mux := MuxInstance()
    mux.Handle("GET", "/index", index)
    mux.Handle("GET", "/home", home)
    mux.Handle("GET", "/404", err404)
    mux.Handle("GET", "/user", user)
    mux.Handle("GET", "/user/add", userAdd)
    mux.Handle("GET", "/user/:id", userId)
    mux.Handle("GET", "/:slug", landing)
    mux.Handle("GET", "/login/:slug", multiLogin)
    mux.Handle("GET", "/logout/:slug", logout)
    mux.Handle("GET", "/protected/:slug", protected)

    http.ListenAndServe(":8080", mux)
}
func main() {
    address := flag.String("s", "127.0.0.1", "Bind Address")
    port := flag.String("p", "11211", "Bind Port")
    filename := flag.String("f", "./memcached.db", "path and file for database")
    pf := flag.Bool("q", false, "Enable profiling")

    flag.Usage = func() {
        fmt.Println("Usage: beano [-s ip] [-p port] [-f /path/to/db/file] [-q]")
        fmt.Println("default ip: 127.0.0.1")
        fmt.Println("default port: 11211")
        fmt.Println("default file: ./memcached.db")
        fmt.Println("-q enables profiling to /tmp/*.prof")
        os.Exit(1)
    }
    flag.Parse()

    if *pf {
        c := profile.Config{BlockProfile: true, CPUProfile: true, ProfilePath: "/tmp", MemProfile: true, Quiet: false}
        defer profile.Start(&c).Stop()
    }

    var cpuinfo string
    if n := runtime.NumCPU(); n > 1 {
        runtime.GOMAXPROCS(n)
        cpuinfo = fmt.Sprintf("%d CPUs", n)
    } else {
        cpuinfo = "1 CPU"
    }
    log.Info("beano (%s)", cpuinfo)

    initializeMetrics(*filename)
    serve(*address, *port, *filename)
}
func main() { fmt.Println("Server Starting on port:8080") defer profile.Start(profile.CPUProfile).Stop() pc := controller.NewProjectController(getDbSession()) uc := controller.NewUserController(getDbSession()) sc := controller.NewSensorController(getDbSession()) vc := controller.NewVirtSensorController(getDbSession()) router := httprouter.New() //Project Management Routings router.Handle("GET", "/projects", pc.Project) router.Handle("POST", "/addproject", pc.AddProject) router.Handle("GET", "/projects/:projectName/selectsensors", pc.SelectSensors) router.Handle("POST", "/projects/:projectName/addvirtualsensors", pc.AddSensorToProject) router.Handle("GET", "/projects/:projectName/dashboard", pc.Dashboard) router.Handle("GET", "/projects/:projectName/terminate", pc.TerminateProject) /*router.Handle("GET", "/sendvirtualsensordetails/", handle) router.Handle("POST", "/projects/:projectName/addvirtualsensors", handle) router.Handle("GET", "/projects/:projectName/startvirtsensor/:vsensorid", handle) router.Handle("GET", "/projects/:projectName/stopvirtsensor/:vsensorid", handle) router.Handle("GET", "/projects/:projectName/terminatevirtsensor/:vsensorid", handle)*/ // User Management Routings router.Handle("GET", "/", uc.Index) router.Handle("GET", "/logout", uc.Logout) router.Handle("POST", "/users/login", uc.Login) router.Handle("POST", "/users/signup", uc.Signup) router.Handle("PUT", "/users/updateuser/:username", uc.UpdateUser) router.Handle("DELETE", "/users/deleteuser/:username", uc.DeleteUser) router.Handle("GET", "/time", uc.Datahandler) router.Handle("GET", "/gettime", uc.HandlerGetData) //Physical Sensor Management Routings router.Handle("POST", "/sensors", sc.AddSensor) router.Handle("PUT", "/sensors/updatesensor/:sensorName", sc.UpdateSensor) router.Handle("DELETE", "/sensors/deletesensor/:sensorName", sc.DeleteSensor) router.Handle("GET", "/sensors/getsensor/:sensorName", sc.GetSensorByName) router.Handle("POST", "/authenticatesenor", sc.AuthenticateSensor) router.Handle("GET", "/getsensorbyowner/:ownerId", sc.GetSensorByOwner) router.Handle("GET", "/getallsharedsensors", sc.GetAllSharedSensors) //router.Handle("POST", "/getsensordata",sc.GetSensorData) //router.Handle("PUT", "/sensors/togglesensor/:sensorName", ToggelSensor) /*router.Handle("GET", "/getsensorbyowner/:ownerId", GetSensorById)*/ // Virtual Sensor Management Routings router.Handle("POST", "/virtsensors", vc.AddVirtSensor) router.Handle("PUT", "/removevirtsensors/:vSensorName", vc.RemoveVirtSensor) router.Handle("GET", "/stopvirtsensors/:vSensorName", vc.StopVirtSensor) router.Handle("GET", "/resumevirtsensors/:vSensorName", vc.ResumeVirtSensor) router.Handle(("GET"), "/projects/:projectName/sensors/:vSensorName", vc.GetVSensorDetails) //fileServer := http.StripPrefix("/static/", http.FileServer(http.Dir("static"))) router.NotFound = http.StripPrefix("/static/", http.FileServer(http.Dir("static"))) log.Fatal(http.ListenAndServe(":8081", router)) }
func main() { port := flag.Int("port", 8080, "port") backends := flag.String("workers", "", "knonw workers (ex: 'localhost:8081,localhost:8082')") strategy := flag.String("strategy", "majority", "balancing strategy ['one', 'two', 'majority', 'all']") poolSize := flag.Int("pool", 3, "size of the pool of available worker sets") flag.Parse() cfg := profile.Config{ CPUProfile: true, MemProfile: true, ProfilePath: ".", } p := profile.Start(&cfg) defer p.Stop() proxy := Proxy{NewBalancer(initListOfBckends(backends), strategy, poolSize)} server := gin.Default() server.GET("/", func(c *gin.Context) { pipes := &Pipes{ Done: make(chan struct{}), Result: make(chan *DataFormat), } go proxy.ProcessFirstResponse(pipes) defer close(pipes.Done) select { case data := <-pipes.Result: c.JSON(200, data) case <-time.After(globalTimeout): c.JSON(500, nil) } }) go func() { admin := gin.Default() admin.POST("/worker/*endpoint", func(c *gin.Context) { worker := c.Param("endpoint") done := make(chan struct{}) go proxy.AddBackend(fmt.Sprintf("http:/%s/", worker), done) select { case <-done: c.String(200, "") case <-time.After(globalTimeout): c.String(500, "") close(done) } }) admin.Run(fmt.Sprintf(":%d", *port-10)) }() server.Run(fmt.Sprintf(":%d", *port)) }
func main() {
    profileKind := os.Args[1]
    switch profileKind {
    case "cpu":
        defer profile.Start(profile.CPUProfile).Stop()
    case "mem":
        defer profile.Start(profile.MemProfile).Stop()
    case "block":
        defer profile.Start(profile.BlockProfile).Stop()
    default:
        fmt.Println("only cpu, mem and block are valid profile arguments")
        return
    }

    for i := 0; i < 1000000; i++ {
        user := &User{
            Name: "Emil Sjölander",
            Repos: []Repo{
                {
                    Name:  "goson",
                    URL:   "https://github.com/emilsjolander/goson",
                    Stars: 0,
                    Forks: 0,
                },
                {
                    Name:  "StickyListHeaders",
                    URL:   "https://github.com/emilsjolander/StickyListHeaders",
                    Stars: 722,
                    Forks: 197,
                },
                {
                    Name:  "android-FlipView",
                    URL:   "https://github.com/emilsjolander/android-FlipView",
                    Stars: 157,
                    Forks: 47,
                },
            },
        }
        goson.Render("user", goson.Args{"User": user})
    }
}
func main() {
    defer profile.Start(profile.CPUProfile).Stop()
    flag.Parse()
    for _, path := range flag.Args() {
        err := flac2wav(path)
        if err != nil {
            log.Fatalln(err)
        }
    }
}
func initProfiler() {
    cfg := profile.Config{
        MemProfile:  true,
        CPUProfile:  true,
        ProfilePath: ".", // store profiles in current directory
    }

    // p.Stop() must be called before the program exits to
    // ensure profiling information is written to disk.
    profiler = profile.Start(&cfg)
}
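// initProfiler above stores the value returned by profile.Start in a
// package-level variable, so a matching shutdown helper is implied but not
// shown. A minimal sketch, assuming `profiler` holds the interface returned
// by profile.Start (the helper name is illustrative):
func stopProfiler() {
    if profiler != nil {
        // Flush profiling data to disk; must run before the program exits.
        profiler.Stop()
    }
}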
func main() {
    defer profile.Start(profile.CPUProfile).Stop()

    if cmdTarget == "" {
        flag.PrintDefaults()
        return
    }

    s, err := gosnmp.NewGoSNMP(cmdTarget, cmdCommunity, gosnmp.Version2c, cmdTimeout)
    if err != nil {
        fmt.Printf("UNKNOWN: Error creating SNMP instance: %s\n", err.Error())
        os.Exit(utils.UNKNOWN)
    }
    // Only safe to call once the error has been checked.
    s.SetVerbose(true)

    const totalSwapOid string = ".1.3.6.1.4.1.2021.4.3.0"
    const availSwapOid string = ".1.3.6.1.4.1.2021.4.4.0"

    resp, err := s.Get(totalSwapOid)
    if err != nil {
        fmt.Printf("UNKNOWN: Error getting response: %s\n", err.Error())
        os.Exit(utils.UNKNOWN)
    }
    totalSwap = resp.Variables[0].Value.(int)
    fmt.Printf("total swap [%d]\n", totalSwap)

    availResp, err := s.Get(availSwapOid)
    if err != nil {
        fmt.Printf("UNKNOWN: Error getting response: %s\n", err.Error())
        os.Exit(utils.UNKNOWN)
    }
    availSwap = availResp.Variables[0].Value.(int)
    fmt.Printf("availSwap [%d]\n", availSwap)

    if availSwap != totalSwap {
        usedSwap = 100 - int(100*(float64(availSwap)/float64(totalSwap)))
        fmt.Printf("Used swap = %d\n", usedSwap)
        if usedSwap >= swapLimit {
            fmt.Printf("CRITICAL - SWAP Available [%d] Total [%d] In Use [%d%%]", availSwap, totalSwap, usedSwap)
            os.Exit(utils.CRITICAL)
        }
    }

    fmt.Printf("OK - SWAP Available [%d] Total [%d] In Use [%d%%]", availSwap, totalSwap, usedSwap)
    os.Exit(utils.OK)
}
func main() {
    defer profile.Start(profile.CPUProfile).Stop()

    c := make(chan int, 1)
    c <- 1
    fmt.Println(<-c)
    c <- 2
    fmt.Println(<-c)
}
func main() {
    runtime.GOMAXPROCS(runtime.NumCPU() - 2)

    gopath := os.Getenv("GOPATH")
    path := filepath.Join(gopath, "prof", "github.com", "reggo", "reggo", "nnet")

    nInputs := 10
    nOutputs := 3
    nLayers := 2
    nNeurons := 50
    nSamples := 1000000
    nRuns := 50

    config := &profile.Config{
        CPUProfile:  true,
        ProfilePath: path,
    }
    defer profile.Start(config).Stop()

    net, err := nnet.NewSimpleTrainer(nInputs, nOutputs, nLayers, nNeurons, nnet.Linear{})
    if err != nil {
        log.Fatal(err)
    }

    // Generate some random data
    inputs := mat64.NewDense(nSamples, nInputs, nil)
    outputs := mat64.NewDense(nSamples, nOutputs, nil)
    for i := 0; i < nSamples; i++ {
        for j := 0; j < nInputs; j++ {
            inputs.Set(i, j, rand.Float64())
        }
        for j := 0; j < nOutputs; j++ {
            outputs.Set(i, j, rand.Float64())
        }
    }

    // Create trainer
    prob := train.NewBatchGradBased(net, true, inputs, outputs, nil, nil, nil)
    nParameters := net.NumParameters()

    parameters := make([]float64, nParameters)
    derivative := make([]float64, nParameters)

    for i := 0; i < nRuns; i++ {
        net.RandomizeParameters()
        net.Parameters(parameters)
        prob.ObjGrad(parameters, derivative)
        fmt.Println(floats.Sum(derivative))
    }
}
func main() {
    for _, arg := range os.Args {
        if arg == "--debug" {
            util.DEBUG = true
            config := &profile.Config{
                MemProfile: true,
                CPUProfile: true,
            }
            defer profile.Start(config).Stop()
        }
    }
    client(os.Args)
}
func main() {
    defer profile.Start(profile.CPUProfile).Stop()

    start := time.Now()
    args := parseArgs()

    bc, err := clock.NewBallClock(args.NumBalls, args.Duration)
    if err != nil {
        log.Printf("Unable to create the Ball Clock. Error: %s", err.Error())
        return // bc is unusable on error
    }

    result := bc.RunClock()
    fmt.Println(result)

    end := time.Now()
    log.Printf("Elapsed time: %s", end.Sub(start).String())
}
// main is the entry point for the application.
func main() {
    cfg := profile.Config{
        MemProfile:     true,
        CPUProfile:     true,
        ProfilePath:    ".",  // store profiles in current directory
        NoShutdownHook: true, // do not hook SIGINT
    }

    // p.Stop() must be called before the program exits to
    // ensure profiling information is written to disk.
    p := profile.Start(&cfg)
    defer p.Stop()
}
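// The Config struct used above comes from the original davecheney/profile
// API. A minimal sketch of the rough equivalent under the newer
// github.com/pkg/profile functional-options API (an assumption worth noting:
// that package activates only one profile mode per session, so CPU and
// memory profiles need separate runs; the function name is illustrative):
func mainWithPkgProfile() {
    defer profile.Start(
        profile.CPUProfile,       // swap in profile.MemProfile for a memory run
        profile.ProfilePath("."), // store profiles in current directory
        profile.NoShutdownHook,   // do not hook SIGINT
    ).Stop()
}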
// http://saml.rilspace.org/profiling-and-creating-call-graphs-for-go-programs-with-go-tool-pprof#rest
func initProfile() {
    // Start profiling only if at least one profile type is enabled.
    if !SvcConfig.Prof.GetCPU() && !SvcConfig.Prof.GetMem() && !SvcConfig.Prof.GetBlock() {
        return
    }
    cfg := &profile.Config{
        CPUProfile:   SvcConfig.Prof.GetCPU(),
        MemProfile:   SvcConfig.Prof.GetMem(),
        BlockProfile: SvcConfig.Prof.GetBlock(),
    }
    profiling = profile.Start(cfg)
}
func BenchmarkLineParser(b *testing.B) {
    defer profile.Start(profile.MemProfile).Stop()

    p, err := filepath.Abs("../../_fixtures/testnextprog")
    if err != nil {
        b.Fatal(err)
    }
    data := grabDebugLineSection(p, nil)

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        _ = Parse(data)
    }
}
func main() {
    cfg := profile.Config{
        CPUProfile:     true,
        MemProfile:     true,
        ProfilePath:    ".",  // store profiles in current directory
        NoShutdownHook: true, // do not hook SIGINT
    }
    defer profile.Start(&cfg).Stop()

    outFile := flag.String("output", "result.out", "Output file")
    flag.Parse()

    sortFiles(*outFile, flag.Args()...)
    glog.V(1).Info("ALL DONE.")
}
func main() {
    meth, ok := methods[os.Args[1]]
    if !ok {
        return
    }
    n, err := strconv.Atoi(os.Args[2])
    if err != nil {
        return
    }

    defer profile.Start(profile.CPUProfile).Stop()

    get := meth(n)
    for i := 0; i < 1e10; i++ {
        _ = get()
    }
}
func main() {
    x := op.NewStream(1 << 30)
    it := x.Iterate()

    defer profile.Start(&profile.Config{
        CPUProfile:  true,
        ProfilePath: ".",
    }).Stop()

    start := time.Now()
    for i := 0; i < 1<<20; i++ {
        *it.Translate() = op.Translate{2, 2}
        Rect{10, 10, 20, 20}.Render(it)
    }
    fmt.Println(time.Since(start))
    fmt.Println("Last ", it.Head)

    start = time.Now()
    it = x.Iterate()
    z := int32(0)
RENDER:
    for {
        switch it.Type() {
        case op.EOF:
            break RENDER
        case op.TypeStart:
            it.Start()
        case op.TypeClose:
            it.Close()
        case op.TypeMoveTo:
            z += it.MoveTo().X
        case op.TypeLineTo:
            z += it.LineTo().X
        case op.TypeTranslate:
            z += it.Translate().Dx
        case op.TypeFill:
            it.Fill()
        case op.TypeStroke:
            it.Stroke()
        default:
            panic("unhandled type")
        }
    }
    fmt.Println(time.Since(start))
    fmt.Println("Last ", it.Head, z)
}
func main() {
    flagAddr := flag.String("addr", ":6060", "Listening address")
    flagCorpus := flag.String("corpus", "", "Corpus file in JSON format")
    flagStates := flag.Int("states", 2, "Number of hidden states")
    flagIter := flag.Int("iter", 20, "Number of EM iterations")
    flagModel := flag.String("model", "", "Model file in JSON format")
    flagLL := flag.String("logl", "", "Log-likelihood file")
    flagPProf := flag.Bool("pprof", false, "Output pprof file")
    flagParallel := flag.Bool("parallel", true, "Run multi-threading")
    flag.Parse()

    go func() {
        log.Println(http.ListenAndServe(*flagAddr, nil))
    }()

    var corpus []*core.Instance
    if f, e := os.Open(*flagCorpus); e != nil {
        log.Fatalf("Cannot open %s: %v", *flagCorpus, e)
    } else {
        defer f.Close()
        corpus = loader.LoadJSON(f)
        // Infer unexported fields of Instance.
        for i := range corpus {
            corpus[i].Index()
        }
    }

    C := core.EstimateC(corpus)
    baseline := core.Init(*flagStates, C, corpus, rand.New(rand.NewSource(99)))

    f := core.CreateFileOrStdout(*flagLL)
    if f != os.Stdout {
        defer f.Close()
    }

    if *flagPProf {
        defer profile.Start(profile.CPUProfile).Stop()
    }
    if *flagParallel {
        runtime.GOMAXPROCS(runtime.NumCPU())
    }

    model := core.Train(corpus, *flagStates, C, *flagIter, baseline, f)
    core.SaveModel(model, *flagModel)
}
func main() {
    defer profile.Start(profile.CPUProfile).Stop()

    f, _ := os.Open("big-data")
    r := bufio.NewReaderSize(f, batch)

    var counts []float64
    for {
        provision(r) // batch reads
        bytes, err := r.ReadBytes('\n')
        if err != nil { // EOF
            fmt.Println(counts)
            return
        }
        line := unsafeString(bytes)
        counts = sum(line[:len(line)-1], counts)
    }
}
func BenchmarkParse(b *testing.B) {
    defer profile.Start(profile.CPUProfile).Stop()

    f, err := os.Open("testdata/frame")
    if err != nil {
        b.Fatal(err)
    }
    defer f.Close()

    data, err := ioutil.ReadAll(f)
    if err != nil {
        b.Fatal(err)
    }

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        frame.Parse(data)
    }
}
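// For benchmarks such as BenchmarkLineParser and BenchmarkParse above, the
// standard test runner can capture the same CPU and memory profiles with no
// code changes at all (these flags belong to the stock `go test` tool):
//
//     go test -bench=BenchmarkParse -cpuprofile=cpu.out -memprofile=mem.out
//     go tool pprof cpu.out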