Example #1
func TestGoroutineParallelism(t *testing.T) {
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
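
A recurring idiom in these examples: debug.SetGCPercent returns the previous setting, and the arguments of a deferred call are evaluated at the point of the defer statement. So defer debug.SetGCPercent(debug.SetGCPercent(-1)) disables the collector immediately and restores the old percentage when the function returns. A minimal standalone sketch of just the idiom:

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// The inner call runs now, disables the GC, and returns the previous
	// percentage; that value becomes the argument of the outer call, which
	// is deferred and restores the setting when main returns.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	fmt.Println("GC disabled for the duration of main")
}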
Example #2
func main() {
	// Disable GC, and make sure it is re-enabled before main returns.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var count int32
	newFunc := func() interface{} {
		return atomic.AddInt32(&count, 1)
	}
	pool := sync.Pool{New: newFunc}

	// What the New field does
	v1 := pool.Get()
	fmt.Printf("v1: %v\n", v1)

	// Putting values into and getting them from the pool
	pool.Put(newFunc())
	pool.Put(newFunc())
	pool.Put(newFunc())
	v2 := pool.Get()
	fmt.Printf("v2: %v\n", v2)

	// How garbage collection affects the pool
	debug.SetGCPercent(100)
	runtime.GC()
	v3 := pool.Get()
	fmt.Printf("v3: %v\n", v3)
	pool.New = nil
	v4 := pool.Get()
	fmt.Printf("v4: %v\n", v4)
}
Example #3
func (t *MSTree) LoadTxt(filename string, limit int) error {
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()

	// Turn GC off
	prevGC := debug.SetGCPercent(-1)
	// Defer to turn GC back on
	defer debug.SetGCPercent(prevGC)

	scanner := bufio.NewScanner(f)
	count := 0
	for scanner.Scan() {
		line := strings.TrimRight(scanner.Text(), "\n")
		t.AddNoSync(line)
		count++
		if count%1000000 == 0 {
			log.Info("Reindexed %d items", count)
		}
		if limit != -1 && count == limit {
			break
		}
	}
	log.Info("Reindexed %d items", count)
	err = t.DumpIndex()
	if err != nil {
		return err
	}
	return nil
}
Example #4
func mine(numProcs int) {
	runtime.GOMAXPROCS(numProcs)

	updateWork()
	updateLastBlock()

	debug.SetGCPercent(-1)

	log.Println("using", numProcs, "processes")

	switch {
	case cpuid.AVX2:
		log.Println("using AVX2 optimisations")
	case cpuid.AVX:
		log.Println("using AVX optimisations")
	case cpuid.SSSE3:
		log.Println("using SSSE3 optimisations")
	case cpuid.ArmSha:
		log.Println("using ARMSHA optimisations")
	default:
		log.Println("your CPU isn't supported for optimised mining")
		log.Println("please use v1.1 or get a new CPU.")
		os.Exit(1)
	}

	for proc := 0; proc < numProcs; proc++ {
		// decide on miner and execute
		switch {
		case cpuid.AVX2:
			go mineAVX2()
		case cpuid.AVX:
			go mineAVX()
		case cpuid.SSSE3:
			go mineSSSE3()
		case cpuid.ArmSha:
			go mineARM()
		}
	}

	log.Println("mining for address " + address + "...")

	previousTime := time.Now()
	for {
		for i := 0; i < 10; i++ {
			time.Sleep(time.Second * 5)

			log.Printf("%.2f MH/s\n", float64(hashesThisPeriod)/
				time.Now().Sub(previousTime).Seconds())

			previousTime = time.Now()
			hashesThisPeriod = 0

			updateWork()
			updateLastBlock()
		}

		// Momentarily re-enable the GC so a collection can run before the
		// next period, then turn it back off.
		debug.SetGCPercent(10)
		debug.SetGCPercent(-1)
	}
}
Example #5
func monitor() {
	c := time.Tick(1 * time.Second)
	mem := new(runtime.MemStats)
	// SetGCPercent returns the previous setting, so setting and immediately
	// restoring reads the current percentage (at the cost of briefly
	// setting it to 100).
	origPct := debug.SetGCPercent(100)
	debug.SetGCPercent(origPct)
	for range c {
		runtime.ReadMemStats(mem)
		mu.Lock()
		target := tSize
		mu.Unlock()
		if target < 0 {
			continue
		}
		// Occupancy fraction: 70%. Don't GC before hitting this.
		softLimit := float64(target) * 0.7
		pct := softLimit / float64(mem.Alloc) * 100
		fmt.Printf("gctune: pct: %0.5f, target: %d, softLimit: %0.2f, Alloc: %d, Sys: %d\n", pct, target, softLimit, mem.Alloc, mem.Sys)
		if pct < 50 {
			// If this is too low, GC frequency increases too much.
			pct = 50
		}
		debug.SetGCPercent(int(pct))
		if mem.Sys > uint64(target*70/100) {
			fmt.Println("freeing")
			debug.FreeOSMemory()
		}
	}
}
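
The loop above rescales GOGC so the next heap goal lands near the 70% soft limit: with a 1 GiB target the soft limit is about 717 MiB, so at roughly 358 MiB allocated it sets GOGC to about 200, at the soft limit itself about 100, and it never drops below the floor of 50. A distilled sketch of just that calculation (nextGCPercent is an illustrative name, not part of the example):

package main

import "fmt"

// nextGCPercent distills the calculation from the monitor loop above:
// scale GOGC so the next collection triggers near 70% of the target size.
func nextGCPercent(target, alloc uint64) int {
	softLimit := float64(target) * 0.7
	pct := softLimit / float64(alloc) * 100
	if pct < 50 {
		pct = 50 // below this, collections become too frequent
	}
	return int(pct)
}

func main() {
	const gib = 1 << 30
	fmt.Println(nextGCPercent(gib, 358<<20)) // ~200: heap far below the soft limit
	fmt.Println(nextGCPercent(gib, 717<<20)) // ~100: right at the ~717 MiB soft limit
	fmt.Println(nextGCPercent(gib, 2*gib))   // 50: clamped at the floor
}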
Example #6
func TestPool(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var p Pool
	if p.Get() != nil {
		t.Fatal("expected empty")
	}
	p.Put("a")
	p.Put("b")
	if g := p.Get(); g != "b" {
		t.Fatalf("got %#v; want b", g)
	}
	if g := p.Get(); g != "a" {
		t.Fatalf("got %#v; want a", g)
	}
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil", g)
	}

	p.Put("c")
	debug.SetGCPercent(100) // to allow following GC to actually run
	runtime.GC()
	if g := p.Get(); g != nil {
		t.Fatalf("got %#v; want nil after GC", g)
	}
}
Example #7
func TestPoolNew(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	i := 0
	p := Pool{
		New: func() interface{} {
			i++
			return i
		},
	}
	if v := p.Get(); v != 1 {
		t.Fatalf("got %v; want 1", v)
	}
	if v := p.Get(); v != 2 {
		t.Fatalf("got %v; want 2", v)
	}
	p.Put(42)
	if v := p.Get(); v != 42 {
		t.Fatalf("got %v; want 42", v)
	}
	if v := p.Get(); v != 3 {
		t.Fatalf("got %v; want 3", v)
	}
}
Example #8
func (s *Server) applyGcPercent(c *core.Config) (err error) {
	if c.Go.GcPercent == 0 {
		debug.SetGCPercent(100)
		return
	}

	pv := debug.SetGCPercent(c.Go.GcPercent)
	core.Trace.Println("set gc percent from", pv, "to", c.Go.GcPercent)
	return
}
Example #9
func garbageCollection() {
	log.Printf("Starting garbageCollection()\n")
	h.broadcastSys <- []byte("{\"gc\":\"starting\"}")
	memoryStats()
	debug.SetGCPercent(100)
	debug.FreeOSMemory()
	debug.SetGCPercent(-1)
	log.Printf("Done with garbageCollection()\n")
	h.broadcastSys <- []byte("{\"gc\":\"done\"}")
	memoryStats()
}
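
Example #9 captures a useful pattern for services that normally run with the collector off: temporarily restore a normal GOGC, force a collection that also returns freed memory to the OS, then disable the GC again. A reusable sketch of the same steps (collectWhileDisabled is an illustrative name):

package main

import (
	"log"
	"runtime/debug"
)

// collectWhileDisabled mirrors the steps of garbageCollection() above:
// re-enable the GC, force a collection that returns freed memory to the
// operating system, then switch the collector back off.
func collectWhileDisabled() {
	debug.SetGCPercent(100)
	debug.FreeOSMemory() // runs a GC and releases as much memory as possible
	debug.SetGCPercent(-1)
}

func main() {
	debug.SetGCPercent(-1) // run with the GC disabled, as the example's server does
	log.Println("forcing a manual collection")
	collectWhileDisabled()
}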
Example #10
func TestGcHashmapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}
Example #11
func TestPoolsPutGet(t *testing.T) {
	// disable GC so we can control when it happens.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	N := 10000 * 100
	var p = Pools{PrivateSize: N}
	for i := 0; i < N; i++ {
		p.Put(i)
	}
	for i := N - 1; i > 0; i-- {
		if n := p.Get(); n != i {
			t.Fatalf("got %v; want %d", n, i)
		}
	}
}
Example #12
File: gc.go Project: 2thetop/go
func GCFairness2() {
	// Make sure user code can't exploit the GC's high priority
	// scheduling to make scheduling of user code unfair. See
	// issue #15706.
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1)
	var count [3]int64
	var sink [3]interface{}
	for i := range count {
		go func(i int) {
			for {
				sink[i] = make([]byte, 1024)
				atomic.AddInt64(&count[i], 1)
			}
		}(i)
	}
	// Note: If the unfairness is really bad, it may not even get
	// past the sleep.
	//
	// If the scheduling rules change, this may not be enough time
	// to let all goroutines run, but for now we cycle through
	// them rapidly.
	time.Sleep(30 * time.Millisecond)
	for i := range count {
		if atomic.LoadInt64(&count[i]) == 0 {
			fmt.Printf("goroutine %d did not run\n", i)
			return
		}
	}
	fmt.Println("OK")
}
Example #13
func proxyServer() {
	loadConfigOrDie()
	debug.SetGCPercent(config.GCPercent)

	db, err := kvl.Open(config.Proxy.Database.Type, config.Proxy.Database.DSN)
	if err != nil {
		log.Fatalf("Couldn't connect to %v database: %v",
			config.Proxy.Database.Type, err)
	}
	defer db.Close()

	var h http.Handler
	h, err = proxyserver.New(db, config.Proxy.Scrubbers, config.Proxy.CacheSize)
	if err != nil {
		log.Fatalf("Couldn't initialize handler: %v", err)
	}

	h = httputil.NewLimitParallelism(config.Proxy.ParallelRequests, h)

	h = httputil.AddDebugHandlers(h, config.Proxy.Debug)

	if !config.Proxy.DisableHTTPLogging {
		h = httputil.LogHTTPRequests(h)
	}

	if config.Proxy.Listen == "none" {
		for {
			time.Sleep(time.Hour)
		}
	} else {
		serveOrDie(config.Proxy.Listen, h)
	}
}
Example #14
func main() {
	// Use all processor cores.
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Block and transaction processing can cause bursty allocations.  This
	// limits the garbage collector from excessively overallocating during
	// bursts.  This value was arrived at with the help of profiling live
	// usage.
	debug.SetGCPercent(10)

	// Up some limits.
	if err := limits.SetLimits(); err != nil {
		fmt.Fprintf(os.Stderr, "failed to set limits: %v\n", err)
		os.Exit(1)
	}

	// Call serviceMain on Windows to handle running as a service.  When
	// the return isService flag is true, exit now since we ran as a
	// service.  Otherwise, just fall through to normal operation.
	if runtime.GOOS == "windows" {
		isService, err := winServiceMain()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		if isService {
			os.Exit(0)
		}
	}

	// Work around defer not working after os.Exit()
	if err := btcdMain(nil); err != nil {
		os.Exit(1)
	}
}
Example #15
func handleMem(g *Req) error {
	if g.R.Method == "POST" {
		type memParams struct {
			GCNow     int `schema:"gc_now"`
			GCPercent int `schema:"gc_percent"`
		}
		params := memParams{}
		err := g.Decoder.Decode(&params, g.R.Form)
		if err != nil {
			g.Error("Failed to decode params: " + err.Error())
			return ServerError("Failed to decode params: " + err.Error())
		}
		msg := "Adjusting mem system\n"
		if params.GCNow > 0 {
			info := "Running GC by request to handler"
			g.Info(info)
			msg += info + "\n"

			runtime.GC()
		}
		if params.GCPercent > 0 {
			oldVal := debug.SetGCPercent(params.GCPercent)
			info := fmt.Sprintf("Set GC%% to [%d] was [%d]", params.GCPercent, oldVal)
			g.Info(info)
			msg += info + "\n"
		}
		return g.SendText([]byte(msg))
	}
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	return g.SendJson("memstats", memStats)
}
Example #16
func serverSettingsHandler(c web.C, w http.ResponseWriter, r *http.Request) {
	config := dvid.NewConfig()
	if err := config.SetByJSON(r.Body); err != nil {
		BadRequest(w, r, fmt.Sprintf("Error decoding POSTed JSON config for 'new': %v", err))
		return
	}
	w.Header().Set("Content-Type", "text/plain")

	// Handle GC percentage setting
	percent, found, err := config.GetInt("gc")
	if err != nil {
		BadRequest(w, r, "POST on settings endpoint had bad parsing of 'gc' key: %v", err)
		return
	}
	if found {
		old := debug.SetGCPercent(percent)
		fmt.Fprintf(w, "DVID server garbage collection target percentage set to %d from %d\n", percent, old)
	}

	// Handle max throttle ops setting
	maxOps, found, err := config.GetInt("throttle")
	if err != nil {
		BadRequest(w, r, "POST on settings endpoint had bad parsing of 'throttle' key: %v", err)
		return
	}
	if found {
		old := maxThrottledOps
		SetMaxThrottleOps(maxOps)
		fmt.Fprintf(w, "Maximum throttled ops set to %d from %d\n", maxOps, old)
	}
}
Example #17
func Reset() {
	UploadLimit = CFG.Net.MaxUpKBps << 10
	DownloadLimit = CFG.Net.MaxDownKBps << 10
	debug.SetGCPercent(CFG.Memory.GCPercTrshold)
	MaxExpireTime = time.Duration(CFG.TXPool.TxExpireMaxHours) * time.Hour
	ExpirePerKB = time.Duration(CFG.TXPool.TxExpireMinPerKB) * time.Minute
	chain.MaxCachedBlocks = CFG.Memory.MaxCachedBlocks
	if CFG.Net.TCPPort != 0 {
		DefaultTcpPort = uint16(CFG.Net.TCPPort)
	} else {
		if CFG.Testnet {
			DefaultTcpPort = 18333
		} else {
			DefaultTcpPort = 8333
		}
	}

	ips := strings.Split(CFG.WebUI.AllowedIP, ",")
	WebUIAllowed = nil
	for i := range ips {
		oaa := str2oaa(ips[i])
		if oaa != nil {
			WebUIAllowed = append(WebUIAllowed, *oaa)
		} else {
			println("ERROR: Incorrect AllowedIP:", ips[i])
		}
	}
	if len(WebUIAllowed) == 0 {
		println("WARNING: No IP is currently allowed at WebUI")
	}
	SetListenTCP(CFG.Net.ListenTCP, false)
	ReloadMiners()
}
Example #18
func show_mem(p string) {
	al, sy := sys.MemUsed()

	fmt.Println("Allocated:", al>>20, "MB")
	fmt.Println("SystemMem:", sy>>20, "MB")

	if p == "" {
		return
	}
	if p == "free" {
		fmt.Println("Freeing the mem...")
		sys.FreeMem()
		show_mem("")
		return
	}
	if p == "gc" {
		fmt.Println("Running GC...")
		runtime.GC()
		fmt.Println("Done.")
		return
	}
	i, e := strconv.ParseInt(p, 10, 64)
	if e != nil {
		println(e.Error())
		return
	}
	debug.SetGCPercent(int(i))
	fmt.Println("GC treshold set to", i, "percent")
}
Example #19
func TestSchedLocalQueueEmpty(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long and does not trigger the race.
		t.Skip("skipping on uniprocessor")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))

	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted during spin wait.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	iters := int(1e5)
	if testing.Short() {
		iters = 1e2
	}
	runtime.RunSchedLocalQueueEmptyTest(iters)
}
Example #20
func setup_runtime_vars() {
	runtime.GOMAXPROCS(runtime.NumCPU()) // It seems that Go does not do it by default
	if GCPerc > 0 {
		debug.SetGCPercent(GCPerc)
	}
	qdb.SetDefragPercent(300)
	//qdb.SetMaxPending(1000, 10000)
}
Example #21
func (t *MSTree) LoadIndex() error {
	var globalErr error
	files, err := ioutil.ReadDir(t.indexDir)
	if err != nil {
		log.Error("Error loading index: " + err.Error())
		return err
	}
	if len(files) > 0 {

		// Turn GC off
		prevGC := debug.SetGCPercent(-1)
		// Defer to turn GC back on
		defer debug.SetGCPercent(prevGC)

		ev := make(eventChan, len(files))
		procCount := 0
		for _, idxFile := range files {
			fName := idxFile.Name()
			if !strings.HasSuffix(fName, ".idx") {
				continue
			}
			pref := fName[:len(fName)-4]
			fName = fmt.Sprintf("%s/%s", t.indexDir, fName)
			idxNode := newNode()
			t.Root.Children[pref] = idxNode
			go loadWorker(fName, idxNode, ev, &t.TotalMetrics)
			procCount++
		}
		tm := time.Now()

		for e := range ev {
			procCount--
			if e != nil {
				globalErr = e
			}
			if procCount == 0 {
				break
			}
		}
		log.Notice("Index load complete in %s", time.Now().Sub(tm).String())
	} else {
		log.Debug("Index is empty. Hope that's ok")
	}
	return globalErr
}
Example #22
// BEGIN OMIT
func TestSlice(t *testing.T) {
	debug.SetGCPercent(-1)
	slice := make([]string, 1e6)
	start := time.Now()
	for i := 0; i < 10e6; i++ {
		slice = append(slice, "1234567890abcdef")
	}
	log.Println("TOTAL time string: ", time.Since(start)/1e6)
}
Example #23
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU() - 1)
	server := NewServer(256 * 1024 * 1024)
	debug.SetGCPercent(10)
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	server.Start(":7788")
}
Example #24
func init() {
	gateway.ParseFlags()
	if !gateway.Options.RunSwaggerServer {
		gateway.ValidateFlags()
	}

	gateway.EnsureServerUlimit()
	debug.SetGCPercent(800) // same as the GOGC env var; as of Go 1.7 we needn't worry about this
}
Example #25
func Initialize() {
	// Disable the GC for debugging.
	//debug.SetGCPercent(-1)

	debug.SetGCPercent(1)

	// Periodically force a GC flush
	//go periodicFree(1 * time.Minute)
}
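
The commented-out periodicFree(1 * time.Minute) call suggests a helper that flushes memory on a timer. Its implementation is not shown; a plausible minimal version (the body below is an assumption based only on the name and comment) would be:

package main

import (
	"runtime/debug"
	"time"
)

// periodicFree is hypothetical: the example above only references it in a
// commented-out call. A plausible version forces a GC flush on every tick.
func periodicFree(d time.Duration) {
	for range time.Tick(d) {
		debug.FreeOSMemory()
	}
}

func main() {
	go periodicFree(1 * time.Minute)
	select {} // block forever, as a long-running service would
}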
Example #26
func setup_runtime_vars() {
	runtime.GOMAXPROCS(runtime.NumCPU()) // It seems that Go does not do it by default
	chain.MinBrowsableOutValue = 0
	if GCPerc > 0 {
		debug.SetGCPercent(GCPerc)
	}
	//qdb.SetDefragPercent(100)
	//qdb.SetMaxPending(1000, 10000)
}
Example #27
func benchmarkPoolsOverflows(b *testing.B, n int) {
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	switch n {
	case 0, 1:
		b.N = 10000000
	case 2:
		b.N = 1000000
	case 4:
		b.N = 1000000
	case 8:
		b.N = 1000000
	case 16:
		b.N = 100000
	case 32:
		b.N = 100000
	case 64:
		b.N = 100000
	case 128:
		b.N = 10000
	case 256:
		b.N = 10000
	case 512:
		b.N = 10000
	case 1024:
		b.N = 10000
	}
	b.StartTimer()
	var p Pools
	var v = 1
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < n; i++ {
				p.Put(&v)
			}
			for i := 0; i < n; i++ {
				p.Get()
			}
		}
	})
	b.StopTimer()
	if n > 0 {
		b.N *= n
	}
}
Example #28
func TestStrings(t *testing.T) {
	printed := false
	f := func() {
		var dst []Win32_Process
		zeros := 0
		q := CreateQuery(&dst, "")
		for i := 0; i < 5; i++ {
			err := Query(q, &dst)
			if err != nil {
				t.Fatal(err, q)
			}
			for _, d := range dst {
				v := reflect.ValueOf(d)
				for j := 0; j < v.NumField(); j++ {
					f := v.Field(j)
					if f.Kind() != reflect.String {
						continue
					}
					s := f.Interface().(string)
					if len(s) > 0 && s[0] == '\u0000' {
						zeros++
						if !printed {
							printed = true
							j, _ := json.MarshalIndent(&d, "", "  ")
							t.Log("Example with \\u0000:\n", string(j))
						}
					}
				}
			}
			fmt.Println("iter", i, "zeros:", zeros)
		}
		if zeros > 0 {
			t.Error("> 0 zeros")
		}
	}

	fmt.Println("Disabling GC")
	debug.SetGCPercent(-1)
	f()
	fmt.Println("Enabling GC")
	debug.SetGCPercent(100)
	f()
}
Example #29
func main() {
	debug.SetGCPercent(10)
	fmt.Println("Number of entries: ", entries)

	config := bigcache.Config{
		Shards:             256,
		LifeWindow:         100 * time.Minute,
		MaxEntriesInWindow: entries,
		MaxEntrySize:       200,
		Verbose:            true,
	}

	bigcache, _ := bigcache.NewBigCache(config)
	for i := 0; i < entries; i++ {
		key, val := generateKeyValue(i, valueSize)
		bigcache.Set(key, val)
	}

	firstKey, _ := generateKeyValue(1, valueSize)
	checkFirstElement(bigcache.Get(firstKey))

	fmt.Println("GC pause for bigcache: ", gcPause())
	bigcache = nil
	gcPause()

	//------------------------------------------

	freeCache := freecache.NewCache(entries * 200) //allocate entries * 200 bytes
	for i := 0; i < entries; i++ {
		key, val := generateKeyValue(i, valueSize)
		if err := freeCache.Set([]byte(key), val, 0); err != nil {
			fmt.Println("Error in set: ", err.Error())
		}
	}

	firstKey, _ = generateKeyValue(1, valueSize)
	checkFirstElement(freeCache.Get([]byte(firstKey)))

	if freeCache.OverwriteCount() != 0 {
		fmt.Println("Overwritten: ", freeCache.OverwriteCount())
	}
	fmt.Println("GC pause for freecache: ", gcPause())
	freeCache = nil
	gcPause()

	//------------------------------------------

	mapCache := make(map[string][]byte)
	for i := 0; i < entries; i++ {
		key, val := generateKeyValue(i, valueSize)
		mapCache[key] = val
	}
	fmt.Println("GC pause for map: ", gcPause())

}
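
Example #29 depends on helpers that are not shown (generateKeyValue, checkFirstElement, gcPause). The gcPause measurement can be sketched with the standard library; this minimal version (an assumption, since the real helper is not included) forces a collection and reports the most recent pause:

package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
	"time"
)

// gcPause forces a collection and returns the duration of the most recent
// GC pause; debug.GCStats orders pauses most recent first.
func gcPause() time.Duration {
	runtime.GC()
	var stats debug.GCStats
	debug.ReadGCStats(&stats)
	return stats.Pause[0]
}

func main() {
	fmt.Println("GC pause:", gcPause())
}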
Example #30
func main() {
	flag.Parse()
	if *flagMaster != "" && *flagSlave != "" {
		log.Fatalf("both -master and -slave are specified")
	}
	if *flagPprof != "" {
		go http.ListenAndServe(*flagPprof, nil)
	} else {
		runtime.MemProfileRate = 0
	}

	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT)
		<-c
		atomic.StoreUint32(&shutdown, 1)
		close(shutdownC)
		log.Printf("shutting down...")
		time.Sleep(2 * time.Second)
		for _, f := range shutdownCleanup {
			f()
		}
		os.Exit(0)
	}()

	runtime.GOMAXPROCS(min(*flagProcs, runtime.NumCPU()))
	debug.SetGCPercent(50) // most memory is in large binary blobs
	lowerProcessPrio()

	if *flagMaster != "" || *flagSlave == "" {
		if *flagWorkdir == "" {
			log.Fatalf("-workdir is not set")
		}
		if *flagMaster == "" {
			*flagMaster = "localhost:0"
		}
		ln, err := net.Listen("tcp", *flagMaster)
		if err != nil {
			log.Fatalf("failed to listen: %v", err)
		}
		if *flagMaster == "localhost:0" && *flagSlave == "" {
			*flagSlave = ln.Addr().String()
		}
		go masterMain(ln)
	}

	if *flagSlave != "" {
		if *flagBin == "" {
			log.Fatalf("-bin is not set")
		}
		go slaveMain()
	}

	select {}
}