Example #1
func TestChan3() {
	fmt.Println("@@@@@@@@@@@@ TestChan 3")

	fmt.Printf("cpu num: %d\n", runtime.NumCPU()) // 8核cpu

	// 虽然goroutine是并发执行的,但是它们并不是并行运行的。如果不告诉Go额外的东西,同
	// 一时刻只会有一个goroutine执行。利用runtime.GOMAXPROCS(n)可以设置goroutine
	// 并行执行的数量。GOMAXPROCS 设置了同时运行的CPU 的最大数量,并返回之前的设置。
	val := runtime.GOMAXPROCS(runtime.NumCPU() * 4)
	fmt.Printf("last goroutine num: %d\n", val) // 8个

	fmt.Printf("goroutine num: %d\n", runtime.NumGoroutine()) // 4个goroutine同时运行

	var ch1 chan int = make(chan int, 0)
	var ch2 chan int = make(chan int, 0)
	var ch3 chan int = make(chan int, 0)

	go write(ch1, 22)
	go write(ch2, 33)
	go write(ch3, 44)
	go read(ch1)
	go read(ch2)
	go read(ch3)

	fmt.Printf("goroutine num: %d\n", runtime.NumGoroutine()) // 10个goroutine同时运行
	sleep("TestChan3", 3)
}
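
The write, read, and sleep helpers called by TestChan3 are not part of this listing. A minimal sketch of what they might look like, assuming they only pass a value through the channel, print it, and pause the caller (the bodies are guesses, and fmt/time are assumed to be imported):

// Hypothetical helpers assumed by TestChan3 above; not the original code.
func write(ch chan int, v int) {
	ch <- v // blocks until a reader is ready on the unbuffered channel
	fmt.Printf("wrote %d\n", v)
}

func read(ch chan int) {
	fmt.Printf("read %d\n", <-ch)
}

func sleep(tag string, seconds int) {
	time.Sleep(time.Duration(seconds) * time.Second)
	fmt.Printf("%s: done waiting\n", tag)
}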
Example #2
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	fetchFeed()

	e := echo.New()
	e.Use(mw.Logger())
	e.Use(mw.Recover())
	e.Use(mw.StripTrailingSlash())
	e.Use(mw.Gzip())
	e.Use(cors.Default().Handler)

	bundle, _ := ioutil.ReadFile("./build/bundle.js")

	// stats
	s := stats.New()
	e.Use(s.Handler)
	e.Get("/stats", func(c *echo.Context) error {
		return c.JSON(http.StatusOK, s.Data())
	})
	// static files
	e.Static("/public/css", "public/css")
	e.Static("/universal.js", "./build/bundle.js")
	e.Favicon("public/favicon.ico")

	e.Get("/", selfjs.New(runtime.NumCPU(), string(bundle), rss))
	e.Get("/about", selfjs.New(runtime.NumCPU(), string(bundle), loremJSON()))

	e.Get("/api/data", apiFrontPage)
	e.Get("/api/anotherpage", apiAnotherPage)
	go tick()
	fmt.Println("serving at port 3000")
	e.Run(":3000")
}
Example #3
func main() {
	flag.Parse()

	table := make(map[uint64]*big.Int)
	keys := make(chan *rsa.PrivateKey)

	go func() {
		d := time.Tick(time.Minute)
		for {
			<-d
			log.Printf("checked %v keypairs\n", len(table))
		}
	}()

	runtime.GOMAXPROCS(runtime.NumCPU())
	for i := 0; i < runtime.NumCPU(); i++ {
		go generate(keys)
	}

	for {
		priv := <-keys
		check(table, priv.Primes[0])
		check(table, priv.Primes[1])
	}
}
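
The generate worker launched once per CPU in Example #3 is not shown. A plausible sketch, assuming it simply produces RSA private keys in a loop and sends them on the channel (the key size and error handling are guesses, not the original project's code):

// Hypothetical worker assumed by Example #3; not the original code.
// Each generated key carries two primes, which main feeds into check.
// rand here is crypto/rand.
func generate(keys chan<- *rsa.PrivateKey) {
	for {
		priv, err := rsa.GenerateKey(rand.Reader, 1024)
		if err != nil {
			log.Fatal(err)
		}
		keys <- priv
	}
}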
Example #4
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	maxRoutines := runtime.NumCPU()
	products := make(chan int)
	end := make(chan int)
	numRoutines := 0
	pandigital := 123456789
	i := minBase
	for {
		for i <= maxBase && numRoutines <= maxRoutines {
			i++
			numRoutines++
			go catenatedProductGenerator(i, products, end)
		}
		if i >= maxBase && numRoutines == 0 {
			break
		}
		select {
		case p := <-products:
			if isPandigital(p) && p > pandigital {
				pandigital = p
			}
		case <-end:
			numRoutines--
		}
	}
	fmt.Println(pandigital)
}
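
Example #4 depends on an isPandigital helper that is not included. A minimal sketch of one way to write it, assuming "pandigital" here means the digits 1 through 9 each appear exactly once:

// Hypothetical helper for Example #4; not the original code.
// Reports whether n uses each of the digits 1..9 exactly once.
func isPandigital(n int) bool {
	var seen [10]bool
	digits := 0
	for n > 0 {
		d := n % 10
		if d == 0 || seen[d] {
			return false // zeros and repeated digits disqualify n
		}
		seen[d] = true
		digits++
		n /= 10
	}
	return digits == 9
}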
Example #5
func TestRunMonteCarlo(t *testing.T) {

	CPUS := runtime.NumCPU()

	runtime.GOMAXPROCS(CPUS)

	n := 1000000

	workload := n / runtime.NumCPU()

	results := make(chan int64, CPUS)
	for c := 0; c < CPUS; c++ {
		go func(c, size int) {
			s := NewPercolationSimulator(size)
			var value int64
			for i := 0; i < workload; i++ {
				value += s.Simulate() // sum of steps
			}
			results <- int64(value)
			log.Printf("CPU %v returned steps %v out of workload %v", c, value, size*size*workload)
		}(c, 5)
	}
	var total int64
	for i := 0; i < CPUS; i++ {
		total += <-results
	}
	log.Printf("ran %v simulations, got result %v", n, float64(total)/float64(25*n))
}
Example #6
func main() {
	commands := map[string]command{
		"attack": attackCmd(),
		"report": reportCmd(),
	}

	flag.Usage = func() {
		fmt.Println("Usage: vegeta [globals] <command> [options]")
		for name, cmd := range commands {
			fmt.Printf("\n%s command:\n", name)
			cmd.fs.PrintDefaults()
		}
		fmt.Printf("\nglobal flags:\n  -cpus=%d Number of CPUs to use\n", runtime.NumCPU())
		fmt.Println(examples)
	}

	cpus := flag.Int("cpus", runtime.NumCPU(), "Number of CPUs to use")
	flag.Parse()

	runtime.GOMAXPROCS(*cpus)

	args := flag.Args()
	if len(args) == 0 {
		flag.Usage()
		os.Exit(1)
	}

	if cmd, ok := commands[args[0]]; !ok {
		log.Fatalf("Unknown command: %s", args[0])
	} else if err := cmd.fn(args[1:]); err != nil {
		log.Fatal(err)
	}
}
Example #7
func main() {
	flag.Parse()

	var err error
	if db, err = sql.Open("mysql", connectionString); err != nil {
		log.Fatalf("Error opening database: %s", err)
	}
	if err = db.Ping(); err != nil {
		log.Fatalf("Cannot connect to db: %s", err)
	}

	dbConnCount := maxConnectionCount
	if *prefork {
		dbConnCount = (dbConnCount + runtime.NumCPU() - 1) / runtime.NumCPU()
	}
	db.SetMaxIdleConns(dbConnCount)
	db.SetMaxOpenConns(dbConnCount * 2)

	worldSelectStmt = mustPrepare(db, "SELECT id, randomNumber FROM World WHERE id = ?")
	worldUpdateStmt = mustPrepare(db, "UPDATE World SET randomNumber = ? WHERE id = ?")
	fortuneSelectStmt = mustPrepare(db, "SELECT id, message FROM Fortune")

	s := &fasthttp.Server{
		Handler: mainHandler,
		Name:    "fasthttp",
	}
	ln := getListener()
	if err = s.Serve(ln); err != nil {
		log.Fatalf("Error when serving incoming connections: %s", err)
	}
}
Example #8
func main() {
	var defaultMaxprocs int
	var err error
	if defaultMaxprocs, err = strconv.Atoi(os.Getenv("GOMAXPROCS")); err != nil {
		defaultMaxprocs = runtime.NumCPU() * 2
	}
	var mode, dir string
	var port, maxprocs int
	flag.StringVar(&mode, "mode", "", "[v1|v2|v3|bench|durable-bench|example]")
	flag.StringVar(&dir, "dir", "", "database directory")
	flag.IntVar(&port, "port", 0, "listening port number")
	flag.IntVar(&maxprocs, "gomaxprocs", defaultMaxprocs, "GOMAXPROCS")
	flag.Parse()

	if mode == "" {
		flag.PrintDefaults()
		return
	}

	runtime.GOMAXPROCS(maxprocs)
	log.Printf("GOMAXPROCS is set to %d", maxprocs)

	if maxprocs < runtime.NumCPU() {
		log.Printf("GOMAXPROCS (%d) is less than number of CPUs (%d), this may affect performance. You can change it via environment variable GOMAXPROCS or by passing CLI parameter -gomaxprocs", maxprocs, runtime.NumCPU())
	}

	switch mode {
	case "v1":
		fallthrough
	case "v2":
		fallthrough
	case "v3":
		if dir == "" {
			log.Fatal("Please specify database directory, for example -dir=/tmp/db")
		}
		if port == 0 {
			log.Fatal("Please specify port number, for example -port=8080")
		}
		db, err := db.OpenDB(dir)
		if err != nil {
			log.Fatal(err)
		}
		if mode == "v1" {
			v1.Start(db, port)
		} else if mode == "v2" {
			v2.Start(db, port)
		} else if mode == "v3" {
			v3.Start(db, port)
		}
	case "bench":
		benchmark()
	case "durable-bench":
		durableBenchmark()
	case "example":
		embeddedExample()
	default:
		flag.PrintDefaults()
		return
	}
}
Example #9
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	f, _ := os.Create("./boqi.txt")
	file = f

	spider := sg.NewSpider("test", &MyProcess{})

	downloader := sg.NewDownloader()
	downloader.SetSleepTime(2 * time.Second)
	downloader.SetRetryMaxCount(0)
	downloader.RegisterMiddleware(mw.NewDefaultDownloaderMiddleware())

	scheduler := sg.NewScheduler()
	scheduler.SetUrlHeap(sg.NewUrlHeap(50))
	scheduler.RegisterMiddleware(mw.NewDefaultSchedulerMiddleware())

	pipeliner := sg.NewPipeliner()
	pipeliner.RegisterMiddleware(mw.NewDefaultPipelinerMiddleware())

	spider.SetThreadNum(runtime.NumCPU() * 2)
	spider.SetDownloader(downloader)
	spider.SetScheduler(scheduler)
	spider.SetPipeliner(pipeliner)

	spider.AddUrl("http://shop.boqii.com/")
	//spider.AddUrl("http://www.epet.com/")

	spider.SetTimeout(5 * time.Second)
	spider.Run()
}
Example #10
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	c := make(chan int, runtime.NumCPU())
	stats := Stats{0, 0, 0, 0, 0, 0, 0}

	nextNumber := int64(1000)
	for i := int64(0); i < NUM_CLIENTS; i++ {
		fmt.Println("Spawn inserter", i)
		go simulate(i, &nextNumber, &stats, c)
	}

	// Wait for interrupt signal
	sigs := make(chan os.Signal, 1)

	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	<-sigs

	// Print out statistics
	fmt.Println()
	fmt.Println("Write percentage: ", float64(stats.totalWrites)/float64(stats.totalWrites+stats.totalReads))
	fmt.Println("Empty read percentage: ", float64(stats.emptyReads)/float64(stats.totalReads))
	fmt.Println("Total documents read: ", stats.totalDocumentsRead)
	fmt.Println("Total documents written: ", stats.totalInsertions)
	fmt.Println("Total documents updated: ", stats.totalUpdates)
	fmt.Println("Total documents deleted: ", stats.totalDeletions)
}
Example #11
func NewContext(requestId string, datastore, systemstore datastore.Datastore,
	namespace string, readonly bool, maxParallelism int, namedArgs map[string]value.Value,
	positionalArgs value.Values, credentials datastore.Credentials,
	consistency datastore.ScanConsistency, vector timestamp.Vector, output Output) *Context {
	rv := &Context{
		requestId:      requestId,
		datastore:      datastore,
		systemstore:    systemstore,
		namespace:      namespace,
		readonly:       readonly,
		maxParallelism: maxParallelism,
		now:            time.Now(),
		namedArgs:      namedArgs,
		positionalArgs: positionalArgs,
		credentials:    credentials,
		consistency:    consistency,
		vector:         vector,
		output:         output,
		subplans:       nil,
		subresults:     nil,
	}

	if rv.maxParallelism <= 0 || rv.maxParallelism > runtime.NumCPU() {
		rv.maxParallelism = runtime.NumCPU()
	}

	return rv
}
Example #12
func (w *Walker) Walk() fusefs.Tree {
	wg := sync.WaitGroup{}

	paths := make(chan string, runtime.NumCPU())
	for i := 0; i < runtime.NumCPU(); i++ {
		go worker(w, paths, &wg)
	}

	walker := fs.Walk(w.Path)
	for walker.Step() {
		if err := walker.Err(); err != nil {
			continue
		}

		if walker.Stat().IsDir() {
			continue
		}

		wg.Add(1)
		paths <- walker.Path()
	}

	close(paths)
	wg.Wait()

	return w.tree
}
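
The worker goroutine that Walk feeds is not part of the listing. A rough sketch of the shape it likely has, assuming it drains the paths channel and signals the WaitGroup once per path (the per-file work, such as adding the file to w's tree, is project specific and omitted):

// Hypothetical sketch of the worker used by Walker.Walk; not the original code.
func worker(w *Walker, paths <-chan string, wg *sync.WaitGroup) {
	for path := range paths {
		// project-specific processing of path (e.g. building w's fuse tree)
		// would happen here before marking the item done
		_ = path
		wg.Done()
	}
}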
Example #13
func main() {

	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Println(err, string(in))
	}
	difficulty := os.Args[1]
	fmt.Fprintln(os.Stderr, "Called with difficulty:", difficulty)
	quit := make(chan bool)
	runtime.GOMAXPROCS(runtime.NumCPU())
	cores := int64(runtime.NumCPU() * 2)
	fmt.Fprintln(os.Stderr, "Running with", cores, "cores")
	//	var wg sync.WaitGroup
	go gitCount()
	//	wg.Add(1)
	for i := int64(0); i <= cores; i++ {
		rand.Seed(time.Now().UnixNano() * i)
		seed := rand.Intn(100000000000000)
		go gitMoney(difficulty, in, quit, seed)
	}
	for {
		select {
		case <-quit:
			return
		}
	}
	//	wg.Wait()
}
Example #14
func ParallelFor(n int, f func(p *P)) {
	// TODO: this formula could probably be more clever
	step := n / runtime.NumCPU() / 100
	if step < 10 {
		step = 10
	}

	gp := &GP{
		max:     int64(n),
		current: 0,
		step:    int64(step),
	}

	gp.wg.Add(runtime.NumCPU())

	for i := 0; i < runtime.NumCPU(); i++ {
		go func() {
			p := &P{
				gp: gp,
			}
			f(p)
			gp.wg.Done()
		}()
	}

	gp.wg.Wait()
}
Example #15
func main() {
	args := flag.Args()
	st, _ := os.Stdin.Stat()
	if st.Mode()&os.ModeCharDevice == 0 {
		args = append(args, "-")
	}
	if len(args) == 0 {
		flag.Usage()
	}
	sema := newSema(runtime.NumCPU())
	var wg sync.WaitGroup
	wg.Add(len(args))
	if !*checkArg {
		printf("# seed %d", *seedArg)
		if *use32 {
			printf("# 32bit")
		} else {
			printf("# 64bit")
		}
	}
	for _, fn := range args {
		if *checkArg {
			check(newSema(runtime.NumCPU()), fn)
		} else {
			sema.Run(func() { printHash(fn) })
		}
	}
	sema.WaitAndClose()
	if errored {
		os.Exit(1)
	}
}
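
The newSema helper in Example #15 is not shown. A common way to build this kind of bounded-concurrency runner is a buffered channel used as a counting semaphore; the sketch below is one possible implementation matching the calls above (newSema, Run, WaitAndClose), not the original code:

// Hypothetical counting-semaphore sketch; not the original newSema.
type sema struct {
	slots chan struct{}
	wg    sync.WaitGroup
}

func newSema(n int) *sema {
	return &sema{slots: make(chan struct{}, n)}
}

// Run schedules f in a goroutine, blocking while n functions are already in flight.
func (s *sema) Run(f func()) {
	s.wg.Add(1)
	s.slots <- struct{}{}
	go func() {
		defer func() {
			<-s.slots
			s.wg.Done()
		}()
		f()
	}()
}

// WaitAndClose blocks until every scheduled function has returned.
func (s *sema) WaitAndClose() {
	s.wg.Wait()
	close(s.slots)
}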
Example #16
func main() {
	fmt.Println(time.Now().Format("2006-01-02 15:04:05") + " Start")
	runtime.GOMAXPROCS(2 * runtime.NumCPU())
	//
	ncpuflag := flag.Int("ncpu", 0, "number of CPUs")
	flag.Parse()
	ncpu := *ncpuflag
	if ncpu <= 0 || ncpu > runtime.NumCPU() {
		ncpu = runtime.NumCPU()
	}
	fmt.Println("NumCPU", ncpu)
	//
	const N = 100000
	fmt.Println("Point#", N)
	ps := make([]pq.Point2q, N)
	for i := 0; i < N; i++ {
		//
		xf, yf := genI2()
		//
		xq, yq := pq.FtoQ(xf), pq.FtoQ(yf)
		ps[i] = pq.XYtoP(xq, yq)
	}
	//
	T := time.Now()
	mincircle := pq.ParMinCircle2q(ncpu, ps)
	TT := time.Since(T)
	xc, _ := mincircle.Center().X().Rat().Float64()
	yc, _ := mincircle.Center().Y().Rat().Float64()
	rc, _ := mincircle.Radius2().Rat().Float64()
	fmt.Println("X =", xc)
	fmt.Println("Y =", yc)
	fmt.Println("R =", math.Sqrt(rc))
	//
	fmt.Println(time.Now().Format("2006-01-02 15:04:05") + " Ready; T=" + TT.String())
}
Example #17
func main() {

	log.Println("Setting GOMAXPROCS = ", runtime.NumCPU())
	runtime.GOMAXPROCS(runtime.NumCPU())

	cmdStore := memory.NewCommandStore(10000) // config: max queued commands is 10k
	eventBus := memory.NewEventBus(false)     // config: false means not async

	cmdServiceOptions := services.CommandServiceOptions{
		TransactionsPerSecond: 100,
		BackOffDuration:       1 * time.Second,
	}
	cmdService := services.NewCommandService(cmdServiceOptions, cmdStore)

	framework := sourcing.NewFramework(memory.NewEventStore())

	refSvc := domainServices.NewDummyMeetingService()
	model.BindEvents(framework, refSvc)

	commands.BindHandlers(framework, cmdService, eventBus)

	api.Register(cmdStore)

	go cmdService.Start()
	defer cmdService.Stop()

	initSwagger()

	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal(err)
	}
}
Example #18
func main() {
	fmt.Printf("GOMAXPROCS=%v\n", runtime.GOMAXPROCS(-1))
	fmt.Printf("NumCPU=%v\n", runtime.NumCPU())
	runtime.GOMAXPROCS(runtime.NumCPU())
	fmt.Printf("GOMAXPROCS=%v\n", runtime.GOMAXPROCS(-1))
	max := 10
	intChan := make(chan int)

	for i := 1; i <= max; i++ {
		go func(j, max int) {
			fmt.Printf("go: %d\n", j)
			if j > 7 {
				fmt.Println("sleep 2 secs")
				time.Sleep(2 * time.Second) // work more
			} else {
				time.Sleep(1 * time.Millisecond)
			}
			intChan <- j
			if j >= max {
				close(intChan)
			}
		}(i, max)
	}

	for j := range intChan {
		fmt.Printf("main: %d\n", j)
	}
	fmt.Printf("GOMAXPROCS=%v\n", runtime.GOMAXPROCS(-1))
	fmt.Println("fin")
}
Example #19
// HashTree hashes count log entries from f and returns the tree hash. If
// status is non-nil then periodic status updates will be written to it.
func (f EntriesFile) HashTree(status chan<- OperationStatus, count uint64) (output [sha256.Size]byte, err error) {
	wg := new(sync.WaitGroup)
	entries := make(chan EntryAndPosition)

	mutex := new(sync.Mutex)
	state := &hashWorkersState{
		hashesChan: make(chan [32]byte, runtime.NumCPU()),
		cond:       sync.NewCond(mutex),
	}

	for i := 0; i < runtime.NumCPU(); i++ {
		wg.Add(1)
		const statusFraction = 1000
		go hashWorker(state, entries, status, uint64(i)*statusFraction, uint64(runtime.NumCPU())*statusFraction, count, wg)
	}

	wg.Add(1)
	go func() {
		hashTree(&output, sha256.New(), state.hashesChan, count)
		wg.Done()
	}()

	if err = f.readEntries(entries, 0); err != nil {
		return
	}
	wg.Wait()

	if status != nil {
		close(status)
	}

	return
}
Example #20
File: db.go Project: nzinfo/ledisdb
func (db *DB) initOptions(cfg *Config) {
	opts := NewOptions()

	opts.SetCreateIfMissing(true)

	if cfg.CacheSize <= 0 {
		cfg.CacheSize = 4 * 1024 * 1024
	}

	db.env = NewDefaultEnv()
	db.env.SetBackgroundThreads(runtime.NumCPU() * 2)
	db.env.SetHighPriorityBackgroundThreads(1)
	opts.SetEnv(db.env)

	db.cache = NewLRUCache(cfg.CacheSize)
	opts.SetCache(db.cache)

	// we must use a bloom filter
	db.filter = NewBloomFilter(defaultFilterBits)
	opts.SetFilterPolicy(db.filter)

	if !cfg.Compression {
		opts.SetCompression(NoCompression)
	} else {
		opts.SetCompression(SnappyCompression)
	}

	if cfg.BlockSize <= 0 {
		cfg.BlockSize = 4 * 1024
	}

	opts.SetBlockSize(cfg.BlockSize)

	if cfg.WriteBufferSize <= 0 {
		cfg.WriteBufferSize = 4 * 1024 * 1024
	}

	opts.SetWriteBufferSize(cfg.WriteBufferSize)

	if cfg.MaxOpenFiles < 1024 {
		cfg.MaxOpenFiles = 1024
	}

	opts.SetMaxOpenFiles(cfg.MaxOpenFiles)

	opts.SetMaxBackgroundCompactions(runtime.NumCPU()*2 - 1)
	opts.SetMaxBackgroundFlushes(1)

	opts.SetLevel0SlowdownWritesTrigger(16)
	opts.SetLevel0StopWritesTrigger(64)
	opts.SetTargetFileSizeBase(32 * 1024 * 1024)

	db.opts = opts

	db.readOpts = NewReadOptions()
	db.writeOpts = NewWriteOptions()

	db.iteratorOpts = NewReadOptions()
	db.iteratorOpts.SetFillCache(false)
}
Example #21
func main() {
	flag.Parse()
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	match := *name
	if *insensitive {
		match = "(?i)" + match
	}
	target, err := regexp.Compile(match)
	checkErr(err)
	kill := make(chan os.Signal, 1)
	signal.Notify(kill, os.Interrupt, os.Kill)
	runtime.GOMAXPROCS(runtime.NumCPU())
	log.Printf("Searching for \"%s\" with %d processors", *name, runtime.NumCPU())
	c := make(chan *Trial, 1000)
	for i := 0; i < runtime.NumCPU(); i++ {
		go search(c, target)
	}
	start := time.Now()
	for {
		select {
		case <-kill:
			num := atomic.LoadUint64(&count)
			log.Printf("Tested: %d seeds at %.2f/sec", num, float64(num)/time.Since(start).Seconds())
			return
		case trial := <-c:
			s, err := crypto.NewFamilySeed(trial.Seed)
			checkErr(err)
			log.Println(s, trial.Id)
		}
	}
}
Example #22
func main() {
	flag.Parse()
	logrus.SetLevel(logrus.DebugLevel)
	glog.WithField("numcpus", runtime.NumCPU()).Info("Number of cpus found.")

	procs := runtime.GOMAXPROCS(runtime.NumCPU())
	glog.WithField("prev_procs", procs).Info("Previous setting.")

	f, scanner := getScanner(*entities)
	defer f.Close()

	tr := trie.NewTrie()
	count := 0
	for scanner.Scan() {
		tr.Add(scanner.Text())
		count += 1
	}
	glog.WithField("count", count).Debug("Entities read.")
	grep(tr)

	/*
		var mstats runtime.MemStats
		runtime.ReadMemStats(&mstats)
		fmt.Printf("Memory Stats: %+v\n\n", mstats)
	*/
}
Example #23
func main() {
	in := make(chan int, 10)
	done := make(chan bool)

	worker := func(in chan int, done chan bool) {
		for {
			c := <-in
			if c == 0 {
				break
			}
			fetchStrokeXml(c)
		}
		done <- true
	}

	for i := 0; i < runtime.NumCPU(); i++ {
		go worker(in, done)
	}

	// 0xA440-0xC67E
	// 0xC940-0xF9D5
	os.Mkdir(baseDir, 0777)

	for code := 0xa440; code < 0xc67e; code++ {
		in <- code
	}

	for i := 0; i < runtime.NumCPU(); i++ {
		in <- 0
		<-done
		fmt.Printf("goroutine %d finished\n", i)
	}
}
Example #24
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	log.SetOutput(os.Stderr)
	flag.Parse()

	numModuli := *nummoduli
	numThreads := runtime.NumCPU()
	perThread := (numModuli + numThreads - 1) / numThreads
	var wg sync.WaitGroup
	ch := make(chan *big.Int, numThreads)

	for numModuli > 0 {
		if perThread > numModuli {
			perThread = numModuli
		}
		wg.Add(1)
		go genModuli(perThread, ch, &wg)
		numModuli -= perThread
	}
	go func() {
		wg.Wait()
		close(ch)
	}()
	for modulus := range ch {
		fmt.Printf("%x\n", modulus)
	}
}
Example #25
func bundlesFromRects(rects ...rtree.Rectangle) []*hilbertBundle {
	chunks := chunkRectangles(rects, int64(runtime.NumCPU()))
	bundleChunks := make([][]*hilbertBundle, len(chunks))
	var wg sync.WaitGroup
	wg.Add(len(chunks))

	for i := 0; i < runtime.NumCPU(); i++ {
		if len(chunks[i]) == 0 {
			bundleChunks[i] = []*hilbertBundle{}
			wg.Done()
			continue
		}
		go func(i int) {
			bundles := make([]*hilbertBundle, 0, len(chunks[i]))
			for _, r := range chunks[i] {
				h := h.Encode(getCenter(r))
				bundles = append(bundles, &hilbertBundle{hilbert(h), r})
			}
			bundleChunks[i] = bundles
			wg.Done()
		}(i)
	}

	wg.Wait()

	bundles := make([]*hilbertBundle, 0, len(rects))
	for _, bc := range bundleChunks {
		bundles = append(bundles, bc...)
	}

	return bundles
}
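
The chunkRectangles helper used at the top of Example #25 is not included. A minimal sketch, assuming it splits the input into n roughly equal contiguous chunks (possibly empty ones when there are fewer rectangles than CPUs, which matches the len(chunks[i]) == 0 check above):

// Hypothetical sketch of chunkRectangles; not the original code.
func chunkRectangles(rects []rtree.Rectangle, n int64) [][]rtree.Rectangle {
	chunks := make([][]rtree.Rectangle, n)
	size := (int64(len(rects)) + n - 1) / n // ceiling division
	for i := range chunks {
		lo, hi := int64(i)*size, int64(i+1)*size
		if lo > int64(len(rects)) {
			lo = int64(len(rects))
		}
		if hi > int64(len(rects)) {
			hi = int64(len(rects))
		}
		chunks[i] = rects[lo:hi]
	}
	return chunks
}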
Example #26
func main() {
	if runtime.NumCPU() < 2 {
		fmt.Println("need >= 2 CPUs")
		os.Exit(1)
	}
	runtime.GOMAXPROCS(runtime.NumCPU())
	var confused, good, bad itf
	pp := address(win)
	good = &safe{f: &pp}
	bad = &unsafe{}
	confused = good
	go func() {
		for {
			confused = bad
			confused = good
			i++
		}
	}()
	// we want confused to point to the type of unsafe (where func is)
	// but still have the value of safe (uint we control)
	for {
		confused.X()
		j++
	}
}
Example #27
File: rbm.go Project: sguzwf/mlf
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(runtime.NumCPU())

	// load the training set
	set := contrib.LoadLibSVMDataset(*libsvm_file, false)

	options := rbm.RBMOptions{
		NumHiddenUnits:       *hidden,
		NumCD:                *numCD,
		Worker:               runtime.NumCPU(),
		LearningRate:         *learning_rate,
		MaxIter:              *maxIter,
		BatchSize:            *batch_size,
		Delta:                *delta,
		UseBinaryHiddenUnits: *useBinary,
	}

	// create the trainer
	machine := rbm.NewRBM(options)

	machine.Train(set)

	machine.Write(*model)
}
Example #28
func TestMultipleThreads(t *testing.T) {
	// just make super-sure we're in a multi-threaded environment
	runtime.GOMAXPROCS(runtime.NumCPU())

	tester := newIntegrityTester()

	pool := newTestPool(100)
	threads := runtime.NumCPU()

	done := make(chan bool)

	run := func() {
		for range getTicker(500, 50) {
			if err := tester.check(pool.getID()); err != nil {
				t.Error(err.Error())
			}
		}

		done <- true
	}

	for i := 0; i < threads; i++ {
		go run()
	}

	for i := 0; i < threads; i++ {
		<-done
	}
}
Example #29
func newHerd(imageServerAddress string, objectServer objectserver.ObjectServer,
	logger *log.Logger) *Herd {
	var herd Herd
	herd.imageManager = images.New(imageServerAddress, logger)
	herd.objectServer = objectServer
	herd.computedFilesManager = filegenclient.New(objectServer, logger)
	herd.logger = logger
	herd.configurationForSubs.ScanSpeedPercent =
		constants.DefaultScanSpeedPercent
	herd.configurationForSubs.NetworkSpeedPercent =
		constants.DefaultNetworkSpeedPercent
	herd.configurationForSubs.ScanExclusionList =
		constants.ScanExcludeList
	herd.subsByName = make(map[string]*Sub)
	numPollSlots := uint(runtime.NumCPU()) * *pollSlotsPerCPU
	herd.pollSemaphore = make(chan struct{}, numPollSlots)
	herd.pushSemaphore = make(chan struct{}, runtime.NumCPU())
	numComputeSlots := runtime.NumCPU() - 1
	if numComputeSlots < 1 {
		numComputeSlots = 1
	}
	herd.computeSemaphore = make(chan struct{}, numComputeSlots)
	herd.currentScanStartTime = time.Now()
	return &herd
}
Example #30
func TestSTMConcurrentTestTree(t *testing.T) {
	hc := newTestNodeHandle("c", nil)
	tr := NewTransaction()
	if err := hc.insert(tr, "a"); err != nil {
		t.Errorf("%v should insert 'a' but got %v", hc, err)
	}
	if err := hc.insert(tr, "d"); err != nil {
		t.Errorf("%v should insert 'd' but got %v", hc, err)
	}
	if err := hc.insert(tr, "b"); err != nil {
		t.Errorf("%v should insert 'b' but got %v", hc, err)
	}
	if !tr.Commit() {
		t.Errorf("%v should commit", tr)
	}
	assertTreeStructure(t, hc, &cmpNode{"c", &cmpNode{"a", nil, &cmpNode{"b", nil, nil}}, &cmpNode{"d", nil, nil}})
	do := make(chan bool)
	done := make(chan bool)
	runtime.GOMAXPROCS(runtime.NumCPU())
	for i := 0; i < runtime.NumCPU(); i++ {
		go fiddleTestTree(t, fmt.Sprint(i), hc, do, done)
	}
	close(do)
	for i := 0; i < runtime.NumCPU(); i++ {
		<-done
	}
	assertTreeStructure(t, hc, &cmpNode{"c", &cmpNode{"a", nil, &cmpNode{"b", nil, nil}}, &cmpNode{"d", nil, nil}})
}