Example #1
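A benchmark of batched writes: GOMAXPROCS is raised to 2 so the writer and consumer goroutines can run in parallel, and restored to 1 when the run ends. The writer reserves sequences in blocks, fills the corresponding ring-buffer slots, and commits the whole range at once.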
func BenchmarkWriterAwaitMany(b *testing.B) {
	defer time.Sleep(DisruptorCleanup)
	runtime.GOMAXPROCS(2)
	defer runtime.GOMAXPROCS(1)

	controller := disruptor.
		Configure(RingBufferSize).
		WithConsumerGroup(SampleConsumer{}).
		Build()
	controller.Start()
	defer controller.Stop()
	writer := controller.Writer()

	iterations := int64(b.N)
	sequence := disruptor.InitialSequenceValue

	b.ReportAllocs()
	b.ResetTimer()

	for sequence < iterations {
		sequence += ReserveMany
		writer.Await(sequence)

		for i := sequence - ReserveManyDelta; i <= sequence; i++ {
			ringBuffer[i&RingBufferMask] = i
		}

		writer.Commit(sequence, sequence)
	}

	b.StopTimer()
}
Example #2
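An init hook that guarantees some parallelism: a single-CPU machine still gets GOMAXPROCS(2); otherwise the process gets one proc per CPU.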
func init() {
	if cpu := runtime.NumCPU(); cpu == 1 {
		runtime.GOMAXPROCS(2)
	} else {
		runtime.GOMAXPROCS(cpu)
	}
}
Example #3
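GOMAXPROCS(0) queries the current value without changing it; saving it and deferring a restore keeps the benchmark from leaking a changed setting to later tests.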
func BenchmarkFirst16(b *testing.B) {
	const n = 5000
	g := runtime.GOMAXPROCS(0)
	defer runtime.GOMAXPROCS(g)
	o := &Options{noClone: true}
	db, err := CreateTemp("_testdata", "temp", ".db", o)
	if err != nil {
		b.Fatal(err)
	}

	dbname := db.Name()
	defer func(n string) {
		db.Close()
		os.Remove(n)
		os.Remove(o._WAL)
	}(dbname)

	rng := fc()
	for i := 0; i < n; i++ {
		if err := db.Set(n2b(rng.Next()), n2b(rng.Next())); err != nil {
			b.Fatal(err)
		}
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		db.First()
	}
	b.StopTimer()
}
Example #4
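A concurrency test helper. The one-liner defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs)) works because GOMAXPROCS returns the previous setting: the inner call applies the new value immediately, and the defer restores the old one on exit.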
func doConcurrentTest(c *test.C, ct func()) {
	maxProcs, numReqs := 1, 150
	if testing.Short() {
		maxProcs, numReqs = 4, 50
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))

	var wg sync.WaitGroup
	wg.Add(numReqs)

	reqs := make(chan bool)
	defer close(reqs)

	for i := 0; i < maxProcs*2; i++ {
		go func() {
			for range reqs {
				ct()
				if c.Failed() {
					wg.Done()
					continue
				}
				wg.Done()
			}
		}()
	}

	for i := 0; i < numReqs; i++ {
		reqs <- true
	}

	wg.Wait()
}
Example #5
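A hammer test for 32-bit atomics: p goroutines pound on a shared uint32 with GOMAXPROCS pinned to p, and the final value is checked against n*p (except for the Swap variants, which do not accumulate).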
func TestHammer32(t *testing.T) {
	const p = 4
	n := 100000
	if testing.Short() {
		n = 1000
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))

	for name, testf := range hammer32 {
		c := make(chan int)
		var val uint32
		for i := 0; i < p; i++ {
			go func() {
				defer func() {
					if err := recover(); err != nil {
						t.Error(err.(string))
					}
					c <- 1
				}()
				testf(&val, n)
			}()
		}
		for i := 0; i < p; i++ {
			<-c
		}
		if !strings.HasPrefix(name, "Swap") && val != uint32(n)*p {
			t.Fatalf("%s: val=%d want %d", name, val, n*p)
		}
	}
}
Example #6
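Pins GOMAXPROCS to 4 for the duration of the hive test and drops it back to 1 afterwards.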
func runHiveTest(t *testing.T, opts ...HiveOption) {
	runtime.GOMAXPROCS(4)
	defer runtime.GOMAXPROCS(1)

	testHiveCh = make(chan interface{})
	defer func() {
		close(testHiveCh)
		testHiveCh = nil
	}()

	hive := newHiveForTest(opts...)
	app := hive.NewApp("TestHiveApp")
	app.Handle(MyMsg(0), &testHiveHandler{})

	go hive.Start()

	for i := 1; i <= msgs; i++ {
		hive.Emit(MyMsg(i))
	}

	for i := 0; i < handlers; i++ {
		<-testHiveCh
	}

	if err := hive.Stop(); err != nil {
		t.Errorf("cannot stop the hive %v", err)
	}
}
Example #7
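A server that grows GOMAXPROCS at runtime: it starts with one proc per CPU and doubles the limit whenever the live connection count catches up with the current thread count.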
func main() {
	var port = flag.Int("p", 0, "port to listen on")
	flag.Parse()

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		for range c {
			os.Exit(0)
		}
	}()

	numThreads = runtime.NumCPU()
	runtime.GOMAXPROCS(numThreads)

	ln, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
	if err != nil {
		log.Printf("failed to listen on port %d: %v", port, err)
		return
	}

	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Println("failed to accept connection:", err)
			continue
		}
		nc := atomic.AddInt32(&numConns, 1)
		if int(nc) >= numThreads {
			numThreads *= 2
			runtime.GOMAXPROCS(numThreads)
		}
		go handleConnection(conn)
	}
}
Example #8
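A stress test that bumps GOMAXPROCS to 16 for its duration using the set-and-defer-restore idiom.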
func TestRunOnNodesStress(t *testing.T) {
	n := 1000
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(16))
	body := `{"Id":"e90302","Path":"date","Args":[]}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(body))
	}))
	defer server.Close()
	id := "e90302"
	cluster, err := New(nil, &MapStorage{}, Node{Address: server.URL})
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < rand.Intn(10)+n; i++ {
		result, err := cluster.runOnNodes(func(n node) (interface{}, error) {
			return n.InspectContainer(id)
		}, &docker.NoSuchContainer{ID: id}, false)
		if err != nil {
			t.Fatal(err)
		}
		container := result.(*docker.Container)
		if container.ID != id {
			t.Errorf("InspectContainer(%q): Wrong ID. Want %q. Got %q.", id, id, container.ID)
		}
		if container.Path != "date" {
			t.Errorf("InspectContainer(%q): Wrong Path. Want %q. Got %q.", id, "date", container.Path)
		}
	}
}
Example #9
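Hammers handleNodeSuccess and Nodes from 100 goroutine pairs under GOMAXPROCS(10); failures are reported with t.Error, since t.Fatal may only be called from the test's own goroutine, and a WaitGroup joins the workers before the test returns.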
func TestClusterHandleNodeSuccessStressShouldntBlockNodes(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	c, err := New(&roundRobin{}, &MapStorage{})
	if err != nil {
		t.Fatal(err)
	}
	_, err = c.Register("addr-1", nil)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 100; i++ {
		go func() {
			err := c.handleNodeSuccess("addr-1")
			if err != nil && err != errHealerInProgress {
				t.Fatal(err)
			}
		}()
		go func() {
			nodes, err := c.Nodes()
			if err != nil {
				t.Fatal(err)
			}
			if len(nodes) != 1 {
				t.Fatalf("Expected nodes len to be 1, got %d", len(nodes))
			}
		}()
	}
}
Example #10
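Exercises five router operations per iteration from separate goroutines under GOMAXPROCS(4), joining them with a WaitGroup.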
func (s *S) TestAddRouteAndRemoteRouteAreSafe(c *check.C) {
	var wg sync.WaitGroup
	fake := fakeRouter{backends: make(map[string][]string)}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	for i := 1; i < 256; i++ {
		wg.Add(5)
		name := fmt.Sprintf("route-%d", i)
		addr, _ := url.Parse(fmt.Sprintf("http://10.10.10.%d", i))
		go func() {
			fake.AddBackend(name)
			wg.Done()
		}()
		go func() {
			fake.AddRoute(name, addr)
			wg.Done()
		}()
		go func() {
			fake.RemoveRoute(name, addr)
			wg.Done()
		}()
		go func() {
			fake.HasRoute(name, addr.String())
			wg.Done()
		}()
		go func() {
			fake.RemoveBackend(name)
			wg.Done()
		}()
	}
	wg.Wait()
}
Example #11
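Tests a setCPU helper that accepts absolute counts and percentages such as "50%"; each case checks the resulting GOMAXPROCS value and then restores the original setting for the next iteration.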
func TestSetCPU(t *testing.T) {
	currentCPU := runtime.GOMAXPROCS(-1)
	maxCPU := runtime.NumCPU()
	for i, test := range []struct {
		input     string
		output    int
		shouldErr bool
	}{
		{"1", 1, false},
		{"-1", currentCPU, true},
		{"0", currentCPU, true},
		{"100%", maxCPU, false},
		{"50%", int(0.5 * float32(maxCPU)), false},
		{"110%", currentCPU, true},
		{"-10%", currentCPU, true},
		{"invalid input", currentCPU, true},
		{"invalid input%", currentCPU, true},
		{"9999", maxCPU, false}, // over available CPU
	} {
		err := setCPU(test.input)
		if test.shouldErr && err == nil {
			t.Errorf("Test %d: Expected error, but there wasn't any", i)
		}
		if !test.shouldErr && err != nil {
			t.Errorf("Test %d: Expected no error, but there was one: %v", i, err)
		}
		if actual, expected := runtime.GOMAXPROCS(-1), test.output; actual != expected {
			t.Errorf("Test %d: GOMAXPROCS was %d but expected %d", i, actual, expected)
		}
		// teardown
		runtime.GOMAXPROCS(currentCPU)
	}
}
Example #12
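A scheduler test requiring all P goroutines to run simultaneously. GC is disabled for the test because the spin-waiting goroutines cannot be preempted (see the comment referencing issue #10958).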
func TestGoroutineParallelism(t *testing.T) {
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
Example #13
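Verifies that after a 200ms healing lock expires, exactly one of 50 competing goroutines can re-acquire it; GOMAXPROCS is raised to 100 to maximize contention.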
func testStorageLockNodeHealingAfterTimeout(storage cluster.Storage, t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(100))
	node := cluster.Node{Address: "addr-xyz"}
	defer storage.RemoveNode("addr-xyz")
	err := storage.StoreNode(node)
	assertIsNil(err, t)
	locked, err := storage.LockNodeForHealing("addr-xyz", true, 200*time.Millisecond)
	assertIsNil(err, t)
	locked, err = storage.LockNodeForHealing("addr-xyz", true, 200*time.Millisecond)
	assertIsNil(err, t)
	if locked {
		t.Fatal("Expected LockNodeForHealing to return false before timeout")
	}
	time.Sleep(300 * time.Millisecond)
	successCount := int32(0)
	wg := sync.WaitGroup{}
	wg.Add(50)
	for i := 0; i < 50; i++ {
		go func() {
			defer wg.Done()
			locked, err := storage.LockNodeForHealing("addr-xyz", true, 5*time.Second)
			assertIsNil(err, t)
			if locked {
				atomic.AddInt32(&successCount, 1)
			}
		}()
	}
	wg.Wait()
	if successCount != 1 {
		t.Fatalf("Expected LockNodeForHealing after timeout to lock only once, got: %d", successCount)
	}
}
Example #14
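A prime sieve that raises GOMAXPROCS to MAX_CONCURRENT for the parallel generation phase and drops it back to 1 before returning. The byte patterns pre-seed the bitmap as if the sieve had already run for 2, 3, and 5.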
func Primes(limit uint) *bs.BitSlice {
	length := int(limit / 8)
	if limit%8 > 0 {
		length++
	}

	list := make([]byte, uint(length))

	// Initialize for values 2, 3, and 5 already run.
	// Avoids the nasty small loops
	// 0123456789 10 11 12 13 14 15 16 17...
	// 0011010100  0  1  0  1  0  0  0  1
	list[0] = 0x35 // Special case, 2 is prime
	for i := 1; i < length; i += 3 {
		list[i] = 0x14
	}
	for i := 2; i < length; i += 3 {
		list[i] = 0x51
	}
	for i := 3; i < length; i += 3 {
		list[i] = 0x45
	}

	primes := bs.New(limit)
	primes.Arr = list
	runtime.GOMAXPROCS(MAX_CONCURRENT)
	generate(primes, limit)
	runtime.GOMAXPROCS(1)
	return primes
}
Example #15
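The shared-writer counterpart of the first benchmark: one sequence is reserved and committed per iteration, again with GOMAXPROCS pinned to 2.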
func BenchmarkSharedWriterReserveOne(b *testing.B) {
	defer time.Sleep(DisruptorCleanup)
	runtime.GOMAXPROCS(2)
	defer runtime.GOMAXPROCS(1)

	controller := disruptor.
		Configure(RingBufferSize).
		WithConsumerGroup(SampleConsumer{}).
		BuildShared()
	controller.Start()
	defer controller.Stop()
	writer := controller.Writer()

	iterations := int64(b.N)
	sequence := disruptor.InitialSequenceValue

	b.ReportAllocs()
	b.ResetTimer()

	for sequence < iterations {
		sequence = writer.Reserve(ReserveOne)
		ringBuffer[sequence&RingBufferMask] = sequence
		writer.Commit(sequence, sequence)
	}

	b.StopTimer()
}
Example #16
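Checks that 24 concurrent ReserveApp calls never push the quota past its limit of 10; GOMAXPROCS is pinned to NumCPU for the test.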
func (s *S) TestReserveAppIsSafe(c *gocheck.C) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))
	email := "*****@*****.**"
	user := &User{
		Email: email, Password: "******",
		Quota: quota.Quota{Limit: 10, InUse: 0},
	}
	err := user.Create()
	c.Assert(err, gocheck.IsNil)
	conn, err := db.Conn()
	c.Assert(err, gocheck.IsNil)
	defer conn.Close()
	defer conn.Users().Remove(bson.M{"email": user.Email})
	var wg sync.WaitGroup
	for i := 0; i < 24; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			ReserveApp(user)
		}()
	}
	wg.Wait()
	user, err = GetUserByEmail(email)
	c.Assert(err, gocheck.IsNil)
	c.Assert(user.Quota.InUse, gocheck.Equals, 10)
}
Example #17
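The batching variant of the shared-writer benchmark: each Reserve claims ReserveMany slots, and the whole range from previous+1 through current is committed together.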
func BenchmarkSharedWriterReserveMany(b *testing.B) {
	defer time.Sleep(DisruptorCleanup)
	runtime.GOMAXPROCS(2)
	defer runtime.GOMAXPROCS(1)

	controller := disruptor.
		Configure(RingBufferSize).
		WithConsumerGroup(SampleConsumer{}).
		BuildShared()
	controller.Start()
	defer controller.Stop()
	writer := controller.Writer()

	iterations := int64(b.N)

	b.ReportAllocs()
	b.ResetTimer()

	previous, current := disruptor.InitialSequenceValue, disruptor.InitialSequenceValue
	for current < iterations {
		current = writer.Reserve(ReserveMany)

		for i := previous + 1; i <= current; i++ {
			ringBuffer[i&RingBufferMask] = i
		}

		writer.Commit(previous+1, current)
		previous = current
	}

	b.StopTimer()
}
Example #18
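A race test for the limiter: 100 goroutines call Start concurrently, then another 100 run the returned done functions, all under GOMAXPROCS(10).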
func (s *LimiterSuite) TestLimiterAddDoneRace(c *check.C) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	l := s.limiter()
	l.Initialize(100)
	wg := sync.WaitGroup{}
	doneCh := make(chan func(), 100)
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			doneCh <- l.Start("n1")
		}()
	}
	wg.Wait()
	close(doneCh)
	c.Assert(l.Len("n1"), check.Equals, 100)
	for f := range doneCh {
		wg.Add(1)
		go func(f func()) {
			defer wg.Done()
			f()
		}(f)
	}
	wg.Wait()
	c.Assert(l.Len("n1"), check.Equals, 0)
}
Example #19
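A scanner daemon that ensures at least two procs before locking its goroutine to an OS thread (whose priority it later lowers), so the scan loop cannot monopolize the only proc.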
func scannerDaemon(rootDirectoryName string, cacheDirectoryName string,
	ctx *fsrateio.FsRateContext, fsChannel chan *FileSystem) {
	if runtime.GOMAXPROCS(0) < 2 {
		runtime.GOMAXPROCS(2)
	}
	runtime.LockOSThread()
	loweredPriority := false
	var oldFS FileSystem
	for {
		fs, err := scanFileSystem(rootDirectoryName, cacheDirectoryName, ctx,
			&oldFS)
		if err != nil {
			fmt.Printf("Error scanning\t%s\n", err)
		} else {
			oldFS.RegularInodeTable = fs.RegularInodeTable
			oldFS.SymlinkInodeTable = fs.SymlinkInodeTable
			oldFS.InodeTable = fs.InodeTable
			fsChannel <- fs
			if !loweredPriority {
				syscall.Setpriority(syscall.PRIO_PROCESS, 0, 10)
				loweredPriority = true
			}
		}
	}
}
Example #20
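Honors an explicit GOMAXPROCS environment variable: only when it is unset and the runtime still has a single proc does the server scale up to all CPUs.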
// Starts and runs the server given its configuration. (This function never returns.)
func RunServer(config *ServerConfig) {
	PrettyPrint = config.Pretty

	if os.Getenv("GOMAXPROCS") == "" && runtime.GOMAXPROCS(0) == 1 {
		cpus := runtime.NumCPU()
		if cpus > 1 {
			runtime.GOMAXPROCS(cpus)
			base.Log("Configured Go to use all %d CPUs; setenv GOMAXPROCS to override this", cpus)
		}
	}

	sc := NewServerContext(config)
	for _, dbConfig := range config.Databases {
		if err := sc.AddDatabaseFromConfig(dbConfig); err != nil {
			base.LogFatal("Error opening database: %v", err)
		}
	}

	base.Log("Starting admin server on %s", *config.AdminInterface)
	go func() {
		if err := http.ListenAndServe(*config.AdminInterface, CreateAdminHandler(sc)); err != nil {
			base.LogFatal("HTTP server failed: %v", err)
		}
	}()

	base.Log("Starting server on %s ...", *config.Interface)
	if err := http.ListenAndServe(*config.Interface, CreatePublicHandler(sc)); err != nil {
		base.LogFatal("HTTP server failed: %v", err)
	}
}
Example #21
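A later revision of the same server startup: the GOMAXPROCS logic is unchanged, with file-descriptor limits, an optional profiling server, and a config.serve helper added around it.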
// Starts and runs the server given its configuration. (This function never returns.)
func RunServer(config *ServerConfig) {
	PrettyPrint = config.Pretty

	base.Log("==== %s ====", LongVersionString)

	if os.Getenv("GOMAXPROCS") == "" && runtime.GOMAXPROCS(0) == 1 {
		cpus := runtime.NumCPU()
		if cpus > 1 {
			runtime.GOMAXPROCS(cpus)
			base.Log("Configured Go to use all %d CPUs; setenv GOMAXPROCS to override this", cpus)
		}
	}

	setMaxFileDescriptors(config.MaxFileDescriptors)

	sc := NewServerContext(config)
	for _, dbConfig := range config.Databases {
		if _, err := sc.AddDatabaseFromConfig(dbConfig); err != nil {
			base.LogFatal("Error opening database: %v", err)
		}
	}

	if config.ProfileInterface != nil {
		//runtime.MemProfileRate = 10 * 1024
		base.Log("Starting profile server on %s", *config.ProfileInterface)
		go func() {
			http.ListenAndServe(*config.ProfileInterface, nil)
		}()
	}

	base.Log("Starting admin server on %s", *config.AdminInterface)
	go config.serve(*config.AdminInterface, CreateAdminHandler(sc))
	base.Log("Starting server on %s ...", *config.Interface)
	config.serve(*config.Interface, CreatePublicHandler(sc))
}
Example #22
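A resolver stress test: the value returned by GOMAXPROCS(max) is the previous setting, which is restored via defer so even the early return path puts it back.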
func TestStress(t *testing.T) {
	domains := []string{"www.google.com.", "www.isc.org.", "www.outlook.com.", "miek.nl.", "doesnotexist.miek.nl."}
	l := len(domains)
	max := 8
	procs := runtime.GOMAXPROCS(max)
	// Restore the previous setting even on the early return below.
	defer runtime.GOMAXPROCS(procs)
	wg := new(sync.WaitGroup)
	wg.Add(max)
	u := New()
	defer u.Destroy()
	if err := u.ResolvConf("/etc/resolv.conf"); err != nil {
		return
	}
	for i := 0; i < max; i++ {
		go func() {
			for i := 0; i < 100; i++ {
				d := domains[int(dns.Id())%l]
				r, err := u.Resolve(d, dns.TypeA, dns.ClassINET)
				if err != nil {
					t.Log("failure to resolve: " + d)
					continue
				}
				if !r.HaveData && d != "doesnotexist.miek.nl." {
					t.Log("no data when resolving: " + d)
					continue
				}
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
Example #23
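A store/load hammer for the atomic package: eight goroutines per test function, with GOMAXPROCS pinned via the set-and-defer idiom.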
func TestHammerStoreLoad(t *testing.T) {
	var tests []func(*testing.T, unsafe.Pointer)
	tests = append(tests, hammerStoreLoadInt32, hammerStoreLoadUint32,
		hammerStoreLoadUintptr, hammerStoreLoadPointer)
	if test64err == nil {
		tests = append(tests, hammerStoreLoadInt64, hammerStoreLoadUint64)
	}
	n := int(1e6)
	if testing.Short() {
		n = int(1e4)
	}
	const procs = 8
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs))
	for _, tt := range tests {
		c := make(chan int)
		var val uint64
		for p := 0; p < procs; p++ {
			go func() {
				for i := 0; i < n; i++ {
					tt(t, unsafe.Pointer(&val))
				}
				c <- 1
			}()
		}
		for p := 0; p < procs; p++ {
			<-c
		}
	}
}
Example #24
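A table-driven variant of TestHammer32; entries with a nil function are skipped.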
func TestHammer32(t *testing.T) {
	const p = 4
	n := 100000
	if testing.Short() {
		n = 1000
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))

	for _, tt := range hammer32 {
		if tt.f == nil {
			continue
		}
		c := make(chan int)
		var val uint32
		for i := 0; i < p; i++ {
			go func() {
				tt.f(&val, n)
				c <- 1
			}()
		}
		for i := 0; i < p; i++ {
			<-c
		}
		if val != uint32(n)*p {
			t.Fatalf("%s: val=%d want %d", tt.name, val, n*p)
		}
	}
}
Example #25
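Another version of the node stress test, calling cluster.InspectContainer directly under GOMAXPROCS(16).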
func TestRunOnNodesStress(t *testing.T) {
	n := 1000
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(16))
	body := `{"Id":"e90302","Path":"date","Args":[]}`
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(body))
	}))
	defer server.Close()
	cluster, err := New(nil, Node{ID: "server0", Address: server.URL})
	if err != nil {
		t.Fatal(err)
	}
	id := "e90302"
	for i := 0; i < rand.Intn(10)+n; i++ {
		container, err := cluster.InspectContainer(id)
		if err != nil {
			t.Fatal(err)
		}
		if container.ID != id {
			t.Errorf("InspectContainer(%q): Wrong ID. Want %q. Got %q.", id, id, container.ID)
		}
		if container.Path != "date" {
			t.Errorf("InspectContainer(%q): Wrong Path. Want %q. Got %q.", id, "date", container.Path)
		}
	}
}
Example #26
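The 64-bit counterpart of the hammer test; it first consults test64err to skip platforms without 64-bit atomic support.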
func TestHammer64(t *testing.T) {
	if test64err != nil {
		t.Logf("Skipping 64-bit tests: %v", test64err)
		return
	}
	const p = 4
	n := 100000
	if testing.Short() {
		n = 1000
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))

	for _, tt := range hammer64 {
		if tt.f == nil {
			continue
		}
		c := make(chan int)
		var val uint64
		for i := 0; i < p; i++ {
			go func() {
				tt.f(&val, n)
				c <- 1
			}()
		}
		for i := 0; i < p; i++ {
			<-c
		}
		if val != uint64(n)*p {
			t.Fatalf("%s: val=%d want %d", tt.name, val, n*p)
		}
	}
}
Example #27
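A contention benchmark running one incrementing goroutine per CPU; the original GOMAXPROCS value is saved up front and restored at the end.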
func BenchmarkContention(b *testing.B) {
	b.StopTimer()

	var procs = runtime.NumCPU()
	var origProcs = runtime.GOMAXPROCS(procs)

	var db = NewLogeDB(NewMemStore())
	db.CreateType(NewTypeDef("counters", 1, &TestCounter{}))

	db.Transact(func(t *Transaction) {
		t.Set("counters", "contended", &TestCounter{Value: 0})
	}, 0)

	b.StartTimer()

	var group sync.WaitGroup
	for i := 0; i < procs; i++ {
		group.Add(1)
		go LoopIncrement(db, "contended", &group, b.N)
	}
	group.Wait()

	b.StopTimer()

	db.Transact(func(t *Transaction) {
		var target = b.N * procs
		var counter = t.Read("counters", "contended").(*TestCounter)
		if counter.Value != uint32(target) {
			b.Errorf("Wrong count for counter: %d / %d",
				counter.Value, target)
		}
	}, 0)

	runtime.GOMAXPROCS(origProcs)
}
Example #28
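The benchmark runner from a pre-Go 1 testing package (note the os.Error type): it sets GOMAXPROCS once per entry in cpuList and warns when a benchmark leaves the setting changed.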
// An internal function but exported because it is cross-package; part of the implementation
// of gotest.
func RunBenchmarks(matchString func(pat, str string) (bool, os.Error), benchmarks []InternalBenchmark) {
	// If no flag was specified, don't run benchmarks.
	if len(*matchBenchmarks) == 0 {
		return
	}
	for _, Benchmark := range benchmarks {
		matched, err := matchString(*matchBenchmarks, Benchmark.Name)
		if err != nil {
			println("invalid regexp for -test.bench:", err.String())
			os.Exit(1)
		}
		if !matched {
			continue
		}
		for _, procs := range cpuList {
			runtime.GOMAXPROCS(procs)
			b := &B{benchmark: Benchmark}
			benchName := Benchmark.Name
			if procs != 1 {
				benchName = fmt.Sprintf("%s-%d", Benchmark.Name, procs)
			}
			print(fmt.Sprintf("%s\t", benchName))
			r := b.run()
			print(fmt.Sprintf("%v\n", r))
			if p := runtime.GOMAXPROCS(-1); p != procs {
				print(fmt.Sprintf("%s left GOMAXPROCS set to %d\n", benchName, p))
			}
		}
	}
}
Example #29
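Four goroutines race to RemoveAll the same directory tree under GOMAXPROCS(16), with the previous value restored on exit; the test is skipped on Windows, where removing open directories fails.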
// Test that simultaneous RemoveAll do not report an error.
// As long as it gets removed, we should be happy.
func TestRemoveAllRace(t *testing.T) {
	if runtime.GOOS == "windows" {
		// Windows has very strict rules about things like
		// removing directories while someone else has
		// them open. The racing doesn't work out nicely
		// like it does on Unix.
		t.Skip("skipping on windows")
	}

	n := runtime.GOMAXPROCS(16)
	defer runtime.GOMAXPROCS(n)
	root, err := ioutil.TempDir("", "issue")
	if err != nil {
		t.Fatal(err)
	}
	mkdirTree(t, root, 1, 6)
	hold := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			<-hold
			err := RemoveAll(root)
			if err != nil {
				t.Errorf("unexpected error: %T, %q", err, err)
			}
		}()
	}
	close(hold) // let workers race to remove root
	wg.Wait()
}
Example #30
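Sets GOMAXPROCS to NumCPU, then reads the effective value back with GOMAXPROCS(-1) to size the worker pool and the task queue.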
func main() {
	log.SetFlags(0)
	flag.Parse()
	if flag.NArg() == 0 {
		log.Fatal("no input files")
	}

	// start many worker goroutines taking tasks from que
	runtime.GOMAXPROCS(runtime.NumCPU())
	ncpu := runtime.GOMAXPROCS(-1)
	if ncpu == 0 {
		ncpu = 1
	}
	que = make(chan task, ncpu)
	for i := 0; i < ncpu; i++ {
		go work()
	}

	// read all input files and put them in the task que
	for _, fname := range flag.Args() {
		log.Println(fname)
		slice, time, err := data.ReadFile(fname)
		if err != nil {
			log.Println(err)
			continue
		}
		wg.Add(1)
		que <- task{slice, time, util.NoExt(fname)}
	}

	// wait for work to finish
	wg.Wait()
}