Example #1
func TestDumpWithLotsOfMessagesAndLargeBuffer(t *testing.T) {
	bufferSize := uint(200)
	dump := NewDumpSink("myApp", bufferSize, loggertesthelper.Logger())
	dump.Run()

	for i := 0; i < 1000; i++ {
		logMessage := messagetesthelpers.NewMessage(t, strconv.Itoa(i), "appId")
		dump.Channel() <- logMessage
	}

	runtime.Gosched()

	logMessages := dumpAllMessages(dump, bufferSize)
	assert.Equal(t, len(logMessages), 200)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "800")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "801")

	for i := 1000; i < 2000; i++ {
		logMessage := messagetesthelpers.NewMessage(t, strconv.Itoa(i), "appId")
		dump.Channel() <- logMessage
	}

	runtime.Gosched()

	logMessages = dumpAllMessages(dump, bufferSize)
	assert.Equal(t, len(logMessages), 200)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "1800")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "1801")

	logMessages = dumpAllMessages(dump, bufferSize)
	assert.Equal(t, len(logMessages), 200)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "1800")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "1801")
}
Example #2
func TestConcurrency(t *testing.T) {

	uids := make([]UID, 0)
	results := make(chan UID, MAX)
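	// Note: MAX is presumably LOOPS*COUNT_PER_LOOP (defined elsewhere in the package),
	// i.e. the total number of UIDs produced by the goroutines below.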

	// Fire off a number of Goroutines to grab a bunch of UIDs each
	for x := 0; x < LOOPS; x++ {
		go func(results chan UID) {
			for x := 0; x < COUNT_PER_LOOP; x++ {
				results <- <-Next
				runtime.Gosched()
			}
		}(results)
		runtime.Gosched()
	}

	// Wait for results
	for x := 0; x < MAX; x++ {
		uids = append(uids, <-results)
	}

	// Make sure all results are unique
	for x, have := range uids {
		for y, found := range uids {
			if have == found && x != y {
				t.Errorf("Duplicate UID: Cases %d & %d, have %d found %d", x, y, have, found)
			}
		}
	}
}
Example #3
func TestDumpReturnsAllRecentMessagesToMultipleDumpRequestsWithMessagesCloningInInTheMeantime(t *testing.T) {
	bufferSize := uint(2)

	dump := NewDumpSink("myApp", bufferSize, loggertesthelper.Logger())
	dump.Run()

	logMessage := messagetesthelpers.NewMessage(t, "1", "appId")
	dump.Channel() <- logMessage
	logMessage = messagetesthelpers.NewMessage(t, "2", "appId")
	dump.Channel() <- logMessage
	logMessage = messagetesthelpers.NewMessage(t, "3", "appId")
	dump.Channel() <- logMessage

	runtime.Gosched()

	logMessages := dumpAllMessages(dump, bufferSize)
	assert.Equal(t, len(logMessages), 2)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "2")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "3")

	logMessage = messagetesthelpers.NewMessage(t, "4", "appId")
	dump.Channel() <- logMessage

	runtime.Gosched()

	logMessages = dumpAllMessages(dump, bufferSize)
	assert.Equal(t, len(logMessages), 2)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "3")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "4")
}
Example #4
func main() {
	u, err := user.Current()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		// Let the test pass.
		os.Exit(0)
	}

	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(2)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				user.Lookup(u.Username)
				runtime.Gosched()
			}
		}()
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				p := C.malloc(C.size_t(len(u.Username) + 1))
				runtime.Gosched()
				C.free(p)
			}
		}()
	}
	wg.Wait()
}
Example #5
func TestDumpWithLotsOfMessagesAndLargeBuffer(t *testing.T) {
	dump := NewDumpSink("myApp", 200, loggertesthelper.Logger(), make(chan Sink, 1), time.Second)

	go dump.Run()

	for i := 0; i < 1000; i++ {
		logMessage := messagetesthelpers.NewMessage(t, strconv.Itoa(i), "appId")
		dump.Channel() <- logMessage
	}

	runtime.Gosched()

	logMessages := dump.Dump()
	assert.Equal(t, len(logMessages), 200)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "800")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "801")

	for i := 1000; i < 2000; i++ {
		logMessage := messagetesthelpers.NewMessage(t, strconv.Itoa(i), "appId")
		dump.Channel() <- logMessage
	}

	runtime.Gosched()

	logMessages = dump.Dump()
	assert.Equal(t, len(logMessages), 200)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "1800")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "1801")

	logMessages = dump.Dump()
	assert.Equal(t, len(logMessages), 200)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "1800")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "1801")
}
Example #6
// Manage dumps and writes out the collected data.
func (self *Collector) Manage() {
	// reporter.Log.Println("************** opening output pipeline ************")

	// Mark the start so that self.Ctrl has a non-zero length
	self.CtrlW()

	// Start the file-output goroutine
	go self.SaveFile()

	// Exit the loop only once a quit notification has been received and the channel is empty
	for !(self.CtrlLen() == 0 && len(self.DataChan) == 0) {
		// reporter.Log.Println("************** breakpoint 8 ***********")
		select {
		case data := <-self.DataChan:
			self.dockerOne(data)

		default:
			// time.Sleep(1e7) // 10 ms
			runtime.Gosched()
		}
	}

	// Output any collected data that has not been written yet
	self.goOutput(self.Curr)

	// Wait for all output to finish
	for (self.outCount[0] > self.outCount[1]) || (self.outCount[2] > self.outCount[3]) || len(self.FileChan) > 0 {
		// time.Sleep(5e8)
		runtime.Gosched()
	}

	// Return the report
	self.Report()
}
Example #7
func TestAddPort(t *testing.T) {
	l := NewListener()
	gcnt := runtime.NumGoroutine()
	l.AddPort(56561)
	if 1 != len(l.ports) {
		t.Errorf("Length of ports array should be 1, got %d", len(l.ports))
	}
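	// Yield so any goroutine started by AddPort can run before the goroutine count is re-sampled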
	if runtime.Gosched(); gcnt >= runtime.NumGoroutine() {
		t.Errorf("Expected more than %d goroutines after AddPort, %d running", gcnt, runtime.NumGoroutine())
	}
	if listener, ok := l.ports[56561]; ok {
		if listener == nil {
			t.Errorf("Port listener should not be nil")
		}
	} else {
		t.Errorf("Listener should have entry for port 56561, got %v", l.ports)
	}
	gcnt = runtime.NumGoroutine()
	l.Close()
	if 0 != len(l.ports) {
		t.Errorf("After Close(), ports should have 0 entries, got %d", len(l.ports))
	}
	if runtime.Gosched(); gcnt <= runtime.NumGoroutine() {
		t.Errorf("Expected fewer than %d goroutines after Close(), %d running", gcnt, runtime.NumGoroutine())
	}
}
Example #8
func (tcp *TcpSer) listenAction() {

	defer func() {
		if err := recover(); err != nil {
			debug.PrintStack()
			glog.Error("handleInput error(%v)", err)
		} else {
		}
	}()

	for {
		conn, err := tcp.listener.Accept()

		if err != nil {
			if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
				log.Printf("NOTICE: temporary Accept() failure - %s", err.Error())
				runtime.Gosched()
				continue
			}
			// there's no direct way to detect this error because it is not exposed
			if !strings.Contains(err.Error(), "use of closed network connection") {
				log.Printf("ERROR: listener.Accept() - %s", err.Error())
			}
			break
		}

		if tcp.ConnCount+1 == tcp.MaxConn {
			glog.Warningf("Too many connections! Active Denial %s\n", conn.RemoteAddr().String())
			conn.Close()
			runtime.Gosched()
			continue
		}
		go tcp.createNewItem(tcp, conn)
	}
}
Example #9
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU()) // use all CPUs

	var data = []int{}
	var mutex = new(sync.Mutex)

	go func() { // in a goroutine,
		for i := 0; i < 1000; i++ { // repeat 1000 times:
			mutex.Lock()           // lock the mutex; start protecting the data slice
			data = append(data, 1) // append 1 to the data slice
			mutex.Unlock()         // unlock the mutex; stop protecting the data slice

			runtime.Gosched() // yield so other goroutines can use the CPU
		}
	}()

	go func() { // in a goroutine,
		for i := 0; i < 1000; i++ { // repeat 1000 times:
			mutex.Lock()           // lock the mutex; start protecting the data slice
			data = append(data, 1) // append 1 to the data slice
			mutex.Unlock()         // unlock the mutex; stop protecting the data slice

			runtime.Gosched() // yield so other goroutines can use the CPU
		}
	}()

	time.Sleep(2 * time.Second) // wait 2 seconds

	fmt.Println(len(data)) // print the length of the data slice
}
Example #10
func sleep() {
	<-ticker
	<-ticker
	runtime.Gosched()
	runtime.Gosched()
	runtime.Gosched()
}
Example #11
func TestDumpWithLotsOfMessages(t *testing.T) {
	dump := NewDumpSink("myApp", 2, loggertesthelper.Logger())

	for i := 0; i < 100; i++ {
		logMessage := messagetesthelpers.NewMessage(t, strconv.Itoa(i), "appId")
		dump.Channel() <- logMessage
	}

	runtime.Gosched()

	logMessages := dumpAllMessages(dump)
	assert.Equal(t, len(logMessages), 2)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "98")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "99")

	for i := 100; i < 200; i++ {
		logMessage := messagetesthelpers.NewMessage(t, strconv.Itoa(i), "appId")
		dump.Channel() <- logMessage
	}

	runtime.Gosched()

	logMessages = dumpAllMessages(dump)
	assert.Equal(t, len(logMessages), 2)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "198")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "199")

	logMessages = dumpAllMessages(dump)
	assert.Equal(t, len(logMessages), 2)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "198")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "199")
}
Example #12
// Manage dumps and writes out the collected data.
func (self *Collector) Manage() {
	// Mark the start so that self.Ctrl has a non-zero length
	self.CtrlW()

	// Start the file-output goroutine
	go self.SaveFile()

	// Exit the loop only once a quit notification has been received and the channel is empty
	for !(self.CtrlLen() == 0 && len(self.DataChan) == 0) {
		select {
		case data := <-self.DataChan:
			self.Dockers[self.Curr] = append(self.Dockers[self.Curr], data)
			// Check whether the cache block needs to be swapped
			if len(self.Dockers[self.Curr]) >= cache.Task.DockerCap {
				// Output the current Docker once it is full
				self.goOutput(self.Curr)
				// Swap in an empty Docker as the current one
				self.DockerQueue.Change()
			}
		default:
			runtime.Gosched()
		}
	}

	// Output any collected data that has not been written yet
	self.goOutput(self.Curr)

	// Wait for all output to finish
	for (self.outCount[0] > self.outCount[1]) || (self.outCount[2] > self.outCount[3]) || len(self.FileChan) > 0 {
		runtime.Gosched()
	}

	// Return the report
	self.Report()
}
Example #13
// Push adds a request to the queue; it is safe for concurrent use.
func (self *Matrix) Push(req *request.Request) {
	if sdl.checkStatus(status.STOP) {
		return
	}

	// Serialize access to reduce the backlog of pending requests
	self.Lock()
	defer self.Unlock()

	// The request limit has been reached; stop running this rule
	if self.maxPage >= 0 {
		return
	}

	// While paused, wait to reduce the backlog of pending requests
	waited := false
	for sdl.checkStatus(status.PAUSE) {
		waited = true
		runtime.Gosched()
	}
	if waited && sdl.checkStatus(status.STOP) {
		return
	}

	// While resource usage is too high, wait to reduce the backlog of pending requests
	waited = false
	for self.resCount > sdl.avgRes() {
		waited = true
		runtime.Gosched()
	}
	if waited && sdl.checkStatus(status.STOP) {
		return
	}

	// Requests that must not be downloaded more than once
	if !req.IsReloadable() {
		hash := makeUnique(req)
		// Exit if a successful record already exists
		if self.hasHistory(hash) {
			return
		}
		// Add to the temporary history
		self.insertTempHistory(hash)
	}

	var priority = req.GetPriority()

	// Initialize this priority's queue for the spider
	if _, found := self.reqs[priority]; !found {
		self.priorities = append(self.priorities, priority)
		sort.Ints(self.priorities) // sort in ascending order
		self.reqs[priority] = []*request.Request{}
	}

	// Add the request to the queue
	self.reqs[priority] = append(self.reqs[priority], req)

	// Roughly cap the number of queued requests; under concurrency the count may exceed maxPage
	atomic.AddInt64(&self.maxPage, 1)
}
Example #14
func TestOutputChannel(t *testing.T) {
	o := &testOutput{}
	oc := NewOutputChannel(o)
	defer oc.Close()
	oc.input <- F{"foo": "bar"}
	assert.Nil(t, o.last)
	runtime.Gosched()
	assert.Equal(t, F{"foo": "bar"}, F(o.last))

	// Trigger error path
	buf := bytes.NewBuffer(nil)
	oldCritialLogger := critialLogger
	defer func() { critialLogger = oldCritialLogger }()
	critialLogger = func(v ...interface{}) {
		fmt.Fprint(buf, v...)
	}
	o.err = errors.New("some error")
	oc.input <- F{"foo": "bar"}
	// Wait for log output to go through
	runtime.Gosched()
	for i := 0; i < 10 && buf.Len() == 0; i++ {
		time.Sleep(10 * time.Millisecond)
	}
	assert.Contains(t, buf.String(), "cannot write log message: some error")
}
Example #15
func TestClosePort(t *testing.T) {
	l := NewListener()
	l.AddPort(56561)
	gcnt := runtime.NumGoroutine()
	l.ClosePort(56561)
	// ClosePort is not synchronized, so give it some time (on mac, dialog pops up)
	for i := 0; i < 100 && 0 != len(l.ports); i++ {
		time.Sleep(1e6)
	}
	if runtime.Gosched(); 0 != len(l.ports) {
		t.Errorf("After ClosePort(), ports should have 0 entries, got %d", len(l.ports))
	}
	if runtime.Gosched(); gcnt <= runtime.Goroutines() {
		t.Errorf("Expected fewer than %d goroutines after ClosePort(), %d running", gcnt,
			runtime.Goroutines())
	}
	l.Close()
	if 0 != len(l.ports) {
		t.Errorf("After Close(), ports should have 0 entries, got %d", len(l.ports))
	}
	if runtime.Gosched(); gcnt <= runtime.Goroutines() {
		t.Errorf("Expected fewer than %d goroutines after Close(), %d running", gcnt,
			runtime.Goroutines())
	}
}
Example #16
func BenchmarkContention_2G_2P(b *testing.B) {
	runtime.GOMAXPROCS(2)
	var lock IntObject = 34
	var owner1 IntObject = 33
	var owner2 IntObject = 35

	m := NewLockManager()

	c := make(chan bool)
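	// c signals that the background goroutine has finished its b.N acquire/release iterations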
	go func() {
		for n := 0; n < b.N; n++ {
			m.Acquire(owner1, lock, EXCLUSIVE, ManualDuration, 0)
			runtime.Gosched()
			m.Release(owner1, lock, false)
			runtime.Gosched()
		}
		c <- true
	}()
	for n := 0; n < b.N; n++ {
		m.Acquire(owner2, lock, EXCLUSIVE, ManualDuration, 0)
		runtime.Gosched()
		m.Release(owner2, lock, false)
		runtime.Gosched()
	}
	<-c
}
Example #17
func main() {

	// state will be a map
	var state = make(map[int]int)

	// mutex will synchronize across to state
	var mutex = &sync.Mutex{}

	// ops will count how many operations we perform against the state
	var ops int64 = 0

	// start 100 goroutines to execute repeated reads against the state
	for r := 0; r < 100; r++ {
		go func() {
			total := 0
			// for each read pick a key to access
			// Lock() the mutex to ensure exclusive access to state
			// read value at chosen key
			// Unlock() mutex
			// increment ops count
			for {
				key := rand.Intn(5)
				mutex.Lock()
				total += state[key]
				mutex.Unlock()
				atomic.AddInt64(&ops, 1)
				// explicitly yield after each operation
				// to ensure goroutine doesn't starve scheduler
				runtime.Gosched()
			}
		}()
	}

	// start 10 goroutines to simulate writes
	for w := 0; w < 10; w++ {
		go func() {
			for {
				key := rand.Intn(5)
				val := rand.Intn(100)
				mutex.Lock()
				state[key] = val
				mutex.Unlock()
				atomic.AddInt64(&ops, 1)
				runtime.Gosched()
			}
		}()
	}

	// allow 10 goroutines to work on state and mutex for 1 second
	time.Sleep(time.Second)

	// take and report final ops count
	opsFinal := atomic.LoadInt64(&ops)
	fmt.Println("ops:", opsFinal)

	// final lock state, show ending point
	mutex.Lock()
	fmt.Println("state:", state)
	mutex.Unlock()
}
Example #18
func (t *lockstepBusySim) loop(e *entity) {
	lastnotify := t.notify
	t.wg.Done()
	runtime.Gosched()
	for {
		notify := atomic.LoadUint32(&t.notify)
		backoff := 0
		for notify == lastnotify {
			if backoff < 5 {
				runtime.Gosched()
			} else {
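				// time.Sleep interprets a bare integer constant as nanoseconds, so this is only a 1µs pause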
				time.Sleep(1000)
			}
			backoff++
			notify = atomic.LoadUint32(&t.notify)
		}
		lastnotify = notify
		fn := t.state
		if fn == nil {
			t.wg.Done()
			break
		}
		fn(e)
		t.wg.Done()
	}
}
Example #19
func main() {
	runtime.GOMAXPROCS(1)
	wg := new(sync.WaitGroup)
	wg.Add(2)

	go func() {
		defer wg.Done()

		for i := 0; i < 6; i++ {
			fmt.Println(i)
			if i == 3 {
				runtime.Gosched()
			}
		}
	}()

	go func() {
		defer wg.Done()
		for i := 0; i < 10; i++ {
			if i == 3 {
				runtime.Gosched()
			}
			fmt.Println("Hell0 World!")
		}
	}()

	wg.Wait()
}
Example #20
func TestDumpReturnsAllRecentMessagesToMultipleDumpRequestsWithMessagesCloningInInTheMeantime(t *testing.T) {
	dump := NewDumpSink("myApp", 2, loggertesthelper.Logger(), make(chan Sink, 1), time.Second)

	go dump.Run()

	logMessage := messagetesthelpers.NewMessage(t, "1", "appId")
	dump.Channel() <- logMessage
	logMessage = messagetesthelpers.NewMessage(t, "2", "appId")
	dump.Channel() <- logMessage
	logMessage = messagetesthelpers.NewMessage(t, "3", "appId")
	dump.Channel() <- logMessage

	runtime.Gosched()

	logMessages := dump.Dump()
	assert.Equal(t, len(logMessages), 2)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "2")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "3")

	logMessage = messagetesthelpers.NewMessage(t, "4", "appId")
	dump.Channel() <- logMessage

	runtime.Gosched()

	logMessages = dump.Dump()
	assert.Equal(t, len(logMessages), 2)
	assert.Equal(t, string(logMessages[0].GetLogMessage().GetMessage()), "3")
	assert.Equal(t, string(logMessages[1].GetLogMessage().GetMessage()), "4")
}
Example #21
func sleep() {
	<-ticker
	<-ticker
	runtime.Gosched()
	runtime.Gosched()
	runtime.Gosched()
}
Example #22
func handleSid_Auth_Info(bot *Bot, bncs *BncsPacket) {
	logonType := bncs.ReadDword()
	serverToken := bncs.ReadDword()
	bncs.ReadDword() // Udp value
	mpqFiletime := make([]int, 2)
	mpqFiletime[0] = bncs.ReadDword()
	mpqFiletime[1] = bncs.ReadDword()
	mpqFilename := bncs.ReadString()
	valueString := bncs.ReadByteArray()

	switch logonType {
	case 0x00:
		log.Printf("[%s] Logon type: Broken Sha-1\n", bot.ProfileName)
	case 0x01:
		log.Printf("[%s] Logon type: Nls version 1\n", bot.ProfileName)
	case 0x02:
		log.Printf("[%s] Logon type: Nls version 2\n", bot.ProfileName)
	default:
		log.Printf("[%s] Logon type: unknown [0x%x]\n", bot.ProfileName, logonType)
	}

	SendBnls_Cdkey(bot, serverToken, bot.Config.Cdkey)
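	// Busy-wait, yielding each iteration, until the cd-key response has populated bot.CdkeyData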
	for bot.CdkeyData == nil {
		runtime.Gosched()
	}
	bot.CdkeyData.ServerToken = serverToken

	SendBnls_VersionCheckEx2(bot, mpqFiletime, mpqFilename, valueString)
	for bot.ExeInfo == nil {
		runtime.Gosched()
	}

	SendSid_Auth_Check(bot)
}
Example #23
func main() {
	const N = 10000
	st := new(runtime.MemStats)
	memstats := new(runtime.MemStats)
	runtime.ReadMemStats(st)
	for i := 0; i < N; i++ {
		c := make(chan int, 10)
		_ = c
		if i%100 == 0 {
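			// periodically force garbage collection so the unreferenced channels are reclaimed promptly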
			for j := 0; j < 4; j++ {
				runtime.GC()
				runtime.Gosched()
				runtime.GC()
				runtime.Gosched()
			}
		}
	}

	runtime.ReadMemStats(memstats)
	obj := memstats.HeapObjects - st.HeapObjects
	if obj > N/5 {
		fmt.Println("too many objects left:", obj)
		os.Exit(1)
	}
}
Example #24
// Test for deadlock where a query fails while another one is queued
func TestConnExecDeadlock(t *testing.T) {
	l, _ := newTestListenerConn(t)
	defer l.Close()

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		l.ExecSimpleQuery("SELECT pg_sleep(60)")
		wg.Done()
	}()
	runtime.Gosched()
	go func() {
		l.ExecSimpleQuery("SELECT 1")
		wg.Done()
	}()
	// give the two goroutines some time to get into position
	runtime.Gosched()
	// calls Close on the net.Conn; equivalent to a network failure
	l.Close()

	var done int32 = 0
	go func() {
		time.Sleep(10 * time.Second)
		if atomic.LoadInt32(&done) != 1 {
			panic("timed out")
		}
	}()
	wg.Wait()
	atomic.StoreInt32(&done, 1)
}
Example #25
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU()) // use all CPUs

	var data = []int{} // create an int slice

	go func() { // in a goroutine,
		for i := 0; i < 1000; i++ { // repeat 1000 times:
			data = append(data, 1) // append 1 to the data slice

			runtime.Gosched() // yield so other goroutines can use the CPU
		}
	}()

	go func() { // in a goroutine,
		for i := 0; i < 1000; i++ { // repeat 1000 times:
			data = append(data, 1) // append 1 to the data slice

			runtime.Gosched() // yield so other goroutines can use the CPU
		}
	}()

	time.Sleep(2 * time.Second) // wait 2 seconds

	fmt.Println(len(data)) // print the length of the data slice
}
Example #26
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	var mutex = new(sync.Mutex)
	var data = []int{}

	go func() {
		for i := 0; i < 1000; i++ {
			mutex.Lock()
			data = append(data, 1)
			mutex.Unlock()

			runtime.Gosched()
		}
	}()

	go func() {
		for i := 0; i < 1000; i++ {
			mutex.Lock()
			data = append(data, 1)
			mutex.Unlock()

			runtime.Gosched()
		}
	}()

	time.Sleep(2 * time.Second)
	fmt.Println(len(data))
}
Example #27
// wait for outstanding tests to finish
func wait() {
	runtime.Gosched()
	for nproc != 0 {
		runtime.Gosched()
	}
}
Example #28
func wait(buf *bytes.Buffer) {
	runtime.Gosched()
	tickC <- time.Now()
	runtime.Gosched()
	for i := 0; i < 10 && buf.Len() == 0; i++ {
		time.Sleep(10 * time.Millisecond)
	}
}
Example #29
func TestManyForget(t *testing.T) {
	runtime.GOMAXPROCS(4)

	const npaxos = 3
	var pxa []*Paxos = make([]*Paxos, npaxos)
	var pxh []string = make([]string, npaxos)
	defer cleanup(pxa)

	for i := 0; i < npaxos; i++ {
		pxh[i] = port("manygc", i)
	}
	for i := 0; i < npaxos; i++ {
		pxa[i] = Make(pxh, i, nil)
	}

	fmt.Printf("Test: Lots of forgetting ...\n")

	const maxseq = 30
	done := false

	go func() {
		for done == false {
			seq := (rand.Int() % maxseq)
			i := (rand.Int() % npaxos)
			v := rand.Int()
			pxa[i].Start(seq, v)
			runtime.Gosched()
		}
	}()

	go func() {
		for done == false {
			seq := (rand.Int() % maxseq)
			i := (rand.Int() % npaxos)
			if seq >= pxa[i].Min() {
				decided, _ := pxa[i].Status(seq)
				if decided {
					pxa[i].Done(seq)
				}
			}
			runtime.Gosched()
		}
	}()

	time.Sleep(5 * time.Second)
	done = true
	time.Sleep(1 * time.Second)

	for seq := 0; seq < maxseq; seq++ {
		for i := 0; i < npaxos; i++ {
			if seq >= pxa[i].Min() {
				pxa[i].Status(seq)
			}
		}
	}

	fmt.Printf("  ... Passed\n")
}
Example #30
func TestRegistryEvents(t *testing.T) {
	mdc := newMockClient()
	setupStubs(mdc, func() {
		registry, _ := docker.NewRegistry(10 * time.Second)
		defer registry.Stop()
		runtime.Gosched()

		check := func(want []docker.Container) {
			test.Poll(t, 100*time.Millisecond, want, func() interface{} {
				return allContainers(registry)
			})
		}

		{
			mdc.Lock()
			mdc.apiContainers = []client.APIContainers{apiContainer1, apiContainer2}
			mdc.containers["wiff"] = container2
			mdc.Unlock()
			mdc.send(&client.APIEvents{Status: docker.StartEvent, ID: "wiff"})
			runtime.Gosched()

			want := []docker.Container{&mockContainer{container1}, &mockContainer{container2}}
			check(want)
		}

		{
			mdc.Lock()
			mdc.apiContainers = []client.APIContainers{apiContainer1}
			delete(mdc.containers, "wiff")
			mdc.Unlock()
			mdc.send(&client.APIEvents{Status: docker.DieEvent, ID: "wiff"})
			runtime.Gosched()

			want := []docker.Container{&mockContainer{container1}}
			check(want)
		}

		{
			mdc.Lock()
			mdc.apiContainers = []client.APIContainers{}
			delete(mdc.containers, "ping")
			mdc.Unlock()
			mdc.send(&client.APIEvents{Status: docker.DieEvent, ID: "ping"})
			runtime.Gosched()

			want := []docker.Container{}
			check(want)
		}

		{
			mdc.send(&client.APIEvents{Status: docker.DieEvent, ID: "doesntexist"})
			runtime.Gosched()

			want := []docker.Container{}
			check(want)
		}
	})
}