Example #1
func TestNoRaceCond(t *testing.T) { // tsan's test02
	ch := make(chan bool, 1)
	var x int = 0
	var mu sync.Mutex
	var cond *sync.Cond = sync.NewCond(&mu)
	var condition int = 0
	var waker func()
	waker = func() {
		x = 1
		mu.Lock()
		condition = 1
		cond.Signal()
		mu.Unlock()
	}

	var waiter func()
	waiter = func() {
		go waker()
		cond.L.Lock()
		for condition != 1 {
			cond.Wait()
		}
		cond.L.Unlock()
		x = 2
		ch <- true
	}
	go waiter()
	<-ch
}
Example #2
File: main.go Project: dnesting/alife
func printLoop(ch <-chan []grid2d.Update, g grid2d.Grid, cns *census.DirCensus, cond *sync.Cond, numUpdates *int64, clearScreen bool) {
	// Try to keep rendering smooth.
	runtime.LockOSThread()

	for range ch {
		if clearScreen {
			// TODO(dnesting): use termcap/terminfo to do this more portably.
			fmt.Print("\033[H") // assumed ANSI cursor-home escape
		}
		term.PrintWorld(os.Stdout, g)
		fmt.Println()
		if clearScreen {
			fmt.Print("\033[J") // assumed ANSI clear-to-end-of-screen escape
		}

		// Write some summary stats after the rendering.
		fmt.Printf("%d updates\n", atomic.LoadInt64(numUpdates))
		fmt.Printf("%d/%d orgs (%d/%d species, %d recorded)\n", cns.Count(), cns.CountAllTime(), cns.Distinct(), cns.DistinctAllTime(), cns.NumRecorded())
		if loc := g.Get(0, 0); loc != nil {
			fmt.Printf("random: %v\n", loc.Value())
		}

		// If we're running with --sync, signal to any goroutines waiting on a rendering that
		// it's OK for them to continue again.
		if cond != nil {
			cond.Broadcast()
		}
	}
}
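A minimal sketch of the waiting side that the --sync comment above refers to (hypothetical code, not from dnesting/alife): the renderer broadcasts after each frame, and a producer blocks on the Cond until its predicate says enough frames have been printed. The frame counter used as the predicate is an assumption of this sketch.

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	mu := &sync.Mutex{}
	rendered := sync.NewCond(mu)
	frames := 0 // predicate guarded by mu: frames printed so far

	// Stand-in for printLoop: after each "render", bump the counter and broadcast.
	go func() {
		for i := 0; i < 3; i++ {
			time.Sleep(10 * time.Millisecond)
			fmt.Println("rendered frame")
			mu.Lock()
			frames++
			mu.Unlock()
			rendered.Broadcast()
		}
	}()

	// Waiting side: block until three frames have been rendered.
	mu.Lock()
	for frames < 3 {
		rendered.Wait()
	}
	mu.Unlock()
	fmt.Println("all frames rendered; safe to continue")
}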
Example #3
File: commands.go Project: cfstras/pcm
// Returns a function that will return any commands, or
// nil when no other commands are defined. It will also signal the passed
// Condition when there are no commands left and the Condition is not nil.
func GetCommandFunc(c *types.Connection, startWait *sync.Cond, moreCommands CommandFunc) CommandFunc {
	commands := make(chan string, 5) // command1-5
	//if c.Options.PostCommands { // official PCM ignores this flag... yay.
	for _, v := range []string{
		c.Commands.Command1, c.Commands.Command2, c.Commands.Command3,
		c.Commands.Command4, c.Commands.Command5} {
		if strings.TrimSpace(v) != "" {
			commands <- v
		}
	}
	//}
	return func() *string {
		select {
		case v := <-commands:
			return &v
		default:
			if c := moreCommands(); c != nil {
				return c
			} else if startWait != nil {
				startWait.Broadcast()
			}
			return nil
		}
	}
}
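A caller-side sketch of the contract described above (hypothetical, not from cfstras/pcm): the returned function is polled until it yields nil, and the Broadcast lets a goroutine blocked on startWait proceed. Unlike the snippet, the sketch also guards the wait with a done flag so the broadcast cannot be missed.

package main

import (
	"fmt"
	"sync"
)

// CommandFunc is a stand-in for the project's type: it returns the next
// command, or nil when none are left.
type CommandFunc func() *string

func main() {
	startWait := sync.NewCond(&sync.Mutex{})
	done := false // wait predicate, guarded by startWait.L (an addition of this sketch)

	queue := []string{"echo one", "echo two"}
	next := CommandFunc(func() *string {
		if len(queue) > 0 {
			v := queue[0]
			queue = queue[1:]
			return &v
		}
		startWait.L.Lock()
		done = true
		startWait.Broadcast() // what the snippet does once no commands are left
		startWait.L.Unlock()
		return nil
	})

	// A goroutine that must not start until every command has been handed out.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		startWait.L.Lock()
		for !done {
			startWait.Wait()
		}
		startWait.L.Unlock()
		fmt.Println("commands drained; starting")
	}()

	for cmd := next(); cmd != nil; cmd = next() {
		fmt.Println("run:", *cmd)
	}
	wg.Wait()
}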
Example #4
File: timeline.go Project: pgm/gospoke
func (t *RealTimer) SleepUntil(cond *sync.Cond, timestamp time.Time) {
	now := t.Now()
	delay := timestamp.Sub(now)
	if delay > 0 {
		tt := time.AfterFunc(time.Duration(delay), func() { cond.Broadcast() })
		t.Sleep(cond)
		tt.Stop()
	}
}
Example #5
func main() {
	var mtx sync.Mutex
	var cnd *sync.Cond
	var cnds [N]*sync.Cond
	var mtxs [N]sync.Mutex
	cnd = sync.NewCond(&mtx)
	for i := 0; i < N; i++ {
		cnds[i] = sync.NewCond(&mtxs[i])
	}
	for i := 0; i < N; i++ {
		go func(me int, m *sync.Mutex, c1 *sync.Cond, c2 *sync.Cond) {
			fmt.Printf("Hello, world. %d\n", me)
			if me == 0 {
				cnd.Signal()
			}
			for j := 0; j < 10000000; j++ {
				m.Lock()
				c1.Wait()
				m.Unlock()
				c2.Signal()
			}
			if me == N-1 {
				cnd.Signal()
			}
		}(i, &mtxs[i], cnds[i], cnds[(i+1)%N])
	}
	mtx.Lock()
	cnd.Wait()
	mtx.Unlock()
	cnds[0].Signal()
	mtx.Lock()
	cnd.Wait()
	mtx.Unlock()
}
Example #6
File: inproc.go Project: skycoin/skycoin
func newCondDeadline(cond *sync.Cond) (ret *condDeadline) {
	ret = &condDeadline{
		timer: time.AfterFunc(math.MaxInt64, func() {
			mu.Lock()
			ret._exceeded = true
			mu.Unlock()
			cond.Broadcast()
		}),
	}
	ret.setDeadline(time.Time{})
	return
}
Example #7
File: net.go Project: jappre/mutation
// Schedule configures the timer to call c.Broadcast() when the timer expires.
// It returns true if the caller should defer a call to t.Stop().
func (t *timer) Schedule(c *sync.Cond) bool {
	if t.now != 0 {
		if t.Timer == nil {
			t.Timer = time.AfterFunc(time.Duration(t.rem), func() {
				// c.L must be held to guarantee that the caller is waiting
				c.L.Lock()
				defer c.L.Unlock()
				c.Broadcast()
			})
			return true
		}
		t.Reset(time.Duration(t.rem))
	}
	return false
}
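The same Broadcast-on-expiry idea in a self-contained form (a hypothetical helper, not the jappre/mutation API): a one-shot time.AfterFunc wakes the Cond so a Wait cannot block forever, and the caller defers Stop just as the doc comment above asks when Schedule returns true.

package main

import (
	"fmt"
	"sync"
	"time"
)

// waitWithTimeout blocks until pred() is true or the timeout fires.
// pred is only ever called with c.L held.
func waitWithTimeout(c *sync.Cond, pred func() bool, d time.Duration) bool {
	timedOut := false
	t := time.AfterFunc(d, func() {
		c.L.Lock() // hold c.L so the waiter either sees the flag or is already parked in Wait
		timedOut = true
		c.L.Unlock()
		c.Broadcast()
	})
	defer t.Stop() // mirrors the "defer a call to t.Stop()" contract when Schedule returns true

	c.L.Lock()
	defer c.L.Unlock()
	for !pred() && !timedOut {
		c.Wait()
	}
	return pred()
}

func main() {
	c := sync.NewCond(&sync.Mutex{})
	ready := false

	go func() {
		time.Sleep(50 * time.Millisecond)
		c.L.Lock()
		ready = true
		c.L.Unlock()
		c.Broadcast()
	}()

	fmt.Println("ready:", waitWithTimeout(c, func() bool { return ready }, time.Second))
}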
Example #8
func monitorChannel(channelID string, api *slack.Client, messageChan <-chan *slack.MessageEvent, msgQueue *utility.Queue, monitorSize, doLogging bool, histCond *sync.Cond, threadWait *sync.WaitGroup) {
	defer threadWait.Done()
	defer logging.Log.Noticef("(%s) Finished monitoring channel", channelID)

	logging.Log.Infof("(%s) Waiting for history", channelID)
	histCond.L.Lock()
	histCond.Wait()
	histCond.L.Unlock()

	logging.Log.Debugf("(%s) Message queue has %v items", channelID, msgQueue.Len())

	logging.Log.Infof("(%s) Monitor size: %v", channelID, monitorSize)
	logging.Log.Infof("(%s) Do logging: %v", channelID, doLogging)

	logging.Log.Infof("(%s) Waiting for events", channelID)
monitorLoop:
	for {
		select {
		case message, chanOpen := <-messageChan:
			if !chanOpen {
				logging.Log.Errorf("(%s) Incoming message channel is closed", channelID)
				break monitorLoop
			}
			/*
				logging.Log.Debugf("(%s) Received message", channelID)
				logging.Log.Debugf("\tUser: %s", message.User)
				logging.Log.Debugf("\tChannel: %s", message.Channel)
				logging.Log.Debugf("\tTimestamp: %s", message.Timestamp)
				logging.Log.Debugf("\tText: %s", message.Text)
				logging.Log.Debugf("\tSubtype: %s", message.SubType)
			*/

			msgQueue.Push(message.Timestamp)
			toDelete := msgQueue.Poll().(string)
			//logging.Log.Debugf("(%s) Adding to queue: %s; Removing from queue: %s", channelID, message.Timestamp, toDelete)
			api.DeleteMessage(channelID, toDelete)

		}
	}

	return
}
Example #9
// TODO: return immediately after sending the request
func post(url string, payload io.Reader, cond *sync.Cond, ratio int, ready chan bool) {
	cond.L.Lock()
	ready <- true
	cond.Wait()
	cond.L.Unlock()

	log.Printf("++post: %s", url)

	u := url

	if !strings.HasPrefix(u, "http://") {
		u = "http://" + u
	}

	if !strings.HasSuffix(u, "/") {
		u = u + "/"
	}

	u = u + "download"

	resp, err := http.Post(u, "application/json", payload)
	if err != nil {
		log.Printf("post to %s error: %s", url, err.Error())
		return
	}

	if resp.StatusCode != 200 {
		log.Printf("post to %s error, status code %d", url, resp.StatusCode)
		return
	}

	log.Printf("--post: %s", url)

	for i := 0; i < ratio; i++ {
		cond.L.Lock()
		cond.Signal()
		cond.L.Unlock()
	}
}
Example #10
func finishWaiting(cond *sync.Cond, waitFinished <-chan struct{}) {
	runtime.Gosched()
	select {
	// avoid creating a timer if we can help it...
	case <-waitFinished:
		return
	default:
		const spinTimeout = 100 * time.Millisecond
		t := time.NewTimer(spinTimeout)
		defer t.Stop()
		for {
			runtime.Gosched()
			cond.Broadcast()
			select {
			case <-waitFinished:
				return
			case <-t.C:
				t.Reset(spinTimeout)
			}
		}
	}
}
Example #11
func sendRequest(numReq int64, st *stats, ch chan bool, cd *sync.Cond) {
	cd.L.Lock()
	for !status {
		cd.Wait()
	}
	resTime := []int64{}
	totReq := numReq
	for numReq > 0 {
		start := time.Now()
		resp, err := http.Get(queryuri)
		end := int64(time.Since(start))
		resTime = append(resTime, end)
		numReq--
		if err != nil {
			fmt.Println("Error is err", err, "response is ", resp, "restime is", resTime)
			st.failures++
			continue // resp is nil when err != nil; skip reading the body
		}
		ioutil.ReadAll(resp.Body)
		resp.Body.Close()
	}
	//time.Sleep(1*time.Second)
	var tot, max, min int64
	for i, val := range resTime {
		tot += val
		if val > max {
			max = val
		}
		if i == 0 || val < min {
			min = val // seed min from the first sample; starting at zero would never update it
		}
	}
	st.totalTime = tot
	st.avgTime = tot / totReq
	st.maxTime = max
	st.minTime = min
	cd.L.Unlock()
	ch <- true
}
Example #12
func TestRaceCond(t *testing.T) { // tsan's test50
	ch := make(chan bool, 2)

	var x int = 0
	var mu sync.Mutex
	var condition int = 0
	var cond *sync.Cond = sync.NewCond(&mu)

	var waker func() = func() {
		<-time.After(1e5)
		x = 1
		mu.Lock()
		condition = 1
		cond.Signal()
		mu.Unlock()
		<-time.After(1e5)
		mu.Lock()
		x = 3
		mu.Unlock()
		ch <- true
	}

	var waiter func() = func() {
		mu.Lock()
		for condition != 1 {
			cond.Wait()
		}
		mu.Unlock()
		x = 2
		ch <- true
	}
	x = 0
	go waker()
	go waiter()
	<-ch
	<-ch
}
Example #13
func (tasks Tasks) RunTasksWithTimeout(stopTime time.Time) {
	finishedTasks := 0
	waitCond := new(sync.Cond)
	waitCond.L = new(sync.Mutex)

	done := make(chan bool, 1)
	current_running := 0
	for _, task := range tasks {

		waitCond.L.Lock()
		for current_running >= numWorkers {
			waitCond.Wait()
		}
		current_running++
		waitCond.L.Unlock()

		go func(task *Task) {
			todo := len(tasks) - finishedTasks - (numWorkers - 1)
			if todo < 1 {
				todo = 1
			}
			duration := stopTime.Sub(time.Now()) / time.Duration(todo)
			dprint(duration)
			task.stopTime = time.Now().Add(duration)
			go task.Run()
			<-task.ch
			if task.timed_out {
				dprint("Timeout occured.")
			} else {
				dprint("Finished normally.")
			}

			waitCond.L.Lock()
			current_running--
			finishedTasks++
			if finishedTasks == len(tasks) {
				done <- true
			}
			waitCond.Signal()
			waitCond.L.Unlock()
		}(task)
	}
	<-done
}
Example #14
File: issue9110.go Project: Xiahl1990/go
func main() {
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1000000) // only GC when we ask for GC

	var stats, stats1, stats2 runtime.MemStats

	release := func() {}
	for i := 0; i < 20; i++ {
		if i == 10 {
			// Should be warmed up by now.
			runtime.ReadMemStats(&stats1)
		}

		c := make(chan int)
		for i := 0; i < 10; i++ {
			go func() {
				select {
				case <-c:
				case <-c:
				case <-c:
				}
			}()
		}
		time.Sleep(1 * time.Millisecond)
		release()

		close(c) // let select put its sudog's into the cache
		time.Sleep(1 * time.Millisecond)

		// pick up top sudog
		var cond1 sync.Cond
		var mu1 sync.Mutex
		cond1.L = &mu1
		go func() {
			mu1.Lock()
			cond1.Wait()
			mu1.Unlock()
		}()
		time.Sleep(1 * time.Millisecond)

		// pick up next sudog
		var cond2 sync.Cond
		var mu2 sync.Mutex
		cond2.L = &mu2
		go func() {
			mu2.Lock()
			cond2.Wait()
			mu2.Unlock()
		}()
		time.Sleep(1 * time.Millisecond)

		// put top sudog back
		cond1.Broadcast()
		time.Sleep(1 * time.Millisecond)

		// drop cache on floor
		runtime.GC()

		// release cond2 after select has gotten to run
		release = func() {
			cond2.Broadcast()
			time.Sleep(1 * time.Millisecond)
		}
	}

	runtime.GC()

	runtime.ReadMemStats(&stats2)

	if int(stats2.HeapObjects)-int(stats1.HeapObjects) > 20 { // normally at most 1 or 2; was 300 with leak
		print("BUG: object leak: ", stats.HeapObjects, " -> ", stats1.HeapObjects, " -> ", stats2.HeapObjects, "\n")
	}
}
Example #15
func cleanHistory(channelID string, api *slack.Client, msgQueue *utility.Queue, histSize int, histCond *sync.Cond, threadWait *sync.WaitGroup) {
	defer threadWait.Done()
	defer histCond.Signal()

	histCond.L.Lock()
	defer histCond.L.Unlock()

	logging.Log.Infof("(%s) Starting cleanHistory", channelID)
	histParams := slack.NewHistoryParameters()
	histParams.Inclusive = true

	histCountMax := 1000

	// build history with histSize messages
	logging.Log.Infof("(%s) Building history with %v messages", channelID, histSize)
	var history *slack.History
	var histErr error
	nRemaining := histSize
	for nRemaining > 0 {
		if nRemaining > histCountMax {
			histParams.Count = histCountMax
		} else {
			histParams.Count = nRemaining
		}

		history, histErr = api.GetChannelHistory(channelID, histParams)
		if histErr != nil {
			logging.Log.Errorf("(%s) Unable to get the channel history: %v", channelID, histErr)
			return
		}

		iLastMsg := len(history.Messages) - 1
		//logging.Log.Debug("0: %v, %v: %v", history.Messages[0].Timestamp, iLastMsg, history.Messages[iLastMsg].Timestamp)

		logging.Log.Debugf("(%s) In skip loop; obtained history with %v messages", channelID, len(history.Messages))

		for iMsg := iLastMsg; iMsg >= 0; iMsg-- {
			msgQueue.Push(history.Messages[iMsg].Timestamp)
			//logging.Log.Debugf("(%s) Pushing to queue: %s", channelID, history.Messages[iMsg].Timestamp)
		}

		if !history.HasMore {
			return
		}
		histParams.Latest = history.Messages[iLastMsg].Timestamp
		histParams.Inclusive = false
		nRemaining -= histCountMax
	}

	histParams.Count = histCountMax
	nDeleted := 0
	for history.HasMore {
		history, histErr = api.GetChannelHistory(channelID, histParams)
		if histErr != nil {
			logging.Log.Errorf("(%s) Unable to get the channel history: %v", channelID, histErr)
			return
		}

		logging.Log.Debugf("(%s) Deleting %v items (latest: %v)", channelID, len(history.Messages), history.Latest)

		for _ /*iMsg*/, message := range history.Messages {
			//logging.Log.Debugf("(%s) Deleting: %s", channelID, message.Timestamp)
			_, _ /*respChan, respTS,*/, respErr := api.DeleteMessage(channelID, message.Timestamp)
			if respErr != nil {
				logging.Log.Warningf("(%s) Unable to delete message: %v", channelID, respErr)
			}
			//logging.Log.Debugf("(%s) Deletion response: %s, %s, %v", channelID, respChan, respTS, respErr)
			nDeleted++
		}
		histParams.Latest = history.Messages[len(history.Messages)-1].Timestamp
	}
	logging.Log.Noticef("(%s) Deleted %v messages", channelID, nDeleted)

	return
}
Example #16
File: timeline.go Project: pgm/gospoke
func (t RealTimer) Sleep(cond *sync.Cond) {
	cond.Wait()
}
Example #17
// heartbeat runs an infinite process of sending test messages to the connected
// node. All heartbeats to all nodes are tied to each other, so if one
// heartbeat routine exits, all heartbeat routines will exit, because in that
// case orgalorg can't guarantee the global lock.
func heartbeat(
	period time.Duration,
	node *distributedLockNode,
	canceler *sync.Cond,
) {
	abort := make(chan struct{}, 0)

	// Internal goroutine that listens for the abort broadcast and finishes the
	// current heartbeat process.
	go func() {
		canceler.L.Lock()
		canceler.Wait()
		canceler.L.Unlock()

		abort <- struct{}{}
	}()

	// finish terminates the current goroutine and sends the abort broadcast to
	// all connected goroutines.
	finish := func(code int) {
		canceler.L.Lock()
		canceler.Broadcast()
		canceler.L.Unlock()

		<-abort

		if remote, ok := node.runner.(*runcmd.Remote); ok {
			tracef("%s closing connection", node.String())
			err := remote.CloseConnection()
			if err != nil {
				warningf(
					"%s",
					hierr.Errorf(
						err,
						"%s error while closing connection",
						node.String(),
					),
				)
			}
		}

		exit(code)
	}

	ticker := time.Tick(period)

	// Infinite heartbeat loop. It sends a heartbeat message, waits a fraction
	// of the send timeout, and tries to receive the heartbeat response.
	// If no response is received, the heartbeat process aborts.
	for {
		_, err := io.WriteString(node.connection.stdin, heartbeatPing+"\n")
		if err != nil {
			errorf(
				"%s",
				hierr.Errorf(
					err,
					`%s can't send heartbeat`,
					node.String(),
				),
			)

			finish(2)
		}

		select {
		case <-abort:
			return

		case <-ticker:
			// pass
		}

		ping, err := bufio.NewReader(node.connection.stdout).ReadString('\n')
		if err != nil {
			errorf(
				"%s",
				hierr.Errorf(
					err,
					`%s can't receive heartbeat`,
					node.String(),
				),
			)

			finish(2)
		}

		if strings.TrimSpace(ping) != heartbeatPing {
			errorf(
				`%s received unexpected heartbeat ping: '%s'`,
				node.String(),
				ping,
			)

			finish(2)
		}

		tracef(`%s heartbeat`, node.String())
	}
}
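A self-contained sketch of the shared-cancellation pattern the heartbeat comment describes (hypothetical; distributedLockNode and the orgalorg helpers are not reproduced): every worker waits on, and can broadcast, the same canceler Cond, so the first failure wakes all the others. The sketch adds a canceled flag as the wait predicate, which the original leaves implicit.

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	canceler := sync.NewCond(&sync.Mutex{})
	canceled := false // guarded by canceler.L; wait predicate so a Broadcast is never lost

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()

			abort := make(chan struct{})
			// Listener: one Broadcast on canceler aborts this worker too.
			go func() {
				canceler.L.Lock()
				for !canceled {
					canceler.Wait()
				}
				canceler.L.Unlock()
				close(abort)
			}()

			for tick := 0; ; tick++ {
				select {
				case <-abort:
					fmt.Printf("worker %d: aborted\n", id)
					return
				case <-time.After(10 * time.Millisecond):
				}
				if id == 0 && tick == 3 {
					// Simulated failure: cancel every worker, including the listeners.
					fmt.Printf("worker %d: failing, broadcasting cancel\n", id)
					canceler.L.Lock()
					canceled = true
					canceler.Broadcast()
					canceler.L.Unlock()
					return
				}
			}
		}(i)
	}
	wg.Wait()
}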
Example #18
func Test_Journal_Concurrency(t *testing.T) {
	logging.InitForTesting(logging.NOTICE)
	logger := logging.MustGetLogger("journal")
	tempDir, err := ioutil.TempDir("", "journal")
	if err != nil {
		t.FailNow()
	}
	defer os.RemoveAll(tempDir)
	factory := NewFileJournalGroupFactory(
		logger,
		rand.NewSource(0),
		func() time.Time { return time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC) },
		".log",
		os.FileMode(0644),
		16,
	)
	dummyWorker := &DummyWorker{}
	tempFile := filepath.Join(tempDir, "test")
	t.Log(tempFile)
	journalGroup, err := factory.GetJournalGroup(tempFile, dummyWorker)
	if err != nil {
		t.FailNow()
	}
	journal := journalGroup.GetFileJournal("key")
	defer journal.Dispose()
	cond := sync.Cond{L: &sync.Mutex{}}
	count := int64(0)
	outerWg := sync.WaitGroup{}
	doFlush := func() {
		journal.Flush(func(chunk JournalChunk) interface{} {
			defer chunk.Dispose()
			reader, err := chunk.Reader()
			if err != nil {
				t.Log(err.Error())
				t.FailNow()
			}
			defer reader.Close()
			c, err := countLines(reader)
			if err != nil {
				t.Log(err.Error())
				t.FailNow()
			}
			atomic.AddInt64(&count, int64(c))
			return nil
		})
	}
	for j := 0; j < 10; j += 1 {
		outerWg.Add(1)
		go func(j int) {
			defer outerWg.Done()
			wg := sync.WaitGroup{}
			starting := sync.WaitGroup{}
			for i := 0; i < 10; i += 1 {
				wg.Add(1)
				starting.Add(1)
				go func(i int) {
					defer wg.Done()
					starting.Done()
					cond.L.Lock()
					cond.Wait()
					cond.L.Unlock()
					for k := 0; k < 3; k += 1 {
						data := fmt.Sprintf("test%d\n", i)
						err = journal.Write([]byte(data))
						if err != nil {
							t.Log(err.Error())
							t.FailNow()
						}
					}
				}(i)
			}
			starting.Wait()
			cond.Broadcast()
			runtime.Gosched()
			doFlush()
			wg.Wait()
		}(j)
	}
	outerWg.Wait()
	doFlush()
	if count != 300 {
		t.Logf("%d", count)
		t.Fail()
	}
}