func TestNoRaceCond(t *testing.T) { // tsan's test02
	ch := make(chan bool, 1)
	var x int = 0
	var mu sync.Mutex
	var cond *sync.Cond = sync.NewCond(&mu)
	var condition int = 0
	var waker func()
	waker = func() {
		x = 1
		mu.Lock()
		condition = 1
		cond.Signal()
		mu.Unlock()
	}

	var waiter func()
	waiter = func() {
		go waker()
		cond.L.Lock()
		for condition != 1 {
			cond.Wait()
		}
		cond.L.Unlock()
		x = 2
		ch <- true
	}
	go waiter()
	<-ch
}

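// The test above follows the canonical sync.Cond protocol: Wait is called
// with the lock held, inside a loop that re-checks the predicate, because
// Wait can return before the predicate holds. A minimal, self-contained
// sketch of that pattern (the names mu, cond, and ready are illustrative,
// not taken from the test above):

package main

import "sync"

func main() {
	mu := &sync.Mutex{}
	cond := sync.NewCond(mu)
	ready := false

	go func() {
		mu.Lock()
		ready = true
		cond.Signal() // wake one waiter; the lock is still held here
		mu.Unlock()
	}()

	mu.Lock()
	for !ready { // loop, not if: re-check after every wakeup
		cond.Wait() // atomically unlocks mu, sleeps, relocks on wakeup
	}
	mu.Unlock()
}
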
func main() {
	var mtx sync.Mutex
	var cnd *sync.Cond
	var cnds [N]*sync.Cond
	var mtxs [N]sync.Mutex

	cnd = sync.NewCond(&mtx)
	for i := 0; i < N; i++ {
		cnds[i] = sync.NewCond(&mtxs[i])
	}

	for i := 0; i < N; i++ {
		go func(me int, m *sync.Mutex, c1 *sync.Cond, c2 *sync.Cond) {
			fmt.Printf("Hello, world. %d\n", me)
			if me == 0 {
				cnd.Signal()
			}
			for j := 0; j < 10000000; j++ {
				m.Lock()
				c1.Wait()
				m.Unlock()
				c2.Signal()
			}
			if me == N-1 {
				cnd.Signal()
			}
		}(i, &mtxs[i], cnds[i], cnds[(i+1)%N])
	}

	mtx.Lock()
	cnd.Wait()
	mtx.Unlock()

	cnds[0].Signal()

	mtx.Lock()
	cnd.Wait()
	mtx.Unlock()
}

func (tasks Tasks) RunTasksWithTimeout(stopTime time.Time) {
	finishedTasks := 0
	waitCond := new(sync.Cond)
	waitCond.L = new(sync.Mutex)
	done := make(chan bool, 1)
	current_running := 0
	for _, task := range tasks {
		waitCond.L.Lock()
		for current_running >= numWorkers {
			waitCond.Wait()
		}
		current_running++
		// Snapshot finishedTasks while the lock is still held; reading it
		// from the spawned goroutine without the lock would be a data race.
		todo := len(tasks) - finishedTasks - (numWorkers - 1)
		waitCond.L.Unlock()
		go func(task *Task) {
			if todo < 1 {
				todo = 1
			}
			duration := stopTime.Sub(time.Now()) / time.Duration(todo)
			dprint(duration)
			task.stopTime = time.Now().Add(duration)
			go task.Run()
			<-task.ch
			if task.timed_out {
				dprint("Timeout occurred.")
			} else {
				dprint("Finished normally.")
			}
			waitCond.L.Lock()
			current_running--
			finishedTasks++
			if finishedTasks == len(tasks) {
				done <- true
			}
			waitCond.Signal()
			waitCond.L.Unlock()
		}(task)
	}
	<-done
}

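// RunTasksWithTimeout uses the cond as a counting gate: the submitter waits
// while current_running is at the cap, and each finished worker decrements
// the counter and signals. A minimal, standalone sketch of that throttling
// pattern follows; the names limit, running, and the Println stand-in for
// real work are illustrative, not taken from the code above.

package main

import (
	"fmt"
	"sync"
)

func main() {
	const limit = 3
	cond := sync.NewCond(&sync.Mutex{})
	running := 0
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		cond.L.Lock()
		for running >= limit { // block until a slot frees up
			cond.Wait()
		}
		running++
		cond.L.Unlock()

		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			fmt.Println("working:", id) // stand-in for real work

			cond.L.Lock()
			running--
			cond.Signal() // release one queued submitter
			cond.L.Unlock()
		}(i)
	}
	wg.Wait()
}
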
func monitorChannel(channelID string, api *slack.Client, messageChan <-chan *slack.MessageEvent, msgQueue *utility.Queue, monitorSize, doLogging bool, histCond *sync.Cond, threadWait *sync.WaitGroup) {
	defer threadWait.Done()
	defer logging.Log.Noticef("(%s) Finished monitoring channel", channelID)

	logging.Log.Infof("(%s) Waiting for history", channelID)
	// Block until the history loader broadcasts on histCond.
	histCond.L.Lock()
	histCond.Wait()
	histCond.L.Unlock()

	logging.Log.Debugf("(%s) Message queue has %v items", channelID, msgQueue.Len())
	logging.Log.Infof("(%s) Monitor size: %v", channelID, monitorSize)
	logging.Log.Infof("(%s) Do logging: %v", channelID, doLogging)

	logging.Log.Infof("(%s) Waiting for events", channelID)
monitorLoop:
	for {
		select {
		case message, chanOpen := <-messageChan:
			if !chanOpen {
				logging.Log.Errorf("(%s) Incoming message channel is closed", channelID)
				break monitorLoop
			}
			/*
				logging.Log.Debugf("(%s) Received message", channelID)
				logging.Log.Debugf("\tUser: %s", message.User)
				logging.Log.Debugf("\tChannel: %s", message.Channel)
				logging.Log.Debugf("\tTimestamp: %s", message.Timestamp)
				logging.Log.Debugf("\tText: %s", message.Text)
				logging.Log.Debugf("\tSubtype: %s", message.SubType)
			*/
			// Slide the window: enqueue the newest timestamp, dequeue the
			// oldest, and delete that message from the channel.
			msgQueue.Push(message.Timestamp)
			toDelete := msgQueue.Poll().(string)
			//logging.Log.Debugf("(%s) Adding to queue: %s; Removing from queue: %s", channelID, message.Timestamp, toDelete)
			api.DeleteMessage(channelID, toDelete)
		}
	}
}

// TODO: return immediately after sending the request.
func post(url string, payload io.Reader, cond *sync.Cond, ratio int, ready chan bool) {
	cond.L.Lock()
	ready <- true
	cond.Wait()
	cond.L.Unlock()

	log.Printf("++post: %s", url)

	u := url
	if !strings.HasPrefix(u, "http://") {
		u = "http://" + u
	}
	if !strings.HasSuffix(u, "/") {
		u = u + "/"
	}
	u = u + "download"

	resp, err := http.Post(u, "application/json", payload)
	if err != nil {
		log.Printf("post to %s error: %s", url, err.Error())
		return
	}
	if resp.StatusCode != 200 {
		log.Printf("post to %s error, status code %d", url, resp.StatusCode)
		return
	}
	log.Printf("--post: %s", url)

	for i := 0; i < ratio; i++ {
		cond.L.Lock()
		cond.Signal()
		cond.L.Unlock()
	}
}

func sendRequest(numReq int64, st *stats, ch chan bool, cd *sync.Cond) {
	cd.L.Lock()
	for !status {
		cd.Wait()
	}
	resTime := []int64{}
	totReq := numReq
	for numReq > 0 {
		start := time.Now()
		resp, err := http.Get(queryuri)
		end := int64(time.Since(start))
		resTime = append(resTime, end)
		if err != nil {
			fmt.Println("Error is", err, "response is", resp, "restime is", resTime)
			st.failures++
			numReq--
			continue // resp is nil on error; don't touch its body
		}
		ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		numReq--
	}
	//time.Sleep(1*time.Second)
	var tot, max, min int64
	for i, val := range resTime {
		tot += val
		if val > max {
			max = val
		}
		// Seed min from the first sample; a zero-initialized min would
		// never be lowered by positive durations.
		if i == 0 || val < min {
			min = val
		}
	}
	st.totalTime = tot
	st.avgTime = tot / totReq
	st.maxTime = max
	st.minTime = min
	cd.L.Unlock()
	ch <- true
}

func TestRaceCond(t *testing.T) { // tsan's test50
	ch := make(chan bool, 2)

	var x int = 0
	var mu sync.Mutex
	var condition int = 0
	var cond *sync.Cond = sync.NewCond(&mu)

	var waker func() = func() {
		<-time.After(1e5)
		x = 1
		mu.Lock()
		condition = 1
		cond.Signal()
		mu.Unlock()
		<-time.After(1e5)
		mu.Lock()
		x = 3
		mu.Unlock()
		ch <- true
	}

	var waiter func() = func() {
		mu.Lock()
		for condition != 1 {
			cond.Wait()
		}
		mu.Unlock()
		x = 2
		ch <- true
	}
	x = 0
	go waker()
	go waiter()
	<-ch
	<-ch
}

func main() {
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1000000) // only GC when we ask for GC

	var stats, stats1, stats2 runtime.MemStats

	release := func() {}
	for i := 0; i < 20; i++ {
		if i == 10 {
			// Should be warmed up by now.
			runtime.ReadMemStats(&stats1)
		}

		c := make(chan int)
		for i := 0; i < 10; i++ {
			go func() {
				select {
				case <-c:
				case <-c:
				case <-c:
				}
			}()
		}
		time.Sleep(1 * time.Millisecond)
		release()

		close(c) // let select put its sudog's into the cache
		time.Sleep(1 * time.Millisecond)

		// pick up top sudog
		var cond1 sync.Cond
		var mu1 sync.Mutex
		cond1.L = &mu1
		go func() {
			mu1.Lock()
			cond1.Wait()
			mu1.Unlock()
		}()
		time.Sleep(1 * time.Millisecond)

		// pick up next sudog
		var cond2 sync.Cond
		var mu2 sync.Mutex
		cond2.L = &mu2
		go func() {
			mu2.Lock()
			cond2.Wait()
			mu2.Unlock()
		}()
		time.Sleep(1 * time.Millisecond)

		// put top sudog back
		cond1.Broadcast()
		time.Sleep(1 * time.Millisecond)

		// drop cache on floor
		runtime.GC()

		// release cond2 after select has gotten to run
		release = func() {
			cond2.Broadcast()
			time.Sleep(1 * time.Millisecond)
		}
	}

	runtime.GC()
	runtime.ReadMemStats(&stats2)

	if int(stats2.HeapObjects)-int(stats1.HeapObjects) > 20 { // normally at most 1 or 2; was 300 with leak
		print("BUG: object leak: ", stats.HeapObjects, " -> ", stats1.HeapObjects, " -> ", stats2.HeapObjects, "\n")
	}
}

// heartbeat runs an infinite loop of sending test messages to the connected
// node. All heartbeat routines are tied to each other: if one of them exits,
// the rest exit as well, because in that case orgalorg can no longer
// guarantee the global lock.
func heartbeat(
	period time.Duration,
	node *distributedLockNode,
	canceler *sync.Cond,
) {
	abort := make(chan struct{})

	// Internal goroutine that listens for the abort broadcast and finishes
	// the current heartbeat process.
	go func() {
		canceler.L.Lock()
		canceler.Wait()
		canceler.L.Unlock()

		abort <- struct{}{}
	}()

	// finish ends the current goroutine and sends the abort broadcast to
	// all connected goroutines.
	finish := func(code int) {
		canceler.L.Lock()
		canceler.Broadcast()
		canceler.L.Unlock()

		<-abort

		if remote, ok := node.runner.(*runcmd.Remote); ok {
			tracef("%s closing connection", node.String())

			err := remote.CloseConnection()
			if err != nil {
				warningf(
					"%s",
					hierr.Errorf(
						err,
						"%s error while closing connection",
						node.String(),
					),
				)
			}
		}

		exit(code)
	}

	ticker := time.Tick(period)

	// Infinite heartbeat loop. It sends a heartbeat message, waits a
	// fraction of the send timeout, and tries to receive the heartbeat
	// response. If no response is received, the heartbeat process aborts.
	for {
		_, err := io.WriteString(node.connection.stdin, heartbeatPing+"\n")
		if err != nil {
			errorf(
				"%s",
				hierr.Errorf(
					err,
					`%s can't send heartbeat`,
					node.String(),
				),
			)

			finish(2)
		}

		select {
		case <-abort:
			return

		case <-ticker:
			// pass
		}

		ping, err := bufio.NewReader(node.connection.stdout).ReadString('\n')
		if err != nil {
			errorf(
				"%s",
				hierr.Errorf(
					err,
					`%s can't receive heartbeat`,
					node.String(),
				),
			)

			finish(2)
		}

		if strings.TrimSpace(ping) != heartbeatPing {
			errorf(
				`%s received unexpected heartbeat ping: '%s'`,
				node.String(),
				ping,
			)

			finish(2)
		}

		tracef(`%s heartbeat`, node.String())
	}
}

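// The cancellation scheme above hinges on one property of sync.Cond: a
// single Broadcast wakes every parked listener, so one failing routine can
// fan out an abort to all of its peers. A minimal, standalone sketch of
// that fan-out (the names cancel, abort, and the three-worker setup are
// illustrative, not taken from the code above):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	cancel := sync.NewCond(&sync.Mutex{})
	var wg sync.WaitGroup

	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			abort := make(chan struct{})

			// Listener: turns the cond broadcast into a channel event.
			go func() {
				cancel.L.Lock()
				cancel.Wait()
				cancel.L.Unlock()
				close(abort)
			}()

			for {
				select {
				case <-abort:
					fmt.Println("worker", id, "aborted")
					return
				case <-time.After(10 * time.Millisecond):
					// periodic work would go here
				}
			}
		}(i)
	}

	time.Sleep(50 * time.Millisecond)
	cancel.L.Lock()
	cancel.Broadcast() // one broadcast stops every worker
	cancel.L.Unlock()
	wg.Wait()
}
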
func Test_Journal_Concurrency(t *testing.T) {
	logging.InitForTesting(logging.NOTICE)
	logger := logging.MustGetLogger("journal")
	tempDir, err := ioutil.TempDir("", "journal")
	if err != nil {
		t.FailNow()
	}
	defer os.RemoveAll(tempDir)
	factory := NewFileJournalGroupFactory(
		logger,
		rand.NewSource(0),
		func() time.Time {
			return time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC)
		},
		".log",
		os.FileMode(0644),
		16,
	)
	dummyWorker := &DummyWorker{}
	tempFile := filepath.Join(tempDir, "test")
	t.Log(tempFile)
	journalGroup, err := factory.GetJournalGroup(tempFile, dummyWorker)
	if err != nil {
		t.FailNow()
	}
	journal := journalGroup.GetFileJournal("key")
	defer journal.Dispose()

	cond := sync.Cond{L: &sync.Mutex{}}
	count := int64(0)
	outerWg := sync.WaitGroup{}

	doFlush := func() {
		journal.Flush(func(chunk JournalChunk) interface{} {
			defer chunk.Dispose()
			reader, err := chunk.Reader()
			if err != nil {
				t.Log(err.Error())
				t.FailNow()
			}
			defer reader.Close()
			c, err := countLines(reader)
			if err != nil {
				t.Log(err.Error())
				t.FailNow()
			}
			atomic.AddInt64(&count, int64(c))
			return nil
		})
	}

	for j := 0; j < 10; j++ {
		outerWg.Add(1)
		go func(j int) {
			defer outerWg.Done()
			wg := sync.WaitGroup{}
			starting := sync.WaitGroup{}
			for i := 0; i < 10; i++ {
				wg.Add(1)
				starting.Add(1)
				go func(i int) {
					defer wg.Done()
					starting.Done()
					// Park until the broadcast below releases all writers
					// at once.
					cond.L.Lock()
					cond.Wait()
					cond.L.Unlock()
					for k := 0; k < 3; k++ {
						data := fmt.Sprintf("test%d\n", i)
						// Shadow err locally: assigning to the shared outer
						// err from many goroutines would be a data race.
						err := journal.Write([]byte(data))
						if err != nil {
							t.Log(err.Error())
							t.FailNow()
						}
					}
				}(i)
			}
			starting.Wait()
			cond.Broadcast()
			runtime.Gosched()
			doFlush()
			wg.Wait()
		}(j)
	}
	outerWg.Wait()
	doFlush()
	if count != 300 {
		t.Logf("%d", count)
		t.Fail()
	}
}

func (t RealTimer) Sleep(cond *sync.Cond) {
	// Wait must be called with cond.L held; the caller owns the locking.
	cond.Wait()
}
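
// Sleep above delegates the locking discipline entirely to its caller:
// sync.Cond.Wait unlocks cond.L on entry, so calling it without holding the
// lock crashes at runtime ("sync: unlock of unlocked mutex" for a Mutex).
// A minimal sketch of a correct caller; the woken flag and the signaling
// goroutine are illustrative, not taken from the code above, and RealTimer
// is redeclared only to keep the sketch self-contained.

package main

import (
	"sync"
	"time"
)

type RealTimer struct{}

func (t RealTimer) Sleep(cond *sync.Cond) {
	cond.Wait()
}

func main() {
	cond := sync.NewCond(&sync.Mutex{})
	woken := false

	go func() {
		time.Sleep(10 * time.Millisecond)
		cond.L.Lock()
		woken = true
		cond.Signal()
		cond.L.Unlock()
	}()

	// The caller takes the lock and re-checks its predicate around Sleep;
	// Sleep itself only wraps the Wait call.
	cond.L.Lock()
	for !woken {
		(RealTimer{}).Sleep(cond)
	}
	cond.L.Unlock()
}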