Example #1
// Returns a function that returns the next command, or
// nil when no more commands are defined. It will also signal the passed
// Condition when there are no commands left and the Condition is not nil.
func GetCommandFunc(c *types.Connection, startWait *sync.Cond, moreCommands CommandFunc) CommandFunc {
	commands := make(chan string, 5) // command1-5
	//if c.Options.PostCommands { // official PCM ignores this flag... yay.
	for _, v := range []string{
		c.Commands.Command1, c.Commands.Command2, c.Commands.Command3,
		c.Commands.Command4, c.Commands.Command5} {
		if strings.TrimSpace(v) != "" {
			commands <- v
		}
	}
	//}
	return func() *string {
		select {
		case v := <-commands:
			return &v
		default:
			if c := moreCommands(); c != nil {
				return c
			} else if startWait != nil {
				startWait.Broadcast()
			}
			return nil
		}
	}
}
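A caller would typically drain the returned closure until it reports nil. The sketch below is an illustration only: conn and startCond are hypothetical stand-ins, and CommandFunc is taken to be func() *string as implied by the signature above.

// Hypothetical consumer of GetCommandFunc (conn and startCond are placeholders).
next := GetCommandFunc(conn, startCond, func() *string { return nil })
for cmd := next(); cmd != nil; cmd = next() {
	fmt.Println("running:", *cmd)
}
// By this point the closure has broadcast startCond (if it was non-nil),
// releasing any goroutines already waiting on it.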
Example #2
func printLoop(ch <-chan []grid2d.Update, g grid2d.Grid, cns *census.DirCensus, cond *sync.Cond, numUpdates *int64, clearScreen bool) {
	// Try to keep rendering smooth.
	runtime.LockOSThread()

	for range ch {
		if clearScreen {
			// TODO(dnesting): use termcap/terminfo to do this more portably.
			fmt.Print("\x1b[H") // move the cursor to the top-left before redrawing
		}
		term.PrintWorld(os.Stdout, g)
		fmt.Println()
		if clearScreen {
			fmt.Print("\x1b[J") // clear whatever remains of the previous frame below the cursor
		}

		// Write some summary stats after the rendering.
		fmt.Printf("%d updates\n", atomic.LoadInt64(numUpdates))
		fmt.Printf("%d/%d orgs (%d/%d species, %d recorded)\n", cns.Count(), cns.CountAllTime(), cns.Distinct(), cns.DistinctAllTime(), cns.NumRecorded())
		if loc := g.Get(0, 0); loc != nil {
			fmt.Printf("random: %v\n", loc.Value())
		}

		// If we're running with --sync, signal to any goroutines waiting on a rendering that
		// it's OK for them to continue again.
		if cond != nil {
			cond.Broadcast()
		}
	}
}
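For context, a producer running with --sync might pair with the Broadcast above roughly as follows; the function name and wiring are assumptions, not part of the original code.

// Hypothetical producer paired with printLoop: publish a batch of updates,
// then block on the shared cond until printLoop has rendered a frame.
// Broadcast only wakes goroutines already parked in Wait, so the producer
// needs to be waiting before the corresponding render completes.
func stepWorld(ch chan<- []grid2d.Update, cond *sync.Cond, updates []grid2d.Update) {
	ch <- updates
	cond.L.Lock()
	cond.Wait()
	cond.L.Unlock()
}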
Example #3
func (t *RealTimer) SleepUntil(cond *sync.Cond, timestamp time.Time) {
	now := t.Now()
	delay := timestamp.Sub(now)
	if delay > 0 {
		// Arrange for cond to be broadcast once the deadline passes, then
		// block in Sleep; Stop releases the timer if we were woken early.
		tt := time.AfterFunc(delay, func() { cond.Broadcast() })
		t.Sleep(cond)
		tt.Stop()
	}
}
Example #4
func newCondDeadline(cond *sync.Cond) (ret *condDeadline) {
	ret = &condDeadline{
		timer: time.AfterFunc(math.MaxInt64, func() {
			// mu is a mutex declared elsewhere in the package; it guards _exceeded.
			mu.Lock()
			ret._exceeded = true
			mu.Unlock()
			cond.Broadcast()
		}),
	}
	ret.setDeadline(time.Time{})
	return
}
Example #5
// Schedule configures the timer to call c.Broadcast() when the timer expires.
// It returns true if the caller should defer a call to t.Stop().
func (t *timer) Schedule(c *sync.Cond) bool {
	if t.now != 0 {
		if t.Timer == nil {
			t.Timer = time.AfterFunc(time.Duration(t.rem), func() {
				// c.L must be held to guarantee that the caller is waiting
				c.L.Lock()
				defer c.L.Unlock()
				c.Broadcast()
			})
			return true
		}
		t.Reset(time.Duration(t.rem))
	}
	return false
}
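A caller might use the returned boolean like this; waitWithTimeout and the done predicate are hypothetical names added for the sketch, and done must be checked under cond.L.

// Hypothetical caller of Schedule: the timer is stopped only if this call created it.
func waitWithTimeout(t *timer, cond *sync.Cond, done func() bool) {
	if t.Schedule(cond) {
		defer t.Stop()
	}
	cond.L.Lock()
	for !done() {
		cond.Wait() // woken by the timer's Broadcast or by any other Broadcast on cond
	}
	cond.L.Unlock()
}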
Example #6
func finishWaiting(cond *sync.Cond, waitFinished <-chan struct{}) {
	runtime.Gosched()
	select {
	// avoid creating a timer if we can help it...
	case <-waitFinished:
		return
	default:
		const spinTimeout = 100 * time.Millisecond
		t := time.NewTimer(spinTimeout)
		defer t.Stop()
		for {
			runtime.Gosched()
			cond.Broadcast()
			select {
			case <-waitFinished:
				return
			case <-t.C:
				t.Reset(spinTimeout)
			}
		}
	}
}
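finishWaiting only returns once something closes or sends on waitFinished; a waiter paired with it might look like the following sketch, where waitThenSignal and ready are hypothetical names.

// Hypothetical counterpart to finishWaiting: park on the cond until ready()
// holds, then signal completion so finishWaiting can stop broadcasting.
func waitThenSignal(cond *sync.Cond, ready func() bool) <-chan struct{} {
	waitFinished := make(chan struct{})
	go func() {
		cond.L.Lock()
		for !ready() {
			cond.Wait() // finishWaiting broadcasts repeatedly until this goroutine wakes up
		}
		cond.L.Unlock()
		close(waitFinished)
	}()
	return waitFinished
}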
Example #7
func main() {
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1000000) // only GC when we ask for GC

	var stats, stats1, stats2 runtime.MemStats

	release := func() {}
	for i := 0; i < 20; i++ {
		if i == 10 {
			// Should be warmed up by now.
			runtime.ReadMemStats(&stats1)
		}

		c := make(chan int)
		for i := 0; i < 10; i++ {
			go func() {
				select {
				case <-c:
				case <-c:
				case <-c:
				}
			}()
		}
		time.Sleep(1 * time.Millisecond)
		release()

		close(c) // let select put its sudog's into the cache
		time.Sleep(1 * time.Millisecond)

		// pick up top sudog
		var cond1 sync.Cond
		var mu1 sync.Mutex
		cond1.L = &mu1
		go func() {
			mu1.Lock()
			cond1.Wait()
			mu1.Unlock()
		}()
		time.Sleep(1 * time.Millisecond)

		// pick up next sudog
		var cond2 sync.Cond
		var mu2 sync.Mutex
		cond2.L = &mu2
		go func() {
			mu2.Lock()
			cond2.Wait()
			mu2.Unlock()
		}()
		time.Sleep(1 * time.Millisecond)

		// put top sudog back
		cond1.Broadcast()
		time.Sleep(1 * time.Millisecond)

		// drop cache on floor
		runtime.GC()

		// release cond2 after select has gotten to run
		release = func() {
			cond2.Broadcast()
			time.Sleep(1 * time.Millisecond)
		}
	}

	runtime.GC()

	runtime.ReadMemStats(&stats2)

	if int(stats2.HeapObjects)-int(stats1.HeapObjects) > 20 { // normally at most 1 or 2; was 300 with leak
		print("BUG: object leak: ", stats.HeapObjects, " -> ", stats1.HeapObjects, " -> ", stats2.HeapObjects, "\n")
	}
}
Example #8
// heartbeat runs an infinite loop of sending test messages to the connected
// node. All heartbeat routines are tied to each other, so if one of them
// exits, all of them exit, because in that case orgalorg can no longer
// guarantee the global lock.
func heartbeat(
	period time.Duration,
	node *distributedLockNode,
	canceler *sync.Cond,
) {
	abort := make(chan struct{}, 0)

	// Internal goroutine that listens for the abort broadcast and finishes
	// the current heartbeat process.
	go func() {
		canceler.L.Lock()
		canceler.Wait()
		canceler.L.Unlock()

		abort <- struct{}{}
	}()

	// finish terminates the current goroutine and sends the abort broadcast
	// to all connected goroutines.
	finish := func(code int) {
		canceler.L.Lock()
		canceler.Broadcast()
		canceler.L.Unlock()

		<-abort

		if remote, ok := node.runner.(*runcmd.Remote); ok {
			tracef("%s closing connection", node.String())
			err := remote.CloseConnection()
			if err != nil {
				warningf(
					"%s",
					hierr.Errorf(
						err,
						"%s error while closing connection",
						node.String(),
					),
				)
			}
		}

		exit(code)
	}

	ticker := time.Tick(period)

	// Infinite heartbeat loop: send a heartbeat message, wait a fraction of
	// the send timeout, then try to receive the heartbeat response. If no
	// response is received, the heartbeat process aborts.
	for {
		_, err := io.WriteString(node.connection.stdin, heartbeatPing+"\n")
		if err != nil {
			errorf(
				"%s",
				hierr.Errorf(
					err,
					`%s can't send heartbeat`,
					node.String(),
				),
			)

			finish(2)
		}

		select {
		case <-abort:
			return

		case <-ticker:
			// pass
		}

		ping, err := bufio.NewReader(node.connection.stdout).ReadString('\n')
		if err != nil {
			errorf(
				"%s",
				hierr.Errorf(
					err,
					`%s can't receive heartbeat`,
					node.String(),
				),
			)

			finish(2)
		}

		if strings.TrimSpace(ping) != heartbeatPing {
			errorf(
				`%s received unexpected heartbeat ping: '%s'`,
				node.String(),
				ping,
			)

			finish(2)
		}

		tracef(`%s heartbeat`, node.String())
	}
}
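Because every node's heartbeat shares the same canceler, a single Broadcast tears all of them down. A launch sketch follows, with nodes and heartbeatPeriod as assumed placeholders.

// Hypothetical launcher: all heartbeats share one canceler cond, so the
// finish() in any of them broadcasts and unblocks the abort goroutine in
// every other heartbeat as well.
canceler := sync.NewCond(&sync.Mutex{})
for _, node := range nodes {
	go heartbeat(heartbeatPeriod, node, canceler)
}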
Example #9
func Test_Journal_Concurrency(t *testing.T) {
	logging.InitForTesting(logging.NOTICE)
	logger := logging.MustGetLogger("journal")
	tempDir, err := ioutil.TempDir("", "journal")
	if err != nil {
		t.FailNow()
	}
	defer os.RemoveAll(tempDir)
	factory := NewFileJournalGroupFactory(
		logger,
		rand.NewSource(0),
		func() time.Time { return time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC) },
		".log",
		os.FileMode(0644),
		16,
	)
	dummyWorker := &DummyWorker{}
	tempFile := filepath.Join(tempDir, "test")
	t.Log(tempFile)
	journalGroup, err := factory.GetJournalGroup(tempFile, dummyWorker)
	if err != nil {
		t.FailNow()
	}
	journal := journalGroup.GetFileJournal("key")
	defer journal.Dispose()
	cond := sync.Cond{L: &sync.Mutex{}}
	count := int64(0)
	outerWg := sync.WaitGroup{}
	doFlush := func() {
		journal.Flush(func(chunk JournalChunk) interface{} {
			defer chunk.Dispose()
			reader, err := chunk.Reader()
			if err != nil {
				t.Log(err.Error())
				t.FailNow()
			}
			defer reader.Close()
			c, err := countLines(reader)
			if err != nil {
				t.Log(err.Error())
				t.FailNow()
			}
			atomic.AddInt64(&count, int64(c))
			return nil
		})
	}
	for j := 0; j < 10; j += 1 {
		outerWg.Add(1)
		go func(j int) {
			defer outerWg.Done()
			wg := sync.WaitGroup{}
			starting := sync.WaitGroup{}
			for i := 0; i < 10; i += 1 {
				wg.Add(1)
				starting.Add(1)
				go func(i int) {
					defer wg.Done()
					cond.L.Lock()
					starting.Done() // announce readiness only while holding cond.L so the broadcast below cannot be missed
					cond.Wait()
					cond.L.Unlock()
					for k := 0; k < 3; k += 1 {
						data := fmt.Sprintf("test%d\n", i)
						err := journal.Write([]byte(data)) // use a local err to avoid a data race between workers
						if err != nil {
							t.Log(err.Error())
							t.FailNow()
						}
					}
				}(i)
			}
			starting.Wait()
			cond.L.Lock() // once this lock is acquired, every worker is parked in cond.Wait()
			cond.Broadcast()
			cond.L.Unlock()
			runtime.Gosched()
			doFlush()
			wg.Wait()
		}(j)
	}
	outerWg.Wait()
	doFlush()
	if count != 300 {
		t.Logf("%d", count)
		t.Fail()
	}
}