Code Example #1
File: service.go Project: carriercomm/examples-1
func (r *LedReporter) watch() {
	t := time.NewTicker(time.Minute)

	for {
		select {
		case color := <-r.blinkChan:
			t.Stop()

			led.Off()

			switch color {
			case RED:
				led.Red(true)
			case BLUE:
				led.Blue(true)
			case GREEN:
				led.Green(true)
			}

			t = time.NewTicker(100 * time.Millisecond)
		case <-t.C:
			led.Off()

			if registered {
				led.Blue(true)
			} else {
				led.Off()
			}
		}
	}
}
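A note on the Stop/NewTicker pair above: since Go 1.15, time.Ticker has a Reset method, so the same retargeting can be done in place. A minimal sketch (the intervals and the blink channel are illustrative, not from the original project):

func blinkSketch(blink <-chan struct{}) {
	t := time.NewTicker(time.Minute) // slow idle rate
	defer t.Stop()
	for {
		select {
		case <-blink:
			t.Reset(100 * time.Millisecond) // retarget the same ticker in place
		case <-t.C:
			// handle a tick at the current rate
		}
	}
}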
Code Example #2
// tickHandler sends the most recent accumulated move on each tick, then clears the move list.
func (c *cclient) tickHandler(ticker *time.Ticker) {
	defer LOGV.Println("client has stopped ticking.")
	for {
		select {
		case <-ticker.C:
			length := len(c.movelist)
			if length > 0 {
				LOGV.Println("movelist is length " + strconv.Itoa(length))
				var translatedMove int
				switch c.movelist[length-1] {
				case lib2048.Up:
					translatedMove = 0
				case lib2048.Right:
					translatedMove = 1
				case lib2048.Down:
					translatedMove = 2
				case lib2048.Left:
					translatedMove = 3
				}
				move := util.ClientMove{translatedMove}
				c.moveQueue <- move
				c.movelist = c.movelist[0:0]
				//				websocket.JSON.Send(c.conn, move)
			}
		case <-c.quitchan:
			ticker.Stop()
			close(c.quitchan)
			return
		}
	}
}
Code Example #3
File: pronom.go Project: richardlehane/siegfried
func applyAll(max int, reps []string, apply func(puid string) error) []error {
	ch := make(chan error, len(reps))
	wg := sync.WaitGroup{}
	queue := make(chan struct{}, max) // to avoid hammering TNA
	_, _, tf, _ := config.HarvestOptions()
	var throttle *time.Ticker
	if tf > 0 {
		throttle = time.NewTicker(tf)
		defer throttle.Stop()
	}
	for _, puid := range reps {
		if tf > 0 {
			<-throttle.C
		}
		wg.Add(1)
		go func(puid string) {
			queue <- struct{}{}
			defer wg.Done()
			if err := apply(puid); err != nil {
				ch <- err
			}
			<-queue
		}(puid)
	}
	wg.Wait()
	close(ch)
	var errors []error
	for err := range ch {
		errors = append(errors, err)
	}
	return errors
}
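For context, a sketch of how applyAll might be called; the PUID strings and the fetchReport worker are illustrative assumptions, not from the siegfried source:

errs := applyAll(5, []string{"fmt/1", "fmt/40", "x-fmt/111"}, func(puid string) error {
	return fetchReport(puid) // hypothetical per-PUID fetch
})
for _, err := range errs {
	log.Println(err)
}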
Code Example #4
File: scheme.go Project: myshkin5/netspel
func (s *Scheme) RunReader(reader factory.Reader) {
	s.closer = reader
	defer s.done.Done()
	s.startReporter()

	buffer := make([]byte, s.bytesPerMessage*2)

	var ticker *time.Ticker
	if s.tickerTime > 0 {
		ticker = time.NewTicker(s.tickerTime)
		defer ticker.Stop()
	}

	for {
		if ticker != nil {
			<-ticker.C
		}

		if s.isClosed() {
			break
		}

		count, err := reader.Read(buffer)
		s.countMessage(count, err)
	}
}
Code Example #5
File: gpio.go Project: paulbellamy/gpio
func (p *pin) startPwmLoop(initialValue int) {
	if p.pwmLoop != nil {
		p.pwmLoop <- initialValue
		return
	}
	p.pwmLoop = make(chan int)
	p.quitPwmLoop = make(chan chan error)

	go func() {
		var err error
		period := 20 * time.Millisecond
		var highDuration time.Duration
		ticker := time.NewTicker(period)
		defer func() {
			// cleanup
			close(p.pwmLoop)
			close(p.quitPwmLoop)

			p.pwmLoop = nil
			p.quitPwmLoop = nil

			ticker.Stop()
		}()

		for {
			select {
			case v := <-p.pwmLoop:
				switch {
				case v == 0:
					p.SetLow()
					return
				case v == 100:
					p.SetHigh()
					return
				default:
					highDuration = valueToDuration(v, period)
				}
			case reply := <-p.quitPwmLoop:
				reply <- nil
				return
			case <-ticker.C:
				err = p.SetHigh()
				if err != nil {
					reply := <-p.quitPwmLoop
					reply <- err
					return
				}

				time.Sleep(highDuration)

				err = p.SetLow()
				if err != nil {
					reply := <-p.quitPwmLoop
					reply <- err
					return
				}
			}
		}
	}()
}
Code Example #6
func workerProviderRun(w *worker.Worker, args ...interface{}) {
	var (
		prov       = w.Props[0].(*provider.Provider)
		timeTicker *time.Ticker
		timeChan   <-chan time.Time
	)

	defer func() { w.State = worker.JobStopped }()
	defer w.Shutdown()

	logger.Log(logger.LevelDebug, "provider", "%s: starting", prov.Name)

	// If provider `refresh_interval` has been configured, set up a time ticker
	if prov.Config.RefreshInterval > 0 {
		timeTicker = time.NewTicker(time.Duration(prov.Config.RefreshInterval) * time.Second)
		timeChan = timeTicker.C
	}

	for {
		select {
		case <-timeChan:
			logger.Log(logger.LevelDebug, "provider", "%s: performing refresh from connector", prov.Name)

			if err := prov.Connector.Refresh(prov.Name, prov.Filters.Input); err != nil {
				logger.Log(logger.LevelError, "provider", "%s: unable to refresh: %s", prov.Name, err)
				continue
			}

			prov.LastRefresh = time.Now()

		case cmd := <-w.ReceiveJobSignals():
			switch cmd {
			case jobSignalRefresh:
				logger.Log(logger.LevelInfo, "provider", "%s: received refresh command", prov.Name)

				if err := prov.Connector.Refresh(prov.Name, prov.Filters.Input); err != nil {
					logger.Log(logger.LevelError, "provider", "%s: unable to refresh: %s", prov.Name, err)
					continue
				}

				prov.LastRefresh = time.Now()

			case jobSignalShutdown:
				logger.Log(logger.LevelInfo, "provider", "%s: received shutdown command, stopping job", prov.Name)

				w.State = worker.JobStopped

				if timeTicker != nil {
					// Stop refresh time ticker
					timeTicker.Stop()
				}

				return

			default:
				logger.Log(logger.LevelNotice, "provider", "%s: received unknown command, ignoring", prov.Name)
			}
		}
	}
}
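The refresh case above relies on a common idiom: timeChan stays nil when no refresh_interval is configured, and a receive from a nil channel blocks forever, so that select case simply never fires. A minimal standalone sketch of the idiom:

func nilChannelSketch(refresh time.Duration) {
	var tickChan <-chan time.Time // nil unless a refresh interval is configured
	if refresh > 0 {
		ticker := time.NewTicker(refresh)
		defer ticker.Stop()
		tickChan = ticker.C
	}
	quit := time.After(time.Second) // illustrative stop condition
	for {
		select {
		case <-tickChan: // never fires while tickChan is nil
			fmt.Println("refresh")
		case <-quit:
			return
		}
	}
}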
Code Example #7
File: client_token.go Project: neatstudio/wechat
// Responsible for periodically refreshing the locally cached access token.
// This relatively involved implementation exists to cut down on calls to time.Now();
// otherwise every pass would have to compare against time.Now().
func (c *Client) tokenService() {
	const defaultTickDuration = time.Minute // anything above 44 seconds stays under the rate limit (2000 requests/day)

	// The current ticker interval: normally the current access token's expiry
	// minus 10 seconds; on failure, retry continuously at defaultTickDuration.
	currentTickDuration := defaultTickDuration
	var tk *time.Ticker

NewTickDuration:
	for {
		tk = time.NewTicker(currentTickDuration)
		for {
			select {
			case currentTickDuration = <-c.resetRefreshTickChan: // an access token was obtained elsewhere; reset the ticker.
				tk.Stop()
				break NewTickDuration

			case <-tk.C:
				resp, err := c.getNewToken()
				switch {
				case err != nil:
					c.update("", err)
					// on error, fall back to defaultTickDuration
					if currentTickDuration != defaultTickDuration { // guard to avoid the cost of resetting the ticker needlessly
						tk.Stop()
						currentTickDuration = defaultTickDuration
						break NewTickDuration
					}
				case resp.ExpiresIn > 10: // the normal case
					c.update(resp.Token, nil)
					// re-arm the ticker from the returned expiry;
					// expire 10 seconds early to allow for network latency
					nextTickDuration := time.Duration(resp.ExpiresIn-10) * time.Second
					if currentTickDuration != nextTickDuration { // guard to avoid the cost of resetting the ticker needlessly
						tk.Stop()
						currentTickDuration = nextTickDuration
						break NewTickDuration
					}
				case resp.ExpiresIn > 0: // should not happen in practice
					c.update(resp.Token, nil)
					// re-arm the ticker from the returned expiry
					nextTickDuration := time.Duration(resp.ExpiresIn) * time.Second
					if currentTickDuration != nextTickDuration { // guard to avoid the cost of resetting the ticker needlessly
						tk.Stop()
						currentTickDuration = nextTickDuration
						break NewTickDuration
					}
				default: // resp.ExpiresIn <= 0; should not happen in practice
					c.update("", fmt.Errorf("tokenService: access token expiry should be a positive integer, got %d", resp.ExpiresIn))
					// on error, fall back to defaultTickDuration
					if currentTickDuration != defaultTickDuration { // guard to avoid the cost of resetting the ticker needlessly
						tk.Stop()
						currentTickDuration = defaultTickDuration
						break NewTickDuration
					}
				}
			}
		}
	}
}
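The labeled NewTickDuration loop above exists only because the ticker interval changes between iterations. Since Go 1.15, Ticker.Reset does the retargeting in place; a sketch under that assumption, with fetchToken as a hypothetical stand-in for c.getNewToken:

func tokenLoopSketch(fetchToken func() (ttl time.Duration, err error)) {
	const retry = time.Minute
	tk := time.NewTicker(retry)
	defer tk.Stop()
	for range tk.C {
		ttl, err := fetchToken()
		if err != nil || ttl <= 10*time.Second {
			tk.Reset(retry) // on failure, keep retrying at the default rate
			continue
		}
		tk.Reset(ttl - 10*time.Second) // refresh 10 seconds before expiry
	}
}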
Code Example #8
File: server.go Project: TF2Stadium/Pauling
func (s *Server) StartVerifier(ticker *time.Ticker) {
	var err error
	defer DeleteServer(s.LobbyId)

	_, err = s.rcon.Query("status")
	if err != nil {
		err = s.rcon.Reconnect(5 * time.Minute)

		if err != nil {
			publishEvent(Event{
				Name:    DisconnectedFromServer,
				LobbyID: s.LobbyId})
			return
		}
	}

	for {
		select {
		case <-ticker.C:
			if !s.Verify() {
				ticker.Stop()
				s.rcon.Close()
				return
			}
		case <-s.StopVerifier:
			helpers.Logger.Debugf("Stopping logger for lobby %d", s.LobbyId)
			s.rcon.Say("[tf2stadium.com] Lobby Ended.")
			s.rcon.RemoveTag("TF2Stadium")
			ticker.Stop()
			s.rcon.Close()
			return
		}
	}
}
Code Example #9
File: consumer.go Project: pgpst/pgpst
// poll all known lookup servers every LookupdPollInterval
func (r *Consumer) lookupdLoop() {
	// add some jitter so that multiple consumers discovering the same topic,
	// when restarted at the same time, don't all connect at once.
	jitter := time.Duration(int64(r.rng.Float64() *
		r.config.LookupdPollJitter * float64(r.config.LookupdPollInterval)))
	var ticker *time.Ticker

	select {
	case <-time.After(jitter):
	case <-r.exitChan:
		goto exit
	}

	ticker = time.NewTicker(r.config.LookupdPollInterval)

	for {
		select {
		case <-ticker.C:
			r.queryLookupd()
		case <-r.lookupdRecheckChan:
			r.queryLookupd()
		case <-r.exitChan:
			goto exit
		}
	}

exit:
	if ticker != nil {
		ticker.Stop()
	}
	r.log(LogLevelInfo, "exiting lookupdLoop")
	r.wg.Done()
}
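A worked sketch of the jitter above: with a 60-second poll interval and a jitter fraction of 0.3 (illustrative values; the real ones come from the consumer config), each consumer sleeps a uniform 0–18s before its first poll:

func jitterSketch() {
	interval := 60 * time.Second
	jitter := time.Duration(rand.Float64() * 0.3 * float64(interval))
	fmt.Println("initial delay:", jitter) // uniform in [0s, 18s)
}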
Code Example #10
File: role_manager.go Project: docker/docker
// Run is roleManager's main loop.
func (rm *roleManager) Run() {
	defer close(rm.doneChan)

	var (
		nodes    []*api.Node
		ticker   *time.Ticker
		tickerCh <-chan time.Time
	)

	watcher, cancelWatch, err := store.ViewAndWatch(rm.store,
		func(readTx store.ReadTx) error {
			var err error
			nodes, err = store.FindNodes(readTx, store.All)
			return err
		},
		state.EventUpdateNode{})
	defer cancelWatch()

	if err != nil {
		log.L.WithError(err).Error("failed to check nodes for role changes")
	} else {
		for _, node := range nodes {
			rm.pending[node.ID] = node
			rm.reconcileRole(node)
		}
		if len(rm.pending) != 0 {
			ticker = time.NewTicker(roleReconcileInterval)
			tickerCh = ticker.C
		}
	}

	for {
		select {
		case event := <-watcher:
			node := event.(state.EventUpdateNode).Node
			rm.pending[node.ID] = node
			rm.reconcileRole(node)
			if len(rm.pending) != 0 && ticker == nil {
				ticker = time.NewTicker(roleReconcileInterval)
				tickerCh = ticker.C
			}
		case <-tickerCh:
			for _, node := range rm.pending {
				rm.reconcileRole(node)
			}
			if len(rm.pending) == 0 {
				ticker.Stop()
				ticker = nil
				tickerCh = nil
			}
		case <-rm.ctx.Done():
			if ticker != nil {
				ticker.Stop()
			}
			return
		}
	}
}
Code Example #11
File: producer.go Project: pkoro/go-kafka
// one per broker
// groups messages together into appropriately-sized batches for sending to the broker
// based on https://godoc.org/github.com/eapache/channels#BatchingChannel
func (p *Producer) messageAggregator(broker *Broker, input chan *ProducerMessage) {
	var ticker *time.Ticker
	var timer <-chan time.Time
	if p.config.FlushFrequency > 0 {
		ticker = time.NewTicker(p.config.FlushFrequency)
		timer = ticker.C
	}

	var buffer []*ProducerMessage
	var doFlush chan []*ProducerMessage
	var bytesAccumulated int

	flusher := make(chan []*ProducerMessage)
	go withRecover(func() { p.flusher(broker, flusher) })

	for {
		select {
		case msg := <-input:
			if msg == nil {
				goto shutdown
			}

			if (bytesAccumulated+msg.byteSize() >= forceFlushThreshold()) ||
				(p.config.Compression != CompressionNone && bytesAccumulated+msg.byteSize() >= p.config.MaxMessageBytes) ||
				(p.config.MaxMessagesPerReq > 0 && len(buffer) >= p.config.MaxMessagesPerReq) {
				Logger.Println("producer/aggregator maximum request accumulated, forcing blocking flush")
				flusher <- buffer
				buffer = nil
				doFlush = nil
				bytesAccumulated = 0
			}

			buffer = append(buffer, msg)
			bytesAccumulated += msg.byteSize()

			if len(buffer) >= p.config.FlushMsgCount ||
				(p.config.FlushByteCount > 0 && bytesAccumulated >= p.config.FlushByteCount) {
				doFlush = flusher
			}
		case <-timer:
			doFlush = flusher
		case doFlush <- buffer:
			buffer = nil
			doFlush = nil
			bytesAccumulated = 0
		}
	}

shutdown:
	if ticker != nil {
		ticker.Stop()
	}
	if len(buffer) > 0 {
		flusher <- buffer
	}
	close(flusher)
}
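Note the complementary idiom in the select above: a send on a nil channel also blocks forever, so doFlush = flusher arms the flush case and doFlush = nil disarms it. A minimal sketch:

func gatedSendSketch() {
	flusher := make(chan []int, 1)
	var doFlush chan []int // nil: the send case below can never fire
	buffer := []int{1, 2, 3}

	doFlush = flusher // a batch is ready: arm the send case
	select {
	case doFlush <- buffer:
		buffer, doFlush = nil, nil // flushed: disarm until the next batch
	}
	fmt.Println(<-flusher)
}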
Code Example #12
File: middle.go Project: prodigeni/circuit
func (x *Sieve) loop() {
	// Close out connection
	defer func() {
		close(x.k)
		x.Mute()
		x.dual.Mute()
	}()
	// Timeout ticker
	var tkr *time.Ticker
	var expchan <-chan time.Time
	if x.exp > 0 {
		tkr = time.NewTicker(x.exp)
		expchan = tkr.C
	}
	defer func() {
		if tkr != nil {
			tkr.Stop()
		}
	}()
	// Main loop
	var nrecv, nticks int
	for {
		select {
		case <-x.abrt:
			x.Frame.Println("Aborting sieve.")
			return
		case <-expchan:
			nticks++
			if nticks > 1 {
				x.Frame.Println("Breaking connection due to timeout.")
				return // Timeout with nothing sent kills the connection
			}
		case q, ok := <-x.s:
			nticks = 0
			if !ok {
				return // s-channel closed, i.e. a Close on the sending HalfConn, kills the connection
			}
			nrecv++
			send := nrecv <= x.nok
			forceEOF := nrecv+1 > x.nok+x.ndrop
			if send {
				if _, ok := q.(eof); ok {
					x.Frame.Println("SIEVE ---> EOF") // ??
				}
				x.t.Send(q)
			}
			// OK tells conn.Write whether the message was delivered
			// EOF tells conn.Write whether the connection has ended after the write
			x.k <- feedback{OK: send, EOF: forceEOF}
			if forceEOF {
				x.Mute()
				x.dual.Mute()
			}
			//x.Printf("X=%-2d dlvr=%4v forceEOF=%4v (%d,%d)", nrecv, send, forceEOF, x.nok, x.ndrop)
		}
	}
}
Code Example #13
File: main.go Project: dskinner/snd
func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		go func() {
			time.Sleep(10 * time.Second)
			pprof.StopCPUProfile()
		}()
	}

	app.Main(func(a app.App) {
		var logdbg *time.Ticker
		var glctx gl.Context
		for ev := range a.Events() {
			switch ev := a.Filter(ev).(type) {
			case lifecycle.Event:
				switch ev.Crosses(lifecycle.StageVisible) {
				case lifecycle.CrossOn:
					logdbg = time.NewTicker(time.Second)
					go func() {
						for range logdbg.C {
							log.Printf("fps=%-4v underruns=%-4v buflen=%-4v tickavg=%-12s drift=%s\n",
								fps, al.Underruns(), al.BufLen(), al.TickAverge(), al.DriftApprox())
						}
					}()
					glctx = ev.DrawContext.(gl.Context)
					onStart(glctx)
					al.Start()
				case lifecycle.CrossOff:
					glctx = nil
					if logdbg != nil {
						logdbg.Stop()
					}
					al.Stop()
					al.CloseDevice()
				}
			case touch.Event:
				env.Touch(ev)
			case size.Event:
				if glctx == nil {
					a.Send(ev)
				} else {
					onLayout(ev)
				}
			case paint.Event:
				if glctx != nil {
					onPaint(glctx)
					a.Publish()
					a.Send(paint.Event{})
				}
			}
		}
	})
}
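Ticker.Stop does not close the ticker's channel, so the `for range logdbg.C` goroutine above parks forever once the ticker is stopped (the same pattern recurs in Code Example #28). That is harmless in a short-lived app but a leak in long-running servers; a sketch of one way to let such a logger goroutine exit, using an explicit done channel:

func startLogger(interval time.Duration) (stop func()) {
	t := time.NewTicker(interval)
	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-t.C:
				log.Println("tick")
			case <-done:
				return // actually terminates, unlike ranging over t.C
			}
		}
	}()
	return func() { t.Stop(); close(done) }
}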
Code Example #14
File: mandel.go Project: zenoss/rog-go
// clicker handles possibly multiple click mouse actions.
// It should be called with the first mouse event that triggered
// the action (which should have m.Buttons != 0), and the
// channel from which it will read mouse events.
// It sends a mouse event on c for each click;
// and closes c when no more clicks are available.
// If the last event has Buttons == 0, the mouse
// has been released, otherwise the user continues
// to drag the mouse. Only the last event may have Buttons==0.
//
func clicker(m0 draw.Mouse, mc <-chan draw.Mouse) (clicks, final <-chan draw.Mouse) {
	var t *time.Ticker
	c := make(chan draw.Mouse)
	fc := make(chan draw.Mouse, 1)
	go func() {
		c <- m0
		m := m0
		var ok bool
	tracking:
		for {
			// wait for button up or delta or time to move outside limit.
			for {
				if m, ok = <-mc; !ok {
					break tracking // mc closed
				}
				if m.Buttons == 0 {
					// does a long click with no intervening movement still count as a click?
					break
				}
				d := m.Sub(m0.Point)
				if m.Nsec-m0.Nsec > ClickTime || d.X*d.X+d.Y*d.Y > ClickDist {
					break tracking
				}
			}

			t = time.NewTicker(ClickTime)
			// wait for button down or delta or time to move outside limit.
		buttonDown:
			for {
				select {
				case m, ok = <-mc:
					if !ok {
						break tracking // mc closed
					}
					d := m.Sub(m0.Point)
					if m.Nsec-m0.Nsec > ClickTime || d.X*d.X+d.Y*d.Y > ClickDist {
						break tracking
					}
					if m.Buttons != 0 {
						break buttonDown
					}
				case <-t.C:
					break tracking
				}
			}
			t.Stop()
			t = nil
			c <- m0
			m0 = m
		}
		if t != nil {
			t.Stop()
		}
		close(c)
		fc <- m
	}()
	return c, fc
}
Code Example #15
File: ssh.go Project: Reejoshi/cli
func keepalive(conn ssh.Conn, ticker *time.Ticker, stopCh chan struct{}) {
	for {
		select {
		case <-ticker.C:
			_, _, _ = conn.SendRequest("*****@*****.**", true, nil)
		case <-stopCh:
			ticker.Stop()
			return
		}
	}
}
Code Example #16
File: main.go Project: sparrc/stash
// daemon controls the backup processors
func daemon(ticker *time.Ticker, quit <-chan bool) {
	config := stash.NewConfig()
	for {
		select {
		case <-ticker.C:
			processBackups(config)
		case <-quit:
			ticker.Stop()
			return
		}
	}
}
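A sketch of how a caller might wire daemon up; the interval and the shutdown signaling are assumptions, not from the stash source:

func runDaemonSketch() {
	ticker := time.NewTicker(time.Hour)
	quit := make(chan bool)
	go daemon(ticker, quit)
	// ... later, at shutdown (daemon stops the ticker itself):
	quit <- true
}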
Code Example #17
File: watcher.go Project: upfluence/etcdexpose
func (t *watcher) run(evt chan<- bool, ticker *time.Ticker) {
	for {
		select {
		case <-ticker.C:
			evt <- true
		case <-t.stopChan:
			ticker.Stop()
			close(evt)
			return
		}
	}
}
Code Example #18
File: main.go Project: antlinker/alog
func output(startTime time.Time, ticker *time.Ticker) {
	for t := range ticker.C {
		totalNum := alog.GALog.GetWriteNum()
		currentSecond := float64(t.Sub(startTime)) / float64(time.Second)
		info := fmt.Sprintf("\r ===> log entries written: %d, elapsed: %.2fs", totalNum, currentSecond)
		fmt.Print(info)
		if totalNum == int64(_LogNum) {
			ticker.Stop()
			_GCHComplete <- time.Now()
		}
	}
}
Code Example #19
func PingList(hostList []string, waitTime int, timeLimit int) {
	successAlive := make([]PingReturn, 0)
	noRet := make(chan PingReturn, 255)
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	done := make(chan struct{})
	defer close(done)
	go func() {
		for {
			select {
			case <-ticker.C:
				// progress estimate; successAlive is appended to concurrently,
				// so this count is approximate
				fmt.Printf("all:%d over:%d pct:%.1f%%\n", len(hostList), len(successAlive),
					float64(len(successAlive))/float64(len(hostList))*100)
			case <-done:
				return // stop the progress goroutine when PingList returns
			}
		}
	}()
	for _, v := range hostList {
		go func(v string) {
			r := ping(v, timeLimit)
			// print("*")
			noRet <- r
		}(v)
	}
	func() {
		for {
			select {
			case <-time.After(time.Second * time.Duration(waitTime)):
				fmt.Println("timeout ", waitTime)
				return
			case r := <-noRet:
				successAlive = append(successAlive, r)
				if len(successAlive) == len(hostList) {
					return
				}
			}
		}
	}()

	var suc, err int
	for _, v := range successAlive {
		if v.success {
			suc++
			fmt.Printf("ip:%s success:%t\n", v.host, v.success)
		} else {
			err++
			// fmt.Println(v.msg, v.err.Error())
		}
	}
	fmt.Printf("###########################\nsuccess:%d error:%d\n", suc, err)

}
Code Example #20
File: requests.go Project: sanathp/statusok
// createTicker writes requestConfig to the request channel every requestConfig.CheckEvery seconds.
func createTicker(requestConfig RequestConfig) {
	ticker := time.NewTicker(requestConfig.CheckEvery * time.Second)
	quit := make(chan struct{}) // note: nothing ever closes quit, so this loop runs for the life of the process
	for {
		select {
		case <-ticker.C:
			requestChannel <- requestConfig
		case <-quit:
			ticker.Stop()
			return
		}
	}
}
Code Example #21
func handleEmails(cfg *config.Config, db *database.Db, broker *messaging.Broker, ticker *time.Ticker, quit chan os.Signal, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case <-ticker.C:
			fmt.Printf("Email stuff\n")

		case <-quit:
			fmt.Printf("Shutting down email loop\n")
			ticker.Stop()
			return
		}
	}
}
Code Example #22
File: strobe.go Project: Carrotman42/strobe
func main() {
	fmt.Println("Started!")
	chs := [...]struct {
		ch chan<- bool
		on bool
	}{
		{openStrobe(7), true},
		{openStrobe(8), true},
		{openStrobe(11), true},
	}

	vlc := startMedia("music/say.wav")
	time.Sleep(time.Second * 1)

	bpmChange := make(chan *time.Ticker)
	go detectBPM(vlc, bpmChange)

	cmd := make(chan int)
	go getInput(cmd)
	cur := true
	var ticker *time.Ticker
	var tickChan <-chan time.Time
	for {
		select {
		case <-tickChan:
			for _, ch := range chs {
				if ch.on {
					ch.ch <- cur
				}
			}
			cur = !cur
		case newTicker := <-bpmChange:
			if ticker != nil {
				ticker.Stop()
			}
			ticker = newTicker
			tickChan = ticker.C
		case char := <-cmd:
			switch {
			case char >= 0 && char < len(chs):
				chs[char].on = !chs[char].on
				chs[char].ch <- false
			default:
				fmt.Println("Bad command:", char)
			}
		}
	}
}
Code Example #23
func loopHistoricalRecordEvent(ticker *time.Ticker, checkingInterval time.Duration) {
	for {
		select {
		case <-ticker.C:
			// Historical record
			if active {
				periodicalRunHistoricalRecordEvent()
			}
		case <-quitChannel:
			ticker.Stop()
			log.Info("Loop historical record event quit")
			return
		}
	}
}
Code Example #24
File: gameserver.go Project: boombuler/tron2go
func (gs *GameServer) adjustSpeed(ticker *time.Ticker) *time.Ticker {
	ticker.Stop()
	if gs.SuddenDeathTime != nil {
		timeSinceSDStart := time.Since(*gs.SuddenDeathTime)
		SDRound := int(timeSinceSDStart / SUDDENDEATH_INC_TIME)
		speedFactor := 1.0 + (float64(SDRound) * SUDDENDEATH_FACTOR)
		speed := time.Duration(float64(SPEED.Nanoseconds()) / speedFactor)

		ticker = time.NewTicker(speed)
	} else {
		ticker = time.NewTicker(SPEED)
	}

	return ticker
}
Code Example #25
File: ibm.go Project: hack4impact/transcribe4all
func keepConnectionOpen(ws *websocket.Conn, ticker *time.Ticker, quit chan struct{}) {
	for {
		select {
		case <-ticker.C:
			err := ws.WriteJSON(map[string]string{
				"action": "no-op",
			})
			if err != nil {
				return
			}
		case <-quit:
			ticker.Stop()
			return
		}
	}
}
Code Example #26
File: crypto.go Project: FiloSottile/caddy
// standaloneTLSTicketKeyRotation governs over the array of TLS ticket keys used to de/crypt TLS tickets.
// It periodically sets a new ticket key as the first one, used to encrypt (and decrypt),
// pushing any old ticket keys to the back, where they are considered for decryption only.
//
// Lack of entropy for the very first ticket key results in the feature being disabled (as does Go),
// later lack of entropy temporarily disables ticket key rotation.
// Old ticket keys are still phased out, though.
//
// Stops the ticker when returning.
func standaloneTLSTicketKeyRotation(c *tls.Config, ticker *time.Ticker, exitChan chan struct{}) {
	defer ticker.Stop()

	// The entire page should be marked as sticky, but Go cannot do that
	// without resorting to syscall#Mlock, and we don't have madvise (for NODUMP) either. ☹
	keys := make([][32]byte, 1, NumTickets)

	rng := c.Rand
	if rng == nil {
		rng = rand.Reader
	}
	if _, err := io.ReadFull(rng, keys[0][:]); err != nil {
		c.SessionTicketsDisabled = true // bail if we don't have the entropy for the first one
		return
	}
	c.SessionTicketKey = keys[0] // SetSessionTicketKeys doesn't set a 'tls.keysAlreadySet'
	c.SetSessionTicketKeys(setSessionTicketKeysTestHook(keys))

	for {
		select {
		case _, isOpen := <-exitChan:
			if !isOpen {
				return
			}
		case <-ticker.C:
			rng = c.Rand // could've changed since the start
			if rng == nil {
				rng = rand.Reader
			}
			var newTicketKey [32]byte
			_, err := io.ReadFull(rng, newTicketKey[:])

			if len(keys) < NumTickets {
				keys = append(keys, keys[0]) // manipulates the internal length
			}
			for idx := len(keys) - 1; idx >= 1; idx-- {
				keys[idx] = keys[idx-1] // yes, this makes copies
			}

			if err == nil {
				keys[0] = newTicketKey
			}
			// pushes the last key out, doesn't matter that we don't have a new one
			c.SetSessionTicketKeys(setSessionTicketKeysTestHook(keys))
		}
	}
}
Code Example #27
File: websocket.go Project: apcera/sample-apps
func (ep *websocketPeer) sending() {
	ep.inSending = make(chan struct{})
	var ticker *time.Ticker
	if ep.PingTimeout == 0 {
		ticker = time.NewTicker(7 * 24 * time.Hour)
	} else {
		ticker = time.NewTicker(ep.PingTimeout)
	}

	defer func() {
		ep.setReadDead()
		ticker.Stop()
		close(ep.inSending)
	}()

	for {
		select {
		case msg := <-ep.sendMsgs:
			if closed, _ := ep.doSend(msg); closed {
				return
			}
		case <-ticker.C:
			wt := ep.WriteTimeout
			if wt == 0 {
				wt = 10 * time.Second
			}
			if err := ep.conn.WriteControl(websocket.PingMessage, nil, time.Now().Add(wt)); err != nil {
				log.Println("error sending ping message:", err)
				return
			}
		case <-ep.closing:
			// sending remaining messages.
			for {
				select {
				case msg := <-ep.sendMsgs:
					if closed, _ := ep.doSend(msg); !closed {
						continue
					}
				default:
				}
				break
			}
			return
		}
		ep.updateReadDeadline()
	}
}
Code Example #28
File: main.go Project: dskinner/material
func main() {
	app.Main(func(a app.App) {
		var glctx gl.Context
		var ticker *time.Ticker
		for ev := range a.Events() {
			switch ev := a.Filter(ev).(type) {
			case lifecycle.Event:
				switch ev.Crosses(lifecycle.StageVisible) {
				case lifecycle.CrossOn:
					if ticker != nil {
						ticker.Stop()
					}
					ticker = time.NewTicker(time.Second)
					go func() {
						for range ticker.C {
							log.Printf("fps=%-4v\n", fps)
						}
					}()
					glctx = ev.DrawContext.(gl.Context)
					onStart(glctx)
					a.Send(paint.Event{})
				case lifecycle.CrossOff:
					if ticker != nil {
						ticker.Stop()
					}
					onStop(glctx)
					glctx = nil
				}
			case size.Event:
				if glctx == nil {
					a.Send(ev) // republish event until onStart is called
				} else {
					onLayout(ev)
				}
			case paint.Event:
				if glctx == nil || ev.External {
					continue
				}
				onPaint(glctx)
				a.Publish()
				a.Send(paint.Event{})
			case touch.Event:
				env.Touch(ev)
			}
		}
	})
}
Code Example #29
func loopNotifier(ticker *time.Ticker, checkingInterval time.Duration) {
	for {
		select {
		// Notifier
		case replicationControllerNotifier := <-registerNotifierChannel:
			receiveFromNotifierChannel(replicationControllerNotifier)
		case <-ticker.C:
			// Notifier
			periodicalCheckNotifier(checkingInterval)
		case <-quitChannel:
			ticker.Stop()
			close(registerNotifierChannel)
			log.Info("Loop notifier quit")
			return
		}
	}
}
Code Example #30
File: subscriber.go Project: crezam/kit
func (p *Subscriber) loop(t *time.Ticker, lookup Lookup) {
	defer t.Stop()
	for {
		select {
		case <-t.C:
			instances, err := p.resolve(lookup)
			if err != nil {
				p.logger.Log("name", p.name, "err", err)
				continue // don't replace potentially-good with bad
			}
			p.cache.Update(instances)

		case <-p.quit:
			return
		}
	}
}