Example #1
// Runs in its own goroutine, listens for interval tickers which trigger it to
// a) try to open any unopened files and b) read any new data from already
// opened files.
func (fm *FileMonitor) Watcher() {
	discovery := time.Tick(fm.discoverInterval)
	checkStat := time.Tick(fm.statInterval)

	ok := true

	for ok {
		select {
		case _, ok = <-fm.stopChan:
			break
		case <-checkStat:
			for fileName := range fm.fds {
				ok = fm.ReadLines(fileName)
				if !ok {
					break
				}
			}
		case <-discovery:
			// Check to see if the files exist now, start reading them
			// if we can, and watch them
			for fileName := range fm.discover {
				if fm.OpenFile(fileName) == nil {
					delete(fm.discover, fileName)
				}
			}
		}
	}
	for _, fd := range fm.fds {
		fd.Close()
	}
}
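A note on the pattern above: time.Tick is convenient in loops that live for the whole program, but the Ticker it creates can never be stopped, so each call leaks a timer once the goroutine exits. For a stoppable worker like this Watcher, a leak-free sketch (standalone, not the project's code) uses time.NewTicker with a deferred Stop:

package main

import (
	"fmt"
	"time"
)

func watch(stop <-chan struct{}) {
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop() // unlike time.Tick, this releases the underlying timer
	for {
		select {
		case <-stop:
			return
		case t := <-ticker.C:
			fmt.Println("poll at", t) // the poll work goes here
		}
	}
}

func main() {
	stop := make(chan struct{})
	go watch(stop)
	time.Sleep(2 * time.Second)
	close(stop)
}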
Example #2
func (m *myservice) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (ssec bool, errno uint32) {
	const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown | svc.AcceptPauseAndContinue
	changes <- svc.Status{State: svc.StartPending}
	fasttick := time.Tick(500 * time.Millisecond)
	slowtick := time.Tick(2 * time.Second)
	tick := fasttick
	changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted}
loop:
	for {
		select {
		case <-tick:
			beep()
		case c := <-r:
			switch c.Cmd {
			case svc.Interrogate:
				changes <- c.CurrentStatus
				// testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4
				time.Sleep(100 * time.Millisecond)
				changes <- c.CurrentStatus
			case svc.Stop, svc.Shutdown:
				break loop
			case svc.Pause:
				changes <- svc.Status{State: svc.Paused, Accepts: cmdsAccepted}
				tick = slowtick
			case svc.Continue:
				changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted}
				tick = fasttick
			default:
				elog.Error(1, fmt.Sprintf("unexpected control request #%d", c.Cmd))
			}
		}
	}
	changes <- svc.Status{State: svc.StopPending}
	return
}
Example #3
func (m *manager) startAccelometer(app sender, d time.Duration) {
	go func() {
		ev := make([]C.float, 4)
		var lastTimestamp int64
		for {
			select {
			case <-doneA:
				return
			default:
				C.GoIOS_readAccelerometer(m.m, (*C.float)(unsafe.Pointer(&ev[0])))
				t := int64(ev[0] * 1000 * 1000)
				if t > lastTimestamp {
					// TODO(jbd): Do we need to convert the values to another unit?
					// How do the iOS units compare to the Android units?
					app.Send(Event{
						Sensor:    Accelerometer,
						Timestamp: t,
						Data:      []float64{float64(ev[1]), float64(ev[2]), float64(ev[3])},
					})
					lastTimestamp = t
					time.Sleep(d) // sleep instead of <-time.Tick(d): each Tick call leaks a Ticker
				} else {
					time.Sleep(d / 2)
				}
			}
		}
	}()
}
Example #4
func (r *reporter) run() {
	intervalTicker := time.Tick(r.interval)
	//pingTicker := time.Tick(time.Second * 5)
	pingTicker := time.Tick(r.interval / 2)

	for {
		select {
		// TODO on shutdown, flush all metrics

		case <-r.stop:
			return

		case <-intervalTicker:
			if err := r.send(); err != nil {
				log.Error("unable to send metrics to InfluxDB. err=%v", err)
			}

		case <-pingTicker:
			_, _, err := r.client.Ping()
			if err != nil {
				log.Error("got error while sending a ping to InfluxDB, trying to recreate client. err=%v", err)

				if err = r.makeClient(); err != nil {
					log.Error("unable to make InfluxDB client. err=%v", err)
				}
			}
		}
	}
}
Example #5
func (game *Game) run() {
	timeInterval := 2e8
	updateTicker := time.Tick(30e9)
	moveTicker := time.Tick(time.Duration(timeInterval))
	foodTicker := time.Tick(1e9)
	for {
		select {
		case <-updateTicker:
			timeInterval /= 2
			moveTicker = time.Tick(time.Duration(timeInterval))
		case <-moveTicker:
			game.PlayerOne.AdvancePosition()
			game.PlayerTwo.AdvancePosition()
			game.checkForLoser()
			game.PlayerOne.ToClient <- game
			game.PlayerTwo.ToClient <- game
			if game.HasEnded {
				close(game.PlayerOne.ToClient)
				close(game.PlayerTwo.ToClient)
				return
			} else {
				game.eatFood()
			}
		case <-foodTicker:
			x := rand.Int() % game.Width
			y := rand.Int() % game.Height
			game.Food = append(game.Food, [2]int{x, y})
		case update := <-game.PlayerOne.FromClient:
			game.PlayerOne.UpdateHeading(update)
		case update := <-game.PlayerTwo.FromClient:
			game.PlayerTwo.UpdateHeading(update)
		}
	}
}
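Each pass through the updateTicker case above calls time.Tick again, abandoning the previous ticker, which is never garbage-collected. Since Go 1.15, (*time.Ticker).Reset changes the period of a single ticker in place; a minimal standalone sketch of the same speed-up pattern:

package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 200 * time.Millisecond
	move := time.NewTicker(interval)
	defer move.Stop()
	speedup := time.NewTicker(time.Second)
	defer speedup.Stop()

	for i := 0; i < 15; i++ {
		select {
		case <-speedup.C:
			interval /= 2
			move.Reset(interval) // reuse the same ticker at the faster rate
		case <-move.C:
			fmt.Println("move, interval =", interval)
		}
	}
}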
Example #6
func rate_limiting() {
	fmt.Println("<rate_limiting>")
	fmt.Println("<------------->")

	// First we'll look at basic rate limiting. Suppose
	// we want to limit our handling of incoming requests.
	// We'll serve these requests off a channel of the
	// same name.
	requests := make(chan int, 5)
	for i := 1; i <= 5; i++ {
		requests <- i
	}
	close(requests)

	// This `limiter` channel will receive a value
	// every 200 milliseconds. This is the regulator in
	// our rate limiting scheme.
	limiter := time.Tick(time.Millisecond * 200)

	// By blocking on a receive from the `limiter` channel
	// before serving each request, we limit ourselves to
	// 1 request every 200 milliseconds.
	for req := range requests {
		<-limiter
		fmt.Println("request", req, time.Now())
	}

	// We may want to allow short bursts of requests in
	// our rate limiting scheme while preserving the
	// overall rate limit. We can accomplish this by
	// buffering our limiter channel. This `burstyLimiter`
	// channel will allow bursts of up to 3 events.
	burstyLimiter := make(chan time.Time, 3)

	// Fill up the channel to represent allowed bursting.
	for i := 0; i < 3; i++ {
		burstyLimiter <- time.Now()
	}

	// Every 200 milliseconds we'll try to add a new
	// value to `burstyLimiter`, up to its limit of 3.
	go func() {
		for t := range time.Tick(time.Millisecond * 200) {
			burstyLimiter <- t
		}
	}()

	// Now simulate 5 more incoming requests. The first
	// 3 of these will benefit from the burst capability
	// of `burstyLimiter`.
	burstyRequests := make(chan int, 5)
	for i := 1; i <= 5; i++ {
		burstyRequests <- i
	}
	close(burstyRequests)
	for req := range burstyRequests {
		<-burstyLimiter
		fmt.Println("request", req, time.Now())
	}
}
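The hand-rolled limiter in this example is essentially what the golang.org/x/time/rate package provides: rate.Every(200*time.Millisecond) expresses the refill rate, and the burst argument plays the role of the buffered burstyLimiter. A sketch of the equivalent, assuming the x/time/rate module is available:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One token every 200ms, at most 3 accumulated: the same shape as
	// the burstyLimiter channel above.
	limiter := rate.NewLimiter(rate.Every(200*time.Millisecond), 3)
	for req := 1; req <= 5; req++ {
		if err := limiter.Wait(context.Background()); err != nil {
			return // context canceled
		}
		fmt.Println("request", req, time.Now())
	}
}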
Example #7
func (w *World) Loop() {
	timer1secCh := time.Tick(1 * time.Second)
	fps := 60
	timer60Ch := time.Tick(time.Duration(1000/fps) * time.Millisecond)
loop:
	for {
		select {
		case cmd := <-w.cmdCh:
			//log.Println(cmd)
			switch cmd.Cmd {
			default:
				log.Printf("unknown cmd %v", cmd)
			case "quit":
				break loop
			}
		case ftime := <-timer60Ch:
			ok := w.Do1Frame(ftime)
			if !ok {
				break loop
			}
		case <-timer1secCh:
			log.Printf("%v %v", w, <-go4game.IdGenCh)

			log.Printf("%v", w.octree)
			ol := w.ListNearObj(SnakeDefault.WorldCube)
			for _, o := range ol {
				log.Printf("%v", o)
			}
		}
	}
}
Example #8
// Lock attempts to acquire mutex m. If the lock is already in use, the calling
// goroutine blocks until the mutex is available.
func (m *Mutex) Lock() error {
	l, err := m.tryLock()
	if l || err != nil {
		return err
	}
	// if RetryTime is not a positive value, we don't want to retry. Just fail instead.
	if m.RetryTime <= 0 {
		return fmt.Errorf("failed to acquire lock '%s'", m.LockName)
	}
	retryTicker := time.Tick(m.RetryTime)
	var timeout <-chan time.Time
	if m.Timeout > 0 {
		// a one-shot time.After is all a timeout needs; a recurring
		// time.Tick would leak its Ticker after Lock returns
		timeout = time.After(m.Timeout)
	} else {
		// if m.Timeout isn't a positive value, use a channel that is never
		// sent on (effectively making the timeout infinite)
		timeout = make(chan time.Time)
	}
	for {
		select {
		case <-retryTicker:
			l, err := m.tryLock()
			if l || err != nil {
				return err
			}
		case <-timeout:
			return fmt.Errorf("mutex lock hit timeout of %v", m.Timeout)
		}
	}
}
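For context, a caller configures the retry and timeout knobs before calling Lock. A hypothetical usage sketch against the Mutex type above (the lock name and durations are invented, and the existence of a matching Unlock is assumed):

m := &Mutex{
	LockName:  "daily-report",         // hypothetical name, for illustration only
	RetryTime: 250 * time.Millisecond, // poll for the lock four times a second
	Timeout:   10 * time.Second,       // give up after ten seconds
}
if err := m.Lock(); err != nil {
	log.Fatalf("could not acquire lock: %v", err)
}
defer m.Unlock() // assuming the package provides the matching Unlock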
Example #9
func main() {
	start := time.Now()
	counter := 0
	tick := time.Tick(100 * time.Millisecond)
	tick2 := time.Tick(5 * time.Second)
	tick3 := time.Tick(10 * time.Second)
	timeout := time.After(time.Second * 30)

	fmt.Println("Started at ", start)
	for {
		select {
		case <-tick:
			fmt.Print(".")
			counter += 1

		case <-tick2:
			fmt.Println("")
			fmt.Println("Time elapsed: ", time.Since(start))

		case <-tick3:
			fmt.Println("\nCounter:", counter)

		case <-timeout:
			fmt.Println("Timeout")
			os.Exit(0)
		}
	}

}
Example #10
func main() {
	lastSeenMessage := time.Now()

	checker := time.Tick(1 * time.Second)
	producer := time.Tick(10 * time.Second)

	log.Println("Started")

	go func() {
		for range producer {
			lastSeenMessage = time.Now()
			log.Println(lastSeenMessage)
		}
	}()

	for range checker {
		checkTime := 5 * time.Second
		lastSeenMessageTime := lastSeenMessage.Add(checkTime).Unix()
		currentTime := time.Now().Unix()

		check := lastSeenMessageTime < currentTime

		log.Println(currentTime, lastSeenMessageTime, check)

		if check {
			panic(fmt.Sprintf("no message received in last %v", checkTime))
		}
	}
}
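One caveat with this watchdog: lastSeenMessage is written by the producer goroutine and read by the checker with no synchronization, which is a data race under the Go memory model, and the Unix-second arithmetic is coarser than it needs to be. A race-free standalone sketch of the same watchdog using a mutex and time.Since:

package main

import (
	"fmt"
	"log"
	"sync"
	"time"
)

func main() {
	var (
		mu       sync.Mutex
		lastSeen = time.Now()
	)

	go func() {
		for range time.Tick(10 * time.Second) {
			mu.Lock()
			lastSeen = time.Now()
			mu.Unlock()
			log.Println("message received")
		}
	}()

	const maxSilence = 5 * time.Second
	for range time.Tick(1 * time.Second) {
		mu.Lock()
		silence := time.Since(lastSeen)
		mu.Unlock()
		if silence > maxSilence {
			panic(fmt.Sprintf("no message received in last %v", maxSilence))
		}
	}
}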
Example #11
func (fm *FileMonitor) Watcher() {
	discovery := time.Tick(time.Second * 5)
	checkStat := time.Tick(time.Millisecond * 500)

	for {
		select {
		case <-checkStat:
			for fileName := range fm.fds {
				fm.ReadLines(fileName)
			}
		case <-discovery:
			// Check to see if the files exist now, start reading them
			// if we can, and watch them
			for fileName := range fm.discover {
				if fm.OpenFile(fileName) == nil {
					delete(fm.discover, fileName)
				}
			}
		case <-fm.stopChan:
			for _, fd := range fm.fds {
				fd.Close()
			}
			return
		}
	}
}
Example #12
// Runs in its own goroutine, listens for interval tickers which trigger it to
// a) try to open any unopened files and b) read any new data from already
// opened files.
func (fm *FileMonitor) Watcher() {
	discovery := time.Tick(fm.discoverInterval)
	checkStat := time.Tick(fm.statInterval)

	ok := true

	for ok {
		select {
		case _, ok = <-fm.stopChan:
			break
		case <-checkStat:
			if fm.fd != nil {
				ok = fm.ReadLines(fm.logfile)
				if !ok {
					break
				}
			}
		case <-discovery:
			// Check to see if the files exist now, start reading them
			// if we can, and watch them
			if fm.OpenFile(fm.logfile) == nil {
				fm.discover = false
			}
		}
	}
	if fm.fd != nil {
		fm.fd.Close()
		fm.fd = nil
	}
}
Example #13
func get_headers() {
	if SeedNode != "" {
		pr, e := peersdb.NewPeerFromString(SeedNode)
		if e != nil {
			fmt.Println("Seed node error:", e.Error())
		} else {
			fmt.Println("Seed node:", pr.Ip())
			new_connection(pr)
		}
	}
	LastBlock.Mutex.Lock()
	LastBlock.node = MemBlockChain.BlockTreeEnd
	LastBlock.Mutex.Unlock()

	tickTick := time.Tick(100 * time.Millisecond)
	tickStat := time.Tick(6 * time.Second)

	for !GlobalExit() && !GetAllHeadersDone() {
		select {
		case <-tickTick:
			add_new_connections()

		case <-tickStat:
			LastBlock.Mutex.Lock()
			fmt.Println("Last Header Height:", LastBlock.node.Height, "...")
			LastBlock.Mutex.Unlock()
			usif_prompt()
		}
	}
}
Example #14
func (l *lgr) Updater(windowLogGranularity int, keyLogGranularity int) {
	go func() {
		// Fetch freshest logs
		c := time.Tick(
			time.Millisecond * time.Duration(windowLogGranularity),
		)
		for range c {
			newWLogs := l.WindowLogger.GetFreshestTxtLogs()
			if newWLogs != nil {
				l.winLogs = append(
					l.winLogs,
					newWLogs,
				)
			}
		}
	}()

	go func() {
		// Fetch freshest logs
		c := time.Tick(
			time.Millisecond * time.Duration(keyLogGranularity),
		)
		for range c {
			newKLogs := l.KeyLogger.GetFreshestNumLogs()
			if newKLogs != nil {
				l.keyLogs = append(
					l.keyLogs,
					newKLogs,
				)
			}
		}
	}()
}
Example #15
func main() {
	requests := make(chan int, 5)
	for i := 1; i <= 5; i++ {
		requests <- i
	}
	close(requests)
	limiter := time.Tick(time.Millisecond * 200)

	for req := range requests {
		<-limiter
		fmt.Println("request", req, time.Now())
	}
	burstyLimiter := make(chan time.Time, 3)

	for i := 0; i < 3; i++ {
		burstyLimiter <- time.Now()
	}

	go func() {
		for t := range time.Tick(time.Millisecond * 200) {
			burstyLimiter <- t
		}
	}()

	burstyRequests := make(chan int, 5)
	for i := 1; i <= 5; i++ {
		burstyRequests <- i
	}
	close(burstyRequests)
	for req := range burstyRequests {
		<-burstyLimiter
		fmt.Println("request", req, time.Now())
	}
}
Example #16
func main() {
	c, err := cluster.New("localhost:7000")
	if err != nil {
		log.Fatal(err)
	}

	oldKeys := make(chan string, 1000)

	doRand := time.Tick(100 * time.Millisecond)
	doOldRand := time.Tick(1 * time.Second)

	for {
		select {
		case <-doRand:
			key := randString()
			doGetSet(c, key)
			select {
			case oldKeys <- key:
			default:
			}

		case <-doOldRand:
			select {
			case key := <-oldKeys:
				doGetSet(c, key)
			default:
			}
		}
	}
}
Example #17
// Heartbeating serves two purposes: a) keeping NAT paths alive, and
// b) updating a remote peer's knowledge of our address, in the event
// it changes (e.g. because NAT paths expired).
// Called only by connection actor process.
func (conn *LocalConnection) ensureHeartbeat(fast bool) error {
	if err := conn.ensureForwarders(); err != nil {
		return err
	}
	var heartbeat, fetchAll, fragTest <-chan time.Time
	// explicitly 0 length chan - make send block until receive occurs
	stop := make(chan interface{}, 0)
	if fast {
		// fast heartbeat only; fetchAll and fragTest stay nil, and since
		// "A nil channel is never ready for communication." (Go spec),
		// their cases in the receiving select never fire
		heartbeat = time.Tick(FastHeartbeat)
	} else {
		heartbeat = time.Tick(SlowHeartbeat)
		fetchAll = time.Tick(FetchAllInterval)
		fragTest = time.Tick(FragTestInterval)
	}
	// Don't need locks here as this is only read here and in
	// handleShutdown, both of which are called by the connection
	// actor process only.
	if conn.heartbeatStop != nil {
		conn.heartbeatStop <- nil
	}
	conn.heartbeatStop = stop
	go conn.forwardHeartbeats(heartbeat, fetchAll, fragTest, stop)
	return nil
}
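The "nil channel" comment above is the whole trick: receiving from a nil channel blocks forever, so leaving fetchAll and fragTest nil in fast mode silently disables their cases in the downstream select. A standalone sketch of the idiom:

package main

import (
	"fmt"
	"time"
)

func main() {
	fast := true

	var slow <-chan time.Time // stays nil in fast mode: its case never fires
	heartbeat := time.Tick(100 * time.Millisecond)
	if !fast {
		slow = time.Tick(time.Second)
	}

	for i := 0; i < 3; i++ {
		select {
		case <-heartbeat:
			fmt.Println("heartbeat")
		case <-slow:
			fmt.Println("slow work") // unreachable while slow is nil
		}
	}
}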
Example #18
func BenchmarkTCPShareRouter(b *testing.B) {
	r, err := NewRouter(nil, ServiceProcessPayload)
	if err != nil {
		b.FailNow()
	}

	hf := NewMsgHeaderFactory(pbt.NewMsgProtobufFactory())

	r.Run()
	time.Sleep(1 * time.Millisecond) // plain Sleep; <-time.Tick here would leak its Ticker

	network := "tcp"
	address := "localhost:10001"
	if err := r.ListenAndServe("client", network, address, hf, ServiceProcessConn); err != nil {
		b.Log(err)
		b.FailNow()
	}

	name := "scheduler"
	n := ConcurrentNum
	m := GoRoutineRequests
	for i := 0; i < n; i++ {
		// strconv.Itoa, not string(i): converting an int with string() yields a rune
		if err := r.Dial(name+strconv.Itoa(i), network, address, hf); err != nil {
			b.Log(err)
			b.FailNow()
		}
	}

	time.Sleep(1 * time.Millisecond)
	testShareRouter(b, r, n, m)
}
Example #19
func main() {
	defer common.LogPanic()
	common.Init()

	if logDirFlag := flag.Lookup("log_dir"); logDirFlag != nil {
		logDir = logDirFlag.Value.String()
	}

	if *dryRun {
		exec.SetRunForTesting(func(command *exec.Command) error {
			glog.Infof("dry_run: %s", exec.DebugString(command))
			return nil
		})
	}
	if *local {
		frontend.InitForTesting("http://localhost:8000/")
	} else {
		frontend.MustInit()
	}

	workerHealthTick := time.Tick(*workerHealthCheckInterval)
	pollTick := time.Tick(*pollInterval)
	// Run immediately, since pollTick will not fire until after pollInterval.
	pollAndExecOnce()
	for {
		select {
		case <-workerHealthTick:
			doWorkerHealthCheck()
		case <-pollTick:
			pollAndExecOnce()
		}
	}
}
Example #20
func (ms MessageServer) notifyWatch() {
	var lastNotify, lastMessage int64
	notifyTick := time.Tick(time.Duration(ms.NotifyDuration) * time.Second)
	fetchTick := time.Tick(time.Duration(ms.FetchDuration) * time.Second)
	expireTick := time.Tick(time.Duration(ms.ExpireDuration) * time.Second)
	var statTick <-chan time.Time
	if ms.Stat {
		// show statistics once a minute
		statTick = time.Tick(60 * time.Second)
	}
	for {
		select {
		case <-notifyTick:
			log.Debugs("Check notification.\n")
			if lastNotify < lastMessage { // both are zero when started
				log.Debugs("Notify run started.\n")
				lastNotify = CurrentTime()
				ms.NotifyPeers()
			}
		case <-fetchTick:
			log.Debugs("Fetch run started.\n")
			ms.FetchRun()
		case <-expireTick:
			log.Debugs("Expire run started.\n")
			ms.DB.ExpireFromIndex()
		case <-statTick:
			stat.Input <- stat.Show
		case <-ms.notifyChan:
			log.Debugs("Notification reason\n")
			lastMessage = CurrentTime()
		}
	}
}
Example #21
func main() {
	defer common.LogPanic()
	master_common.InitWithMetrics("ct-poller", graphiteServer)

	if logDirFlag := flag.Lookup("log_dir"); logDirFlag != nil {
		logDir = logDirFlag.Value.String()
	}

	if *dryRun {
		exec.SetRunForTesting(func(command *exec.Command) error {
			glog.Infof("dry_run: %s", exec.DebugString(command))
			return nil
		})
	}

	statusTracker.(*heartbeatStatusTracker).StartMetrics()

	workerHealthTick := time.Tick(*workerHealthCheckInterval)
	pollTick := time.Tick(*pollInterval)
	// Run immediately, since pollTick will not fire until after pollInterval.
	pollAndExecOnce()
	for {
		select {
		case <-workerHealthTick:
			doWorkerHealthCheck()
		case <-pollTick:
			pollAndExecOnce()
		}
	}
}
Example #22
func (m *Manager) ManageFarm() {
	c1 := time.Tick(1 * time.Millisecond)
	c1000 := time.Tick(1000 * time.Millisecond)
	go m.updateFarmLoop(c1)
	go m.outputFarmLoop(c1000)
	go m.receiveCommands()
}
Example #23
func events(feed *couchbase.DcpFeed, timeoutMs int) {
	var timeout <-chan time.Time

	mutations := 0
	done := true
	tick := time.Tick(time.Duration(options.tick) * time.Millisecond)
	if timeoutMs > 0 {
		// a one-shot time.After is enough here; a recurring time.Tick
		// would leak its Ticker once the loop exits
		timeout = time.After(time.Duration(timeoutMs) * time.Millisecond)
	}

loop:
	for {
		select {
		case e := <-feed.C:
			if e.Opcode == mcd.DCP_MUTATION {
				mutations += 1
			} else {
				log.Printf("Received {%s, %d(vb), %d(opq), %s}\n",
					e.Opcode, e.VBucket, e.Opaque, e.Status)
			}
			handleEvent(e)
			done = false

		case <-tick:
			log.Printf("Mutation count %d", mutations)
			if timeout == nil && done {
				break loop
			}
			done = true

		case <-timeout:
			break loop
		}
	}
}
Example #24
// Launch starts a member based on ServerConfig, PeerListeners
// and ClientListeners.
func (m *member) Launch() error {
	var err error
	if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {
		return fmt.Errorf("failed to initialize the etcd server: %v", err)
	}
	m.s.Ticker = time.Tick(tickDuration)
	m.s.SyncTicker = time.Tick(500 * time.Millisecond)
	m.s.Start()

	m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)}

	for _, ln := range m.PeerListeners {
		hs := &httptest.Server{
			Listener: ln,
			Config:   &http.Server{Handler: m.raftHandler},
		}
		hs.Start()
		m.hss = append(m.hss, hs)
	}
	for _, ln := range m.ClientListeners {
		hs := &httptest.Server{
			Listener: ln,
			Config:   &http.Server{Handler: etcdhttp.NewClientHandler(m.s)},
		}
		hs.Start()
		m.hss = append(m.hss, hs)
	}
	return nil
}
Example #25
func BenchmarkPipeShareRouter(b *testing.B) {
	r, err := NewRouter(nil, ServiceProcessPayload)
	if err != nil {
		b.FailNow()
	}

	hf := NewMsgHeaderFactory(pbt.NewMsgProtobufFactory())

	r.Run()
	time.Sleep(1 * time.Millisecond) // plain Sleep; <-time.Tick here would leak its Ticker

	name := "scheduler"
	n := ConcurrentNum
	m := GoRoutineRequests
	for i := 0; i < n; i++ {
		c, s := net.Pipe()
		// strconv.Itoa, not string(): converting an int with string() yields a rune
		ep_c := r.newRouterEndPoint(name+strconv.Itoa(i), c, hf)
		ep_s := r.newRouterEndPoint("client"+strconv.Itoa(n), s, hf)
		r.AddEndPoint(ep_c)
		r.AddEndPoint(ep_s)
	}

	time.Sleep(1 * time.Millisecond)
	testShareRouter(b, r, n, m)
}
Example #26
func (pm *ProtocolManager) update() {
	forceSync := time.Tick(forceSyncCycle)
	blockProc := time.Tick(blockProcCycle)

	for {
		select {
		case <-pm.newPeerCh:
			// Meet the `minDesiredPeerCount` before we select our best peer
			if len(pm.peers) < minDesiredPeerCount {
				break
			}
			// Find the best peer and synchronise with it
			peer := getBestPeer(pm.peers)
			if peer == nil {
				glog.V(logger.Debug).Infoln("Sync attempt canceled. No peers available")
				break // don't call synchronise with a nil peer
			}
			go pm.synchronise(peer)

		case <-forceSync:
			// Force a sync even if not enough peers are present
			if peer := getBestPeer(pm.peers); peer != nil {
				go pm.synchronise(peer)
			}
		case <-blockProc:
			// Try to pull some blocks from the downloader
			go pm.processBlocks()

		case <-pm.quitSync:
			return
		}
	}
}
Example #27
func get_blocks() {
	var bl *btc.Block

	DlStartTime = time.Now()
	BlocksMutex.Lock()
	BlocksComplete = TheBlockChain.BlockTreeEnd.Height
	CurrentBlockHeight := BlocksComplete + 1
	BlocksMutex.Unlock()

	TheBlockChain.DoNotSync = true

	tickSec := time.Tick(time.Second)
	tickDrop := time.Tick(DROP_PEER_EVERY_SEC * time.Second)
	tickStat := time.Tick(6 * time.Second)

	for !GlobalExit() && CurrentBlockHeight <= LastBlockHeight {
		select {
		case <-tickSec:
			cc := open_connection_count()
			if cc > MaxNetworkConns {
				drop_slowest_peers()
			} else if cc < MaxNetworkConns {
				add_new_connections()
			}

		case <-tickStat:
			print_stats()
			usif_prompt()

		case <-tickDrop:
			if open_connection_count() >= MaxNetworkConns {
				drop_slowest_peers()
			}

		case bl = <-BlockQueue:
			bl.Trusted = CurrentBlockHeight <= TrustUpTo
			if OnlyStoreBlocks {
				TheBlockChain.Blocks.BlockAdd(CurrentBlockHeight, bl)
			} else {
				er, _, _ := TheBlockChain.CheckBlock(bl)
				if er != nil {
					fmt.Println("CheckBlock:", er.Error())
					return
				} else {
					bl.LastKnownHeight = CurrentBlockHeight + uint32(len(BlockQueue))
					TheBlockChain.AcceptBlock(bl)
				}
			}
			atomic.StoreUint32(&LastStoredBlock, CurrentBlockHeight)
			atomic.AddUint64(&DlBytesProcessed, uint64(len(bl.Raw)))
			CurrentBlockHeight++

		case <-time.After(100 * time.Millisecond):
			COUNTER("IDLE")
			TheBlockChain.Unspent.Idle()
		}
	}
	TheBlockChain.Sync()
}
Example #28
func main() {

	// First we'll look at basic rate limiting. Suppose
	// we want to limit our handling of incoming requests.
	// We'll serve these requests off a channel of the
	// same name.
	requests := make(chan int, 5)
	for i := 1; i <= 5; i++ {
		requests <- i
	}
	close(requests)

	// This `limiter` channel will receive a value
	// every 200 milliseconds. This is the regulator in
	// our rate limiting scheme.
	limiter := time.Tick(time.Millisecond * 200)

	// By blocking on a receive from the `limiter` channel
	// before serving each request, we limit ourselves to
	// 1 request every 200 milliseconds.
	for req := range requests {
		<-limiter
		fmt.Println("peticiones", req, time.Now())
	}

	// We may want to allow short bursts of requests in
	// our rate limiting scheme while preserving the
	// overall rate limit. We can accomplish this by
	// buffering our limiter channel. This `burstyLimiter`
	// channel will allow bursts of up to 3 events.
	burstyLimiter := make(chan time.Time, 3)

	// Fill up the channel to represent allowed bursting.
	for i := 0; i < 3; i++ {
		burstyLimiter <- time.Now()
	}

	// Every 200 milliseconds we'll try to add a new
	// value to `burstyLimiter`, up to its limit of 3.
	go func() {
		for t := range time.Tick(time.Millisecond * 200) {
			burstyLimiter <- t
		}
	}()

	// Now simulate 5 more incoming requests. The first
	// 3 of these will benefit from the burst capability
	// of `burstyLimiter`.
	burstyRequests := make(chan int, 5)
	for i := 1; i <= 5; i++ {
		burstyRequests <- i
	}
	close(burstyRequests)
	for req := range burstyRequests {
		<-burstyLimiter
		fmt.Println("peticiones", req, time.Now())
	}
}
Example #29
func (s *Snake) run(cxt context.Context) error {
	if err := cxt.Err(); err != nil {
		return &errStartingObject{err}
	}

	go func() {
		defer func() {
			if s.pg.Located(s) {
				s.Die()
			}
		}()

		var ticker = time.Tick(s.calculateDelay())

		for {
			select {
			case <-cxt.Done():
				return
			case <-ticker:
			}

			if !s.pg.Located(s) {
				return
			}

			// Calculate next position
			dot, err := s.getNextHeadDot()
			if err != nil {
				return
			}

			if object := s.pg.GetObjectByDot(dot); object != nil {
				if err = logic.Clash(s, object, dot); err != nil {
					return
				}

				if !s.pg.Located(s) {
					return
				}

				ticker = time.Tick(s.calculateDelay())
			}

			tmpDots := make(playground.DotList, len(s.dots)+1)
			copy(tmpDots[1:], s.dots)
			tmpDots[0] = dot
			s.dots = tmpDots

			if s.length < s.DotCount() {
				s.dots = s.dots[:len(s.dots)-1]
			}

			s.lastMove = time.Now()
		}
	}()

	return nil
}
Example #30
File: main.go Project: 4396/dht
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	conn, err := net.ListenPacket("udp", ":0")
	if err != nil {
		return
	}

	d := dht.NewDHT(newRandomID(), conn.(*net.UDPConn), 16)
	t := dht.NewTracker(&dhtQueryTracker{}, &dhtReplyTracker{}, &dhtErrorTracker{})
	exit := make(chan interface{})
	msg := make(chan *udpMessage, 1024)
	datas := &sync.Pool{New: func() interface{} {
		return make([]byte, 1024)
	}}

	go func(msg chan *udpMessage) {
		if err = initDHTServer(d); err != nil {
			fmt.Println(err)
			close(exit)
			return
		}
		conn := d.Conn()
		for {
			// take a fresh buffer per datagram: the consumer hands each one
			// back to the pool, so reusing a single buf here would race
			buf := datas.Get().([]byte)
			n, addr, err := conn.ReadFromUDP(buf)
			if err != nil {
				fmt.Println(err)
				datas.Put(buf)
				continue
			}
			msg <- &udpMessage{addr, buf, n}
		}
	}(msg)

	timer := time.Tick(time.Second * 30)
	checkup := time.Tick(time.Second * 30)

	for {
		select {
		case m := <-msg:
			if m.addr != nil && m.data != nil {
				d.HandleMessage(m.addr, m.data[:m.size], t)
				datas.Put(m.data)
			}
		case <-timer:
			if n := d.Route().NumNodes(); n < 1024 {
				d.DoTimer(time.Minute*15, time.Minute*15, time.Hour*6, time.Minute*5)
			}
		case <-checkup:
			if n := d.Route().NumNodes(); n < 1024 {
				d.FindNode(d.ID())
			}
		case <-exit:
			return
			// no default case: an empty default would make this select
			// non-blocking and spin the loop at full CPU
		}
	}
}