Example #1
func (r *LedReporter) watch() {
	var t *time.Ticker = time.NewTicker(1 * time.Minute)

	for {
		select {
		case color := <-r.blinkChan:
			if t != nil {
				t.Stop()
			}

			led.Off()

			switch color {
			case RED:
				led.Red(true)
			case BLUE:
				led.Blue(true)
			case GREEN:
				led.Green(true)
			}

			t = time.NewTicker(100 * time.Millisecond)
		case <-t.C:
			led.Off()

			if registered {
				led.Blue(true)
			} else {
				led.Off()
			}
		}
	}
}
// Called by main.
// Initializes all the other modules and enters the eternal for loop.
func RunLift(quitCh chan bool) {
	var buttonPress = make(chan driver.Button, 5)
	var status = make(chan driver.LiftStatus, 5)
	myID = udp.NetInit(toNetwork, fromNetwork, quitCh)
	fsmelev.Init(floorOrder, setLight, status, buttonPress, motorStopCh, quitCh)
	restoreBackup()
	liftStatus = <-status
	ticker1 := time.NewTicker(10 * time.Millisecond).C
	ticker2 := time.NewTicker(5 * time.Millisecond).C
	log.Println("Network UP \n Driver UP \n My id:", myID)
	for {
		select {
		case button = <-buttonPress:
			newKeyPress(button)
		case liftStatus = <-status:
			runQueue(liftStatus, floorOrder)
		case message = <-fromNetwork:
			newMessage(message)
			orderLight(message)
		case <-ticker1:
			checkTimeout()
		case <-ticker2:
			runQueue(liftStatus, floorOrder)
		case <-quitCh:
			return
		}
	}
}
Example #3
func newMqttEngine() (*mqttEngine, error) {

	murl, err := url.Parse(*mqttURL)

	if err != nil {
		return nil, err
	}

	mq := &mqttEngine{}

	// Create an MQTT Client.
	cli := client.New(&client.Options{
		ErrorHandler: mq.handleClientError,
	})

	mq.murl = murl
	mq.cli = cli

	mq.attemptConnect()

	//mq.publisher = publisher
	mq.pollTicker = time.NewTicker(time.Second * 1)
	mq.pubTicker = time.NewTicker(time.Second * 15)

	go poll(mq)
	go publish(mq, "Ready to publish")

	return mq, nil
}
Example #4
// NewHTTPFrontend creates a new HTTP-based frontend.
func NewHTTPFrontend(daemon *NNTPDaemon, config map[string]string, url string) Frontend {
	var front httpFrontend
	front.daemon = daemon
	front.regenBoardTicker = time.NewTicker(time.Second * 10)
	front.ukkoTicker = time.NewTicker(time.Second * 30)
	front.regenBoard = make(map[string]groupRegenRequest)
	front.attachments = mapGetInt(config, "allow_files", 1) == 1
	front.bindaddr = config["bind"]
	front.name = config["name"]
	front.webroot_dir = config["webroot"]
	front.static_dir = config["static_files"]
	front.template_dir = config["templates"]
	front.prefix = config["prefix"]
	front.regen_on_start = config["regen_on_start"] == "1"
	front.regen_threads = mapGetInt(config, "regen_threads", 1)
	front.store = sessions.NewCookieStore([]byte(config["api-secret"]))
	front.store.Options = &sessions.Options{
		// TODO: detect http:// etc in prefix
		Path:   front.prefix,
		MaxAge: 600,
	}
	front.postchan = make(chan NNTPMessage, 16)
	front.recvpostchan = make(chan NNTPMessage, 16)
	front.regenThreadChan = make(chan ArticleEntry, 16)
	front.regenGroupChan = make(chan groupRegenRequest, 8)
	return front
}
Example #5
// BUG: This test will most likely fail on a highly loaded
// system
func TestCounterRate(t *testing.T) {
	c := NewCounter()
	// increment counter every 10ms in two goroutines
	// rate ~ 200/sec
	tick1 := time.NewTicker(time.Millisecond * 10)
	go func() {
		for _ = range tick1.C {
			c.Add(1)
		}
	}()
	tick2 := time.NewTicker(time.Millisecond * 10)
	go func() {
		for _ = range tick2.C {
			c.Add(1)
		}
	}()

	time.Sleep(time.Millisecond * 5000)
	tick1.Stop()
	tick2.Stop()

	want := 200.0
	out := c.ComputeRate()

	if math.Abs(want-out) > 1 {
		t.Errorf("c.ComputeRate() = %v, want %v", out, want)
	}
}
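For reference, the arithmetic behind the comments: each goroutine increments on a 10 ms ticker, so two goroutines add roughly 2 * (1000 ms / 10 ms) = 200 per second, or about 1000 increments over the 5-second sleep. Any ticks the scheduler delays or drops on a loaded system push ComputeRate() below 200 by more than the ±1 tolerance, which is why the test is flagged as fragile.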
Example #6
// manage manages outgoing clients. Periodically, the infostore is
// scanned for infos with hop count exceeding the MaxHops
// threshold. If the number of outgoing clients doesn't exceed
// maxPeers(), a new gossip client is connected to a randomly selected
// peer beyond MaxHops threshold. Otherwise, the least useful peer
// node is cut off to make room for a replacement. Disconnected
// clients are processed via the disconnected channel and taken out of
// the outgoing address set. If there are no longer any outgoing
// connections or the sentinel gossip is unavailable, the bootstrapper
// is notified via the stalled conditional variable.
func (g *Gossip) manage() {
	stopper := g.server.stopper

	stopper.RunWorker(func() {
		cullTicker := time.NewTicker(g.jitteredInterval(g.cullInterval))
		stallTicker := time.NewTicker(g.jitteredInterval(g.stallInterval))
		defer cullTicker.Stop()
		defer stallTicker.Stop()
		for {
			select {
			case <-stopper.ShouldStop():
				return
			case c := <-g.disconnected:
				g.doDisconnected(stopper, c)
			case nodeID := <-g.tighten:
				g.tightenNetwork(stopper, nodeID)
			case <-cullTicker.C:
				g.cullNetwork()
			case <-stallTicker.C:
				g.mu.Lock()
				g.maybeSignalStalledLocked()
				g.mu.Unlock()
			}
		}
	})
}
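jitteredInterval is referenced above but not part of this snippet. A minimal sketch of such a helper, assuming the intent is simply to randomize the period so that peers do not all tick in lockstep (illustrative only, not necessarily this project's implementation; needs "math/rand" and "time"):

func jitteredInterval(interval time.Duration) time.Duration {
	// Spread the tick period over [0.75, 1.25) of the base interval.
	return time.Duration(float64(interval) * (0.75 + 0.5*rand.Float64()))
}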
Example #7
func (r *report) Run(shutdown <-chan bool, wg *sync.WaitGroup) {
	defer wg.Done()
	ticker := time.NewTicker(time.Second)
	ticker30 := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	r.printColumns()
	for {
		select {
		case <-shutdown:
			r.printStatusCodes()
			r.printHistogram()
			r.printLatencies()
			if r.total.errorCount.Val() > 0 {
				r.printErrors()
			}
			return
		case <-ticker.C:
			r.printStat(r.second)
			r.clear(r.second)

		case <-ticker30.C:
			r.printStat(r.half)
			r.clear(r.half)
			r.printColumns()
		}
	}
}
Example #8
func (e *Engine) initializeTickers() {
	freq := computeSpawnFrequency(e.Scenario.test_duration.Seconds(),
		float64(e.Scenario.total_users))
	e.spawn_ticker = time.NewTicker(freq)
	e.test_progress_ticker = time.NewTicker(e.progress_update_frequency)
	e.test_completed_ticker = time.NewTicker(e.Scenario.test_duration)
}
Example #9
// Pings the server if we have not received any messages for 5 minutes.
func (irc *Connection) pingLoop() {
	ticker := time.NewTicker(1 * time.Minute)   //Tick every minute.
	ticker2 := time.NewTicker(15 * time.Minute) //Tick every 15 minutes.
	for {
		select {
		case <-ticker.C:
			//Ping if we haven't received anything from the server within 4 minutes
			irc.lastMessageMutex.Lock()
			if time.Since(irc.lastMessage) >= (4 * time.Minute) {
				irc.SendRawf("PING %d", time.Now().UnixNano())
			}
			irc.lastMessageMutex.Unlock()
		case <-ticker2.C:
			//Ping every 15 minutes.
			irc.SendRawf("PING %d", time.Now().UnixNano())
			//Try to recapture nickname if it's not as configured.
			if irc.nick != irc.nickcurrent {
				irc.nickcurrent = irc.nick
				irc.SendRawf("NICK %s", irc.nick)
			}
		case <-irc.endping:
			ticker.Stop()
			ticker2.Stop()
			irc.pingerExit <- true
			return
		}
	}
}
Example #10
// startGossip loops on a periodic ticker to gossip node-related
// information. Starts a goroutine to loop until the node is closed.
func (n *Node) startGossip(ctx context.Context, stopper *stop.Stopper) {
	stopper.RunWorker(func() {
		gossipStoresInterval := envutil.EnvOrDefaultDuration("gossip_stores_interval",
			gossip.DefaultGossipStoresInterval)
		statusTicker := time.NewTicker(gossipStatusInterval)
		storesTicker := time.NewTicker(gossipStoresInterval)
		nodeTicker := time.NewTicker(gossipNodeDescriptorInterval)
		defer storesTicker.Stop()
		defer nodeTicker.Stop()
		n.gossipStores(ctx) // one-off run before going to sleep
		for {
			select {
			case <-statusTicker.C:
				n.ctx.Gossip.LogStatus()
			case <-storesTicker.C:
				n.gossipStores(ctx)
			case <-nodeTicker.C:
				if err := n.ctx.Gossip.SetNodeDescriptor(&n.Descriptor); err != nil {
					log.Warningf(ctx, "couldn't gossip descriptor for node %d: %s", n.Descriptor.NodeID, err)
				}
			case <-stopper.ShouldStop():
				return
			}
		}
	})
}
Example #11
func VersionsSyncJob() {
	versionWait.Add(1)
	defer versionWait.Done()
	QueryDeployVersions()
	tickCheck := time.NewTicker(10 * 1000 * time.Millisecond)
	defer tickCheck.Stop()
	tickDownload := time.NewTicker(1000 * time.Millisecond)

	for {
		select {
		case <-quitSync:
			tickDownload.Stop()
			return
		case <-tickCheck.C:
			QueryDeployVersions()
		case <-tickDownload.C:
			d := needDownload
			DownloadDeployVersions()
			if d != needDownload {
				tickDownload.Stop()
				if needDownload {
					tickDownload = time.NewTicker(10 * time.Millisecond)
				} else {
					tickDownload = time.NewTicker(1000 * time.Millisecond)
				}
			}
		}
	}
}
Example #12
// RunScraper implements Target.
func (t *Target) RunScraper(sampleAppender storage.SampleAppender) {
	defer close(t.scraperStopped)

	lastScrapeInterval := t.interval()

	log.Debugf("Starting scraper for target %v...", t)

	select {
	case <-time.After(t.offset(lastScrapeInterval)):
		// Continue after scraping offset.
	case <-t.scraperStopping:
		return
	}

	ticker := time.NewTicker(lastScrapeInterval)
	defer ticker.Stop()

	t.scrape(sampleAppender)

	// Explanation of the contraption below:
	//
	// In case t.scraperStopping has something to receive, we want to read
	// from that channel rather than starting a new scrape (which might take very
	// long). That's why the outer select has no ticker.C. Should t.scraperStopping
	// not have anything to receive, we go into the inner select, where ticker.C
	// is in the mix.
	for {
		select {
		case <-t.scraperStopping:
			return
		default:
			select {
			case <-t.scraperStopping:
				return
			case <-ticker.C:
				took := time.Since(t.status.LastScrape())

				intervalStr := lastScrapeInterval.String()

				// On changed scrape interval the new interval becomes effective
				// after the next scrape.
				if iv := t.interval(); iv != lastScrapeInterval {
					ticker.Stop()
					ticker = time.NewTicker(iv)
					lastScrapeInterval = iv
				}

				targetIntervalLength.WithLabelValues(intervalStr).Observe(
					float64(took) / float64(time.Second), // Sub-second precision.
				)
				if sampleAppender.NeedsThrottling() {
					targetSkippedScrapes.WithLabelValues(intervalStr).Inc()
					t.status.setLastError(errSkippedScrape)
					continue
				}
				t.scrape(sampleAppender)
			}
		}
	}
}
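The nested select explained in the comment above is a general pattern for giving a stop channel priority over a ticker. A stripped-down sketch of the same idea with hypothetical names (stopCh, doWork):

func prioritizedLoop(stopCh <-chan struct{}, interval time.Duration, doWork func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		// If a stop request is already pending, take it before starting
		// another (possibly long) unit of work.
		select {
		case <-stopCh:
			return
		default:
			select {
			case <-stopCh:
				return
			case <-ticker.C:
				doWork()
			}
		}
	}
}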
Example #13
// GetAllNetDevice returns information about all network devices.
func (this *db) GetAllNetDevice() ([]*NetDevice, error) {
	rows, err := this.Conn.Query("select d.id, d.uuid, d.ip, d.snmp_version, d.snmp_community, d.snmp_port," +
		"d.config_update_interval, d.check_interval, p.ip from network_device as d left join system_proxy as p on d.proxy_id = p.id")
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	devices := []*NetDevice{}
	for rows.Next() {
		device := &NetDevice{}
		var proxy sql.NullString
		if err := rows.Scan(&device.ID, &device.UUID, &device.IpAddr, &device.SnmpVersion,
			&device.SnmpCommunity, &device.SnmpPort, &device.UpdateInterval,
			&device.CheckInterval, &proxy); err != nil {
			continue
		}
		device.Proxy = proxy.String
		device.stopChan = make(chan struct{})
		device.updateTicker = time.NewTicker(time.Second * time.Duration(device.UpdateInterval))
		device.checkTicker = time.NewTicker(time.Second * time.Duration(device.CheckInterval))
		device.DeviceInterfaces = make(map[string]*DeviceInterface)
		// fetch the custom OIDs configured for this device
		if oids, err := this.GetCustomOidByDeviceID(device.ID); err == nil {
			device.CustomOids = oids
		}
		// fetch the device's interfaces
		ifts := this.GetInterfacesByDeviceId(device.ID)
		for _, ift := range ifts {
			device.DeviceInterfaces[string(ift.Index)] = ift
		}
		devices = append(devices, device)
	}
	return devices, nil
}
Example #14
func countDown() {
	endTimer := time.NewTimer(time.Duration(state.TimeLeft) * time.Millisecond)
	gameTicker := time.NewTicker(1 * time.Millisecond)
	paused := false

	for {
		select {
		case <-gameTicker.C:
			state.TimeLeft--

		case shouldPause := <-pauseChannel:
			if shouldPause && !paused {
				endTimer.Stop()
				gameTicker.Stop()
				paused = true
			} else if !shouldPause && paused {
				endTimer.Reset(time.Duration(state.TimeLeft) * time.Millisecond)
				gameTicker = time.NewTicker(1 * time.Millisecond)
				paused = false
			}

		case <-interruptChannel:
			endTimer.Stop()
			gameTicker.Stop()

		case <-endTimer.C:
			endTimer.Stop()
			gameTicker.Stop()
			go intermission()
			return
		}
	}
}
Example #15
// Pings the server if we have not received any messages for 5 minutes
// to keep the connection alive. To be used as a goroutine.
func (irc *Connection) pingLoop() {
	defer irc.Done()
	ticker := time.NewTicker(1 * time.Minute) // Tick every minute for monitoring
	ticker2 := time.NewTicker(irc.PingFreq)   // Tick at the ping frequency.
	for {
		select {
		case <-ticker.C:
			//Ping if we haven't received anything from the server within the keep alive period
			if time.Since(irc.lastMessage) >= irc.KeepAlive {
				irc.SendRawf("PING %d", time.Now().UnixNano())
			}
		case <-ticker2.C:
			//Ping at the ping frequency
			irc.SendRawf("PING %d", time.Now().UnixNano())
			//Try to recapture nickname if it's not as configured.
			if irc.nick != irc.nickcurrent {
				irc.nickcurrent = irc.nick
				irc.SendRawf("NICK %s", irc.nick)
			}
		case <-irc.end:
			ticker.Stop()
			ticker2.Stop()
			return
		}
	}
}
Example #16
func (t *transport) runDiscoverMode() bool {
	var discoverTicker = time.NewTicker(10 * time.Minute)
	defer discoverTicker.Stop()

	var updateTicker = time.NewTicker(5 * time.Second)
	defer updateTicker.Stop()

	var knownAddrs = make(map[string]bool)

	for {
		select {

		case <-t.done:
			return true // done

		case <-updateTicker.C:
			changed := t.updateKnownAddresses(knownAddrs)
			if changed {
				t.discoverNAT()
			}

		case <-discoverTicker.C:
			t.discoverNAT()

		}

		if t.nat != nil {
			return false // not done
		}
	}
}
Example #17
func newProducer(brokers []string, kafkaVersion sarama.KafkaVersion, retryOptions config.Retry) Producer {
	var p sarama.SyncProducer
	var err error
	brokerConfig := newBrokerConfig(kafkaVersion, rawPartition)

	repeatTick := time.NewTicker(retryOptions.Period)
	panicTick := time.NewTicker(retryOptions.Stop)
	defer repeatTick.Stop()
	defer panicTick.Stop()

loop:
	for {
		select {
		case <-panicTick.C:
			panic(fmt.Errorf("Failed to create Kafka producer: %v", err))
		case <-repeatTick.C:
			logger.Debug("Connecting to Kafka cluster:", brokers)
			p, err = sarama.NewSyncProducer(brokers, brokerConfig)
			if err == nil {
				break loop
			}
		}
	}

	logger.Debug("Connected to the Kafka cluster")
	return &producerImpl{producer: p}
}
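The panicTick above only ever fires once, so a one-shot time.Timer expresses the same deadline more directly. A sketch of the same retry-until-deadline loop with hypothetical names (connect, period, deadline), returning an error instead of panicking; needs "fmt" and "time":

func retryUntil(period, deadline time.Duration, connect func() error) error {
	tick := time.NewTicker(period)
	defer tick.Stop()
	giveUp := time.NewTimer(deadline) // one-shot deadline
	defer giveUp.Stop()
	for {
		select {
		case <-giveUp.C:
			return fmt.Errorf("gave up after %v", deadline)
		case <-tick.C:
			if err := connect(); err == nil {
				return nil
			}
		}
	}
}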
Example #18
func (t *transport) runMappingMode() bool {
	var refreshTicker = time.NewTicker(50 * time.Minute)
	defer refreshTicker.Stop()

	var updateTicker = time.NewTicker(5 * time.Second)
	defer updateTicker.Stop()

	for {
		select {

		case <-t.done:
			t.mapping = make(map[string]*natMapping)
			return true // done

		case <-refreshTicker.C:
			t.refreshMapping()

		case <-updateTicker.C:
			t.updateMappings()

		}

		if t.nat == nil {
			t.mapping = make(map[string]*natMapping)
			return false // not done
		}
	}
}
Example #19
// Async
func (conn *LocalConnection) SetEstablished() {
	conn.sendAction(func() error {
		stopTicker(conn.heartbeat)
		old := conn.established
		conn.Lock()
		conn.established = true
		conn.Unlock()
		if old {
			return nil
		}
		conn.Router.Ourself.ConnectionEstablished(conn)
		if err := conn.ensureForwarders(); err != nil {
			return err
		}
		// Send a large frame down the DF channel in order to prompt
		// PMTU discovery to start.
		conn.Send(true, PMTUDiscovery)
		conn.heartbeat = time.NewTicker(SlowHeartbeat)
		conn.fragTest = time.NewTicker(FragTestInterval)
		// avoid initial waits for timers to fire
		conn.Send(true, conn.heartbeatFrame)
		conn.performFragTest()
		return nil
	})
}
Example #20
func globalLogFileUpdater() {
	init := true
	var now time.Time
	ticker := time.NewTicker(time.Duration(24-time.Now().Hour()) * time.Hour) // first tick after the hours left in the day
	for {
		if !init {
			now = <-ticker.C
			ticker.Stop()
			ticker = time.NewTicker(time.Hour * 24)
		} else {
			now = time.Now()
			roomLogChannel[0] = make(chan string, 18)
		}
		globalLogLock.Lock()
		globalLog.Close()
		year, month, day := now.Date()
		filename := fmt.Sprintf("room#0-%d-%s-%d", day,
			month.String(), year)

		var err error // assign to the package-level globalLog below instead of shadowing it with :=
		globalLog, err = os.OpenFile(filename,
			os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
		if err != nil {
			helpers.Logger.Critical("%s", err.Error())
			globalLogLock.Unlock() // release the lock before retrying
			continue
		}
		if !init {
			StopLogger(0)
		}
		globalLogLock.Unlock()
		go logListener(roomLogChannel[0], globalLog, 0)
		init = false
	}
}
Example #21
func Ticker(b *Block) {

	type tickerRule struct {
		Period int
	}

	rule := &tickerRule{
		Period: 1,
	}

	ticker := time.NewTicker(time.Duration(rule.Period) * time.Second)

	for {
		select {
		case tick := <-ticker.C:
			var msg BMsg
			Set(msg, "t", tick)
			broadcast(b.OutChans, msg)

		case msg := <-b.AddChan:
			updateOutChans(msg, b)

		case r := <-b.Routes["set_rule"]:
			unmarshal(r, &rule)
			ticker = time.NewTicker(time.Duration(rule.Period) * time.Second)

		case r := <-b.Routes["get_rule"]:
			marshal(r, rule)

		case <-b.QuitChan:
			quit(b)
			return
		}
	}
}
Example #22
// Heartbeating to ensure our connection to ngrokd is still live
func (c *ClientModel) heartbeat(lastPongAddr *int64, conn conn.Conn) {
	lastPing := time.Unix(atomic.LoadInt64(lastPongAddr)-1, 0)
	ping := time.NewTicker(pingInterval)
	pongCheck := time.NewTicker(time.Second)

	defer func() {
		conn.Close()
		ping.Stop()
		pongCheck.Stop()
	}()

	for {
		select {
		case <-pongCheck.C:
			lastPong := time.Unix(0, atomic.LoadInt64(lastPongAddr))
			needPong := lastPong.Sub(lastPing) < 0
			pongLatency := time.Since(lastPing)

			if needPong && pongLatency > maxPongLatency {
				c.Info("Last ping: %v, Last pong: %v", lastPing, lastPong)
				c.Info("Connection stale, haven't gotten PongMsg in %d seconds", int(pongLatency.Seconds()))
				return
			}

		case <-ping.C:
			err := msg.WriteMsg(conn, &msg.Ping{})
			if err != nil {
				conn.Debug("Got error %v when writing PingMsg", err)
				return
			}
			lastPing = time.Now()
		}
	}
}
Example #23
func (t *topic) backgroundClean() {
	t.wg.Add(1)
	defer t.wg.Done()

	bgQuit := false
	backupTick := time.NewTicker(bgBackupInterval)
	cleanTick := time.NewTicker(bgCleanInterval)
	for !bgQuit {
		select {
		case <-backupTick.C:
			err := t.exportLines()
			if err != nil {
				log.Printf("topic[%s] export lines error: %s", t.name, err)
			}
		case <-cleanTick.C:
			if !t.persist {
				log.Printf("cleaning... %v", t.persist)
				bgQuit = t.clean()
				if bgQuit {
					// log.Printf("topic[%s] t.clean return quit: %v", t.name, bgQuit)
					break
				}
			}
		case <-t.quit:
			// log.Printf("topic[%s] background clean catched quit", t.name)
			bgQuit = true
			break
		}
	}
	// log.Printf("topic[%s] background clean exit.", t.name)
}
Example #24
func RepeatEveryUntil(repeatInterval int, runTime int, fn func(), quit <-chan bool) <-chan func() {
	if repeatInterval == 0 || runTime == 0 {
		return Once(fn)
	} else {
		ch := make(chan func())
		// A receive from a nil channel blocks forever, so the tickerQuitC case
		// below only fires when a positive runTime was given.
		var tickerQuitC <-chan time.Time
		ticker := time.NewTicker(time.Duration(repeatInterval) * time.Second)
		if runTime > 0 {
			tickerQuitC = time.NewTicker(time.Duration(runTime) * time.Second).C
		}
		go func() {
			defer close(ch)
			ch <- fn
			for {
				select {
				case <-ticker.C:
					ch <- fn
				case <-quit:
					ticker.Stop()
					return
				case <-tickerQuitC:
					ticker.Stop()
					return
				}
			}
		}()
		return ch
	}
}
Example #25
func (c *cliClient) torPromptUI() error {
	banner := "Please start a Tor SOCKS listener on port 9050 or 9150..."
	bannerLength := 4 + len(banner)
	c.Printf("%s %s", termPrefix, banner)

	phase := 0
	animateTicker := time.NewTicker(250 * time.Millisecond)
	defer animateTicker.Stop()
	probeTicker := time.NewTicker(1 * time.Second)
	defer probeTicker.Stop()

	for {
		select {
		case <-c.interrupt:
			return errInterrupted
		case <-animateTicker.C:
			c.Printf("\x1b[%dD", bannerLength)
			phase = c.drawChevrons(phase)
			c.Printf("\x1b[%dC", bannerLength)
		case <-probeTicker.C:
			if c.detectTor() {
				return nil
			}
		}
	}

	return nil
}
Example #26
func (q *queue) housekeeping() {
	defer func() {
		log.Trace("queue[%s] housekeeping done", q.ident())
		q.wg.Done()
	}()

	log.Trace("queue[%s] start housekeeping...", q.ident())

	purgeTick := time.NewTicker(q.purgeInterval)
	defer purgeTick.Stop()

	cursorChkpnt := time.NewTicker(time.Second)
	defer cursorChkpnt.Stop()

	for {
		select {
		case <-purgeTick.C:
			if err := q.Purge(); err != nil {
				log.Error("queue[%s] purge: %s", q.ident(), err)
			}

		case <-cursorChkpnt.C:
			if err := q.cursor.dump(); err != nil {
				log.Error("queue[%s] cursor checkpoint: %s", q.ident(), err)
			}

		case <-q.quit:
			return
		}
	}
}
Example #27
func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	broadcastSignal := time.NewTicker(rebroadcastDelay.Get())
	defer broadcastSignal.Stop()

	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()

	for {
		log.Event(ctx, "Bitswap.Rebroadcast.idle")
		select {
		case <-tick.C:
			n := bs.wm.wl.Len()
			if n > 0 {
				log.Debug(n, "keys in bitswap wantlist")
			}
		case <-broadcastSignal.C: // resend unfulfilled wantlist keys
			log.Event(ctx, "Bitswap.Rebroadcast.active")
			entries := bs.wm.wl.Entries()
			if len(entries) > 0 {
				bs.connectToProviders(ctx, entries)
			}
		case <-parent.Done():
			return
		}
	}
}
Example #28
// NewPipeline creates a new Transporter Pipeline using the given tree of nodes, and Event Emitter
// eg.
//   source :=
//   	transporter.NewNode("source", "mongo", adaptor.Config{"uri": "mongodb://localhost/", "namespace": "boom.foo", "debug": false, "tail": true}).
// 	  	Add(transporter.NewNode("out", "file", adaptor.Config{"uri": "stdout://"}))
//   pipeline, err := transporter.NewPipeline(source, events.NewNoopEmitter(), 1*time.Second, state.NewFilestore(pid, "/tmp/transporter.state"), 10*time.Second)
//   if err != nil {
// 	  fmt.Println(err)
// 	  os.Exit(1)
//   }
// pipeline.Run()
func NewPipeline(source *Node, emitter events.Emitter, interval time.Duration, sessionStore state.SessionStore, sessionInterval time.Duration) (*Pipeline, error) {
	pipeline := &Pipeline{
		source:        source,
		emitter:       emitter,
		metricsTicker: time.NewTicker(interval),
	}

	if sessionStore != nil {
		pipeline.sessionStore = sessionStore
		pipeline.sessionTicker = time.NewTicker(sessionInterval)
	}

	// init the pipeline
	err := pipeline.source.Init(interval)
	if err != nil {
		return pipeline, err
	}

	// init the emitter with the right chan
	pipeline.emitter.Init(source.pipe.Event)

	// start the emitters
	go pipeline.startErrorListener(source.pipe.Err)
	go pipeline.startMetricsGatherer()

	if sessionStore != nil {
		pipeline.initState()
		go pipeline.startStateSaver()
	}
	pipeline.emitter.Start()

	return pipeline, nil
}
Example #29
// Async
func (conn *LocalConnection) SetEstablished() {
	conn.sendAction(func() error {
		stopTicker(conn.heartbeat)
		old := conn.established
		conn.Lock()
		conn.established = true
		conn.Unlock()
		if old {
			return nil
		}
		conn.Router.Ourself.ConnectionEstablished(conn)
		if err := conn.ensureForwarders(); err != nil {
			return err
		}
		// Send a large frame down the DF channel in order to prompt
		// PMTU discovery to start.
		conn.Forward(true, &ForwardedFrame{
			srcPeer: conn.local,
			dstPeer: conn.remote,
			frame:   PMTUDiscovery},
			nil)
		conn.heartbeat = time.NewTicker(SlowHeartbeat)
		conn.fragTest = time.NewTicker(FragTestInterval)
		// avoid initial waits for timers to fire
		conn.Forward(true, conn.heartbeatFrame, nil)
		conn.setStackFrag(false)
		if err := conn.sendSimpleProtocolMsg(ProtocolStartFragmentationTest); err != nil {
			return err
		}
		return nil
	})
}
Example #30
// enters consume state, triggered by the mainLoop
func (c *Consumer) consume() bool {
	hbTicker := time.NewTicker(c.config.Group.Heartbeat.Interval)
	defer hbTicker.Stop()

	ocTicker := time.NewTicker(c.config.Consumer.Offsets.CommitInterval)
	defer ocTicker.Stop()

	for {
		select {
		case <-hbTicker.C:
			switch err := c.heartbeat(); err {
			case nil, sarama.ErrNoError:
			case sarama.ErrNotCoordinatorForConsumer, sarama.ErrRebalanceInProgress:
				return false
			default:
				c.handleError(err)
				return false
			}
		case <-ocTicker.C:
			if err := c.commitOffsetsWithRetry(c.config.Group.Offsets.Retry.Max); err != nil {
				c.handleError(err)
				return false
			}
		case <-c.dying:
			return true
		}
	}
}