Example no. 1
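// backgroundRekeyChecker periodically asks the mdserver for folders that need
// rekeying, resetting its timer after each check, until the context is cancelled.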
func (md *MDServerRemote) backgroundRekeyChecker(ctx context.Context) {
	for {
		select {
		case <-md.rekeyTimer.C:
			if !md.conn.IsConnected() {
				md.rekeyTimer.Reset(MdServerBackgroundRekeyPeriod)
				continue
			}

			// Assign an ID to this rekey check so we can track it.
			logTags := make(logger.CtxLogTags)
			logTags[CtxMDSRIDKey] = CtxMDSROpID
			newCtx := logger.NewContextWithLogTags(ctx, logTags)
			id, err := MakeRandomRequestID()
			if err != nil {
				md.log.CWarningf(ctx,
					"Couldn't generate a random request ID: %v", err)
			} else {
				newCtx = context.WithValue(newCtx, CtxMDSRIDKey, id)
			}

			md.log.CDebugf(newCtx, "Checking for rekey folders")
			if err := md.getFoldersForRekey(newCtx, md.client); err != nil {
				md.log.CWarningf(newCtx, "MDServerRemote: getFoldersForRekey "+
					"failed with %v", err)
			}
			md.rekeyTimer.Reset(MdServerBackgroundRekeyPeriod)
		case <-ctx.Done():
			return
		}
	}
}
Example no. 2
// blockingLookup waits until a sufficient quorum is assembled.
func (ks *Keyserver) blockingLookup(ctx context.Context, req *proto.LookupRequest, epoch uint64) (*proto.LookupProof, error) {
	newSignatures := make(chan interface{}, newSignatureBufferSize)
	ks.signatureBroadcast.Subscribe(epoch, newSignatures)
	defer ks.signatureBroadcast.Unsubscribe(epoch, newSignatures)
	verifiersLeft := coname.ListQuorum(req.QuorumRequirement, nil)
	ratifications, haveVerifiers, err := ks.findRatificationsForEpoch(epoch, verifiersLeft)
	if err != nil {
		return nil, err
	}
	for v := range haveVerifiers {
		delete(verifiersLeft, v)
	}
	for !coname.CheckQuorum(req.QuorumRequirement, haveVerifiers) {
		select {
		case <-ctx.Done():
			return nil, fmt.Errorf("timed out while waiting for ratification")
		case v := <-newSignatures:
			newSig := v.(*proto.SignedEpochHead)
			for id := range newSig.Signatures {
				if _, ok := verifiersLeft[id]; ok {
					ratifications = append(ratifications, newSig)
					delete(verifiersLeft, id)
					haveVerifiers[id] = struct{}{}
				}
			}
		}
	}
	return ks.assembleLookupProof(req, epoch, ratifications)
}
Example no. 3
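// WebSensorsAgent watches the db's event changes that the given user may read
// and dispatches the matching web-sensor handlers until the changes channel
// closes or the context is cancelled.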
func WebSensorsAgent(ctx context.Context, db data.DB, u *models.User) {
	// Get the db's changes, then filter by updates, then
	// filter by whether this user can read the record
	changes := data.Filter(data.FilterKind(db.Changes(), models.EventKind), func(c *data.Change) bool {
		ok, _ := access.CanRead(db, u, c.Record)
		return ok
	})

Run:
	for {
		select {
		case c, ok := <-*changes:
			if !ok {
				break Run
			}

			switch c.Record.(*models.Event).Name {
			case WEB_SENSOR_LOCATION:
				webSensorLocation(db, u, c.Record.(*models.Event).Data)
			}
		case <-ctx.Done():
			break Run

		}
	}
}
Example no. 4
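// processInternalRaftRequest assigns the request an ID, proposes it through
// raft, and blocks until the proposal is applied, the context is done, or the
// server stops.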
func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	r.ID = s.reqIDGen.Next()

	data, err := r.Marshal()
	if err != nil {
		return nil, err
	}

	if len(data) > maxRequestBytes {
		return nil, ErrRequestTooLarge
	}

	ch := s.w.Register(r.ID)

	s.r.Propose(ctx, data)

	select {
	case x := <-ch:
		return x.(*applyResult), nil
	case <-ctx.Done():
		s.w.Trigger(r.ID, nil) // GC wait
		return nil, ctx.Err()
	case <-s.done:
		return nil, ErrStopped
	}
}
Example no. 5
// Handle is the quicklog handle method for processing a log line
func (u *Handler) Handle(ctx context.Context, prev <-chan ql.Line, next chan<- ql.Line, config map[string]interface{}) error {

	field := "uuid"
	if u.FieldName != "" {
		field = u.FieldName
	}

	ok := true

	fieldIface := config["field"]
	if fieldIface != nil {
		field, ok = fieldIface.(string)
		if !ok {
			log.Log(ctx).Warn("Could not parse UUID config, using field=uuid")
			field = "uuid"
		}
	}

	log.Log(ctx).Debug("Starting filter handler", "handler", "uuid", "field", field)

	go func() {
		for {
			select {
			case line := <-prev:
				line.Data[field] = uuid.NewV4().String()
				next <- line
			case <-ctx.Done():
				return
			}
		}
	}()

	return nil
}
Example no. 6
// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return distribution.Descriptor{}, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return distribution.Descriptor{}, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}

	if u.simulateRetries != 0 {
		u.simulateRetries--
		return distribution.Descriptor{}, errors.New("simulating retry")
	}

	return distribution.Descriptor{}, nil
}
Example no. 7
// waitForStateChange blocks until the state changes to something other than the sourceState.
func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
	ac.mu.Lock()
	defer ac.mu.Unlock()
	if sourceState != ac.state {
		return ac.state, nil
	}
	done := make(chan struct{})
	var err error
	go func() {
		select {
		case <-ctx.Done():
			ac.mu.Lock()
			err = ctx.Err()
			ac.stateCV.Broadcast()
			ac.mu.Unlock()
		case <-done:
		}
	}()
	defer close(done)
	for sourceState == ac.state {
		ac.stateCV.Wait()
		if err != nil {
			return ac.state, err
		}
	}
	return ac.state, nil
}
Example no. 8
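// runExec retries the statement once per second until it succeeds, or returns
// an error if the context is done first.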
func runExec(ctx context.Context, db *sql.DB, query string) error {
	done := make(chan struct{})
	var (
		errMsg error
	)
	go func() {
		for {
			if _, err := db.Exec(query); err != nil {
				errMsg = err
				time.Sleep(time.Second)
				continue
			} else {
				errMsg = nil
				done <- struct{}{}
				break
			}
		}
	}()
	select {
	case <-done:
		return errMsg
	case <-ctx.Done():
		return fmt.Errorf("runExec %s timed out with %v / %v", query, ctx.Err(), errMsg)
	}
}
Example no. 9
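// reconnectOnFailure waits for the current node's context to end, then restarts
// the swarm node with an exponentially increasing delay capped at maxReconnectDelay.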
func (c *Cluster) reconnectOnFailure(ctx context.Context) {
	for {
		<-ctx.Done()
		c.Lock()
		if c.stop || c.node != nil {
			c.Unlock()
			return
		}
		c.reconnectDelay *= 2
		if c.reconnectDelay > maxReconnectDelay {
			c.reconnectDelay = maxReconnectDelay
		}
		logrus.Warnf("Restarting swarm in %.2f seconds", c.reconnectDelay.Seconds())
		delayCtx, cancel := context.WithTimeout(context.Background(), c.reconnectDelay)
		c.cancelDelay = cancel
		c.Unlock()
		<-delayCtx.Done()
		if delayCtx.Err() != context.DeadlineExceeded {
			return
		}
		c.Lock()
		if c.node != nil {
			c.Unlock()
			return
		}
		var err error
		_, ctx, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "", "", false)
		if err != nil {
			c.err = err
			ctx = delayCtx
		}
		c.Unlock()
	}
}
Example no. 10
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
// iv) transport is in TransientFailure and the RPC is fail-fast.
func (ac *addrConn) wait(ctx context.Context, failFast bool) (transport.ClientTransport, error) {
	for {
		ac.mu.Lock()
		switch {
		case ac.state == Shutdown:
			ac.mu.Unlock()
			return nil, errConnClosing
		case ac.state == Ready:
			ct := ac.transport
			ac.mu.Unlock()
			return ct, nil
		case ac.state == TransientFailure && failFast:
			ac.mu.Unlock()
			return nil, Errorf(codes.Unavailable, "grpc: RPC failed fast due to transport failure")
		default:
			ready := ac.ready
			if ready == nil {
				ready = make(chan struct{})
				ac.ready = ready
			}
			ac.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, toRPCErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example no. 11
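// runQuery retries the query once per second until it succeeds and returns the
// rows, or returns an error if the context is done first.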
func runQuery(ctx context.Context, db *sql.DB, query string) (*sql.Rows, error) {
	done := make(chan struct{})
	var (
		rows   *sql.Rows
		errMsg error
	)
	go func() {
		for {
			rs, err := db.Query(query)
			if err != nil {
				errMsg = err
				time.Sleep(time.Second)
				continue
			} else {
				rows = rs
				errMsg = nil
				done <- struct{}{}
				break
			}
		}
	}()
	select {
	case <-done:
		return rows, errMsg
	case <-ctx.Done():
		return nil, fmt.Errorf("runQuery %s timed out with %v / %v", query, ctx.Err(), errMsg)
	}
}
Example no. 12
// AcquireTTL acquires the lock as with `Acquire` but places a TTL on it. After the TTL expires the lock is released automatically.
func (l *lock) AcquireTTL(ctx context.Context, ttl time.Duration) error {
	var err error
	done := make(chan error)

Loop:
	for {
		acquireCtx, cancel := context.WithCancel(context.Background())
		go func() {
			done <- l.acquireTry(acquireCtx, ttl)
		}()

		select {
		case err = <-done:
			cancel()
			if err != ErrExists {
				break Loop
			}
		case <-ctx.Done():
			cancel()
			<-done
			if err != ErrExists {
				err = ErrTimeout
			}
			break Loop
		}
	}
	return err
}
Example no. 13
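// Handle starts a goroutine that stamps each incoming line's configured field
// with the local hostname and forwards it downstream until the context is cancelled.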
func (*hostnameHandler) Handle(ctx context.Context, prev <-chan ql.Line, next chan<- ql.Line, config map[string]interface{}) error {

	field := "hostname"
	ok := true

	fieldIface := config["field"]
	if fieldIface != nil {
		field, ok = fieldIface.(string)
		if !ok {
			log.Log(ctx).Warn("Could not parse hostname config, using field=hostname")
			field = "hostname"
		}
	}

	log.Log(ctx).Debug("Starting filter handler", "handler", "hostname", "field", field)

	hostname, _ := os.Hostname()

	go func() {
		for {
			select {
			case line := <-prev:
				line.Data[field] = hostname
				next <- line
			case <-ctx.Done():
				return
			}
		}
	}()

	return nil
}
Example no. 14
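// Run watches for subnet lease events and handles them until the context is
// cancelled, waiting for its watcher goroutines to finish before returning.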
func (n *network) Run(ctx context.Context) {
	wg := sync.WaitGroup{}

	log.Info("Watching for new subnet leases")
	evts := make(chan []subnet.Event)
	wg.Add(1)
	go func() {
		subnet.WatchLeases(ctx, n.sm, n.name, n.lease, evts)
		wg.Done()
	}()

	n.rl = make([]netlink.Route, 0, 10)
	wg.Add(1)
	go func() {
		n.routeCheck(ctx)
		wg.Done()
	}()

	defer wg.Wait()

	for {
		select {
		case evtBatch := <-evts:
			n.handleSubnetEvents(evtBatch)

		case <-ctx.Done():
			return
		}
	}
}
Example no. 15
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed.
func (ac *addrConn) wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		ac.mu.Lock()
		switch {
		case ac.state == Shutdown:
			ac.mu.Unlock()
			return nil, errConnClosing
		case ac.state == Ready:
			ct := ac.transport
			ac.mu.Unlock()
			return ct, nil
		default:
			ready := ac.ready
			if ready == nil {
				ready = make(chan struct{})
				ac.ready = ready
			}
			ac.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example no. 16
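// dispatchICMP returns a channel for incoming packets; a background goroutine
// handles each ICMP packet received on it until the context is cancelled.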
func (vnet *VNET) dispatchICMP(ctx context.Context) chan<- *Packet {
	var in = make(chan *Packet)

	vnet.wg.Add(1)
	go func() {
		defer vnet.wg.Done()

		vnet.system.WaitForControllerMAC()
		log.Printf("ICMP: running")

		for {
			var pkt *Packet

			select {
			case pkt = <-in:
			case <-ctx.Done():
				return
			}

			vnet.handleICMP(pkt)
		}
	}()

	return in
}
Example no. 17
// Helper to reset a ping ticker.
func (md *MDServerRemote) resetPingTicker(intervalSeconds int) {
	md.tickerMu.Lock()
	defer md.tickerMu.Unlock()

	if md.tickerCancel != nil {
		md.tickerCancel()
		md.tickerCancel = nil
	}
	if intervalSeconds <= 0 {
		return
	}

	md.log.Debug("MDServerRemote: starting new ping ticker with interval %d",
		intervalSeconds)

	var ctx context.Context
	ctx, md.tickerCancel = context.WithCancel(context.Background())
	go func() {
		ticker := time.NewTicker(time.Duration(intervalSeconds) * time.Second)
		for {
			select {
			case <-ticker.C:
				err := md.client.Ping(ctx)
				if err != nil {
					md.log.Debug("MDServerRemote: ping error %s", err)
				}

			case <-ctx.Done():
				md.log.Debug("MDServerRemote: stopping ping ticker")
				ticker.Stop()
				return
			}
		}
	}()
}
Example no. 18
// Run implements the TargetProvider interface.
func (ed *EC2Discovery) Run(ctx context.Context, ch chan<- []*config.TargetGroup) {
	defer close(ch)

	ticker := time.NewTicker(ed.interval)
	defer ticker.Stop()

	// Get an initial set right away.
	tg, err := ed.refresh()
	if err != nil {
		log.Error(err)
	} else {
		ch <- []*config.TargetGroup{tg}
	}

	for {
		select {
		case <-ticker.C:
			tg, err := ed.refresh()
			if err != nil {
				log.Error(err)
			} else {
				ch <- []*config.TargetGroup{tg}
			}
		case <-ctx.Done():
			return
		}
	}
}
Example no. 19
// NewLedgerClosePump starts a background proc that continually watches the
// history database provided.  The watch is stopped after the provided context
// is cancelled.
//
// Every second, the proc spawned by calling this func will check to see
// if a new ledger has been imported (by ruby-horizon as of 2015-04-30, but
// should eventually end up being in this project).  If a new ledger is seen
// the channel returned by this function emits the current time.
func NewLedgerClosePump(ctx context.Context, db *sql.DB) <-chan time.Time {
	result := make(chan time.Time)

	go func() {
		var lastSeenLedger int32
		for {
			select {
			case <-time.After(1 * time.Second):
				var latestLedger int32
				row := db.QueryRow("SELECT MAX(sequence) FROM history_ledgers")
				err := row.Scan(&latestLedger)

				if err != nil {
					log.Warn(ctx, "Failed to check latest ledger", err)
					break
				}

				if latestLedger > lastSeenLedger {
					log.Debugf(ctx, "saw new ledger: %d, prev: %d", latestLedger, lastSeenLedger)
					lastSeenLedger = latestLedger
					result <- time.Now()
				}

			case <-ctx.Done():
				log.Info(ctx, "canceling ledger pump")
				return
			}
		}
	}()

	return result
}
Example no. 20
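// processNotifications runs queued notification callbacks until the context is
// cancelled, at which point it closes the notification channel and drains any
// remaining entries.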
func (f *FS) processNotifications(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			f.notificationMutex.Lock()
			c := f.notifications
			f.notifications = nil
			f.notificationMutex.Unlock()
			c.Close()
			for range c.Out() {
				// Drain the output queue so the Channel can close Out()
				// and shut down any goroutines.
				f.log.CWarningf(ctx,
					"Throwing away notification after shutdown")
			}
			return
		case i := <-f.notifications.Out():
			notifyFn, ok := i.(func())
			if !ok {
				f.log.CWarningf(ctx, "Got a bad notification function: %v", i)
				continue
			}
			notifyFn()
			f.notificationGroup.Done()
		}
	}
}
Example no. 21
File: raft.go Project: Mic92/docker
// LeaderConn returns current connection to cluster leader or raftselector.ErrIsLeader
// if current machine is leader.
func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
	cc, err := n.getLeaderConn()
	if err == nil {
		return cc, nil
	}
	if err == raftselector.ErrIsLeader {
		return nil, err
	}
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			cc, err := n.getLeaderConn()
			if err == nil {
				return cc, nil
			}
			if err == raftselector.ErrIsLeader {
				return nil, err
			}
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}
Example no. 22
File: raft.go Project: Mic92/docker
// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It will block
// until the change is performed or there is an error.
func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error {
	cc.ID = n.reqIDGen.Next()

	ctx, cancel := context.WithCancel(ctx)
	ch := n.wait.register(cc.ID, nil, cancel)

	if err := n.raftNode.ProposeConfChange(ctx, cc); err != nil {
		n.wait.cancel(cc.ID)
		return err
	}

	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		if x != nil {
			log.G(ctx).Panic("raft: configuration change error, return type should always be error")
		}
		return nil
	case <-ctx.Done():
		n.wait.cancel(cc.ID)
		return ctx.Err()
	}
}
Example no. 23
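// Wait registers an arrival for the given key and blocks until the requested
// number of nodes has arrived or the context is done, in which case the
// arrival is withdrawn.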
func (s rpcServer) Wait(ctx context.Context, req *pb.WaitReq) (*pb.WaitResp, error) {
	glog.Infof("Got request to wait for %d nodes for %q", req.Count, req.Key)
	rc := make(chan ssResp)
	s <- ssInput{
		respChan: rc,
		key:      req.Key,
		count:    req.Count,
		arrive:   true,
	}
	got := <-rc
	resp := pb.WaitResp{Count: got.count}
	select {
	case <-got.done:
		resp.Start = true
		return &resp, nil
	case <-ctx.Done():
		glog.Infof("Connection ended for %q", req.Key)
		s <- ssInput{
			respChan: nil,
			key:      req.Key,
			count:    req.Count,
			arrive:   false,
		}
		return nil, ctx.Err()
	}
}
Example no. 24
// Wait blocks until i) the new transport is up, ii) ctx is done, iii) cc is
// closed, or iv) cc enters the TransientFailure state.
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, ErrClientConnClosing
		case cc.state == Ready:
			cc.mu.Unlock()
			return cc.transport, nil
		case cc.state == TransientFailure:
			cc.mu.Unlock()
			// Break out so that the caller gets chance to pick another transport to
			// perform rpc instead of sticking to this transport.
			return nil, ErrTransientFailure
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example no. 25
// Bunch reads amqp deliveries from a channel and batches them into slices of
// length bunchLen, forwarding each batch on a channel with buffer size chanBuff.
// Bunch never stops sending unless the context's done channel is closed.
func Bunch(ctx context.Context, messages chan amqp.Delivery, bunchLen, chanBuff int) (bufferCh chan []amqp.Delivery) {
	bufferCh = make(chan []amqp.Delivery, chanBuff)
	// cap messages is 2 * qos
	go func() {
		var buffer []amqp.Delivery
		i := 0
	loop:
		for {
			select {
			case msg := <-messages:
				buffer = append(buffer, msg)
				i++
			case <-ctx.Done():
				break loop
			default:
			}
			if i == bunchLen {
				bufferCh <- buffer
				buffer = make([]amqp.Delivery, 0)
				i = 0
			}
		}
	}()
	return bufferCh
}
Example no. 26
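// listener accepts a single TCP connection on addr and runs a clock station
// over it, closing the listener and the connection once the context is done.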
func listener(ctx context.Context, frameSize int, addr string) error {
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		fmt.Fprintf(os.Stderr, "net.Listen(%q): %s\n", addr, err.Error())
		return err
	}
	go func() {
		<-ctx.Done()
		ln.Close()
	}()

	c, err := ln.Accept()
	if err != nil {
		fmt.Fprintf(os.Stderr, "ln.Accept(): %s\n", err.Error())
		return err
	}
	fc := fnet.FromOrderedStream(c, frameSize)
	go func() {
		<-ctx.Done()
		fc.Close()
	}()

	if err = clockstation.Run(ctx, fc, time.Tick(50*time.Millisecond)); err != nil {
		return fmt.Errorf("clockstation.Run: %s\n", err.Error())
	}
	return nil
}
Example no. 27
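// EnumerateBlobs sends refs of blobs stored in this namespace after the given
// key to dest, up to limit, stopping early if the context is done, and closes
// dest when finished.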
func (sto *appengineStorage) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)

	loan := ctxPool.Get()
	defer loan.Return()
	actx := loan

	prefix := sto.namespace + "|"
	keyBegin := datastore.NewKey(actx, memKind, prefix+after, 0, nil)
	keyEnd := datastore.NewKey(actx, memKind, sto.namespace+"~", 0, nil)

	q := datastore.NewQuery(memKind).Limit(int(limit)).Filter("__key__>", keyBegin).Filter("__key__<", keyEnd)
	it := q.Run(actx)
	var row memEnt
	for {
		key, err := it.Next(&row)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return err
		}
		select {
		case dest <- blob.SizedRef{blob.ParseOrZero(key.StringID()[len(prefix):]), uint32(row.Size)}:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}
Example no. 28
// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shut down by cancelling the context.
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
	log.G(ctx).Debugf("waiting on events")
	buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
	eventsq := make(chan events.Message, len(buffer))

	for _, event := range buffer {
		eventsq <- event
	}

	go func() {
		defer c.backend.UnsubscribeFromEvents(l)

		for {
			select {
			case ev := <-l:
				jev, ok := ev.(events.Message)
				if !ok {
					log.G(ctx).Warnf("unexpected event message: %q", ev)
					continue
				}
				select {
				case eventsq <- jev:
				case <-ctx.Done():
					return
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return eventsq
}
Example no. 29
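// runScraping re-syncs the scrape pool whenever a sync signal arrives, throttled
// to once per five seconds, and stops the pool once the context is done.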
func (ts *targetSet) runScraping(ctx context.Context) {
	ctx, ts.cancelScraping = context.WithCancel(ctx)

	ts.scrapePool.init(ctx)

Loop:
	for {
		// Throttle syncing to once per five seconds.
		select {
		case <-ctx.Done():
			break Loop
		case <-time.After(5 * time.Second):
		}

		select {
		case <-ctx.Done():
			break Loop
		case <-ts.syncCh:
			ts.mtx.RLock()
			ts.sync()
			ts.mtx.RUnlock()
		}
	}

	// We still want to wait for all pending target scrapes to complete, to ensure
	// there will be no more storage writes after this point.
	ts.scrapePool.stop()
}
Example no. 30
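// heartbeat periodically pings the dispatcher, adjusting its interval to the
// period the server returns, until the session closes or the context is done.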
func (s *session) heartbeat(ctx context.Context) error {
	log.G(ctx).Debugf("(*session).heartbeat")
	client := api.NewDispatcherClient(s.conn)
	heartbeat := time.NewTimer(1) // send out a heartbeat right away
	defer heartbeat.Stop()

	for {
		select {
		case <-heartbeat.C:
			heartbeatCtx, cancel := context.WithTimeout(ctx, dispatcherRPCTimeout)
			resp, err := client.Heartbeat(heartbeatCtx, &api.HeartbeatRequest{
				SessionID: s.sessionID,
			})
			cancel()
			if err != nil {
				if grpc.Code(err) == codes.NotFound {
					err = errNodeNotRegistered
				}

				return err
			}

			period, err := ptypes.Duration(&resp.Period)
			if err != nil {
				return err
			}

			heartbeat.Reset(period)
		case <-s.closed:
			return errSessionClosed
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}