func (cl *gatewayClient) monitorUplink() { var retries int newStream: for { ctx, cancel := context.WithCancel(cl.Context()) cl.uplink.Lock() cl.uplink.cancel = cancel cl.uplink.Unlock() stream, err := cl.client.client.GatewayUplink(ctx) if err != nil { cl.Ctx.WithError(errors.FromGRPCError(err)).Warn("Failed to open new monitor uplink stream") retries++ time.Sleep(backoff.Backoff(retries)) continue } retries = 0 cl.Ctx.Debug("Opened new monitor uplink stream") // The actual stream go func() { for { select { case <-ctx.Done(): return case uplink, ok := <-cl.uplink.ch: if ok { stream.Send(uplink) cl.Ctx.Debug("Sent uplink to monitor") } } } }() msg := new(empty.Empty) for { if err := stream.RecvMsg(&msg); err != nil { cl.Ctx.WithError(errors.FromGRPCError(err)).Warn("Received error on monitor uplink stream, closing...") stream.CloseSend() cl.Ctx.Debug("Closed monitor uplink stream") cl.uplink.Lock() cl.uplink.cancel() cl.uplink.cancel = nil cl.uplink.Unlock() retries++ time.Sleep(backoff.Backoff(retries)) continue newStream } } } }
// WithTTNDialer creates a dialer for TTN func WithTTNDialer() grpc.DialOption { return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { ctx := log.Get().WithField("Address", addr) d := net.Dialer{Timeout: timeout, KeepAlive: KeepAlive} var retries int for { conn, err := d.Dial("tcp", addr) if err == nil { ctx.Debug("Connected to gRPC server") return conn, nil } if err, ok := err.(*net.OpError); ok && err.Op == "dial" && retries <= MaxRetries { ctx.WithError(err).Debug("Could not connect to gRPC server, reconnecting...") time.Sleep(backoff.Backoff(retries)) retries++ continue } return nil, err } }) }
// NewMonitoredRouterStream starts and monitors a RouterStream
func NewMonitoredRouterStream(client BrokerClient, getContextFunc func() context.Context) RouterStream {
	s := &routerStream{
		up:   make(chan *UplinkMessage, DefaultBufferSize),
		down: make(chan *DownlinkMessage, DefaultBufferSize),
		err:  make(chan error),
	}
	// setup is released after each (re)connect attempt completes.
	s.setup.Add(1)
	s.client = client
	s.ctx = log.Get()
	go func() {
		var retries int
		for {
			// Session channels: up feeds the send goroutine, errCh reports
			// receive failures to the monitor loop below.
			up := make(chan *UplinkMessage)
			errCh := make(chan error)

			// Session client
			var ctx context.Context
			ctx, s.cancel = context.WithCancel(getContextFunc())
			client, err := s.client.Associate(ctx)
			s.setup.Done()
			if err != nil {
				s.ctx.WithError(err).Warn("Could not start Associate stream, retrying...")
				s.setup.Add(1)
				time.Sleep(backoff.Backoff(retries))
				retries++
				continue
			}
			retries = 0
			s.ctx.Debug("Started Associate stream")

			// Receive downlink errors: forward messages into the buffered
			// s.down, dropping when the buffer is full.
			go func() {
				for {
					message, err := client.Recv()
					if message != nil {
						s.ctx.Debug("Receiving Downlink message")
						select {
						case s.down <- message:
						default:
							s.ctx.Warn("Dropping Downlink message, buffer full")
						}
					}
					if err != nil {
						// NOTE(review): if the monitor loop already exited via
						// s.up being closed, nothing reads errCh and this send
						// blocks forever, leaking the goroutine — confirm.
						errCh <- err
						break
					}
				}
				close(errCh)
			}()

			// Send uplink
			go func() {
				for message := range up {
					s.ctx.Debug("Sending Uplink message")
					if err := client.Send(message); err != nil {
						s.ctx.WithError(err).Warn("Error sending Uplink message")
						break
					}
				}
			}()

			// Monitoring: shuttle uplinks from the public buffer into the
			// session channel until the stream errors or s.up is closed.
			var mErr error
		monitor:
			for {
				select {
				case mErr = <-errCh:
					break monitor
				case msg, ok := <-s.up:
					if !ok {
						break monitor // channel closed
					}
					// NOTE(review): can block indefinitely if the send
					// goroutine exited early on a Send error — confirm.
					up <- msg
				}
			}
			close(up)
			client.CloseSend()
			// EOF and Canceled are normal shutdown, not failures.
			if mErr == nil || mErr == io.EOF || grpc.Code(mErr) == codes.Canceled {
				s.ctx.Debug("Stopped Associate stream")
			} else {
				s.ctx.WithError(mErr).Warn("Error in Associate stream")
			}
			if s.closing {
				break
			}
			// Reconnect with backoff.
			s.setup.Add(1)
			time.Sleep(backoff.Backoff(retries))
			retries++
		}
	}()
	return s
}
// NewMonitoredHandlerSubscribeStream starts and monitors a HandlerSubscribeStream func NewMonitoredHandlerSubscribeStream(client BrokerClient, getContextFunc func() context.Context) HandlerSubscribeStream { s := &handlerSubscribeStream{ ch: make(chan *DeduplicatedUplinkMessage, DefaultBufferSize), err: make(chan error), } s.setup.Add(1) s.client = client s.ctx = log.Get() go func() { var client Broker_SubscribeClient var err error var retries int var message *DeduplicatedUplinkMessage for { // Session client var ctx context.Context ctx, s.cancel = context.WithCancel(getContextFunc()) client, err = s.client.Subscribe(ctx, &SubscribeRequest{}) s.setup.Done() if err != nil { if grpc.Code(err) == codes.Canceled { s.ctx.Debug("Stopped Uplink stream") break } s.ctx.WithError(err).Warn("Could not start Uplink stream, retrying...") s.setup.Add(1) time.Sleep(backoff.Backoff(retries)) retries++ continue } retries = 0 s.ctx.Info("Started Uplink stream") for { message, err = client.Recv() if message != nil { s.ctx.Debug("Receiving Uplink message") select { case s.ch <- message: default: s.ctx.Warn("Dropping Uplink message, buffer full") } } if err != nil { break } } if err == nil || err == io.EOF || grpc.Code(err) == codes.Canceled { s.ctx.Debug("Stopped Uplink stream") } else { s.ctx.WithError(err).Warn("Error in Uplink stream") } if s.closing { break } s.setup.Add(1) time.Sleep(backoff.Backoff(retries)) retries++ } close(s.ch) }() return s }
// NewMonitoredHandlerPublishStream starts and monitors a HandlerPublishStream
func NewMonitoredHandlerPublishStream(client BrokerClient, getContextFunc func() context.Context) HandlerPublishStream {
	s := &handlerPublishStream{
		ch:  make(chan *DownlinkMessage, DefaultBufferSize),
		err: make(chan error),
	}
	// setup is released after each (re)connect attempt completes.
	s.setup.Add(1)
	s.client = client
	s.ctx = log.Get()
	go func() {
		var retries int
		for {
			// Session channels: ch feeds the send goroutine, errCh reports
			// stream failure from the receive goroutine.
			ch := make(chan *DownlinkMessage)
			errCh := make(chan error)

			// Session client
			client, err := s.client.Publish(getContextFunc())
			s.setup.Done()
			if err != nil {
				if grpc.Code(err) == codes.Canceled {
					s.ctx.Debug("Stopped Downlink stream")
					break
				}
				s.ctx.WithError(err).Warn("Could not start Downlink stream, retrying...")
				s.setup.Add(1)
				time.Sleep(backoff.Backoff(retries))
				retries++
				continue
			}
			retries = 0
			s.ctx.Info("Started Downlink stream")

			// Receive errors: this direction carries no payloads, so a
			// returning RecvMsg signals stream failure (or clean close).
			go func() {
				empty := new(empty.Empty)
				if err := client.RecvMsg(empty); err != nil {
					errCh <- err
				}
				close(errCh)
			}()

			// Send
			go func() {
				for message := range ch {
					s.ctx.Debug("Sending Downlink message")
					if err := client.Send(message); err != nil {
						s.ctx.WithError(err).Warn("Error sending Downlink message")
						break
					}
				}
			}()

			// Monitoring: shuttle messages from the public buffer into the
			// session channel until the stream errors or s.ch is closed.
			var mErr error
		monitor:
			for {
				select {
				case mErr = <-errCh:
					break monitor
				case msg, ok := <-s.ch:
					if !ok {
						break monitor // channel closed
					}
					// NOTE(review): can block indefinitely if the send
					// goroutine exited early on a Send error — confirm.
					ch <- msg
				}
			}
			close(ch)
			client.CloseAndRecv()
			// EOF and Canceled are normal shutdown, not failures.
			if mErr == nil || mErr == io.EOF || grpc.Code(mErr) == codes.Canceled {
				s.ctx.Debug("Stopped Downlink stream")
			} else {
				s.ctx.WithError(mErr).Warn("Error in Downlink stream")
			}
			if s.closing {
				break
			}
			// Reconnect with backoff.
			s.setup.Add(1)
			time.Sleep(backoff.Backoff(retries))
			retries++
		}
	}()
	return s
}
// NewMonitoredGatewayStatusStream starts and monitors a GatewayStatusStream
func NewMonitoredGatewayStatusStream(client RouterClientForGateway) GatewayStatusStream {
	s := &gatewayStatusStream{
		ch:  make(chan *gateway.Status, DefaultBufferSize),
		err: make(chan error),
	}
	// setup is released after each (re)connect attempt completes.
	s.setup.Add(1)
	s.client = client
	s.ctx = client.GetLogger()
	go func() {
		var retries int
		for {
			// Session channels: ch feeds the send goroutine, errCh reports
			// stream failure from the receive goroutine.
			ch := make(chan *gateway.Status)
			errCh := make(chan error)

			// Session client
			client, err := s.client.GatewayStatus()
			s.setup.Done()
			if err != nil {
				if grpc.Code(err) == codes.Canceled {
					s.ctx.Debug("Stopped GatewayStatus stream")
					break
				}
				s.ctx.WithError(err).Warn("Could not start GatewayStatus stream, retrying...")
				s.setup.Add(1)
				time.Sleep(backoff.Backoff(retries))
				retries++
				continue
			}
			// NOTE(review): unlike the sibling constructors, retries is not
			// reset here; it is reset in the monitor loop once the stream has
			// stayed up for 10 seconds — presumably deliberate, confirm.
			s.ctx.Info("Started GatewayStatus stream")

			// Receive errors: this direction carries no payloads, so a
			// returning RecvMsg signals stream failure (or clean close).
			go func() {
				empty := new(empty.Empty)
				if err := client.RecvMsg(empty); err != nil {
					errCh <- err
				}
				close(errCh)
			}()

			// Send
			go func() {
				for status := range ch {
					s.ctx.Debug("Sending GatewayStatus message")
					if err := client.Send(status); err != nil {
						s.ctx.WithError(err).Warn("Error sending GatewayStatus message")
						break
					}
				}
			}()

			// Monitoring
			var mErr error
		monitor:
			for {
				select {
				case <-time.After(10 * time.Second):
					// Stream survived 10s: consider the connection stable and
					// reset the backoff counter.
					retries = 0
				case mErr = <-errCh:
					break monitor
				case msg, ok := <-s.ch:
					if !ok {
						break monitor // channel closed
					}
					// NOTE(review): can block indefinitely if the send
					// goroutine exited early on a Send error — confirm.
					ch <- msg
				}
			}
			close(ch)
			client.CloseAndRecv()
			// EOF and Canceled are normal shutdown, not failures.
			if mErr == nil || mErr == io.EOF || grpc.Code(mErr) == codes.Canceled {
				s.ctx.Debug("Stopped GatewayStatus stream")
			} else {
				s.ctx.WithError(mErr).Warn("Error in GatewayStatus stream")
			}
			if s.closing {
				break
			}
			// Reconnect with backoff.
			s.setup.Add(1)
			time.Sleep(backoff.Backoff(retries))
			retries++
		}
	}()
	return s
}
// NewMonitoredDownlinkStream starts and monitors a DownlinkStream func NewMonitoredDownlinkStream(client RouterClientForGateway) DownlinkStream { s := &downlinkStream{ ch: make(chan *DownlinkMessage, DefaultBufferSize), err: make(chan error), } s.setup.Add(1) s.client = client s.ctx = client.GetLogger() go func() { var client Router_SubscribeClient var err error var retries int var message *DownlinkMessage for { client, s.cancel, err = s.client.Subscribe() s.setup.Done() if err != nil { if grpc.Code(err) == codes.Canceled { s.ctx.Debug("Stopped Downlink stream") break } s.ctx.WithError(err).Warn("Could not start Downlink stream, retrying...") s.setup.Add(1) time.Sleep(backoff.Backoff(retries)) retries++ continue } s.ctx.Info("Started Downlink stream") for { message, err = client.Recv() if message != nil { s.ctx.Debug("Receiving Downlink message") select { case s.ch <- message: default: s.ctx.Warn("Dropping Downlink message, buffer full") } } if err != nil { break } retries = 0 } if err == nil || err == io.EOF || grpc.Code(err) == codes.Canceled { s.ctx.Debug("Stopped Downlink stream") } else { s.ctx.WithError(err).Warn("Error in Downlink stream") } if s.closing { break } s.setup.Add(1) time.Sleep(backoff.Backoff(retries)) retries++ } close(s.ch) }() return s }