Beispiel #1
0
// NewRouterClientForGateway returns a new RouterClient for the given gateway ID and access token
func NewRouterClientForGateway(client RouterClient, gatewayID, token string) RouterClientForGateway {
	bgCtx, cancel := context.WithCancel(context.Background())
	gtw := &routerClientForGateway{
		client:    client,
		gatewayID: gatewayID,
		token:     token,
		bgCtx:     bgCtx,
		cancel:    cancel,
	}
	// Logger pre-tagged with the gateway ID for all subsequent log output.
	gtw.ctx = log.Get().WithField("GatewayID", gatewayID)
	return gtw
}
Beispiel #2
0
// NewClient creates a new DefaultClient. If ctx is nil, the global logger is
// used. The client connects to all given brokers and keeps a registry of
// subscriptions so they can be restored after a reconnect.
func NewClient(ctx log.Interface, id, username, password string, brokers ...string) Client {
	if ctx == nil {
		ctx = log.Get()
	}

	mqttOpts := MQTT.NewClientOptions()

	for _, broker := range brokers {
		mqttOpts.AddBroker(broker)
	}

	// Random suffix avoids client-ID collisions between instances sharing an id.
	mqttOpts.SetClientID(fmt.Sprintf("%s-%s", id, random.String(16)))
	mqttOpts.SetUsername(username)
	mqttOpts.SetPassword(password)

	// TODO: Some tuning of these values probably won't hurt:
	mqttOpts.SetKeepAlive(30 * time.Second)
	mqttOpts.SetPingTimeout(10 * time.Second)

	// Clean session: the broker discards subscriptions on disconnect, which is
	// why the OnConnect handler below re-subscribes manually.
	mqttOpts.SetCleanSession(true)

	mqttOpts.SetDefaultPublishHandler(func(client MQTT.Client, msg MQTT.Message) {
		ctx.Warnf("Received unhandled message: %v", msg)
	})

	// Set when the connection drops, so the next OnConnect knows it is a
	// reconnect and must restore subscriptions.
	// NOTE(review): read/written from MQTT callback goroutines without
	// synchronization — presumably the client serializes these callbacks;
	// confirm against the MQTT library's documentation.
	var reconnecting bool

	mqttOpts.SetConnectionLostHandler(func(client MQTT.Client, err error) {
		ctx.Warnf("Disconnected (%s). Reconnecting...", err.Error())
		reconnecting = true
	})

	ttnClient := &DefaultClient{
		ctx:           ctx,
		subscriptions: make(map[string]MQTT.MessageHandler),
	}

	mqttOpts.SetOnConnectHandler(func(client MQTT.Client) {
		ctx.Info("Connected to MQTT")
		if reconnecting {
			// Restore every subscription that was active before the disconnect.
			for topic, handler := range ttnClient.subscriptions {
				ctx.Infof("Re-subscribing to topic: %s", topic)
				ttnClient.subscribe(topic, handler)
			}
			reconnecting = false
		}
	})

	ttnClient.mqtt = MQTT.NewClient(mqttOpts)

	return ttnClient
}
Beispiel #3
0
// dial opens a gRPC connection to address. When tlsConfig is non-nil the
// connection uses TLS with the ServerName derived from the address; otherwise
// it is insecure. Permanent certificate errors are returned immediately; if
// the server does not speak TLS (tls.RecordHeaderError) and fallback is true,
// dial retries once without TLS.
func dial(address string, tlsConfig *tls.Config, fallback bool) (conn *grpc.ClientConn, err error) {
	ctx := log.Get().WithField("Address", address)
	// Copy DialOptions instead of appending to it directly: appending to the
	// package-level slice can write into its shared backing array, corrupting
	// the options seen by concurrent or subsequent dials.
	opts := make([]grpc.DialOption, len(DialOptions), len(DialOptions)+1)
	copy(opts, DialOptions)
	if tlsConfig != nil {
		tlsConfig.ServerName = strings.SplitN(address, ":", 2)[0] // trim the port
		opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}
	conn, err = grpc.Dial(
		address,
		opts...,
	)
	if err == nil {
		return
	}

	switch err := err.(type) {
	case x509.CertificateInvalidError,
		x509.ConstraintViolationError,
		x509.HostnameError,
		x509.InsecureAlgorithmError,
		x509.SystemRootsError,
		x509.UnhandledCriticalExtension,
		x509.UnknownAuthorityError:
		// Non-temporary error while connecting to a TLS-enabled server
		return nil, err
	case tls.RecordHeaderError:
		if fallback {
			ctx.WithError(err).Warn("Could not connect to gRPC server with TLS, reconnecting without it...")
			return dial(address, nil, fallback)
		}
		return nil, err
	}

	// Use ctx (already tagged with the Address field) so this log line can be
	// correlated with the dial target, consistent with the other log calls.
	ctx.WithField("ErrType", fmt.Sprintf("%T", err)).WithError(err).Error("Unhandled dial error [please create issue on Github]")
	return nil, err
}
Beispiel #4
0
// NewClient creates a new DefaultClient. If ctx is nil, the global logger is
// used. Credentials default to "guest:guest"; if only a username is given it
// is used alone, and if both username and password are given they are joined
// as "username:password" in the AMQP URL.
func NewClient(ctx log.Interface, username, password, host string) Client {
	if ctx == nil {
		ctx = log.Get()
	}
	credentials := "guest:guest"
	if username != "" {
		if password != "" {
			credentials = fmt.Sprintf("%s:%s", username, password)
		} else {
			credentials = username
		}
	}
	return &DefaultClient{
		ctx: ctx,
		// The original format string was garbled ("%[email protected]%s" — "%["
		// is not a valid fmt verb); restore the intended "amqp://%s@%s".
		url:      fmt.Sprintf("amqp://%s@%s", credentials, host),
		mutex:    &sync.Mutex{},
		channels: make(map[*DefaultChannelClient]*AMQP.Channel),
	}
}
Beispiel #5
0
// WithTTNDialer creates a dialer for TTN
func WithTTNDialer() grpc.DialOption {
	return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
		logCtx := log.Get().WithField("Address", addr)
		dialer := net.Dialer{Timeout: timeout, KeepAlive: KeepAlive}
		attempt := 0
		for {
			conn, err := dialer.Dial("tcp", addr)
			if err == nil {
				logCtx.Debug("Connected to gRPC server")
				return conn, nil
			}
			// Retry only transient dial-phase errors, up to MaxRetries.
			opErr, ok := err.(*net.OpError)
			if !ok || opErr.Op != "dial" || attempt > MaxRetries {
				return nil, err
			}
			logCtx.WithError(opErr).Debug("Could not connect to gRPC server, reconnecting...")
			time.Sleep(backoff.Backoff(attempt))
			attempt++
		}
	})
}
Beispiel #6
0
// NewMonitoredRouterStream starts and monitors a RouterStream. It opens an
// Associate stream on the given client, pumps messages from the stream's up
// channel into the session, delivers received downlink to the down channel
// (dropping when the buffer is full), and re-establishes the session with
// backoff after errors until the stream is closed.
func NewMonitoredRouterStream(client BrokerClient, getContextFunc func() context.Context) RouterStream {
	s := &routerStream{
		up:   make(chan *UplinkMessage, DefaultBufferSize),
		down: make(chan *DownlinkMessage, DefaultBufferSize),
		err:  make(chan error),
	}
	s.setup.Add(1)
	s.client = client
	s.ctx = log.Get()

	go func() {
		var retries int

		for {
			// Session channels. errCh is buffered (capacity 1): the receive
			// goroutine sends at most one error before closing the channel,
			// and if the monitor loop below exits because s.up was closed,
			// nothing ever reads errCh — an unbuffered send would block the
			// receive goroutine forever and leak it.
			up := make(chan *UplinkMessage)
			errCh := make(chan error, 1)

			// Session client
			var ctx context.Context
			ctx, s.cancel = context.WithCancel(getContextFunc())
			client, err := s.client.Associate(ctx)
			s.setup.Done()
			if err != nil {
				s.ctx.WithError(err).Warn("Could not start Associate stream, retrying...")
				s.setup.Add(1)
				time.Sleep(backoff.Backoff(retries))
				retries++
				continue
			}
			retries = 0

			s.ctx.Debug("Started Associate stream")

			// Receive downlink; forwards the terminating Recv error to errCh.
			go func() {
				for {
					message, err := client.Recv()
					if message != nil {
						s.ctx.Debug("Receiving Downlink message")
						select {
						case s.down <- message:
						default:
							// Non-blocking send: drop rather than stall the stream.
							s.ctx.Warn("Dropping Downlink message, buffer full")
						}
					}
					if err != nil {
						errCh <- err
						break
					}
				}
				close(errCh)
			}()

			// Send uplink until the session channel closes or Send fails.
			go func() {
				for message := range up {
					s.ctx.Debug("Sending Uplink message")
					if err := client.Send(message); err != nil {
						s.ctx.WithError(err).Warn("Error sending Uplink message")
						break
					}
				}
			}()

			// Monitoring: pump s.up into the session, stop on stream error or
			// when s.up is closed.
			var mErr error

		monitor:
			for {
				select {
				case mErr = <-errCh:
					break monitor
				case msg, ok := <-s.up:
					if !ok {
						break monitor // channel closed
					}
					up <- msg
				}
			}

			close(up)
			client.CloseSend()

			if mErr == nil || mErr == io.EOF || grpc.Code(mErr) == codes.Canceled {
				s.ctx.Debug("Stopped Associate stream")
			} else {
				s.ctx.WithError(mErr).Warn("Error in Associate stream")
			}

			if s.closing {
				break
			}

			s.setup.Add(1)
			time.Sleep(backoff.Backoff(retries))
			retries++
		}
	}()

	return s
}
Beispiel #7
0
// NewMonitoredHandlerSubscribeStream starts and monitors a HandlerSubscribeStream.
// It opens a Subscribe stream on the given client, delivers received uplink to
// the stream's channel (dropping when the buffer is full), and re-establishes
// the stream with backoff after errors until it is closed or canceled.
func NewMonitoredHandlerSubscribeStream(client BrokerClient, getContextFunc func() context.Context) HandlerSubscribeStream {
	s := &handlerSubscribeStream{
		ch:  make(chan *DeduplicatedUplinkMessage, DefaultBufferSize),
		err: make(chan error),
	}
	// setup is released once the first Subscribe attempt has completed (ok or not).
	s.setup.Add(1)
	s.client = client
	s.ctx = log.Get()

	go func() {
		var client Broker_SubscribeClient
		var err error
		var retries int
		var message *DeduplicatedUplinkMessage

		for {
			// Session client: a fresh cancelable context per attempt so the
			// stream can be torn down from outside via s.cancel.
			var ctx context.Context
			ctx, s.cancel = context.WithCancel(getContextFunc())
			client, err = s.client.Subscribe(ctx, &SubscribeRequest{})
			s.setup.Done()
			if err != nil {
				if grpc.Code(err) == codes.Canceled {
					// Canceled from outside: stop for good, no retry.
					s.ctx.Debug("Stopped Uplink stream")
					break
				}
				s.ctx.WithError(err).Warn("Could not start Uplink stream, retrying...")
				s.setup.Add(1)
				time.Sleep(backoff.Backoff(retries))
				retries++
				continue
			}
			// Successful connect resets the backoff schedule.
			retries = 0

			s.ctx.Info("Started Uplink stream")

			// Receive until the stream returns an error (EOF, cancel, or failure).
			for {
				message, err = client.Recv()
				if message != nil {
					s.ctx.Debug("Receiving Uplink message")
					select {
					case s.ch <- message:
					default:
						// Non-blocking send: drop rather than stall the stream.
						s.ctx.Warn("Dropping Uplink message, buffer full")
					}
				}
				if err != nil {
					break
				}
			}

			if err == nil || err == io.EOF || grpc.Code(err) == codes.Canceled {
				s.ctx.Debug("Stopped Uplink stream")
			} else {
				s.ctx.WithError(err).Warn("Error in Uplink stream")
			}

			if s.closing {
				break
			}

			s.setup.Add(1)
			time.Sleep(backoff.Backoff(retries))
			retries++
		}

		// No more sessions: signal consumers that the stream is done.
		close(s.ch)
	}()
	return s
}
Beispiel #8
0
// NewMonitoredHandlerPublishStream starts and monitors a HandlerPublishStream.
// It opens a Publish stream on the given client, pumps messages from the
// stream's channel into the session, and re-establishes the session with
// backoff after errors until the stream is closed or canceled.
func NewMonitoredHandlerPublishStream(client BrokerClient, getContextFunc func() context.Context) HandlerPublishStream {
	s := &handlerPublishStream{
		ch:  make(chan *DownlinkMessage, DefaultBufferSize),
		err: make(chan error),
	}
	s.setup.Add(1)
	s.client = client
	s.ctx = log.Get()

	go func() {
		var retries int

		for {
			// Session channels. errCh is buffered (capacity 1): the RecvMsg
			// goroutine sends at most one error before closing the channel,
			// and if the monitor loop below exits because s.ch was closed,
			// nothing ever reads errCh — an unbuffered send would block the
			// RecvMsg goroutine forever and leak it.
			ch := make(chan *DownlinkMessage)
			errCh := make(chan error, 1)

			// Session client
			client, err := s.client.Publish(getContextFunc())
			s.setup.Done()
			if err != nil {
				if grpc.Code(err) == codes.Canceled {
					// Canceled from outside: stop for good, no retry.
					s.ctx.Debug("Stopped Downlink stream")
					break
				}
				s.ctx.WithError(err).Warn("Could not start Downlink stream, retrying...")
				s.setup.Add(1)
				time.Sleep(backoff.Backoff(retries))
				retries++
				continue
			}
			retries = 0

			s.ctx.Info("Started Downlink stream")

			// Receive errors: the server only ever responds with a final
			// status, so a returned RecvMsg error signals session end.
			go func() {
				empty := new(empty.Empty)
				if err := client.RecvMsg(empty); err != nil {
					errCh <- err
				}
				close(errCh)
			}()

			// Send downlink until the session channel closes or Send fails.
			go func() {
				for message := range ch {
					s.ctx.Debug("Sending Downlink message")
					if err := client.Send(message); err != nil {
						s.ctx.WithError(err).Warn("Error sending Downlink message")
						break
					}
				}
			}()

			// Monitoring: pump s.ch into the session, stop on stream error or
			// when s.ch is closed.
			var mErr error

		monitor:
			for {
				select {
				case mErr = <-errCh:
					break monitor
				case msg, ok := <-s.ch:
					if !ok {
						break monitor // channel closed
					}
					ch <- msg
				}
			}

			close(ch)
			client.CloseAndRecv()

			if mErr == nil || mErr == io.EOF || grpc.Code(mErr) == codes.Canceled {
				s.ctx.Debug("Stopped Downlink stream")
			} else {
				s.ctx.WithError(mErr).Warn("Error in Downlink stream")
			}

			if s.closing {
				break
			}

			s.setup.Add(1)
			time.Sleep(backoff.Backoff(retries))
			retries++
		}
	}()

	return s
}
Beispiel #9
0
// NewRouterStreamServer returns a new RouterStreamServer
func NewRouterStreamServer() *RouterStreamServer {
	s := new(RouterStreamServer)
	s.ctx = log.Get()
	return s
}
Beispiel #10
0
				Timeout: 5 * time.Second,
			}
			logHandlers = append(logHandlers, levelHandler.New(esHandler.New(&esHandler.Config{
				Client:     esClient,
				Prefix:     cmd.Name(),
				BufferSize: 10,
			}), logLevel))
		}

		ctx = &log.Logger{
			Handler: multiHandler.New(logHandlers...),
		}

		// Set the API/gRPC logger
		ttnlog.Set(apex.Wrap(ctx))
		grpclog.SetLogger(grpc.Wrap(ttnlog.Get()))

		ctx.WithFields(log.Fields{
			"ComponentID":              viper.GetString("id"),
			"Description":              viper.GetString("description"),
			"Discovery Server Address": viper.GetString("discovery-address"),
			"Auth Servers":             viper.GetStringMapString("auth-servers"),
			"Monitors":                 viper.GetStringMapString("monitor-servers"),
		}).Info("Initializing The Things Network")
	},
	PersistentPostRun: func(cmd *cobra.Command, args []string) {
		if logFile != nil {
			logFile.Close()
		}
	},
}