func (self *TcpServer) ListenAndServe() error {
	if self.inherit {
		// Inherited listener: fd 3 is the first socket handed down by the
		// parent process during a graceful restart (see Application.Start).
		file := os.NewFile(uintptr(3), "sock")
		tmp, err := net.FileListener(file)
		file.Close()
		if err != nil {
			log.Error("Error: %s", err)
			return err
		}
		listener := tmp.(*net.TCPListener)
		self.listener = &MyListener{Listener: listener}
	} else {
		addr, err := net.ResolveTCPAddr("tcp4", self.ListenAddress)
		if err != nil {
			return err
		}
		listener, err := net.ListenTCP("tcp", addr)
		if err != nil {
			return err
		}
		self.listener = &MyListener{Listener: listener}
	}

	log.Info("momonga_tcp: started tcp server: %s", self.listener.Addr().String())
	for i := 0; i < self.config.GetAcceptorCount(); i++ {
		go self.Serve(self.listener)
	}
	return nil
}
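// A minimal, self-contained sketch of the listener-inheritance pattern used by
// ListenAndServe above, assuming the parent passed the listening socket as fd 3
// (0-2 are stdin/stdout/stderr). inheritedListener and the :1883 address are
// illustrative, not part of momonga's API.

package main

import (
	"fmt"
	"net"
	"os"
)

// inheritedListener rebuilds a net.Listener from an inherited descriptor.
func inheritedListener() (net.Listener, error) {
	f := os.NewFile(uintptr(3), "listener") // fd 3: first inherited socket
	defer f.Close()                         // FileListener dup()s the fd, so close ours
	return net.FileListener(f)
}

func main() {
	var l net.Listener
	var err error
	if os.Getenv("INHERIT") == "TRUE" {
		l, err = inheritedListener()
	} else {
		l, err = net.Listen("tcp", ":1883")
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("listening on", l.Addr())
}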
func (self *Momonga) Subscribe(p *codec.SubscribeMessage, conn Connection) {
	log.Debug("Subscribe Message: [%s] %+v\n", conn.GetId(), p)

	ack := codec.NewSubackMessage()
	ack.PacketIdentifier = p.PacketIdentifier

	// TODO: on rare occasions conn is not an MmuxConnection here
	cn := conn.(*MmuxConnection)
	var retained []*codec.PublishMessage

	// TODO: decide at which level to take the lock.
	// NOTE: capacity only; bytes.NewBuffer treats a non-empty slice as initial contents.
	qosBuffer := bytes.NewBuffer(make([]byte, 0, len(p.Payload)))
	for _, payload := range p.Payload {
		// don't subscribe to the same topic twice
		if cn.IsSubscribed(payload.TopicPath) {
			log.Error("Map exists. [%s:%s]", conn.GetId(), payload.TopicPath)
			// the SUBACK payload still needs one return code per requested filter
			binary.Write(qosBuffer, binary.BigEndian, payload.RequestedQos)
			continue
		}

		set := &SubscribeSet{
			TopicFilter: payload.TopicPath,
			ClientId:    conn.GetId(),
			QoS:         int(payload.RequestedQos),
		}
		binary.Write(qosBuffer, binary.BigEndian, payload.RequestedQos)

		self.TopicMatcher.Add(payload.TopicPath, set)
		conn.AppendSubscribedTopic(payload.TopicPath, set)

		// Retain: queue up retained messages matching the new subscription
		retains := self.RetainMatch(payload.TopicPath)
		if len(retains) > 0 {
			for i := range retains {
				log.Debug("Retains: %s", retains[i].TopicName)
				id := conn.GetOutGoingTable().NewId()

				pp, _ := codec.CopyPublishMessage(retains[i])
				pp.PacketIdentifier = id
				conn.GetOutGoingTable().Register(id, pp, conn)
				retained = append(retained, pp)
			}
		}
		Metrics.System.Broker.SubscriptionsCount.Add(1)
	}
	ack.Qos = qosBuffer.Bytes()

	// MEMO: we can reply directly; no need to route or persist this message.
	log.Debug("Send Suback Message To: %s", conn.GetId())
	conn.WriteMessageQueue(ack)

	if len(retained) > 0 {
		log.Debug("Send retained Message To: %s", conn.GetId())
		for i := range retained {
			conn.WriteMessageQueue(retained[i])
		}
	}
}
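// Why the SUBACK buffer above is built with make([]byte, 0, n): bytes.NewBuffer
// treats a non-empty slice as *initial contents*, so make([]byte, n) would
// prepend n zero bytes before the per-topic return codes. A minimal sketch:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	qos := []uint8{0, 1, 2}

	wrong := bytes.NewBuffer(make([]byte, len(qos)))    // starts with 3 zero bytes
	right := bytes.NewBuffer(make([]byte, 0, len(qos))) // starts empty, capacity 3
	for _, q := range qos {
		binary.Write(wrong, binary.BigEndian, q)
		binary.Write(right, binary.BigEndian, q)
	}
	fmt.Println(wrong.Bytes()) // [0 0 0 0 1 2]
	fmt.Println(right.Bytes()) // [0 1 2]
}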
func (self *TlsServer) ListenAndServe() error {
	if self.inherit {
		file := os.NewFile(uintptr(3), "sock")
		tmp, err := net.FileListener(file)
		file.Close()
		if err != nil {
			log.Error("Error: %s", err)
			return err
		}

		cert, err := tls.LoadX509KeyPair(self.config.Server.Certfile, self.config.Server.Keyfile)
		if err != nil {
			return fmt.Errorf("LoadX509KeyPair error: %s", err)
		}
		config := &tls.Config{Certificates: []tls.Certificate{cert}}
		self.tlsConfig = config

		// The inherited descriptor is a plain TCP socket; wrap it so accepted
		// connections still go through the TLS handshake after a graceful restart.
		listener := tls.NewListener(tmp, config)
		self.listener = &MyListener{Listener: listener}
	} else {
		cert, err := tls.LoadX509KeyPair(self.config.Server.Certfile, self.config.Server.Keyfile)
		if err != nil {
			return fmt.Errorf("LoadX509KeyPair error: %s", err)
		}
		config := &tls.Config{Certificates: []tls.Certificate{cert}}
		listener, err := tls.Listen("tcp", self.ListenAddress, config)
		if err != nil {
			return fmt.Errorf("tls.Listen error: %s", err)
		}
		self.tlsConfig = config
		self.listener = &MyListener{Listener: listener}
	}

	log.Info("momonga_tls: started tls server: %s", self.listener.Addr().String())
	for i := 0; i < self.config.GetAcceptorCount(); i++ {
		go self.Serve(self.listener)
	}
	return nil
}
func (self *HttpServer) ListenAndServe() error {
	if self.inherit {
		// Inherited listener: fd 5 is the descriptor handed down for the
		// HTTP server during a graceful restart.
		file := os.NewFile(uintptr(5), "sock")
		tmp, err := net.FileListener(file)
		file.Close()
		if err != nil {
			log.Error("HttpServer: %s", err)
			return err
		}
		listener := tmp.(*net.TCPListener)
		self.listener = NewHttpListener(listener)
	} else {
		addr, err := net.ResolveTCPAddr("tcp4", self.Address)
		if err != nil {
			return err
		}
		base, err := net.ListenTCP("tcp", addr)
		if err != nil {
			return err
		}
		self.listener = NewHttpListener(base)
	}
	return self.Serve(self.listener)
}
func (self *TlsServer) Serve(l net.Listener) error {
	defer func() {
		l.Close()
	}()

	var tempDelay time.Duration // how long to sleep on accept failure
Accept:
	for {
		select {
		case <-self.stop:
			break Accept
		default:
			client, err := l.Accept()
			if err != nil {
				if ne, ok := err.(net.Error); ok && ne.Temporary() {
					if tempDelay == 0 {
						tempDelay = 5 * time.Millisecond
					} else {
						tempDelay *= 2
					}
					if max := 1 * time.Second; tempDelay > max {
						tempDelay = max
					}
					log.Info("momonga: Accept error: %v; retrying in %v", err, tempDelay)
					time.Sleep(tempDelay)
					continue
				}

				if strings.Contains(err.Error(), "use of closed network connection") {
					log.Error("Accept Failed: %s", err)
					continue
				}

				log.Error("Accept Error: %s", err)
				return err
			}
			tempDelay = 0

			// Tune the socket when the listener hands back a raw TCP connection
			// (a *tls.Conn will not match this assertion).
			if v, ok := client.(*net.TCPConn); ok {
				v.SetNoDelay(true)
				v.SetKeepAlive(true)
			}

			myconf := GetDefaultMyConfig()
			myconf.MaxMessageSize = self.Engine.Config().Server.MessageSizeLimit
			conn := NewMyConnection(myconf)
			conn.SetMyConnection(client)
			conn.SetId(client.RemoteAddr().String())

			log.Debug("Accepted: %s", conn.GetId())
			go self.Engine.HandleConnection(conn)
		}
	}

	self.listener.(*MyListener).wg.Wait()
	self.once.Do(func() {
		self.wg.Done()
	})
	return nil
}
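// The accept-retry delay above follows the same scheme as net/http's
// Server.Serve: start at 5ms, double on each consecutive temporary error, and
// cap at 1s. A standalone sketch of just the delay progression:

package main

import (
	"fmt"
	"time"
)

// nextDelay computes the backoff after one more consecutive accept failure.
func nextDelay(d time.Duration) time.Duration {
	if d == 0 {
		return 5 * time.Millisecond
	}
	d *= 2
	if max := 1 * time.Second; d > max {
		d = max
	}
	return d
}

func main() {
	var d time.Duration
	for i := 0; i < 10; i++ { // 5ms, 10ms, 20ms, ..., capped at 1s
		d = nextDelay(d)
		fmt.Println(d)
	}
}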
func (self *Momonga) HandleConnection(conn Connection) {
	defer func() {
		if err := recover(); err != nil {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			log.Error("momonga: panic serving %s: %v\n%s", conn.GetId(), err, buf)
		}
	}()

	hndr := NewHandler(conn, self)
	for {
		// TODO: change the API name; this actually processes a message
		_, err := conn.ReadMessage()
		if err != nil {
			log.Debug("Read Message Error: %s", err)
		}

		if conn.GetState() == STATE_CLOSED {
			err = &DisconnectError{}
		}
		Metrics.System.Broker.Messages.Received.Add(1)

		if err != nil {
			Metrics.System.Broker.Clients.Connected.Add(-1)
			log.Debug("DISCONNECT: %s", conn.GetId())
			// if a plain MyConnection comes back here, the client died before the handshake
			//self.Engine.CleanSubscription(conn)
			mux, e := self.GetConnectionByClientId(conn.GetId())
			if e != nil {
				log.Error("(while processing disconnect) can't fetch connection: %s, %T", conn.GetId(), conn)
			}

			if _, ok := err.(*DisconnectError); !ok {
				if conn.HasWillMessage() {
					self.SendWillMessage(conn)
				}

				if err == io.EOF {
					// nothing to do
				} else {
					log.Error("Handle Connection Error: %s", err)
				}
			}

			if mux != nil {
				var dummy *DummyPlug
				if mux.ShouldCleanSession() {
					self.CleanSubscription(mux)
					self.RemoveConnectionByClientId(mux.GetId())
				} else {
					dummy = NewDummyPlug(self)
					dummy.SetId(conn.GetId())
				}
				mux.Detach(conn, dummy)
			}

			if v, ok := conn.(*MyConnection); ok {
				v.Terminate()
			}
			conn.Close()
			hndr.Close()
			return
		}
	}
}
func (self *Momonga) Handshake(p *codec.ConnectMessage, conn *MyConnection) *MmuxConnection {
	log.Debug("handshaking: %s", p.Identifier)

	if conn.Connected {
		log.Error("wrong sequence. (connect twice)")
		conn.Close()
		return nil
	}

	if ok := self.checkVersion(p); ok != nil {
		log.Error("magic is not expected: %s %+v\n", string(p.Magic), p)
		conn.Close()
		return nil
	}

	// TODO: implement a pluggable authenticator
	if ok, _ := self.Authenticate(p.UserName, p.Password); !ok {
		log.Error("authenticate failed:")
		conn.Close()
		return nil
	}

	// preserve the will message when the will flag is set
	if (p.Flag & 0x4) > 0 {
		conn.SetWillMessage(*p.Will)
	}

	if (p.Version & 0x80) > 0 {
		conn.Bridged = true
	}

	if !p.CleanSession {
		conn.DisableCleanSession()
	}

	var mux *MmuxConnection
	var err error
	reply := codec.NewConnackMessage()

	if mux, err = self.GetConnectionByClientId(p.Identifier); err == nil {
		// [MQTT-3.2.2-2] If the Server accepts a connection with CleanSession set to 0,
		// the value set in Session Present depends on whether the Server already has stored Session state
		// for the supplied client ID. If the Server has stored Session state,
		// it MUST set Session Present to 1 in the CONNACK packet.
		if !p.CleanSession {
			// with CleanSession set to 1 the flag must stay 0 [MQTT-3.2.2-1]
			reply.Reserved |= 0x01
		}
	}

	// CONNACK MUST BE THE FIRST RESPONSE
	// TODO: the clean-session handling should probably all live in Attach
	conn.WriteMessageQueue(reply)

	if mux != nil {
		log.Info("Attach to mux[%s]", mux.GetId())
		conn.SetId(p.Identifier)
		conn.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetState(STATE_CONNECTED)
		mux.DisableCleanSession()

		if conn.ShouldCleanSession() {
			self.CleanSubscription(mux)
		}
		mux.Attach(conn)
	} else {
		mux = NewMmuxConnection()
		mux.SetId(p.Identifier)
		mux.Attach(conn)
		self.SetConnectionByClientId(mux.GetId(), mux)

		conn.SetId(p.Identifier)
		conn.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetState(STATE_CONNECTED)

		log.Debug("Starting new mux[%s]", mux.GetId())
	}

	if p.CleanSession {
		// drop any pending retries for the old session
		delete(self.RetryMap, mux.GetId())
	} else {
		// Okay, attach to the existing session.
		tbl := mux.GetOutGoingTable()

		// resend QoS1/QoS2 messages that were still in flight for this client
		msgs := make([]codec.Message, 0, len(tbl.Hash))
		for _, c := range tbl.Hash {
			if v, ok := c.Message.(*codec.PublishMessage); ok {
				if v.QosLevel > 0 {
					msgs = append(msgs, c.Message)
				}
			}
		}
		tbl.Clean()

		// TODO: improve this
		for _, v := range msgs {
			mux.WriteMessageQueue(v)
		}
	}

	conn.Connected = true
	log.Debug("handshake Successful: %s", p.Identifier)
	Metrics.System.Broker.Clients.Connected.Add(1)
	return mux
}
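// Per MQTT 3.1.1 the "session present" flag is bit 0 of the CONNACK
// acknowledge-flags byte (the field codec calls Reserved above). A minimal
// sketch of the decision, with hypothetical inputs:

package main

import "fmt"

// connackFlags computes the acknowledge-flags byte: session present is only
// set when the client asked to resume (CleanSession=0) and state was stored.
func connackFlags(cleanSession, hasStoredSession bool) byte {
	var flags byte
	if !cleanSession && hasStoredSession {
		flags |= 0x01 // session present
	}
	return flags
}

func main() {
	fmt.Printf("%08b\n", connackFlags(false, true)) // 00000001: resumed session
	fmt.Printf("%08b\n", connackFlags(true, true))  // 00000000: clean session wipes state
}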
func (self *Momonga) SendPublishMessage(msg *codec.PublishMessage, client_id string, is_bridged bool) {
	// Don't pass a wrong message here; the caller should validate the message before using this API.
	if len(msg.TopicName) < 1 {
		return
	}

	// TODO: Have to persist retain message.
	if msg.Retain > 0 {
		if len(msg.Payload) == 0 {
			log.Debug("[DELETE RETAIN: %s]\n%s", msg.TopicName, hex.Dump([]byte(msg.TopicName)))

			err := self.DataStore.Del([]byte(msg.TopicName), []byte(msg.TopicName))
			if err != nil {
				log.Error("Error: %s\n", err)
			}
			// delivering a retain-clear message would be wrong, so stop here
			log.Debug("Deleted retain: %s", msg.TopicName)
			// TODO: do we have to send an ack back here?
			Metrics.System.Broker.Messages.RetainedCount.Add(-1)
			return
		} else {
			buffer := bytes.NewBuffer(nil)
			codec.WriteMessageTo(msg, buffer)
			self.DataStore.Put([]byte(msg.TopicName), buffer.Bytes())
			Metrics.System.Broker.Messages.RetainedCount.Add(1)
		}
	}

	if Mflags["experimental.qos1"] {
		if msg.QosLevel == 1 {
			targets := self.TopicMatcher.Match(msg.TopicName)
			go func(msg *codec.PublishMessage, set []interface{}) {
				p := make(chan string, 1000)
				wg := sync.WaitGroup{}
				wg.Add(3) // bulk sender, retrier, reader
				mchan := make(chan string, 256)
				term := make(chan bool, 1)
				cnt := len(set)
				// TODO: mng is shared by the three goroutines below without locking
				mng := make(map[string]*codec.PublishMessage)

				// retrier. this style of implementation keeps things simple
				go func(term chan bool, mchan chan string, mng map[string]*codec.PublishMessage) {
					for {
						select {
						case m := <-mchan:
							if msg, ok := mng[m]; ok {
								conn, err := self.GetConnectionByClientId(m)
								if err != nil {
									log.Debug("connection for %s does not exist; wait for the next retry: %s", m, err)
									continue
								}
								log.Debug("sending a retry msg: %s", msg)
								conn.WriteMessageQueue(msg)
							}
						case <-term:
							log.Debug("  retry finished.")
							wg.Done()
							return
						}
					}
				}(term, mchan, mng)

				// reader
				go func(p chan string, term chan bool, cnt int, mng map[string]*codec.PublishMessage, mchan chan string) {
					limit := time.After(time.Second * 60)
					for {
						select {
						case id := <-p:
							cnt--
							delete(mng, id)
							// this could live in a callback, but passing the wait group and friends around is tedious
							if cnt < 1 {
								log.Debug("  all delivery finished.")
								term <- true
								wg.Done()
								return
							}
						case <-time.After(time.Second * 20):
							// walk the unfinished entries and have them retried
							for cid, m := range mng {
								m.Dupe = true
								mchan <- cid
							}
						case <-limit:
							log.Debug("  gave up retrying.")
							term <- true
							wg.Done()
							return
						}
					}
				}(p, term, cnt, mng, mchan)

				// sender. this one terminates on its own
				go func(msg *codec.PublishMessage, set []interface{}, p chan string, mng map[string]*codec.PublishMessage) {
					dp := make(map[string]bool)
					for i := range targets {
						var tbl *util.MessageTable
						var ok bool

						myset := targets[i].(*SubscribeSet)
						log.Debug("myset: %+v", myset)

						// NOTE (from interoperability/client_test.py):
						//
						// overlapping subscriptions. When there is more than one matching subscription for the same client for a topic,
						// the server may send back one message with the highest QoS of any matching subscription, or one message for
						// each subscription with a matching QoS.
						//
						// Currently, we choose one message for each subscription with a matching QoS.
						//
						if _, ok := dp[myset.ClientId]; ok {
							continue
						}
						dp[myset.ClientId] = true

						x, _ := codec.CopyPublishMessage(msg)
						x.QosLevel = myset.QoS
						conn, err := self.GetConnectionByClientId(myset.ClientId)
						if err != nil {
							// tricky: a client that reconnected with clean session = true must be
							// treated as a different client. mark this delivery done so the
							// reader's counter still completes.
							p <- myset.ClientId
							continue
						}

						if x.QosLevel == 0 {
							// downgraded to QoS 0: finish the delivery right away
							conn.WriteMessageQueue(x)
							p <- myset.ClientId
							continue
						}

						if tbl, ok = self.InflightTable[myset.ClientId]; !ok {
							self.InflightTable[myset.ClientId] = util.NewMessageTable()
							// we could hook a callback here; for QoS 1 it may not be needed
							tbl = self.InflightTable[myset.ClientId]
						}

						id := tbl.NewId()
						x.PacketIdentifier = id
						x.Opaque = p
						tbl.Register2(id, x, 1, x)

						mng[myset.ClientId] = x
						conn.WriteMessageQueue(x)
					}

					log.Debug("  all first delivery finished.")
					wg.Done()
				}(msg, targets, p, mng)

				wg.Wait()
				close(p)
				close(mchan)
				close(term)
				mng = nil
				log.Debug("  okay, cleaning up the qos1 sending goroutines.")
			}(msg, targets)

			return
		}
	}

	// keep a refcount per MessageId received via Publish; when a Puback comes
	// back, decrement it and delete the message once it reaches zero
	//log.Debug("TopicName: %s %s", m.TopicName, m.Payload)
	targets := self.TopicMatcher.Match(msg.TopicName)
	if msg.TopicName[0:1] == "#" {
		// TODO: [MQTT-4.7.2-1] The Server MUST NOT match Topic Filters starting with a wildcard character
		// (# or +) with Topic Names beginning with a $ character
	}

	// building the list first means some subscribers may be gone by the time we send. total chaos.
	// all we need here is the connection (client id) and the subscribed QoS; the rest is just sending,
	// so what Qlobber returns should ideally look like:
	// {
	//    Connection: Connection or client id
	//    QoS:
	// }
	// but that is hard to remove later. it's fine as long as error handling is done properly,
	// and a client id is better here: holding needless references to Connection hurts later.
	dp := make(map[string]bool)
	count := 0

	for i := range targets {
		var cn Connection
		var err error

		myset := targets[i].(*SubscribeSet)
		clientId := myset.ClientId

		// NOTE (from interoperability/client_test.py):
		//
		// overlapping subscriptions. When there is more than one matching subscription for the same client for a topic,
		// the server may send back one message with the highest QoS of any matching subscription, or one message for
		// each subscription with a matching QoS.
		//
		// Currently, we choose one message for each subscription with a matching QoS.
		//
		if _, ok := dp[clientId]; ok {
			continue
		}
		dp[clientId] = true

		cn, err = self.GetConnectionByClientId(clientId)
		if err != nil {
			// the subscriber was there when the list was built but has since gone
			// away, so drop it from the delivery list
			log.Info("(can't fetch %s. already disconnected, or unsubscribed?)", clientId)
			continue
		}

		if cn.IsBridge() && clientId == client_id {
			// Don't send the message back to the same bridge
			continue
		}

		var x *codec.PublishMessage
		if msg.QosLevel == 0 {
			// we don't need to copy the message on QoS 0
			x = msg
		} else {
			x = codec.MustCopyPublishMessage(msg)
		}

		subscriberQos := myset.QoS
		// Downgrade QoS
		if subscriberQos < x.QosLevel {
			x.QosLevel = subscriberQos
		}

		if x.QosLevel > 0 {
			// TODO: keep an InflightTable per client;
			// using the engine's OutGoingTable here is a stopgap
			id := self.OutGoingTable.NewId()
			x.PacketIdentifier = id
			if sender, ok := x.Opaque.(Connection); ok {
				// TODO: not sure anymore what this was meant to do
				self.OutGoingTable.Register2(x.PacketIdentifier, x, len(targets), sender)
			}
		}

		cn.WriteMessageQueue(x)
		count++
	}

	Metrics.System.Broker.Messages.Sent.Add(int64(count))
}
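// SendPublishMessage delivers at the minimum of the published QoS and the
// subscriber's granted QoS (the "Downgrade QoS" branch above). A standalone
// sketch of that rule:

package main

import "fmt"

// effectiveQoS returns the QoS actually used for one delivery.
func effectiveQoS(published, subscribed int) int {
	if subscribed < published {
		return subscribed
	}
	return published
}

func main() {
	fmt.Println(effectiveQoS(2, 1)) // 1: the subscription caps delivery
	fmt.Println(effectiveQoS(0, 2)) // 0: the publish QoS is already lower
}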
func NewApplication(configPath string) *Application {
	conf, err := configuration.LoadConfiguration(configPath)
	if err != nil {
		log.Error("Can't read config.toml; using default settings: %s", err)
	}
	log.SetupLogging(conf.Server.LogLevel, conf.Server.LogFile)

	pid := strconv.Itoa(os.Getpid())
	if conf.Server.PidFile != "" {
		if err := ioutil.WriteFile(conf.Server.PidFile, []byte(pid), 0644); err != nil {
			panic(err)
		}
		util.WritePid(conf.Server.PidFile)
	}

	// NOTE: INHERIT=TRUE means this process was spawned via os.StartProcess (graceful restart)
	inherit := false
	if os.Getenv("INHERIT") == "TRUE" {
		inherit = true
	}

	log.Info("Momonga started pid: %s (inherit:%t)", pid, inherit)
	engine := NewMomonga(conf)
	app := &Application{
		Engine:     engine,
		Servers:    []Server{},
		configPath: configPath,
		config:     conf,
	}

	// TODO: improve this block
	if conf.Server.Port > 0 {
		t := NewTcpServer(engine, conf, inherit)
		t.wg = &app.wg
		app.RegisterServer(t)
	}
	if conf.Server.Socket != "" {
		u := NewUnixServer(engine, conf, inherit)
		u.wg = &app.wg
		app.RegisterServer(u)
	}
	if conf.Server.HttpPort > 0 {
		h := NewHttpServer(engine, conf, inherit)
		h.wg = &app.wg
		app.RegisterServer(h)
	}
	if conf.Server.EnableTls && conf.Server.TlsPort > 0 {
		h := NewTlsServer(engine, conf, inherit)
		h.wg = &app.wg
		app.RegisterServer(h)
	}

	// memoize the application path and working directory for graceful restarts
	app.execPath, err = exec.LookPath(os.Args[0])
	if err != nil {
		log.Error("Error: %s", err)
		return app
	}
	app.workingDir, err = os.Getwd()
	if err != nil {
		log.Error("Error: %s", err)
		return app
	}

	if conf.Bridge.Address != "" {
		// TODO: allow multiple bridges
		// TODO: allow controlling bridges at runtime
		// /api/bridge/list
		// /api/bridge/connection/stop
		// /api/bridge/connection/status
		// /api/bridge/connection/start
		// /api/bridge/connection/delete
		// /api/bridge/connection/new?address=&port=&type=both&topic[]=
		// /api/bridge/config
		go func() {
			flag := 0
			switch conf.Bridge.Type {
			case "both":
				flag = 3
			case "out":
				flag = 1
			case "in":
				flag = 2
			default:
				panic(fmt.Sprintf("unsupported bridge type: %s", conf.Bridge.Type))
			}

			// in
			addr := fmt.Sprintf("%s:%d", conf.Bridge.Address, conf.Bridge.Port)
			c := client.NewClient(client.Option{
				TransporterCallback: func() (net.Conn, error) {
					conn, err := net.Dial("tcp", addr)
					return conn, err
				},
				Identifier: conf.Bridge.ClientId,
				Magic:      []byte("MQTT"),
				Version:    4 | 0x80,
				Keepalive:  0,
			})

			c.Connect()
			c.WaitConnection()

			if flag == 1 || flag == 3 {
				c.Subscribe("#", 2)
				c.SetRequestPerSecondLimit(-1)
				c.On("publish", func(msg *codec.PublishMessage) {
					engine.SendPublishMessage(msg, conf.Bridge.Connection, true)
				})
			}

			// out
			if flag == 2 || flag == 3 {
				addr2 := fmt.Sprintf("%s:%d", conf.Server.BindAddress, conf.Server.Port)
				c2 := client.NewClient(client.Option{
					TransporterCallback: func() (net.Conn, error) {
						conn, err := net.Dial("tcp", addr2)
						return conn, err
					},
					Identifier: conf.Bridge.ClientId,
					Magic:      []byte("MQTT"),
					Version:    4 | 0x80,
					Keepalive:  0,
				})
				c2.Connect()
				c2.WaitConnection()
				c2.Subscribe("#", 2)
				c2.SetRequestPerSecondLimit(-1)
				c2.On("publish", func(msg *codec.PublishMessage) {
					c.Publish(msg.TopicName, msg.Payload, msg.QosLevel)
				})
			}

			select {}
		}()
	}

	return app
}
func (self *Application) Start() {
	self.wg.Add(1)

	ch := make(chan os.Signal, 8)
	// TODO: windows can't use these signals; split this block into a platform-specific file.
	signals := []os.Signal{syscall.SIGINT, syscall.SIGHUP, syscall.SIGUSR2, syscall.SIGQUIT}
	signal.Notify(ch, signals...)

	go func(ch chan os.Signal) {
		for {
			select {
			case x := <-ch:
				switch x {
				case syscall.SIGINT:
					self.Stop()
				case syscall.SIGQUIT:
					// TODO: sigdump-like feature; should write to a dedicated file descriptor
					pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
					pprof.Lookup("heap").WriteTo(os.Stdout, 1)
					pprof.Lookup("block").WriteTo(os.Stdout, 1)
				case syscall.SIGHUP:
					// reload config
					log.Info("reload configuration from %s", self.configPath)
					configuration.LoadConfigurationTo(self.configPath, self.config)
				case syscall.SIGUSR2:
					// graceful restart
					self.mu.Lock()

					var env []string
					for _, v := range os.Environ() {
						env = append(env, v)
					}

					descriptors := []*os.File{
						os.Stdin,
						os.Stdout,
						os.Stderr,
					}
					failed := false
					for i := 0; i < len(self.Servers); i++ {
						svr := self.Servers[i]
						f, e := svr.Listener().File()
						if e != nil {
							log.Error("Error: %s", e)
							failed = true
							break
						}
						fd, _ := syscall.Dup(int(f.Fd()))
						fx := os.NewFile(uintptr(fd), "sock")
						descriptors = append(descriptors, fx)
					}
					if failed {
						self.mu.Unlock()
						continue
					}

					env = append(env, "INHERIT=TRUE")
					_, err := os.StartProcess(self.execPath, os.Args, &os.ProcAttr{
						Dir:   self.workingDir,
						Env:   env,
						Files: descriptors,
					})
					if err != nil {
						// nothing was started, so there is no process to kill
						log.Error("Error: %s, aborting graceful restart.", err)
						self.mu.Unlock()
						continue
					}

					// assume the new server is up after 3 seconds
					time.Sleep(time.Second * 3)

					for i := 0; i < len(self.Servers); i++ {
						svr := self.Servers[i]
						svr.Graceful()
					}
					self.wg.Done()

					// Kill current connections in N seconds.
					self.Engine.Doom()
					self.mu.Unlock()
					return
				}
			}
		}
	}(ch)

	go self.Engine.Run()

	for i := 0; i < len(self.Servers); i++ {
		svr := self.Servers[i]
		self.wg.Add(1)
		go svr.ListenAndServe()
	}
}
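// A compressed sketch of the SIGUSR2 parent-side handoff above: dup each
// listener fd, append it after stderr so the child sees it as fd 3, 4, ...,
// and mark the child with INHERIT=TRUE. respawn and the :1883 address are
// illustrative; the real code resolves execPath with exec.LookPath and passes
// one descriptor per registered server.

package main

import (
	"net"
	"os"
	"syscall"
)

// respawn re-executes the binary, handing the listener down as fd 3.
func respawn(l *net.TCPListener, execPath string, args []string) (*os.Process, error) {
	f, err := l.File() // dups the listener's descriptor
	if err != nil {
		return nil, err
	}
	fd, err := syscall.Dup(int(f.Fd())) // a copy the child process will own
	if err != nil {
		return nil, err
	}
	files := []*os.File{os.Stdin, os.Stdout, os.Stderr, os.NewFile(uintptr(fd), "sock")}
	env := append(os.Environ(), "INHERIT=TRUE")
	return os.StartProcess(execPath, args, &os.ProcAttr{Env: env, Files: files})
}

func main() {
	if os.Getenv("INHERIT") == "TRUE" {
		return // the child would rebuild its listener from fd 3 here
	}
	l, err := net.ListenTCP("tcp", &net.TCPAddr{Port: 1883})
	if err != nil {
		panic(err)
	}
	if _, err := respawn(l, os.Args[0], os.Args); err != nil {
		panic(err)
	}
}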