func (self *Handler) Publish(p *codec.PublishMessage) {
	//log.Info("Received Publish Message: %s: %+v", p.PacketIdentifier, p)
	conn := self.Connection

	// TODO: check permission.
	// TODO: this belongs in the engine (or should the connection below take care of it?)
	if p.QosLevel == 1 {
		ack := codec.NewPubackMessage()
		ack.PacketIdentifier = p.PacketIdentifier
		conn.WriteMessageQueue(ack)
		log.Debug("Send puback message to sender. [%s: %d]", conn.GetId(), ack.PacketIdentifier)
	} else if p.QosLevel == 2 {
		ack := codec.NewPubrecMessage()
		ack.PacketIdentifier = p.PacketIdentifier
		conn.WriteMessageQueue(ack)
		log.Debug("Send pubrec message to sender. [%s: %d]", conn.GetId(), ack.PacketIdentifier)
	}

	// TODO: assign a proper MessageID depending on the QoS.
	// The server and the client each keep their own MessageTable.
	if p.QosLevel > 0 {
		// TODO: which means we need an easy way to deep-copy messages.
		// There is a lot to think about here, but for now just do a full copy.
		// id := conn.GetOutGoingTable().NewId()
		// p.PacketIdentifier = id
		conn.GetOutGoingTable().Register(p.PacketIdentifier, p, conn)
		p.Opaque = conn
	}

	// NOTE: We don't block here. Currently this uses a goroutine, but the message
	// should really be handed off to a background worker.
	go self.Engine.SendPublishMessage(p, conn.GetId(), conn.IsBridge())
}
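// The TODO above asks for an easy way to deep-copy a publish message before it is
// registered in the outgoing table. A minimal sketch of such a helper, assuming only
// the codec.PublishMessage fields used elsewhere in this file (TopicName, Payload,
// QosLevel, Retain, Dupe); the real codec.CopyPublishMessage may differ, so treat this
// as illustrative only.
func copyPublishMessageSketch(src *codec.PublishMessage) *codec.PublishMessage {
	dst := codec.NewPublishMessage()
	dst.TopicName = src.TopicName
	dst.QosLevel = src.QosLevel
	dst.Retain = src.Retain
	dst.Dupe = src.Dupe
	// Copy the payload so later mutations of src do not leak into dst.
	dst.Payload = make([]byte, len(src.Payload))
	copy(dst.Payload, src.Payload)
	// PacketIdentifier and Opaque are intentionally left for the caller to assign.
	return dst
}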
func (self *Momonga) Subscribe(p *codec.SubscribeMessage, conn Connection) {
	log.Debug("Subscribe Message: [%s] %+v\n", conn.GetId(), p)

	ack := codec.NewSubackMessage()
	ack.PacketIdentifier = p.PacketIdentifier

	// TODO: occasionally conn is not an *MmuxConnection here, so this assertion can fail.
	cn := conn.(*MmuxConnection)

	var retained []*codec.PublishMessage
	// At what level should we lock?
	qosBuffer := bytes.NewBuffer(make([]byte, 0, len(p.Payload)))
	for _, payload := range p.Payload {
		// don't subscribe multiple times
		if cn.IsSubscribed(payload.TopicPath) {
			log.Error("Map exists. [%s:%s]", conn.GetId(), payload.TopicPath)
			continue
		}

		set := &SubscribeSet{
			TopicFilter: payload.TopicPath,
			ClientId:    conn.GetId(),
			QoS:         int(payload.RequestedQos),
		}
		binary.Write(qosBuffer, binary.BigEndian, payload.RequestedQos)

		// Retain
		self.TopicMatcher.Add(payload.TopicPath, set)
		conn.AppendSubscribedTopic(payload.TopicPath, set)

		retains := self.RetainMatch(payload.TopicPath)
		if len(retains) > 0 {
			for i := range retains {
				log.Debug("Retains: %s", retains[i].TopicName)

				id := conn.GetOutGoingTable().NewId()
				pp, _ := codec.CopyPublishMessage(retains[i])
				pp.PacketIdentifier = id
				conn.GetOutGoingTable().Register(id, pp, conn)
				retained = append(retained, pp)
			}
		}

		Metrics.System.Broker.SubscriptionsCount.Add(1)
	}

	ack.Qos = qosBuffer.Bytes()

	// MEMO: we can reply directly; there is no need to route or persist the message.
	log.Debug("Send Suback Message To: %s", conn.GetId())
	conn.WriteMessageQueue(ack)

	if len(retained) > 0 {
		log.Debug("Send retained Message To: %s", conn.GetId())
		for i := range retained {
			conn.WriteMessageQueue(retained[i])
		}
	}
}
func (self *Momonga) setupCallback() {
	self.OutGoingTable.SetOnFinish(func(id uint16, message codec.Message, opaque interface{}) {
		switch message.GetType() {
		case codec.PACKET_TYPE_PUBLISH:
			p := message.(*codec.PublishMessage)
			if p.QosLevel == 2 {
				ack := codec.NewPubcompMessage()
				ack.PacketIdentifier = p.PacketIdentifier
				// TODO: WHAAAT? I don't remember this
				// if conn != nil {
				// 	conn.WriteMessageQueue(ack)
				// }
			}
		default:
			log.Debug("Not supported: %d", message.GetType())
		}
	})

	// For now
	if self.EnableSys {
		msg := codec.NewPublishMessage()
		msg.TopicName = "$SYS/broker/broker/version"
		msg.Payload = []byte("0.1.0")
		msg.Retain = 1

		self.SendPublishMessage(msg, "", false)
	}
}
func (self *Handler) Pubcomp(messageId uint16) {
	// Receiving a pubcomp means this server was the sender of the original publish.
	log.Debug("Received Pubcomp Message from %s", self.Connection.GetId())

	self.Engine.OutGoingTable.Unref(messageId)
	self.Connection.GetOutGoingTable().Unref(messageId)
}
func (self *MmuxConnection) Attach(conn Connection) {
	self.Mutex.Lock()
	defer self.Mutex.Unlock()

	var container Connection
	container = conn
	old := atomic.SwapPointer((*unsafe.Pointer)((unsafe.Pointer)(&self.Connection)), unsafe.Pointer(&container))
	if old != nil {
		// 1. If the ClientId represents a Client already connected to the Server
		// then the Server MUST disconnect the existing Client [MQTT-3.1.4-2].
		(*(*Connection)(old)).Close()
		log.Debug("close existing connection")
	}

	self.CleanSession = conn.ShouldCleanSession()
	if conn.ShouldCleanSession() {
		self.OfflineQueue = self.OfflineQueue[:0]
		self.SubscribeMap = make(map[string]bool)
		self.SubscribedTopics = make(map[string]*SubscribeSet)

		// Should the remaining QoS1/QoS2 messages also be removed at this point?
		self.OutGoingTable.Clean()
	} else {
		if len(self.OfflineQueue) > 0 {
			log.Info("Process Offline Queue: Playback: %d", len(self.OfflineQueue))
			for i := 0; i < len(self.OfflineQueue); i++ {
				self.writeMessageQueue(self.OfflineQueue[i])
			}
			self.OfflineQueue = self.OfflineQueue[:0]
		}
	}
}
func (self *Handler) Disconnect() {
	log.Debug("Received disconnect from %s", self.Connection.GetId())

	if cn, ok := self.Connection.(*MyConnection); ok {
		cn.Disconnect()
	}

	Metrics.System.Broker.Clients.Connected.Add(-1)
	//return &DisconnectError{}
}
func (self *Momonga) Unsubscribe(messageId uint16, granted int, payloads []codec.SubscribePayload, conn Connection) {
	log.Debug("Unsubscribe :")
	ack := codec.NewUnsubackMessage()
	ack.PacketIdentifier = messageId

	topics := conn.GetSubscribedTopics()
	for _, payload := range payloads {
		if v, ok := topics[payload.TopicPath]; ok {
			self.TopicMatcher.Remove(payload.TopicPath, v)
		}
	}

	conn.WriteMessageQueue(ack)
}
func (self *Handler) HandshakeInternal(p *codec.ConnectMessage) {
	var conn *MyConnection
	var ok bool

	if conn, ok = self.Connection.(*MyConnection); !ok {
		log.Debug("wrong sequence.")
		self.Connection.Close()
		return
	}

	mux := self.Engine.Handshake(p, conn)
	if mux != nil {
		self.Connection = mux
	}
}
func (self *Handler) Puback(messageId uint16) {
	log.Debug("Received Puback Message from [%s: %d]", self.Connection.GetId(), messageId)

	if tbl, ok := self.Engine.InflightTable[self.Connection.GetId()]; ok {
		p, _ := tbl.Get(messageId)
		if msg, ok := p.(*codec.PublishMessage); ok {
			// TODO: this can still end up sending on an already closed channel.
			msg.Opaque.(chan string) <- self.Connection.GetId()
		}
		tbl.Unref(messageId)
	}

	// TODO: is this id internal only?
	self.Engine.OutGoingTable.Unref(messageId)
	self.Connection.GetOutGoingTable().Unref(messageId)
}
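// The TODO above notes that msg.Opaque may be a channel which the QoS1 delivery
// goroutine has already closed, in which case the plain send panics. A minimal sketch
// of one way to guard that send; safeSend is a hypothetical helper, not something the
// engine currently provides.
func safeSend(ch chan string, id string) (ok bool) {
	defer func() {
		// Sending on a closed channel panics; treat that as "delivery already finished".
		if recover() != nil {
			ok = false
		}
	}()

	select {
	case ch <- id:
		return true
	default:
		// Channel full (or nobody listening anymore); drop rather than block the handler.
		return false
	}
}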
func (self *TlsServer) Serve(l net.Listener) error {
	defer func() {
		l.Close()
	}()

	var tempDelay time.Duration // how long to sleep on accept failure

Accept:
	for {
		select {
		case <-self.stop:
			break Accept
		default:
			client, err := l.Accept()
			if err != nil {
				if ne, ok := err.(net.Error); ok && ne.Temporary() {
					if tempDelay == 0 {
						tempDelay = 5 * time.Millisecond
					} else {
						tempDelay *= 2
					}
					if max := 1 * time.Second; tempDelay > max {
						tempDelay = max
					}
					log.Info("momonga: Accept error: %v; retrying in %v", err, tempDelay)
					time.Sleep(tempDelay)
					continue
				}

				if strings.Contains(err.Error(), "use of closed network connection") {
					log.Error("Accept Failed: %s", err)
					continue
				}

				log.Error("Accept Error: %s", err)
				return err
			}
			tempDelay = 0

			if v, ok := client.(*net.TCPConn); ok {
				v.SetNoDelay(true)
				v.SetKeepAlive(true)
			}

			myconf := GetDefaultMyConfig()
			myconf.MaxMessageSize = self.Engine.Config().Server.MessageSizeLimit
			conn := NewMyConnection(myconf)
			conn.SetMyConnection(client)
			conn.SetId(client.RemoteAddr().String())

			log.Debug("Accepted: %s", conn.GetId())
			go self.Engine.HandleConnection(conn)
		}
	}

	self.listener.(*MyListener).wg.Wait()
	self.once.Do(func() {
		self.wg.Done()
	})
	return nil
}
func (self *Momonga) HandleConnection(conn Connection) {
	defer func() {
		if err := recover(); err != nil {
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			log.Error("momonga: panic serving %s: %v\n%s", conn.GetId(), err, buf)
		}
	}()

	hndr := NewHandler(conn, self)

	for {
		// TODO: rename this API; it actually reads and processes a message.
		_, err := conn.ReadMessage()
		if err != nil {
			log.Debug("Read Message Error: %s", err)
		}

		if conn.GetState() == STATE_CLOSED {
			err = &DisconnectError{}
		}

		Metrics.System.Broker.Messages.Received.Add(1)

		if err != nil {
			Metrics.System.Broker.Clients.Connected.Add(-1)
			log.Debug("DISCONNECT: %s", conn.GetId())
			// If conn is still the raw MyConnection here, the client died before the handshake.
			//self.Engine.CleanSubscription(conn)
			mux, e := self.GetConnectionByClientId(conn.GetId())
			if e != nil {
				log.Error("(while processing disconnect) can't fetch connection: %s, %T", conn.GetId(), conn)
			}

			if _, ok := err.(*DisconnectError); !ok {
				if conn.HasWillMessage() {
					self.SendWillMessage(conn)
				}

				if err == io.EOF {
					// nothing to do
				} else {
					log.Error("Handle Connection Error: %s", err)
				}
			}

			if mux != nil {
				var dummy *DummyPlug
				if mux.ShouldCleanSession() {
					self.CleanSubscription(mux)
					self.RemoveConnectionByClientId(mux.GetId())
				} else {
					dummy = NewDummyPlug(self)
					dummy.SetId(conn.GetId())
				}
				mux.Detach(conn, dummy)
			}

			if v, ok := conn.(*MyConnection); ok {
				v.Terminate()
			}

			conn.Close()
			hndr.Close()
			return
		}
	}
}
func (self *Momonga) Handshake(p *codec.ConnectMessage, conn *MyConnection) *MmuxConnection {
	log.Debug("handshaking: %s", p.Identifier)

	if conn.Connected {
		log.Error("wrong sequence. (connect twice)")
		conn.Close()
		return nil
	}

	if ok := self.checkVersion(p); ok != nil {
		log.Error("magic is not expected: %s %+v\n", string(p.Magic), p)
		conn.Close()
		return nil
	}

	// TODO: implement authenticator
	if ok, _ := self.Authenticate(p.UserName, p.Password); !ok {
		log.Error("authenticate failed:")
		conn.Close()
		return nil
	}

	// Preserve the will message when the will flag is set.
	if (p.Flag & 0x4) > 0 {
		conn.SetWillMessage(*p.Will)
	}

	if (p.Version & 0x80) > 0 {
		conn.Bridged = true
	}

	if !p.CleanSession {
		conn.DisableCleanSession()
	}

	var mux *MmuxConnection
	var err error
	reply := codec.NewConnackMessage()

	if mux, err = self.GetConnectionByClientId(p.Identifier); err == nil {
		// [MQTT-3.2.2-2] If the Server accepts a connection with CleanSession set to 0,
		// the value set in Session Present depends on whether the Server already has stored Session state
		// for the supplied client ID. If the Server has stored Session state,
		// it MUST set Session Present to 1 in the CONNACK packet.
		reply.Reserved |= 0x01
	}

	// The CONNACK must be the first response.
	// Shouldn't Attach take care of all of the clean-session work?
	conn.WriteMessageQueue(reply)

	if mux != nil {
		log.Info("Attach to mux[%s]", mux.GetId())

		conn.SetId(p.Identifier)
		conn.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetState(STATE_CONNECTED)
		mux.DisableCleanSession()

		if conn.ShouldCleanSession() {
			self.CleanSubscription(mux)
		}
		mux.Attach(conn)
	} else {
		mux = NewMmuxConnection()
		mux.SetId(p.Identifier)
		mux.Attach(conn)
		self.SetConnectionByClientId(mux.GetId(), mux)

		conn.SetId(p.Identifier)
		conn.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetState(STATE_CONNECTED)

		log.Debug("Starting new mux[%s]", mux.GetId())
	}

	if p.CleanSession {
		// Honestly, this does not matter much.
		delete(self.RetryMap, mux.GetId())
	} else {
		// Okay, attach to the existing session.
		tbl := mux.GetOutGoingTable()

		// Resend the QoS1/QoS2 messages that were still in flight on the previous connection.
		msgs := make([]codec.Message, 0)
		for _, c := range tbl.Hash {
			if v, ok := c.Message.(*codec.PublishMessage); ok {
				if v.QosLevel > 0 {
					//mux.WriteMessageQueue(c.Message)
					msgs = append(msgs, c.Message)
				}
			}
		}
		tbl.Clean()

		// TODO: improve this
		for _, v := range msgs {
			mux.WriteMessageQueue(v)
		}
	}

	conn.Connected = true
	log.Debug("handshake Successful: %s", p.Identifier)
	Metrics.System.Broker.Clients.Connected.Add(1)
	return mux
}
func (self *Momonga) SendPublishMessage(msg *codec.PublishMessage, client_id string, is_bridged bool) {
	// Don't pass a malformed message here. The caller should validate the message before using this API.
	if len(msg.TopicName) < 1 {
		return
	}

	// TODO: Have to persist retain message.
	if msg.Retain > 0 {
		if len(msg.Payload) == 0 {
			log.Debug("[DELETE RETAIN: %s]\n%s", msg.TopicName, hex.Dump([]byte(msg.TopicName)))

			err := self.DataStore.Del([]byte(msg.TopicName), []byte(msg.TopicName))
			if err != nil {
				log.Error("Error: %s\n", err)
			}
			// Delivering this message would be wrong; an empty retained payload only clears the entry.
			log.Debug("Deleted retain: %s", msg.TopicName)
			// Hmm, do we have to send back an ack here?
			Metrics.System.Broker.Messages.RetainedCount.Add(-1)
			return
		} else {
			buffer := bytes.NewBuffer(nil)
			codec.WriteMessageTo(msg, buffer)
			self.DataStore.Put([]byte(msg.TopicName), buffer.Bytes())
			Metrics.System.Broker.Messages.RetainedCount.Add(1)
		}
	}

	if Mflags["experimental.qos1"] {
		if msg.QosLevel == 1 {
			targets := self.TopicMatcher.Match(msg.TopicName)

			go func(msg *codec.PublishMessage, set []interface{}) {
				p := make(chan string, 1000)
				wg := sync.WaitGroup{}
				wg.Add(3) // bulk sender, retry worker, receiver
				mchan := make(chan string, 256)
				term := make(chan bool, 1)
				cnt := len(set)
				mng := make(map[string]*codec.PublishMessage)

				// Retry worker. An implementation like this keeps things very simple.
				go func(term chan bool, mchan chan string, mng map[string]*codec.PublishMessage) {
					for {
						select {
						case m := <-mchan:
							if msg, ok := mng[m]; ok {
								conn, err := self.GetConnectionByClientId(m)
								if err != nil {
									// The connection does not exist right now; wait for the next retry.
									fmt.Printf("something wrong: %s %s", m, err)
									continue
								}
								log.Debug("sending a retry msg: %s", msg)
								conn.WriteMessageQueue(msg)
							}
						case <-term:
							log.Debug(" retry finished.")
							wg.Done()
							return
						}
					}
				}(term, mchan, mng)

				// Receiver: counts acknowledgements as they come back on p.
				go func(p chan string, term chan bool, cnt int, mng map[string]*codec.PublishMessage, mchan chan string) {
					limit := time.After(time.Second * 60)
					for {
						select {
						case id := <-p:
							cnt--
							delete(mng, id)
							// This could be done in a callback, too, but passing the wait group
							// and friends around is a hassle.
							if cnt < 1 {
								log.Debug(" all delivery finished.")
								term <- true
								wg.Done()
								return
							}
						case <-time.After(time.Second * 20):
							// Walk over the unfinished messages and schedule retries.
							for cid, m := range mng {
								m.Dupe = true
								mchan <- cid
							}
						case <-limit:
							log.Debug(" gave up retry.")
							term <- true
							wg.Done()
							return
						}
					}
				}(p, term, cnt, mng, mchan)

				// Sender. This one finishes on its own.
				go func(msg *codec.PublishMessage, set []interface{}, p chan string, mng map[string]*codec.PublishMessage) {
					dp := make(map[string]bool)
					for i := range targets {
						var tbl *util.MessageTable
						var ok bool

						myset := targets[i].(*SubscribeSet)
						log.Debug("myset: %+v", myset)

						// NOTE (from interoperability/client_test.py):
						//
						// overlapping subscriptions. When there is more than one matching subscription for the same client for a topic,
						// the server may send back one message with the highest QoS of any matching subscription, or one message for
						// each subscription with a matching QoS.
						//
						// Currently, we choose one message for each subscription with a matching QoS.
						//
						// if _, ok := dp[myset.ClientId]; ok {
						// 	continue
						// }
						dp[myset.ClientId] = true

						x, _ := codec.CopyPublishMessage(msg)
						x.QosLevel = myset.QoS

						// This is a pain: when the client reconnects with clean session set,
						// it has to be treated as an entirely different session.
						conn, err := self.GetConnectionByClientId(myset.ClientId)
						if err != nil {
							continue
						}

						if x.QosLevel == 0 {
							// When downgraded to QoS 0, deliver and finish right away.
							conn.WriteMessageQueue(x)
							p <- myset.ClientId
							continue
						}

						if tbl, ok = self.InflightTable[myset.ClientId]; !ok {
							self.InflightTable[myset.ClientId] = util.NewMessageTable()
							// A callback could be hooked in here; for QoS1 it is probably unnecessary.
							tbl = self.InflightTable[myset.ClientId]
						}

						id := tbl.NewId()
						x.PacketIdentifier = id
						x.Opaque = p
						tbl.Register2(id, x, 1, x)

						mng[myset.ClientId] = x
						conn.WriteMessageQueue(x)
					}

					log.Debug(" all first delivery finished.")
					wg.Done()
				}(msg, targets, p, mng)

				wg.Wait()
				close(p)
				close(mchan)
				close(term)
				mng = nil
				log.Debug(" okay, cleanup qos1 sending thread.")
			}(msg, targets)
			return
		}
	}

	// Keep a count for each MessageId received via Publish; decrement the refcount when
	// the corresponding Puback comes back, and delete the message once it reaches zero.
	//log.Debug("TopicName: %s %s", m.TopicName, m.Payload)
	targets := self.TopicMatcher.Match(msg.TopicName)

	if msg.TopicName[0:1] == "#" {
		// TODO: [MQTT-4.7.2-1] The Server MUST NOT match Topic Filters starting with a wildcard character
		// (# or +) with Topic Names beginning with a $ character
	}

	// Building the list first and then delivering means some subscribers may already be
	// gone by the time we reach them. All we need here is the client id and the
	// subscribed QoS, so what the matcher returns should really be something like:
	//
	// {
	//     ClientId: client id (holding Connection references only makes cleanup harder)
	//     QoS:
	// }
	//
	// With proper error handling this is not actually a problem, and a client id is the
	// better choice anyway.
	dp := make(map[string]bool)
	count := 0

	for i := range targets {
		var cn Connection
		var ok error

		myset := targets[i].(*SubscribeSet)
		clientId := myset.ClientId
		//clientId := targets[i].(string)

		// NOTE (from interoperability/client_test.py):
		//
		// overlapping subscriptions. When there is more than one matching subscription for the same client for a topic,
		// the server may send back one message with the highest QoS of any matching subscription, or one message for
		// each subscription with a matching QoS.
		//
		// Currently, we choose one message for each subscription with a matching QoS.
		//
		// if _, ok := dp[clientId]; ok {
		// 	continue
		// }
		dp[clientId] = true

		cn, ok = self.GetConnectionByClientId(clientId)
		if ok != nil {
			// If anything, this is the client's fault: it was there when we built the
			// list but has disconnected (or unsubscribed) since, so skip it.
			log.Info("(can't fetch %s. already disconnected, or unsubscribed?)", clientId)
			continue
		}

		if cn.IsBridge() && clientId == client_id {
			// Don't send the message back to the same bridge.
			continue
		}

		var x *codec.PublishMessage
		if msg.QosLevel == 0 {
			// we don't need to copy the message on QoS 0
			x = msg
		} else {
			x = codec.MustCopyPublishMessage(msg)
		}

		subscriberQos := myset.QoS
		// Downgrade QoS to the subscriber's requested QoS.
		if subscriberQos < x.QosLevel {
			x.QosLevel = subscriberQos
		}

		if x.QosLevel > 0 {
			// TODO: keep an InflightTable per client.
			// Using the engine's OutGoingTable here is only a stopgap.
			id := self.OutGoingTable.NewId()
			x.PacketIdentifier = id
			if sender, ok := x.Opaque.(Connection); ok {
				// TODO: what was this supposed to do here...?
				self.OutGoingTable.Register2(x.PacketIdentifier, x, len(targets), sender)
			}
		}

		cn.WriteMessageQueue(x)
		count++
	}

	Metrics.System.Broker.Messages.Sent.Add(int64(count))
}
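// The comment in SendPublishMessage above describes a refcounting scheme: a message is
// registered with a receiver count, decremented on every Puback, and dropped once the
// count reaches zero. A minimal sketch of that idea, independent of util.MessageTable
// (whose actual API may differ); refCountTable and its methods are hypothetical
// illustrations, not part of the current engine.
type refCountEntry struct {
	message codec.Message
	count   int
}

type refCountTable struct {
	mu      sync.Mutex
	entries map[uint16]*refCountEntry
}

func newRefCountTable() *refCountTable {
	return &refCountTable{entries: make(map[uint16]*refCountEntry)}
}

// register stores a message together with the number of receivers that still have to ack it.
func (t *refCountTable) register(id uint16, m codec.Message, count int) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.entries[id] = &refCountEntry{message: m, count: count}
}

// unref decrements the counter and removes the entry once every receiver has acked.
func (t *refCountTable) unref(id uint16) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if e, ok := t.entries[id]; ok {
		e.count--
		if e.count <= 0 {
			delete(t.entries, id)
		}
	}
}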
func (self *Handler) Pubrel(messageId uint16) {
	ack := codec.NewPubcompMessage()
	ack.PacketIdentifier = messageId
	self.Connection.WriteMessageQueue(ack)
	log.Debug("Send pubcomp message to sender. [%s: %d]", self.Connection.GetId(), messageId)
}
func (self *Handler) Unsubscribe(messageId uint16, granted int, payloads []codec.SubscribePayload) {
	log.Debug("Received unsubscribe from [%s]: %d\n", self.Connection.GetId(), messageId)
	self.Engine.Unsubscribe(messageId, granted, payloads, self.Connection)
}
func (self *DummyPlug) Run() {
	for {
		select {
		case <-self.stop:
			return
		case b := <-self.Switch:
			self.Running = b
		case m := <-self.message:
			if self.Running {
				// When a message arrives, we only have to call the engine's API.
				switch m.GetType() {
				case mqtt.PACKET_TYPE_CONNECT:
				case mqtt.PACKET_TYPE_CONNACK:
				case mqtt.PACKET_TYPE_PUBLISH:
					// TODO:
					if p, ok := m.(*mqtt.PublishMessage); ok {
						fmt.Printf("%s\n", p)
						switch p.QosLevel {
						case 1:
							log.Debug("[DUMMY] Received Publish Message from [%d]. reply puback", p.PacketIdentifier)
							self.engine.OutGoingTable.Unref(p.PacketIdentifier)
						case 2:
							log.Debug("[DUMMY] Received Publish Message from [%d]. reply pubrec", p.PacketIdentifier)
							self.engine.OutGoingTable.Unref(p.PacketIdentifier)
							rel := mqtt.NewPubrelMessage()
							rel.PacketIdentifier = p.PacketIdentifier
							// NOTE: the client has to reply with a pubrec to the server
							self.message <- rel
						}
					}
				case mqtt.PACKET_TYPE_DISCONNECT:
					// TODO: is this right?
				case mqtt.PACKET_TYPE_SUBSCRIBE:
				case mqtt.PACKET_TYPE_SUBACK:
				case mqtt.PACKET_TYPE_UNSUBSCRIBE:
				case mqtt.PACKET_TYPE_UNSUBACK:
				case mqtt.PACKET_TYPE_PINGRESP:
				case mqtt.PACKET_TYPE_PINGREQ:
				case mqtt.PACKET_TYPE_PUBACK:
					// TODO: (nothing to do)
				case mqtt.PACKET_TYPE_PUBREC:
					// TODO: (nothing to do)
				case mqtt.PACKET_TYPE_PUBREL:
					// TODO:
					if p, ok := m.(*mqtt.PubrelMessage); ok {
						log.Debug("[DUMMY] Received Pubrel Message from [%d]. send pubcomp", p.PacketIdentifier)
						self.engine.OutGoingTable.Unref(p.PacketIdentifier)
						cmp := mqtt.NewPubcompMessage()
						cmp.PacketIdentifier = p.PacketIdentifier
						// NOTE: the client has to reply with a pubcomp to the server
						self.message <- cmp
					}
				case mqtt.PACKET_TYPE_PUBCOMP:
					// TODO: (nothing)
				default:
					return
				}
			} else {
				// discard the message
			}
		}
	}
}