func NewMmuxConnection() *MmuxConnection {
	conn := &MmuxConnection{
		OutGoingTable:    util.NewMessageTable(),
		SubscribeMap:     map[string]bool{},
		MaxOfflineQueue:  1000,
		Created:          time.Now(),
		Identifier:       "",
		SubscribedTopics: make(map[string]*SubscribeSet),
		CleanSession:     true,
	}

	return conn
}
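// Example (hypothetical sketch, not part of the package API): constructing a
// multiplexed connection and tagging it with a client identifier. The field
// names follow the struct literal above; the surrounding server wiring is
// assumed.
//
//	conn := NewMmuxConnection()
//	conn.Identifier = "client-1234"
//	conn.CleanSession = false // keep subscription state across reconnects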
// QoS 1 and 2 are available, but the implementation is rough.
// TODO: reconsider the QoS design later.
func NewMomonga(config *configuration.Config) *Momonga {
	engine := &Momonga{
		OutGoingTable: util.NewMessageTable(),
		TopicMatcher:  util.NewQlobber(),
		Connections:   map[uint32]map[string]*MmuxConnection{},
		RetryMap:      map[string][]*Retryable{},
		Started:       time.Now(),
		EnableSys:     false,
		DataStore:     datastore.NewMemstore(),
		LockPool:      map[uint32]*sync.RWMutex{},
		config:        config,
		InflightTable: map[string]*util.MessageTable{},
	}

	// Initialize the lock pool: connections are sharded into
	// config.GetLockPoolSize() buckets, each guarded by its own RWMutex.
	for i := 0; i < config.GetLockPoolSize(); i++ {
		engine.LockPool[uint32(i)] = &sync.RWMutex{}
		engine.Connections[uint32(i)] = make(map[string]*MmuxConnection)
	}

	auth := config.GetAuthenticators()
	if len(auth) > 0 {
		for i := 0; i < len(auth); i++ {
			var authenticator Authenticator

			switch auth[i].Type {
			case "empty":
				authenticator = &EmptyAuthenticator{}
				authenticator.Init(config)
			default:
				panic(fmt.Sprintf("Unsupported type specified: [%s]", auth[i].Type))
			}
			engine.registerAuthenticator(authenticator)
		}
	} else {
		// No authenticator configured; fall back to the permissive default.
		authenticator := &EmptyAuthenticator{}
		authenticator.Init(config)
		engine.registerAuthenticator(authenticator)
	}

	engine.setupCallback()
	return engine
}
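// Example (hypothetical sketch): bootstrapping an engine from a config file.
// configuration.LoadConfiguration and engine.Run are assumed to exist
// elsewhere in this repository; only NewMomonga above is shown verbatim.
//
//	config, err := configuration.LoadConfiguration("config.toml")
//	if err != nil {
//		log.Error("failed to load config: %s", err)
//		return
//	}
//	engine := NewMomonga(config)
//	go engine.Run()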
// TODO: clean this up somewhere.
func NewMyConnection(conf *MyConfig) *MyConnection {
	if conf == nil {
		conf = &defaultConfig
	}

	c := &MyConnection{
		Events:           make(map[string]interface{}),
		Queue:            make(chan codec.Message, conf.QueueSize),
		OfflineQueue:     make([]codec.Message, 0),
		MaxOfflineQueue:  conf.OfflineQueueSize,
		InflightTable:    util.NewMessageTable(),
		SubscribeHistory: make(map[string]int),
		Mutex:            sync.RWMutex{},
		Qlobber:          util.NewQlobber(),
		SubscribedTopics: make(map[string]int),
		Last:             time.Now(),
		CleanSession:     true,
		Keepalive:        conf.Keepalive,
		State:            STATE_INIT,
		Closed:           make(chan bool),
		MaxMessageSize:   conf.MaxMessageSize,
		fch:              make(chan bool, 1),
	}

	c.t = time.AfterFunc(time.Second*300, func() {
		// NOTE: assume flush
		if c.State == STATE_CONNECTED {
			c.kickFlusher()
		}
	})

	c.logger = log.Global
	if conf.Logger != nil {
		c.logger = conf.Logger
	}

	if conf.WritePerSec > 0 {
		c.balancer = &util.Balancer{
			PerSec: conf.WritePerSec,
		}
	}

	if conf.MaxMessageSize > 0 {
		c.MaxMessageSize = conf.MaxMessageSize
	}

	c.Events["connected"] = func() {
		c.State = STATE_CONNECTED
	}

	c.Events["connack"] = func(result uint8) {
		if result == 0 {
			c.SetState(STATE_CONNECTED)

			if c.Reconnect {
				// Re-establish the subscriptions that were active before the reconnect.
				for key, qos := range c.SubscribeHistory {
					c.Subscribe(key, qos)
				}
			}

			// TODO: this naive drain implementation should be fixed properly later.
			if len(c.OfflineQueue) > 0 {
				c.Mutex.Lock()
				var targets []codec.Message
				for len(c.OfflineQueue) > 0 {
					targets = append(targets, c.OfflineQueue[0])
					c.OfflineQueue = c.OfflineQueue[1:]
				}
				c.Mutex.Unlock()

				for i := 0; i < len(targets); i++ {
					c.Queue <- targets[i]
				}
			}

			c.setupKicker()
		} else {
			c.State = STATE_CLOSED
		}
	}

	// for Wait API
	c.InflightTable.SetOnFinish(func(id uint16, message codec.Message, opaque interface{}) {
		if m, ok := message.(*codec.PublishMessage); ok {
			if m.QosLevel == 1 || m.QosLevel == 2 {
				if b, ok := opaque.(chan bool); ok {
					close(b)
				}
			}
		}
	})

	// If more logic can be consolidated here, the Client can stay thin.
	c.Events["publish"] = func(msg *codec.PublishMessage) {
		if msg.QosLevel == 1 {
			ack := codec.NewPubackMessage()
			ack.PacketIdentifier = msg.PacketIdentifier
			c.WriteMessageQueue(ack)
			c.logger.Debug("Send puback message to sender. [%s: %d]", c.GetId(), ack.PacketIdentifier)
		} else if msg.QosLevel == 2 {
			ack := codec.NewPubrecMessage()
			ack.PacketIdentifier = msg.PacketIdentifier
			c.WriteMessageQueue(ack)
			c.logger.Debug("Send pubrec message to sender. [%s: %d]", c.GetId(), ack.PacketIdentifier)
		}
	}

	c.Events["puback"] = func(messageId uint16) {
		c.InflightTable.Unref(messageId)
	}

	c.Events["pubrec"] = func(messageId uint16) {
		ack := codec.NewPubrelMessage()
		ack.PacketIdentifier = messageId
		c.Queue <- ack
	}

	c.Events["pubrel"] = func(messageId uint16) {
		ack := codec.NewPubcompMessage()
		ack.PacketIdentifier = messageId
		c.Queue <- ack
		c.InflightTable.Unref(ack.PacketIdentifier) // unacknowledged
	}

	c.Events["pubcomp"] = func(messageId uint16) {
		c.InflightTable.Unref(messageId)
	}

	c.Events["unsuback"] = func(messageId uint16) {
		mm, err := c.InflightTable.Get(messageId)
		if err == nil {
			if v, ok := mm.(*codec.UnsubscribeMessage); ok {
				delete(c.SubscribeHistory, v.TopicName)
			}
		}
		c.InflightTable.Remove(messageId)
	}

	c.Events["subscribe"] = func(p *codec.SubscribeMessage) {
	}

	c.Events["suback"] = func(messageId uint16, granted int) {
		c.InflightTable.Remove(messageId)
	}

	c.Events["unsubscribe"] = func(messageId uint16, granted int, payload []codec.SubscribePayload) {
		for i := 0; i < len(payload); i++ {
			delete(c.SubscribeHistory, payload[i].TopicPath)
		}
	}

	// It might be better to pass the connection to this callback.
	c.Events["pingreq"] = func() {
		// TODO: check the ping count periodically, and abort MyConnection when the counter is exceeded.
		c.PingCounter++
	}

	c.Events["pingresp"] = func() {
		// nothing to do.
		c.PingCounter--
	}

	c.Events["disconnect"] = func() {
		// nothing to do?
		c.State = STATE_CLOSED
	}

	c.Events["error"] = func(err error) {
		//fmt.Printf("Error: %s\n", err)
	}

	c.Events["connect"] = func(msg *codec.ConnectMessage) {
	}

	c.Events["parsed"] = func() {
	}

	// Write queue: drains c.Queue while connected, spools messages to the
	// offline queue otherwise.
	go func() {
		for {
			select {
			case msg := <-c.Queue:
				state := c.GetState()
				if state == STATE_CONNECTED || state == STATE_CONNECTING {
					if msg.GetType() == codec.PACKET_TYPE_PUBLISH {
						sb := msg.(*codec.PublishMessage)
						if sb.QosLevel < 0 {
							c.logger.Error("QoS under zero. %s: %#v", c.Id, sb)
							break
						}

						if sb.QosLevel > 0 {
							id := c.InflightTable.NewId()
							sb.PacketIdentifier = id
							c.InflightTable.Register(id, sb, nil)
						}
					}

					e := c.writeMessage(msg)
					if e != nil {
						if v, ok := c.Events["error"].(func(error)); ok {
							v(e)
						}
					}
					c.invalidateTimer()
				} else {
					c.OfflineQueue = append(c.OfflineQueue, msg)
				}
			case <-c.Closed:
				if c.KeepLoop {
					time.Sleep(time.Second)
				} else {
					return
				}
			}
		}
	}()

	go c.flusher()

	return c
}
func (self *Momonga) SendPublishMessage(msg *codec.PublishMessage, client_id string, is_bridged bool) {
	// Don't pass a malformed message here; callers should validate the
	// message before using this API.
	if len(msg.TopicName) < 1 {
		return
	}

	rpc, jsonObj := stringutils.IsJsonRpc(string(msg.Payload))
	if rpc {
		fmt.Println("******************** message *****************", jsonObj)
		obj := jsonObj.(map[string]interface{})
		method := obj["method"]
		paramsObj := obj["params"].(map[string]interface{})

		// Pick the params and build a query string.
		params := url.Values{}
		for k, v := range paramsObj {
			if str, ok := v.(string); ok {
				params.Add(k, str)
			}
		}

		api := gopencils.Api("http://localhost:5080/api")
		resp := new(interface{})
		res := api.Res(strings.Replace(msg.TopicName, "/data/", "", 1), resp)
		res.SetHeader("Authorization", "Bearer 2eEkMA6x7NmNQwzpE3qLvgvQgdILER")

		switch method {
		case "get":
			res.Get()
			fmt.Println(client_id, "GET ", msg.TopicName, params.Encode())
		case "create":
			res.Post(paramsObj)
			fmt.Println(client_id, "POST ", msg.TopicName, params.Encode())
		case "update":
			fmt.Println(client_id, "PUT ", msg.TopicName, params.Encode())
		case "delete":
			fmt.Println(client_id, "DELETE ", msg.TopicName, params.Encode())
		}

		//fmt.Println(*resp)
		result, _ := json.Marshal(*resp)
		fmt.Println(string(result))
		fmt.Println(msg)
		msg.TopicName = "/client/" + client_id
		msg.Payload = result
		fmt.Println(msg)
	}

	// TODO: retained messages have to be persisted.
	if msg.Retain > 0 {
		if len(msg.Payload) == 0 {
			log.Debug("[DELETE RETAIN: %s]\n%s", msg.TopicName, hex.Dump([]byte(msg.TopicName)))

			err := self.DataStore.Del([]byte(msg.TopicName), []byte(msg.TopicName))
			if err != nil {
				log.Error("Error: %s\n", err)
			}
			// Delivering this message would be wrong, so return here.
			log.Debug("Deleted retain: %s", msg.TopicName)
			// TODO: do we return an ack here?
			Metrics.System.Broker.Messages.RetainedCount.Add(-1)
			return
		} else {
			buffer := bytes.NewBuffer(nil)
			codec.WriteMessageTo(msg, buffer)
			self.DataStore.Put([]byte(msg.TopicName), buffer.Bytes())
			Metrics.System.Broker.Messages.RetainedCount.Add(1)
		}
	}

	if Mflags["experimental.qos1"] {
		if msg.QosLevel == 1 {
			targets := self.TopicMatcher.Match(msg.TopicName)

			go func(msg *codec.PublishMessage, set []interface{}) {
				p := make(chan string, 1000)
				wg := sync.WaitGroup{}
				wg.Add(3) // bulk sender, retry worker, receiver
				mchan := make(chan string, 256)
				term := make(chan bool, 1)
				cnt := len(set)
				mng := make(map[string]*codec.PublishMessage)

				// Retry worker. This style of implementation keeps things simple.
				go func(term chan bool, mchan chan string, mng map[string]*codec.PublishMessage) {
					for {
						select {
						case m := <-mchan:
							if msg, ok := mng[m]; ok {
								conn, err := self.GetConnectionByClientId(m)
								if err != nil {
									// Connection does not exist; wait for the next retry.
									log.Debug("connection not exist. next retry: %s", err)
									continue
								}
								log.Debug("sending a retry msg: %s", msg)
								conn.WriteMessageQueue(msg)
							}
						case <-term:
							log.Debug("retry finished.")
							wg.Done()
							return
						}
					}
				}(term, mchan, mng)

				// Receiver: counts down acknowledged deliveries and drives retries.
				go func(p chan string, term chan bool, cnt int, mng map[string]*codec.PublishMessage, mchan chan string) {
					limit := time.After(time.Second * 60)
					for {
						select {
						case id := <-p:
							cnt--
							delete(mng, id)
							// This could be done in a callback, but passing the
							// wait group and friends around is tedious.
							if cnt < 1 {
								log.Debug("all delivery finished.")
								term <- true
								wg.Done()
								return
							}
						case <-time.After(time.Second * 20):
							// Sweep unfinished deliveries and schedule retries.
							for cid, m := range mng {
								m.Dupe = true
								mchan <- cid
							}
						case <-limit:
							log.Debug("gave up retry.")
							term <- true
							wg.Done()
							return
						}
					}
				}(p, term, cnt, mng, mchan)

				// Sender: finishes on its own.
				go func(msg *codec.PublishMessage, set []interface{}, p chan string, mng map[string]*codec.PublishMessage) {
					dp := make(map[string]bool)

					for i := range set {
						var tbl *util.MessageTable
						var ok bool

						myset := set[i].(*SubscribeSet)
						fmt.Printf("myset: %s", myset)

						// NOTE (from interoperability/client_test.py):
						//
						// overlapping subscriptions. When there is more than one matching subscription for the same client for a topic,
						// the server may send back one message with the highest QoS of any matching subscription, or one message for
						// each subscription with a matching QoS.
						//
						// Currently, we choose one message for each subscription with a matching QoS.
						if _, ok := dp[myset.ClientId]; ok {
							continue
						}
						dp[myset.ClientId] = true

						x, _ := codec.CopyPublishMessage(msg)
						x.QosLevel = myset.QoS

						// This is tricky: when a client reconnects with clean session
						// set to true, it must be treated as a different client.
						conn, err := self.GetConnectionByClientId(myset.ClientId)
						if err != nil {
							continue
						}

						if x.QosLevel == 0 {
							// Downgraded to QoS 0: deliver and finish immediately.
							conn.WriteMessageQueue(x)
							p <- myset.ClientId
							continue
						}

						if tbl, ok = self.InflightTable[myset.ClientId]; !ok {
							self.InflightTable[myset.ClientId] = util.NewMessageTable()
							// A callback could be hooked in here, though for QoS 1 it may not be needed.
							tbl = self.InflightTable[myset.ClientId]
						}

						id := tbl.NewId()
						x.PacketIdentifier = id
						x.Opaque = p
						tbl.Register2(id, x, 1, x)

						mng[myset.ClientId] = x
						conn.WriteMessageQueue(x)
					}

					log.Debug("all first delivery finished.")
					wg.Done()
				}(msg, set, p, mng)

				wg.Wait()
				close(p)
				close(mchan)
				close(term)
				mng = nil
				log.Debug("okay, cleanup qos1 sending thread.")
			}(msg, targets)

			return
		}
	}

	// Keep a refcount per MessageId received via Publish; when a Puback
	// comes back, decrement the refcount and drop the message at zero.
	//log.Debug("TopicName: %s %s", m.TopicName, m.Payload)
	targets := self.TopicMatcher.Match(msg.TopicName)

	if msg.TopicName[0:1] == "#" {
		// TODO: [MQTT-4.7.2-1] The Server MUST NOT match Topic Filters starting with a wildcard character
		// (# or +) with Topic Names beginning with a $ character
	}

	// Building the list first and then fetching connections means some clients
	// may already be gone by the time we send. All we need here is the
	// connection (client id) and the subscribed QoS; then we just send.
	// Ideally, Qlobber would return { ClientId, QoS } pairs, but those entries
	// are hard to delete. With proper error handling this is fine, and client
	// id is the better key: holding extra references to Connection is painful later.
	dp := make(map[string]bool)
	count := 0

	for i := range targets {
		var cn Connection
		var err error

		myset := targets[i].(*SubscribeSet)
		clientId := myset.ClientId
		//clientId := targets[i].(string)

		// NOTE (from interoperability/client_test.py):
		//
		// overlapping subscriptions. When there is more than one matching subscription for the same client for a topic,
		// the server may send back one message with the highest QoS of any matching subscription, or one message for
		// each subscription with a matching QoS.
		//
		// Currently, we choose one message for each subscription with a matching QoS.
		if _, ok := dp[clientId]; ok {
			continue
		}
		dp[clientId] = true

		cn, err = self.GetConnectionByClientId(clientId)
		if err != nil {
			// Arguably the client's fault: it existed when the list was built
			// but has since gone away, so skip it in the delivery list.
			log.Info("(can't fetch %s. already disconnected, or unsubscribed?)", clientId)
			continue
		}

		if cn.IsBridge() && clientId == client_id {
			// Don't send the message back to the same bridge.
			continue
		}

		var x *codec.PublishMessage
		if msg.QosLevel == 0 {
			// No need to copy the message on QoS 0.
			x = msg
		} else {
			x = codec.MustCopyPublishMessage(msg)
		}

		subscriberQos := myset.QoS
		// Downgrade the QoS to the subscriber's level if it is lower.
		if subscriberQos < x.QosLevel {
			x.QosLevel = subscriberQos
		}

		if x.QosLevel > 0 {
			// TODO: keep an InflightTable per client; using the engine's
			// OutGoingTable is a stopgap.
			id := self.OutGoingTable.NewId()
			x.PacketIdentifier = id
			if sender, ok := x.Opaque.(Connection); ok {
				// TODO: (what was this supposed to do here?)
				self.OutGoingTable.Register2(x.PacketIdentifier, x, len(targets), sender)
			}
		}

		cn.WriteMessageQueue(x)
		count++
	}

	Metrics.System.Broker.Messages.Sent.Add(int64(count))
}
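// The delivery loop above downgrades each copy of the message to the
// subscriber's granted QoS: the effective level is min(publisher QoS,
// subscription QoS), and a message is never upgraded. A minimal illustration
// of that rule (effectiveQos is a hypothetical helper, not part of this package):
//
//	func effectiveQos(publishQos, subscribeQos int) int {
//		if subscribeQos < publishQos {
//			return subscribeQos
//		}
//		return publishQos
//	}
//
//	// effectiveQos(2, 1) == 1: a QoS 2 publish reaches a QoS 1 subscriber at QoS 1.
//	// effectiveQos(0, 2) == 0: a QoS 0 publish stays at QoS 0.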