func main() {
	foreGround := flag.Bool("foreground", true, "run as foreground")
	configFile := flag.String("config", "config.toml", "the config file")
	flag.Parse()

	// f, _ := os.Create("profiler")
	// pprof.StartCPUProfile(f)
	// defer func() {
	// 	pprof.StopCPUProfile()
	// 	os.Exit(0)
	// }()

	if !*foreGround {
		err := util.Daemonize(0, 0)
		if err != 0 {
			log.Info("daemonize failed")
			os.Exit(-1)
		}
	}

	pid := os.Getpid()
	log.Info("Server pid: %d started", pid)

	confpath, _ := filepath.Abs(*configFile)
	runtime.GOMAXPROCS(runtime.NumCPU())

	app := server.NewApplication(confpath)
	app.Start()
	app.Loop()

	log.Info("Server pid: %d finished", pid)
}
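The commented-out profiler block above ignores the error from os.Create, which can hand a nil *os.File to pprof. A minimal sketch of a safer variant; startProfiler is a hypothetical helper, not part of this package:

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

// startProfiler begins CPU profiling to the given path and returns a
// function the caller should defer to flush and close the profile.
func startProfiler(path string) func() {
	f, err := os.Create(path)
	if err != nil {
		log.Fatalf("could not create %s: %v", path, err)
	}
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatalf("could not start CPU profile: %v", err)
	}
	return func() {
		pprof.StopCPUProfile()
		f.Close()
	}
}

func main() {
	stop := startProfiler("profiler")
	defer stop()
	// ... run the server ...
}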
func (self *HttpServer) Serve(l net.Listener) error {
	log.Info("momonga_http: started http server: %s", l.Addr().String())

	// TODO: how do I stop this?
	go self.Server.Serve(l)

	return nil
}
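The TODO above has a standard answer in Go 1.8+: http.Server.Shutdown drains in-flight requests and makes Serve return http.ErrServerClosed. A minimal sketch, independent of this package's types:

package main

import (
	"context"
	"log"
	"net"
	"net/http"
	"time"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}

	srv := &http.Server{Handler: http.NotFoundHandler()}
	go srv.Serve(l) // returns http.ErrServerClosed once Shutdown is called

	// Shutdown stops accepting and waits for active requests, bounded by ctx.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Fatal(err)
	}
	log.Println("server stopped cleanly")
}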
func (self *MmuxConnection) Attach(conn Connection) {
	self.Mutex.Lock()
	defer self.Mutex.Unlock()

	var container Connection = conn
	old := atomic.SwapPointer((*unsafe.Pointer)(unsafe.Pointer(&self.Connection)), unsafe.Pointer(&container))
	if old != nil {
		// 1. If the ClientId represents a Client already connected to the Server
		//    then the Server MUST disconnect the existing Client [MQTT-3.1.4-2].
		(*(*Connection)(old)).Close()
		log.Debug("close existing connection")
	}

	self.CleanSession = conn.ShouldCleanSession()
	if conn.ShouldCleanSession() {
		self.OfflineQueue = self.OfflineQueue[:0]
		self.SubscribeMap = make(map[string]bool)
		self.SubscribedTopics = make(map[string]*SubscribeSet)
		// Should I remove remaining QoS1, QoS2 messages at this time?
		self.OutGoingTable.Clean()
	} else {
		if len(self.OfflineQueue) > 0 {
			log.Info("Process Offline Queue: Playback: %d", len(self.OfflineQueue))
			for i := 0; i < len(self.OfflineQueue); i++ {
				self.writeMessageQueue(self.OfflineQueue[i])
			}
			self.OfflineQueue = self.OfflineQueue[:0]
		}
	}
}
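The swap-and-close pattern in Attach stores the interface value behind an unsafe.Pointer so readers can load it atomically without taking the mutex. A self-contained sketch of the same pattern; Conn, holder, and swapIn are illustrative stand-ins, not names from this package:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type Conn interface{ Close() error }

type holder struct {
	conn unsafe.Pointer // effectively *Conn; always accessed atomically
}

// swapIn installs the new connection and closes the one it replaces,
// mirroring [MQTT-3.1.4-2]: a second client with the same ClientId
// evicts the first.
func (h *holder) swapIn(c Conn) {
	box := &c // box the interface value so a single word can be swapped
	old := atomic.SwapPointer(&h.conn, unsafe.Pointer(box))
	if old != nil {
		(*(*Conn)(old)).Close()
	}
}

type fakeConn struct{ id string }

func (f *fakeConn) Close() error { fmt.Println("closed", f.id); return nil }

func main() {
	h := &holder{}
	h.swapIn(&fakeConn{id: "first"})
	h.swapIn(&fakeConn{id: "second"}) // prints: closed first
}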
func (self *TcpServer) ListenAndServe() error {
	if self.inherit {
		file := os.NewFile(uintptr(3), "sock")
		tmp, err := net.FileListener(file)
		file.Close()
		if err != nil {
			log.Error("Error: %s", err)
			return err
		}

		listener := tmp.(*net.TCPListener)
		self.listener = &MyListener{Listener: listener}
	} else {
		addr, err := net.ResolveTCPAddr("tcp4", self.ListenAddress)
		if err != nil {
			return err
		}

		listener, err := net.ListenTCP("tcp", addr)
		if err != nil {
			panic(fmt.Sprintf("Error: %s", err))
		}
		self.listener = &MyListener{Listener: listener}
	}

	log.Info("momonga_tcp: started tcp server: %s", self.listener.Addr().String())
	for i := 0; i < self.config.GetAcceptorCount(); i++ {
		go self.Serve(self.listener)
	}

	return nil
}
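The inherit branch relies on the parent passing the listening socket as fd 3. A minimal sketch of both sides of that handshake, assuming the parent uses ExtraFiles (as exec.Cmd numbers them, ExtraFiles[0] becomes fd 3 in the child, matching os.NewFile(uintptr(3), "sock") above); names and the INHERIT convention mirror this package, the rest is illustrative:

package main

import (
	"log"
	"net"
	"os"
	"os/exec"
)

// spawnChild duplicates the listening socket and hands it to a child process.
func spawnChild(l *net.TCPListener) error {
	f, err := l.File() // dup(2) of the listening socket
	if err != nil {
		return err
	}
	cmd := exec.Command(os.Args[0])
	cmd.Env = append(os.Environ(), "INHERIT=TRUE")
	cmd.ExtraFiles = []*os.File{f} // becomes fd 3 in the child
	return cmd.Start()
}

func main() {
	if os.Getenv("INHERIT") == "TRUE" {
		// child: rebuild the listener from the inherited descriptor
		f := os.NewFile(uintptr(3), "sock")
		l, err := net.FileListener(f)
		f.Close() // FileListener dups the fd, so the original can be closed
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("inherited listener on %s", l.Addr())
		return
	}

	// parent: listen, then hand the socket over
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	if err := spawnChild(l.(*net.TCPListener)); err != nil {
		log.Fatal(err)
	}
}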
func (self *Momonga) Doom() {
	for _, x := range self.Connections {
		for _, v := range x {
			wait := 5 + rand.Intn(30)
			log.Info("DOOM in %d seconds: %s\n", wait, v.GetId())
			go func(x *MmuxConnection, wait int) {
				time.AfterFunc(time.Second*time.Duration(wait), func() {
					x.Close()
				})
			}(v, wait)
		}
	}

	time.AfterFunc(time.Second*60, func() {
		log.Info("Force Exit")
		os.Exit(0)
	})
}
func (self *HttpServer) Stop() {
	close(self.stop)
	self.once.Do(func() {
		self.listener.(*HttpListener).wg.Wait()
		log.Info("Finished HTTP Listener")
		self.wg.Done()
	})
}
func (self *HttpListener) File() (f *os.File, err error) {
	if tl, ok := self.Listener.(*net.TCPListener); ok {
		return tl.File()
	}

	log.Info("HttpListener failed to convert file: %T", self.Listener)
	return nil, nil
}
func (self *TlsServer) ListenAndServe() error {
	cert, err := tls.LoadX509KeyPair(self.config.Server.Certfile, self.config.Server.Keyfile)
	if err != nil {
		panic(fmt.Sprintf("LoadX509KeyPair error: %s", err))
	}
	config := &tls.Config{Certificates: []tls.Certificate{cert}}
	self.tlsConfig = config

	if self.inherit {
		file := os.NewFile(uintptr(3), "sock")
		tmp, err := net.FileListener(file)
		file.Close()
		if err != nil {
			log.Error("Error: %s", err)
			return err
		}

		// TODO: IS THIS CORRECT? The inherited listener is used as-is and is
		// not wrapped with TLS here.
		listener := tmp.(net.Listener)
		self.listener = &MyListener{Listener: listener}
	} else {
		listener, err := tls.Listen("tcp", self.ListenAddress, config)
		if err != nil {
			panic(fmt.Sprintf("Error: %s", err))
		}
		self.listener = &MyListener{Listener: listener}
	}

	log.Info("momonga_tls: started tls server: %s", self.listener.Addr().String())
	for i := 0; i < self.config.GetAcceptorCount(); i++ {
		go self.Serve(self.listener)
	}

	return nil
}
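A quick way to verify the TLS listener is up is a handshake-only smoke test with crypto/tls. A minimal sketch, assuming a self-signed certificate (hence InsecureSkipVerify) and an illustrative address; it only completes the TLS handshake, it does not speak MQTT:

package main

import (
	"crypto/tls"
	"log"
)

func main() {
	conn, err := tls.Dial("tcp", "localhost:8883", &tls.Config{
		InsecureSkipVerify: true, // self-signed test cert only; never in production
	})
	if err != nil {
		log.Fatalf("tls dial: %v", err)
	}
	defer conn.Close()
	log.Printf("negotiated TLS version %x with %s", conn.ConnectionState().Version, conn.RemoteAddr())
}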
func (self *TlsServer) Serve(l net.Listener) error {
	defer func() {
		l.Close()
	}()

	var tempDelay time.Duration // how long to sleep on accept failure

Accept:
	for {
		select {
		case <-self.stop:
			break Accept
		default:
			client, err := l.Accept()
			if err != nil {
				if ne, ok := err.(net.Error); ok && ne.Temporary() {
					if tempDelay == 0 {
						tempDelay = 5 * time.Millisecond
					} else {
						tempDelay *= 2
					}
					if max := 1 * time.Second; tempDelay > max {
						tempDelay = max
					}
					log.Info("momonga: Accept error: %v; retrying in %v", err, tempDelay)
					time.Sleep(tempDelay)
					continue
				}

				if strings.Contains(err.Error(), "use of closed network connection") {
					log.Error("Accept Failed: %s", err)
					continue
				}

				log.Error("Accept Error: %s", err)
				return err
			}
			tempDelay = 0

			// Set TCP options when the accepted connection is a raw TCP socket.
			if v, ok := client.(*net.TCPConn); ok {
				v.SetNoDelay(true)
				v.SetKeepAlive(true)
			}

			myconf := GetDefaultMyConfig()
			myconf.MaxMessageSize = self.Engine.Config().Server.MessageSizeLimit
			conn := NewMyConnection(myconf)
			conn.SetMyConnection(client)
			conn.SetId(client.RemoteAddr().String())

			log.Debug("Accepted: %s", conn.GetId())
			go self.Engine.HandleConnection(conn)
		}
	}

	self.listener.(*MyListener).wg.Wait()
	self.once.Do(func() {
		self.wg.Done()
	})

	return nil
}
func (self *TlsServer) Listener() Listener {
	log.Info("LIS: %#v\n", self.listener)
	return self.listener
}
func (self *TlsServer) Graceful() {
	log.Info("stop accepting new connections")
	close(self.stop)
	self.listener.Close()
}
func (self *Momonga) Handshake(p *codec.ConnectMessage, conn *MyConnection) *MmuxConnection {
	log.Debug("handshaking: %s", p.Identifier)

	if conn.Connected {
		log.Error("wrong sequence. (connect twice)")
		conn.Close()
		return nil
	}

	if ok := self.checkVersion(p); ok != nil {
		log.Error("magic is not expected: %s %+v\n", string(p.Magic), p)
		conn.Close()
		return nil
	}

	// TODO: implement authenticator
	if ok, _ := self.Authenticate(p.UserName, p.Password); !ok {
		log.Error("authenticate failed:")
		conn.Close()
		return nil
	}

	// preserve the will message when the will flag is set
	if (p.Flag & 0x4) > 0 {
		conn.SetWillMessage(*p.Will)
	}

	if (p.Version & 0x80) > 0 {
		conn.Bridged = true
	}

	if !p.CleanSession {
		conn.DisableCleanSession()
	}

	var mux *MmuxConnection
	var err error
	reply := codec.NewConnackMessage()

	if mux, err = self.GetConnectionByClientId(p.Identifier); err == nil {
		// [MQTT-3.2.2-2] If the Server accepts a connection with CleanSession set to 0,
		// the value set in Session Present depends on whether the Server already has stored Session state
		// for the supplied client ID. If the Server has stored Session state,
		// it MUST set Session Present to 1 in the CONNACK packet.
		reply.Reserved |= 0x01
	}

	// CONNACK MUST BE THE FIRST RESPONSE.
	// (Arguably all the clean-session handling should live in Attach.)
	conn.WriteMessageQueue(reply)

	if mux != nil {
		log.Info("Attach to mux[%s]", mux.GetId())

		conn.SetId(p.Identifier)
		conn.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetState(STATE_CONNECTED)
		mux.DisableCleanSession()

		if conn.ShouldCleanSession() {
			self.CleanSubscription(mux)
		}
		mux.Attach(conn)
	} else {
		mux = NewMmuxConnection()
		mux.SetId(p.Identifier)
		mux.Attach(conn)
		self.SetConnectionByClientId(mux.GetId(), mux)

		conn.SetId(p.Identifier)
		conn.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetKeepaliveInterval(int(p.KeepAlive))
		mux.SetState(STATE_CONNECTED)

		log.Debug("Starting new mux[%s]", mux.GetId())
	}

	if p.CleanSession {
		// honestly, dropping the pending retries here doesn't matter much
		delete(self.RetryMap, mux.GetId())
	} else {
		// Okay, attach to the existing session and replay in-flight messages.
		tbl := mux.GetOutGoingTable()
		for _, c := range tbl.Hash {
			msgs := make([]codec.Message, 0)
			// check for QoS1/QoS2 messages and resend them to this client
			if v, ok := c.Message.(*codec.PublishMessage); ok {
				if v.QosLevel > 0 {
					//mux.WriteMessageQueue(c.Message)
					msgs = append(msgs, c.Message)
				}
			}
			tbl.Clean()

			// TODO: improve this
			for _, v := range msgs {
				mux.WriteMessageQueue(v)
			}
		}
	}

	conn.Connected = true
	log.Debug("handshake Successful: %s", p.Identifier)
	Metrics.System.Broker.Clients.Connected.Add(1)

	return mux
}
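For reference, these are the CONNECT fields Handshake inspects (Magic, Version, Identifier, CleanSession, KeepAlive, the will flag). A hedged sketch of constructing one for a test; the import path is assumed from the repository layout, and building the struct literally (rather than through a constructor) is also an assumption:

package main

import (
	"fmt"

	"github.com/chobie/momonga/codec" // import path assumed
)

func main() {
	p := &codec.ConnectMessage{}
	p.Magic = []byte("MQTT")
	p.Version = 4 // setting bit 0x80 would mark the connection as bridged
	p.Identifier = "example-client"
	p.CleanSession = false // ask the broker to resume stored session state
	p.KeepAlive = 30

	fmt.Printf("CONNECT: %+v\n", p)
}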
func (self *Momonga) SendPublishMessage(msg *codec.PublishMessage, client_id string, is_bridged bool) {
	// Don't pass a wrong message here. The caller should validate the message before using this API.
	if len(msg.TopicName) < 1 {
		return
	}

	// TODO: Have to persist retain message.
	if msg.Retain > 0 {
		if len(msg.Payload) == 0 {
			log.Debug("[DELETE RETAIN: %s]\n%s", msg.TopicName, hex.Dump([]byte(msg.TopicName)))

			err := self.DataStore.Del([]byte(msg.TopicName), []byte(msg.TopicName))
			if err != nil {
				log.Error("Error: %s\n", err)
			}
			// a zero-byte retained payload only clears the retained message;
			// delivering it would be wrong
			log.Debug("Deleted retain: %s", msg.TopicName)
			// TODO: do we need to return an ack here?
			Metrics.System.Broker.Messages.RetainedCount.Add(-1)
			return
		} else {
			buffer := bytes.NewBuffer(nil)
			codec.WriteMessageTo(msg, buffer)
			self.DataStore.Put([]byte(msg.TopicName), buffer.Bytes())
			Metrics.System.Broker.Messages.RetainedCount.Add(1)
		}
	}

	if Mflags["experimental.qos1"] {
		if msg.QosLevel == 1 {
			targets := self.TopicMatcher.Match(msg.TopicName)

			go func(msg *codec.PublishMessage, set []interface{}) {
				p := make(chan string, 1000)
				wg := sync.WaitGroup{}
				wg.Add(3) // bulk sender, retry worker, receiver

				mchan := make(chan string, 256)
				term := make(chan bool, 1)
				cnt := len(set)
				mng := make(map[string]*codec.PublishMessage)

				// retry worker; an implementation like this keeps it simple
				go func(term chan bool, mchan chan string, mng map[string]*codec.PublishMessage) {
					for {
						select {
						case m := <-mchan:
							if msg, ok := mng[m]; ok {
								conn, err := self.GetConnectionByClientId(m)
								if err != nil {
									// connection doesn't exist; wait for the next retry
									log.Debug("something wrong: %s %s", m, err)
									continue
								}
								log.Debug("sending a retry msg: %s", msg)
								conn.WriteMessageQueue(msg)
							}
						case <-term:
							log.Debug("retry finished.")
							wg.Done()
							return
						}
					}
				}(term, mchan, mng)

				// receiver: counts down acknowledged deliveries
				go func(p chan string, term chan bool, cnt int, mng map[string]*codec.PublishMessage, mchan chan string) {
					limit := time.After(time.Second * 60)
					for {
						select {
						case id := <-p:
							cnt--
							delete(mng, id)
							// this could be done in a callback, but passing the
							// WaitGroup and everything else around is a pain
							if cnt < 1 {
								log.Debug("all delivery finished.")
								term <- true
								wg.Done()
								return
							}
						case <-time.After(time.Second * 20):
							// sweep the unfinished deliveries and trigger retries
							for cid, m := range mng {
								m.Dupe = true
								mchan <- cid
							}
						case <-limit:
							log.Debug("gave up retry.")
							term <- true
							wg.Done()
							return
						}
					}
				}(p, term, cnt, mng, mchan)

				// sender: finishes on its own
				go func(msg *codec.PublishMessage, set []interface{}, p chan string, mng map[string]*codec.PublishMessage) {
					dp := make(map[string]bool)
					for i := range targets {
						var tbl *util.MessageTable
						var ok bool

						myset := targets[i].(*SubscribeSet)
						log.Debug("myset: %s", myset)

						// NOTE (from interoperability/client_test.py):
						//
						// overlapping subscriptions. When there is more than one matching subscription for the same client for a topic,
						// the server may send back one message with the highest QoS of any matching subscription, or one message for
						// each subscription with a matching QoS.
						//
						// Currently, we choose one message for each subscription with a matching QoS.
						// if _, ok := dp[myset.ClientId]; ok { continue }
						dp[myset.ClientId] = true

						x, _ := codec.CopyPublishMessage(msg)
						x.QosLevel = myset.QoS

						conn, err := self.GetConnectionByClientId(myset.ClientId)
						if err != nil {
							// this is the annoying part: a client that reconnected with
							// clean session set has to be treated as a different client
							continue
						}

						if x.QosLevel == 0 {
							// downgraded to QoS 0: finish the delivery immediately
							conn.WriteMessageQueue(x)
							p <- myset.ClientId
							continue
						}

						if tbl, ok = self.InflightTable[myset.ClientId]; !ok {
							self.InflightTable[myset.ClientId] = util.NewMessageTable()
							// a callback could be hooked in here, though QoS1 probably doesn't need one
							tbl = self.InflightTable[myset.ClientId]
						}

						id := tbl.NewId()
						x.PacketIdentifier = id
						x.Opaque = p
						tbl.Register2(id, x, 1, x)

						mng[myset.ClientId] = x
						conn.WriteMessageQueue(x)
					}

					log.Debug("all first delivery finished.")
					wg.Done()
				}(msg, targets, p, mng)

				wg.Wait()
				close(p)
				close(mchan)
				close(term)
				mng = nil
				log.Debug("okay, cleanup qos1 sending thread.")
			}(msg, targets)
			return
		}
	}

	// Keep a count for each MessageId received via PUBLISH; when the PUBACK comes
	// back, decrement the refcount and delete the message once it reaches zero.
	//log.Debug("TopicName: %s %s", m.TopicName, m.Payload)
	targets := self.TopicMatcher.Match(msg.TopicName)

	if msg.TopicName[0:1] == "#" {
		// TODO: [MQTT-4.7.2-1] The Server MUST NOT match Topic Filters starting with a wildcard character
		// (# or +) with Topic Names beginning with a $ character
	}

	// Building the list first and then fetching connections means some clients are
	// already gone by the time we send; it gets messy. All we need here is the
	// client id and the subscribed QoS; then we can just send. So what Qlobber
	// returns should ideally look like:
	//
	// {
	//     Connection: Connection or client id
	//     QoS:
	// }
	//
	// With proper error handling this is not a real problem, and the client id is
	// the better choice: holding extra references to a Connection hurts later.
	dp := make(map[string]bool)
	count := 0

	for i := range targets {
		var cn Connection
		var err error

		myset := targets[i].(*SubscribeSet)
		clientId := myset.ClientId
		//clientId := targets[i].(string)

		// NOTE (from interoperability/client_test.py):
		//
		// overlapping subscriptions. When there is more than one matching subscription for the same client for a topic,
		// the server may send back one message with the highest QoS of any matching subscription, or one message for
		// each subscription with a matching QoS.
		//
		// Currently, we choose one message for each subscription with a matching QoS.
		// if _, ok := dp[clientId]; ok { continue }
		dp[clientId] = true

		cn, err = self.GetConnectionByClientId(clientId)
		if err != nil {
			// If anything, this is the client's fault: it was there when we picked up
			// the list but has disconnected since, so skip it in the delivery list.
			log.Info("(can't fetch %s. already disconnected, or unsubscribed?)", clientId)
			continue
		}

		if cn.IsBridge() && clientId == client_id {
			// Don't send the message back to the same bridge
			continue
		}

		var x *codec.PublishMessage
		if msg.QosLevel == 0 {
			// we don't need to copy the message on QoS 0
			x = msg
		} else {
			x = codec.MustCopyPublishMessage(msg)
		}

		subscriberQos := myset.QoS
		// Downgrade QoS to the subscription's level
		if subscriberQos < x.QosLevel {
			x.QosLevel = subscriberQos
		}

		if x.QosLevel > 0 {
			// TODO: keep an InflightTable per client;
			// using the engine's OutGoingTable is only a stopgap.
			id := self.OutGoingTable.NewId()
			x.PacketIdentifier = id
			if sender, ok := x.Opaque.(Connection); ok {
				// TODO: (unclear what this was originally meant to do here)
				self.OutGoingTable.Register2(x.PacketIdentifier, x, len(targets), sender)
			}
		}

		cn.WriteMessageQueue(x)
		count++
	}

	Metrics.System.Broker.Messages.Sent.Add(int64(count))
}
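The downgrade step above follows the MQTT rule that the effective delivery QoS is the minimum of the publish QoS and the subscription QoS. A self-contained illustration; effectiveQoS is a hypothetical helper, not part of this package:

package main

import "fmt"

// effectiveQoS returns min(publishQoS, subscribeQoS), the QoS actually
// used for a delivery.
func effectiveQoS(publishQoS, subscribeQoS int) int {
	if subscribeQoS < publishQoS {
		return subscribeQoS
	}
	return publishQoS
}

func main() {
	fmt.Println(effectiveQoS(2, 1)) // 1: subscriber asked for at most QoS 1
	fmt.Println(effectiveQoS(0, 2)) // 0: QoS 0 publishes stay QoS 0
}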
func NewApplication(configPath string) *Application {
	conf, err := configuration.LoadConfiguration(configPath)
	if err != nil {
		log.Error("Can't read config.toml. using default settings: %s", err)
	}
	log.SetupLogging(conf.Server.LogLevel, conf.Server.LogFile)

	pid := strconv.Itoa(os.Getpid())
	if conf.Server.PidFile != "" {
		if err := ioutil.WriteFile(conf.Server.PidFile, []byte(pid), 0644); err != nil {
			panic(err)
		}
		util.WritePid(conf.Server.PidFile)
	}

	// NOTE: INHERIT=TRUE means the process was invoked from os.StartProcess
	inherit := false
	if os.Getenv("INHERIT") == "TRUE" {
		inherit = true
	}

	log.Info("Momonga started pid: %s (inherit:%t)", pid, inherit)
	engine := NewMomonga(conf)
	app := &Application{
		Engine:     engine,
		Servers:    []Server{},
		configPath: configPath,
		config:     conf,
	}

	// TODO: improve this block
	if conf.Server.Port > 0 {
		t := NewTcpServer(engine, conf, inherit)
		t.wg = &app.wg
		app.RegisterServer(t)
	}
	if conf.Server.Socket != "" {
		u := NewUnixServer(engine, conf, inherit)
		u.wg = &app.wg
		app.RegisterServer(u)
	}
	if conf.Server.HttpPort > 0 {
		h := NewHttpServer(engine, conf, inherit)
		h.wg = &app.wg
		app.RegisterServer(h)
	}
	if conf.Server.EnableTls && conf.Server.TlsPort > 0 {
		h := NewTlsServer(engine, conf, inherit)
		h.wg = &app.wg
		app.RegisterServer(h)
	}

	// memoize the application path
	app.execPath, err = exec.LookPath(os.Args[0])
	if err != nil {
		log.Error("Error: %s", err)
		return app
	}
	app.workingDir, err = os.Getwd()
	if err != nil {
		log.Error("Error: %s", err)
		return app
	}

	if conf.Bridge.Address != "" {
		// TODO: multiple bridges should be configurable
		// TODO: bridges should be controllable at runtime:
		// /api/bridge/list
		// /api/bridge/connection/stop
		// /api/bridge/connection/status
		// /api/bridge/connection/start
		// /api/bridge/connection/delete
		// /api/bridge/connection/new?address=&port=&type=both&topic[]=
		// /api/bridge/config
		go func() {
			flag := 0
			switch conf.Bridge.Type {
			case "both":
				flag = 3
			case "out":
				flag = 1
			case "in":
				flag = 2
			default:
				panic(fmt.Sprintf("%s is not supported.", conf.Bridge.Type))
			}

			// in
			addr := fmt.Sprintf("%s:%d", conf.Bridge.Address, conf.Bridge.Port)
			c := client.NewClient(client.Option{
				TransporterCallback: func() (net.Conn, error) {
					conn, err := net.Dial("tcp", addr)
					return conn, err
				},
				Identifier: conf.Bridge.ClientId,
				Magic:      []byte("MQTT"),
				Version:    4 | 0x80,
				Keepalive:  0,
			})

			c.Connect()
			c.WaitConnection()

			if flag == 1 || flag == 3 {
				c.Subscribe("#", 2)
				c.SetRequestPerSecondLimit(-1)
				c.On("publish", func(msg *codec.PublishMessage) {
					engine.SendPublishMessage(msg, conf.Bridge.Connection, true)
				})
			}

			// out
			if flag == 2 || flag == 3 {
				addr2 := fmt.Sprintf("%s:%d", conf.Server.BindAddress, conf.Server.Port)
				c2 := client.NewClient(client.Option{
					TransporterCallback: func() (net.Conn, error) {
						conn, err := net.Dial("tcp", addr2)
						return conn, err
					},
					Identifier: conf.Bridge.ClientId,
					Magic:      []byte("MQTT"),
					Version:    4 | 0x80,
					Keepalive:  0,
				})

				c2.Connect()
				c2.WaitConnection()
				c2.Subscribe("#", 2)
				c2.SetRequestPerSecondLimit(-1)
				c2.On("publish", func(msg *codec.PublishMessage) {
					c.Publish(msg.TopicName, msg.Payload, msg.QosLevel)
				})
			}

			select {}
		}()
	}

	return app
}
func (self *Application) Start() {
	self.wg.Add(1)

	ch := make(chan os.Signal, 8)
	// TODO: windows can't use these signals. split this block into another file.
	signals := []os.Signal{syscall.SIGINT, syscall.SIGHUP, syscall.SIGUSR2, syscall.SIGQUIT}
	signal.Notify(ch, signals...)

	go func(ch chan os.Signal) {
		for {
			select {
			case x := <-ch:
				switch x {
				case syscall.SIGINT:
					self.Stop()
				case syscall.SIGQUIT:
					// TODO: sigdump-like feature. should write to another file descriptor.
					pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
					pprof.Lookup("heap").WriteTo(os.Stdout, 1)
					pprof.Lookup("block").WriteTo(os.Stdout, 1)
				case syscall.SIGHUP:
					// reload config
					log.Info("reload configuration from %s", self.configPath)
					configuration.LoadConfigurationTo(self.configPath, self.config)
				case syscall.SIGUSR2:
					// graceful restart
					self.mu.Lock()

					var env []string
					for _, v := range os.Environ() {
						env = append(env, v)
					}

					descriptors := []*os.File{
						os.Stdin,
						os.Stdout,
						os.Stderr,
					}
					for i := 0; i < len(self.Servers); i++ {
						svr := self.Servers[i]
						f, e := svr.Listener().File()
						if e != nil {
							log.Error("Error: %s", e)
							self.mu.Unlock()
							continue
						}
						fd, _ := syscall.Dup(int(f.Fd()))
						fx := os.NewFile(uintptr(fd), "sock")
						descriptors = append(descriptors, fx)
					}

					env = append(env, "INHERIT=TRUE")
					p, err := os.StartProcess(self.execPath, os.Args, &os.ProcAttr{
						Dir:   self.workingDir,
						Env:   env,
						Files: descriptors,
					})
					if err != nil {
						log.Error("Error: %s, stopping graceful restart.", err)
						if p != nil {
							p.Kill()
						}
						self.mu.Unlock()
						continue
					}

					// give the new server a few seconds to come up
					time.Sleep(time.Second * 3)

					for i := 0; i < len(self.Servers); i++ {
						svr := self.Servers[i]
						svr.Graceful()
					}
					self.wg.Done()

					// Kill current connections in N seconds.
					self.Engine.Doom()
					self.mu.Unlock()
					return
				}
			}
		}
	}(ch)

	go self.Engine.Run()

	for i := 0; i < len(self.Servers); i++ {
		svr := self.Servers[i]
		self.wg.Add(1)
		go svr.ListenAndServe()
	}
}
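Triggering the graceful restart from outside comes down to sending SIGUSR2 to the pid that NewApplication records. A minimal sketch; the pid file path is illustrative (the real one comes from conf.Server.PidFile):

package main

import (
	"io/ioutil"
	"log"
	"strconv"
	"strings"
	"syscall"
)

func main() {
	b, err := ioutil.ReadFile("/var/run/momonga.pid") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(b)))
	if err != nil {
		log.Fatal(err)
	}
	// SIGUSR2 starts the handover: the old process forks the new one,
	// passes its listeners, then dooms remaining connections.
	if err := syscall.Kill(pid, syscall.SIGUSR2); err != nil {
		log.Fatal(err)
	}
}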