//append data
func (self *SegmentLog) Appends(logs []*oplog) error {

	//if closed
	if self.isOpen == 0 {
		return errors.New(fmt.Sprintf("SegmentLog Is Closed!|%s", self.path))
	}

	length := int64(0)
	for _, lo := range logs {
		tmp := lo.marshal()
		for {
			l, err := self.bw.Write(tmp)
			length += int64(l)
			if nil != err && err != io.ErrShortWrite {
				log.Error("SegmentLog|Append|FAIL|%s|%d/%d", err, l, len(tmp))
				return err
			} else if nil == err {
				break
			} else {
				//short write: reset the buffered writer and retry with the remainder
				self.bw.Reset(self.wf)
				log.Error("SegmentLog|Append|FAIL|%s", err)
			}
			tmp = tmp[l:]
		}
	}

	//flush
	self.bw.Flush()

	//move offset
	atomic.AddInt64(&self.offset, length)
	return nil
}
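// The retry loop above handles io.ErrShortWrite by resetting the bufio.Writer
// onto the underlying file and rewriting the unconsumed tail. A minimal,
// self-contained sketch of that pattern (writeAll and its parameters are
// illustrative, not part of this codebase):
func writeAll(bw *bufio.Writer, underlying io.Writer, p []byte) error {
	for len(p) > 0 {
		n, err := bw.Write(p)
		if err == nil {
			return nil //fully buffered
		}
		if err != io.ErrShortWrite {
			return err //unrecoverable
		}
		//short write: drop the buffered state and retry with the remainder
		bw.Reset(underlying)
		p = p[n:]
	}
	return nil
}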
//fetch the subscription bindings and attach a watcher
func (self *ZKManager) GetBindAndWatch(topic string) (map[string][]*Binding, error) {

	path := KITEQ_SUB + "/" + topic
	exist, _, _, err := self.session.ExistsW(path)
	if !exist {
		//no subscription exists under this topic yet; return an empty binding map
		return make(map[string][]*Binding, 0), err
	}

	//fetch all subscriber groups under the topic
	groupIds, _, _, err := self.session.ChildrenW(path)
	if nil != err {
		log.Error("ZKManager|GetBindAndWatch|GroupID|FAIL|%s|%s\n", err, path)
		return nil, err
	}

	hps := make(map[string][]*Binding, len(groupIds))
	//fetch the bindings of every groupId under this topic
	for _, groupId := range groupIds {
		tmppath := path + "/" + groupId
		binds, err := self.getBindData(tmppath)
		if nil != err {
			log.Error("GetBindAndWatch|getBindData|FAIL|%s|%s\n", tmppath, err)
			continue
		}

		//strip the trailing "-bind" from the group name
		gid := strings.TrimSuffix(groupId, "-bind")
		hps[gid] = binds
	}

	return hps, nil
}
func (self *KiteMysqlStore) Query(messageId string) *MessageEntity {

	var entity *MessageEntity
	s := self.sqlwrapper.hashQuerySQL(messageId)
	rows, err := self.dbshard.FindSlave(messageId).Query(s, messageId)
	if nil != err {
		log.Error("KiteMysqlStore|Query|FAIL|%s|%s\n", err, messageId)
		return nil
	}
	defer rows.Close()

	if rows.Next() {
		entity = &MessageEntity{}
		fc := self.convertor.convertFields(entity, filternothing)
		err := rows.Scan(fc...)
		if nil != err {
			log.Error("KiteMysqlStore|Query|SCAN|FAIL|%s|%s\n", err, messageId)
			return nil
		}
		self.convertor.Convert2Entity(fc, entity, filternothing)
		switch entity.MsgType {
		case protocol.CMD_BYTES_MESSAGE:
			//do nothing
		case protocol.CMD_STRING_MESSAGE:
			entity.Body = string(entity.GetBody().([]byte))
		}
	}

	return entity
}
//query entities without the body column
func (self *KiteMysqlStore) PageQueryEntity(hashKey string, kiteServer string, nextDeliveryTime int64, startIdx, limit int) (bool, []*MessageEntity) {

	s := self.sqlwrapper.hashPQSQL(hashKey)
	//fetch limit+1 rows so we can tell whether another page exists
	rows, err := self.dbshard.FindSlave(hashKey).
		Query(s, kiteServer, time.Now().Unix(), nextDeliveryTime, startIdx, limit+1)
	if err != nil {
		log.Error("KiteMysqlStore|Query|FAIL|%s|%s\n", err, hashKey)
		return false, nil
	}
	defer rows.Close()

	results := make([]*MessageEntity, 0, limit)
	for rows.Next() {
		entity := &MessageEntity{}
		fc := self.convertor.convertFields(entity, filterbody)
		err := rows.Scan(fc...)
		if err != nil {
			log.Error("KiteMysqlStore|PageQueryEntity|FAIL|%s|%s|%d|%d\n", err, kiteServer, nextDeliveryTime, startIdx)
		} else {
			self.convertor.Convert2Entity(fc, entity, filterbody)
			results = append(results, entity)
		}
	}

	if len(results) > limit {
		return true, results[:limit]
	} else {
		return false, results
	}
}
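// The limit+1 over-fetch above is a common way to compute a hasMore flag
// without a second COUNT query. A minimal sketch of the idea (fetchPage is
// hypothetical, shown over a plain slice):
func fetchPage(items []int, start, limit int) (bool, []int) {
	end := start + limit + 1 //over-fetch by one row
	if end > len(items) {
		end = len(items)
	}
	page := items[start:end]
	if len(page) > limit {
		return true, page[:limit] //the extra row proves another page exists
	}
	return false, page
}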
//publish subscription bindings
func (self *ZKManager) PublishBindings(groupId string, bindings []*Binding) error {

	//group the bindings by topic
	groupBind := make(map[string][]*Binding, 10)
	for _, b := range bindings {
		g, ok := groupBind[b.Topic]
		if !ok {
			g = make([]*Binding, 0, 2)
		}
		b.GroupId = groupId
		g = append(g, b)
		groupBind[b.Topic] = g
	}

	for topic, binds := range groupBind {
		data, err := MarshalBinds(binds)
		if nil != err {
			log.Error("ZKManager|PublishBindings|MarshalBind|FAIL|%s|%s|%v\n", err, groupId, binds)
			return err
		}

		createType := zk.CreatePersistent

		path := KITEQ_SUB + "/" + topic
		//register the groupId and its subscription data under the topic
		succpath, err := self.registePath(path, groupId+"-bind", createType, data)
		if nil != err {
			log.Error("ZKManager|PublishTopic|Bind|FAIL|%s|%s/%s\n", err, path, binds)
			return err
		} else {
			log.Info("ZKManager|PublishTopic|Bind|SUCC|%s|%s\n", succpath, binds)
		}
	}
	return nil
}
//internal helper that creates a node
func (self *ZKManager) innerCreatePath(tmppath string, data []byte, createType zk.CreateType) error {
	exist, _, _, err := self.session.ExistsW(tmppath)
	if nil == err && !exist {
		_, err := self.session.Create(tmppath, data, createType, zk.WorldACL(zk.PermAll))
		if nil != err {
			log.Error("ZKManager|innerCreatePath|FAIL|%s|%s\n", err, tmppath)
			return err
		}

		//verify the node shows up, backing off between checks
		for i := 0; i < 5; i++ {
			exist, _, _ = self.session.Exists(tmppath)
			if !exist {
				time.Sleep(time.Duration(i*100) * time.Millisecond)
			} else {
				break
			}
		}
		return err
	} else if nil != err {
		log.Error("ZKManager|innerCreatePath|FAIL|%s\n", err)
		return err
	} else if nil != data {
		//the node already exists: push the new data to it
		_, err := self.session.Set(tmppath, data, -1)
		if nil != err {
			log.Error("ZKManager|innerCreatePath|PUSH DATA|FAIL|%s|%s|%s\n", err, tmppath, string(data))
			return err
		}
	}

	return nil
}
func (self *Segment) Close() error {
	self.Lock()
	defer self.Unlock()

	if atomic.CompareAndSwapInt32(&self.isOpen, 1, 0) {
		//close segment log
		self.slog.Close()

		//close segment
		err := self.bw.Flush()
		if nil != err {
			log.Error("Segment|Close|Writer|FLUSH|FAIL|%s|%s|%s\n", err, self.path, self.name)
		}

		//free chunk memory
		self.chunks = nil

		err = self.wf.Close()
		if nil != err {
			log.Error("Segment|Close|Write FD|FAIL|%s|%s|%s\n", err, self.path, self.name)
			return err
		} else {
			err = self.rf.Close()
			if nil != err {
				log.Error("Segment|Close|Read FD|FAIL|%s|%s|%s\n", err, self.path, self.name)
			}
			return err
		}
	}

	return nil
}
func (self convertor) Convert2Params(entity *store.MessageEntity) []interface{} {

	val := reflect.ValueOf(*entity)
	fvs := make([]interface{}, 0, len(self.columns))
	for _, v := range self.columns {

		var fv interface{}
		if v.columnName == "body" {
			if entity.MsgType == protocol.CMD_STRING_MESSAGE {
				fv = []byte(entity.GetBody().(string))
			} else if entity.MsgType == protocol.CMD_BYTES_MESSAGE {
				fv = entity.GetBody().([]byte)
			} else {
				log.Error("convertor|Convert2Params|UnSupport MESSAGE TYPE|%s\n", entity.MsgType)
			}
		} else {
			f := val.FieldByName(v.fieldName)
			// log.Debug("convertor|Convert2Params|%s|%s\n", v.fieldName, f)
			switch f.Kind() {
			case reflect.Ptr:
				header, ok := f.Interface().(*protocol.Header)
				if ok {
					//serialize the header with protobuf
					data, err := protocol.MarshalPbMessage(header)
					if err != nil {
						log.Error("convertor|Convert2Params|Marshal|HEAD|FAIL|%s|%s\n", err, f.Addr().Interface())
						return nil
					}
					fv = data
				} else {
					log.Error("convertor|Convert2Params|Not protocol.Header PTR|FAIL|%s\n", f.Addr())
					return nil
				}
			case reflect.Slice, reflect.Array:
				//string slices are persisted as JSON
				if f.Type().Elem().Kind() == reflect.String {
					data, err := json.Marshal(f.Interface())
					if nil != err {
						log.Error("convertor|Convert2Params|Marshal|Slice|FAIL||%s\n", err)
						return nil
					}
					fv = string(data)
				} else {
					fv = f.Interface()
				}
			default:
				fv = f.Interface()
			}
		}
		fvs = append(fvs, &fv)
	}
	return fvs
}
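// Convert2Params resolves struct fields by name through reflection. A
// minimal, self-contained sketch of that technique (Record and fieldAsParam
// are illustrative, not part of this codebase):
type Record struct {
	Id   int64
	Name string
}

func fieldAsParam(rec Record, fieldName string) interface{} {
	v := reflect.ValueOf(rec).FieldByName(fieldName)
	if !v.IsValid() {
		return nil //no field with that name
	}
	return v.Interface()
}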
func (self *KiteMysqlStore) Start() {

	count := SHARD_SEED
	//create the hashed channels
	batchDelChan := make([]chan string, 0, count)
	batchUpChan := make([]chan *MessageEntity, 0, count)
	batchComChan := make([]chan string, 0, count)
	for i := 0; i < count; i++ {
		batchUpChan = append(batchUpChan, make(chan *MessageEntity, self.batchUpSize*2))
		batchDelChan = append(batchDelChan, make(chan string, self.batchDelSize*2))
		batchComChan = append(batchComChan, make(chan string, self.batchUpSize*2))
	}

	//batching channels
	self.batchUpChan = batchUpChan
	self.batchDelChan = batchDelChan
	self.batchComChan = batchComChan

	//create the prepared statements for every batch type
	stmts := make(map[batchType][][]*StmtPool, 4)
	for k, v := range self.sqlwrapper.batchSQL {
		btype := k
		pool := make([][]*StmtPool, 0, self.dbshard.ShardNum())
		//build a stmt pool for every shard
		for i := 0; i < self.dbshard.ShardNum(); i++ {
			innerPool := make([]*StmtPool, 0, self.dbshard.HashNum())
			for j, s := range v {
				psql := s
				db := self.dbshard.FindShardById(i*self.dbshard.HashNum() + j).master
				err, p := NewStmtPool(5, 10, 20, 1*time.Minute, func() (error, *sql.Stmt) {
					stmt, err := db.Prepare(psql)
					if nil != err {
						log.Error("StmtPool|Create Stmt|FAIL|%s|%s\n", err, psql)
						return err, nil
					}
					return nil, stmt
				})
				if nil != err {
					log.Error("NewKiteMysql|NewStmtPool|FAIL|%s\n", err)
					panic(err)
				}
				innerPool = append(innerPool, p)
			}
			pool = append(pool, innerPool)
		}
		stmts[btype] = pool
	}
	self.stmtPools = stmts

	for i := 0; i < count; i++ {
		self.startBatch(i, self.batchUpChan[i], self.batchDelChan[i], self.batchComChan[i])
	}

	log.Info("KiteMysqlStore|Start...")
}
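// The pools above cache *sql.Stmt handles per shard. Note that database/sql
// prepared statements are already safe for concurrent use and transparently
// re-prepare on new connections. A minimal sketch of preparing one reusable
// statement per shard (prepareShards is illustrative):
func prepareShards(shards []*sql.DB, query string) ([]*sql.Stmt, error) {
	stmts := make([]*sql.Stmt, 0, len(shards))
	for _, db := range shards {
		stmt, err := db.Prepare(query) //one shared handle per shard
		if err != nil {
			return nil, err
		}
		stmts = append(stmts, stmt)
	}
	return stmts, nil
}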
func (self *KiteMysqlStore) batchUpdate(hashId int, entity []*MessageEntity) bool {

	if len(entity) <= 0 {
		return true
	}

	p := self.stmtPool(UPDATE, entity[0].MessageId)
	err, stmt := p.Get()
	if nil != err {
		log.Error("KiteMysqlStore|batchUpdate|GET STMT|FAIL|%s|%d\n", err, hashId)
		return false
	}
	defer p.Release(stmt)

	args := make([]interface{}, 0, 5)
	var errs error
	for _, e := range entity {
		args = args[:0]

		sg, err := json.Marshal(e.SuccGroups)
		if nil != err {
			log.Error("KiteMysqlStore|batchUpdate|SUCC GROUP|MARSHAL|FAIL|%s|%s|%s\n", err, e.MessageId, e.SuccGroups)
			errs = err
			continue
		}
		args = append(args, sg)

		fg, err := json.Marshal(e.FailGroups)
		if nil != err {
			log.Error("KiteMysqlStore|batchUpdate|FAIL GROUP|MARSHAL|FAIL|%s|%s|%s\n", err, e.MessageId, e.FailGroups)
			errs = err
			continue
		}
		args = append(args, fg)

		//set the next delivery time
		args = append(args, e.NextDeliverTime)
		args = append(args, e.DeliverCount)
		args = append(args, e.MessageId)

		_, err = stmt.Exec(args...)
		if nil != err {
			log.Error("KiteMysqlStore|batchUpdate|FAIL|%s|%s\n", err, e)
			errs = err
		}
	}

	return nil == errs
}
func (self *KiteFileStore) Query(messageId string) *MessageEntity {

	lock, _, el := self.hash(messageId)
	lock.RLock()
	defer lock.RUnlock()
	e, ok := el[messageId]
	if !ok {
		return nil
	}
	v := e.Value.(*opBody)

	//wait save done
	self.waitSaveDone(v)

	data, err := self.snapshot.Query(v.Id)
	if nil != err {
		// log.Error("KiteFileStore|Query|Entity|FAIL|%s|%d", err, v.Id)
		return nil
	}

	var msg interface{}
	msgType := data[0]
	switch msgType {
	case protocol.CMD_BYTES_MESSAGE:
		var bms protocol.BytesMessage
		err = protocol.UnmarshalPbMessage(data[1:], &bms)
		msg = &bms
	case protocol.CMD_STRING_MESSAGE:
		var sms protocol.StringMessage
		err = protocol.UnmarshalPbMessage(data[1:], &sms)
		msg = &sms
	default:
		log.Error("KiteFileStore|Query|INVALID|MSGTYPE|%d", msgType)
		return nil
	}

	if nil != err {
		log.Error("KiteFileStore|Query|UnmarshalPbMessage|Entity|FAIL|%s", err)
		return nil
	} else {
		entity := NewMessageEntity(protocol.NewQMessage(msg))
		//merge data
		entity.Commit = v.Commit
		entity.FailGroups = v.FailGroups
		entity.SuccGroups = v.SuccGroups
		entity.NextDeliverTime = v.NextDeliverTime
		entity.DeliverCount = v.DeliverCount
		return entity
	}
}
func (self *Segment) Open(do func(ol *oplog)) error {
	self.Lock()
	defer self.Unlock()

	if atomic.CompareAndSwapInt32(&self.isOpen, 0, 1) {

		// log.Info("Segment|Open|BEGIN|%s|%s", self.path, self.name)

		var rf *os.File
		var wf *os.File

		//create the segment file if it does not exist yet
		_, err := os.Stat(self.path)
		if os.IsNotExist(err) {
			_, err := os.Create(self.path)
			if nil != err {
				log.Error("Segment|Create|FAIL|%s|%s", err, self.path)
				return err
			}
		}

		//open a write fd in append mode and a separate read fd
		wf, err = os.OpenFile(self.path, os.O_RDWR|os.O_APPEND, os.ModePerm)
		if nil != err {
			log.Error("Segment|Open|FAIL|%s|%s", err, self.name)
			return err
		}

		rf, err = os.OpenFile(self.path, os.O_RDWR, os.ModePerm)
		if nil != err {
			log.Error("Segment|Open|FAIL|%s|%s", err, self.name)
			return err
		}

		self.rf = rf
		self.wf = wf
		//buffer
		self.br = bufio.NewReader(rf)
		//load
		self.loadCheck()
		// //seek
		// self.wf.Seek(self.offset, 0)
		self.bw = bufio.NewWriter(wf)

		//open the segment's op log
		self.slog.Open()

		//recover the segment
		self.recover(do)
		total, n, d, e := self.stat()
		log.Info("Segment|Open|SUCC|%s|total:%d,n:%d,d:%d,e:%d", self.name, total, n, d, e)
		return nil
	}

	return nil
}
//traverse the oplog
func (self *SegmentLog) Replay(do func(l *oplog)) {

	self.Open()
	offset := int64(0)
	tmp := make([]byte, 1024)
	//seek to head
	self.rf.Seek(0, 0)
	self.br.Reset(self.rf)

	for {
		var length int32
		err := binary.Read(self.br, binary.BigEndian, &length)
		if nil != err {
			if err == io.EOF {
				self.br.Reset(self.rf)
				break
			}
			log.Warn("SegmentLog|Replay|LEN|%s|Skip...", err)
			continue
		}

		// log.Debug("SegmentLog|Replay|LEN|%d", length)

		//grow the scratch buffer if the record does not fit
		if int(length) > cap(tmp) {
			grow := make([]byte, int(length)-cap(tmp))
			tmp = append(tmp, grow...)
		}

		//the stored length includes its own 4-byte prefix
		err = binary.Read(self.br, binary.BigEndian, tmp[:int(length)-4])
		if nil != err {
			self.br.Reset(self.rf)
			log.Error("SegmentLog|Replay|Data|%s", err)
			break
		}

		var ol oplog
		r := bytes.NewReader(tmp[:int(length)-4])
		deco := gob.NewDecoder(r)
		err = deco.Decode(&ol)
		if nil != err {
			log.Error("SegmentLog|Replay|unmarshal|oplog|FAIL|%s", err)
			continue
		}
		// log.Debug("SegmentLog|Replay|oplog|%s", ol)
		do(&ol)

		//advance the offset
		offset += int64(length)
	}

	self.offset = offset
}
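// Replay assumes each record is a big-endian int32 total length (counting the
// 4-byte prefix itself) followed by a gob-encoded oplog. A minimal,
// self-contained round-trip sketch of that framing (writeRecord/readRecord
// are illustrative, not part of this codebase):
func writeRecord(w io.Writer, v interface{}) error {
	var body bytes.Buffer
	if err := gob.NewEncoder(&body).Encode(v); err != nil {
		return err
	}
	//total length = 4-byte prefix + gob payload
	if err := binary.Write(w, binary.BigEndian, int32(4+body.Len())); err != nil {
		return err
	}
	_, err := w.Write(body.Bytes())
	return err
}

func readRecord(r io.Reader, v interface{}) error {
	var length int32
	if err := binary.Read(r, binary.BigEndian, &length); err != nil {
		return err
	}
	payload := make([]byte, int(length)-4)
	if _, err := io.ReadFull(r, payload); err != nil {
		return err
	}
	return gob.NewDecoder(bytes.NewReader(payload)).Decode(v)
}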
//the groups under a subscribed topic changed
func (self *BindExchanger) NodeChange(path string, eventType ZkEvent, childNode []string) {

	//only handle subscription changes
	if strings.HasPrefix(path, KITEQ_SUB) {

		//extract the topic
		split := strings.Split(path, "/")
		if len(split) < 4 {
			if eventType == Created {
				//invalid subscription path
				log.Error("BindExchanger|NodeChange|INVALID SUB PATH|%s|%v\n", path, childNode)
			}
			return
		}
		topic := split[3]

		self.lock.Lock()
		defer self.lock.Unlock()
		//if no subscriber group nodes remain under the topic, drop the topic
		if len(childNode) <= 0 {
			self.onBindChanged(topic, "", nil)
			log.Error("BindExchanger|NodeChange|NO CHILD NODE|%s|%s\n", path, childNode)
			return
		}

		//rebuild the group bindings for the current topic
		switch eventType {
		case Created, Child:
			bm, err := self.zkmanager.GetBindAndWatch(topic)
			if nil != err {
				log.Error("BindExchanger|NodeChange|GetBindAndWatch|FAIL|%s|%s\n", path, childNode)
			}

			if len(bm) > 0 {
				for groupId, bs := range bm {
					self.onBindChanged(topic, groupId, bs)
				}
			} else {
				//no binding groups left under the topic: clean them up
				self.onBindChanged(topic, "", nil)
			}
		}
	} else {
		// log.Warn("BindExchanger|NodeChange|NOT A SUB NODE|%s|%s\n", path, childNode)
	}
}
//establish the physical connection
func dial(hostport string) (*net.TCPConn, error) {
	//connect
	remoteAddr, err_r := net.ResolveTCPAddr("tcp4", hostport)
	if nil != err_r {
		log.Error("KiteClientManager|RECONNECT|RESOLVE ADDR|FAIL|remote:%s\n", err_r)
		return nil, err_r
	}
	conn, err := net.DialTCP("tcp4", nil, remoteAddr)
	if nil != err {
		log.Error("KiteClientManager|RECONNECT|%s|FAIL|%s\n", hostport, err)
		return nil, err
	}

	return conn, nil
}
func (self *MessageStore) recoverSnapshot() {
	//current segmentid
	if len(self.segments) > 0 {

		//replay the logs
		for i, s := range self.segments {
			err := s.Open(self.replay)
			if nil != err {
				log.Error("MessageStore|recoverSnapshot|Fail|%s|%s", err, s.slog.path)
				panic(err)
			}

			//last segment: restore the snapshot position
			if i == len(self.segments)-1 {
				//set snapshot status
				if len(s.chunks) > 0 {
					self.chunkId = s.chunks[len(s.chunks)-1].id
				}
			}
			log.Debug("MessageStore|recoverSnapshot|%s", s.name)
		}
	}
}
func (self *KiteQServer) Start() {

	self.remotingServer = server.NewRemotionServer(self.kc.server, self.kc.rc,
		func(rclient *client.RemotingClient, p *packet.Packet) {
			event := pipe.NewPacketEvent(rclient, p)
			err := self.pipeline.FireWork(event)
			if nil != err {
				log.Error("RemotingServer|onPacketRecieve|FAIL|%s|%v\n", err, p)
			} else {
				// log.Debug("RemotingServer|onPacketRecieve|SUCC|%s|%v\n", rclient.RemoteAddr(), packet)
			}
		})

	err := self.remotingServer.ListenAndServer()
	if nil != err {
		log.Crashf("KiteQServer|RemotionServer|START|FAIL|%s|%s\n", err, self.kc.server)
	} else {
		log.Info("KiteQServer|RemotionServer|START|SUCC|%s\n", self.kc.server)
	}

	//push the publishable topics and fetch their subscription bindings
	succ := self.exchanger.PushQServer(self.kc.server, self.kc.topics)
	if !succ {
		log.Crashf("KiteQServer|PushQServer|FAIL|%s\n", self.kc.topics)
	} else {
		log.Info("KiteQServer|PushQServer|SUCC|%s\n", self.kc.topics)
	}

	//start flow statistics
	self.kc.flowstat.Start()

	//start the recover manager
	self.recoverManager.Start()
}
func (self *Message) Encode() (error, []byte) {

	framebuff := new(bytes.Buffer)
	//write the item bodies
	for _, v := range self.items {
		//for TLV-style items, write the length before variable-length data;
		//fixed-width integers carry no length prefix
		datat := reflect.TypeOf(v.data).Kind()
		if datat != reflect.Uint8 && datat != reflect.Uint16 &&
			datat != reflect.Uint32 && datat != reflect.Uint64 {
			binary.Write(framebuff, binary.BigEndian, v.length)
		}
		err := binary.Write(framebuff, binary.BigEndian, v.data)
		if nil != err {
			log.Error("MESSAGE|ENCODE|FAIL|%s|%s", err.Error(), v)
			return err, nil
		}
	}

	buff := make([]byte, 0, 1+framebuff.Len())
	bytebuff := bytes.NewBuffer(buff)
	//the frame's command type
	binary.Write(bytebuff, binary.BigEndian, uint8(self.op))
	//frame body
	binary.Write(bytebuff, binary.BigEndian, framebuff.Bytes())

	return nil, bytebuff.Bytes()
}
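// A minimal, self-contained sketch of length-prefixed (TLV-style) framing
// like the encoding above: a 1-byte type, a big-endian uint16 length, then
// the payload (writeTLV is illustrative, not part of this codebase):
func writeTLV(w io.Writer, itemType uint8, payload []byte) error {
	if err := binary.Write(w, binary.BigEndian, itemType); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, uint16(len(payload))); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}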
//append data
func (self *SegmentLog) Append(ol *oplog) error {

	//if closed
	if self.isOpen == 0 {
		return errors.New(fmt.Sprintf("SegmentLog Is Closed!|%s", self.path))
	}

	buff := ol.marshal()
	tmp := buff
	for {
		l, err := self.bw.Write(tmp)
		if nil != err && err != io.ErrShortWrite {
			log.Error("SegmentLog|Append|FAIL|%s|%d/%d", err, l, len(tmp))
			return err
		} else if nil == err {
			break
		} else {
			//short write: reset the buffered writer and retry with the remainder
			self.bw.Reset(self.wf)
		}
		tmp = tmp[l:]
	}
	self.bw.Flush()

	//move offset
	atomic.AddInt64(&self.offset, int64(len(buff)))
	return nil
}
//publish the servers serving each topic
func (self *ZKManager) PublishQServer(hostport string, topics []string) error {

	for _, topic := range topics {
		qpath := KITEQ_SERVER + "/" + topic
		spath := KITEQ_SUB + "/" + topic
		ppath := KITEQ_PUB + "/" + topic

		//create the root nodes for publishing and subscribing
		self.traverseCreatePath(ppath, nil, zk.CreatePersistent)
		// self.session.ExistsW(ppath)
		self.traverseCreatePath(spath, nil, zk.CreatePersistent)
		self.session.ExistsW(spath)

		//delete the ephemeral node before re-registering, so that watchers
		//do not miss the change event
		self.session.Delete(qpath+"/"+hostport, -1)
		//register the current node
		path, err := self.registePath(qpath, hostport, zk.CreateEphemeral, nil)
		if nil != err {
			log.Error("ZKManager|PublishQServer|FAIL|%s|%s/%s\n", err, qpath, hostport)
			return err
		}
		log.Info("ZKManager|PublishQServer|SUCC|%s\n", path)
	}

	//register the current kiteq server
	self.session.Delete(KITEQ_ALIVE_SERVERS+"/"+hostport, -1)
	self.registePath(KITEQ_ALIVE_SERVERS, hostport, zk.CreateEphemeral, nil)
	self.registePath(KITEQ_ALL_SERVERS, hostport, zk.CreatePersistent, nil)
	return nil
}
func main() {
	runtime.GOMAXPROCS(8)
	startMode := flag.Int("startMode", 1, "0 for mock, 1 for production")
	bindAddr := flag.String("bindAddr", ":17070", "-bindAddr=:17070")
	certPath := flag.String("certPath", "./cert.pem", "-certPath=xxxxxx/cert.pem or -certPath=http://")
	keyPath := flag.String("keyPath", "./key.pem", "-keyPath=xxxxxx/key.pem or -keyPath=http://")
	runMode := flag.Int("runMode", 0, "-runMode=1(online), 0(sandbox)")
	storeCap := flag.Int("storeCap", 1000, "-storeCap=100000 //length of the resend chain")
	logxml := flag.String("log", "log.xml", "-log=log.xml //log config file")
	pprofPort := flag.String("pprof", ":9090", "pprof=:9090 //port")
	flag.Parse()

	//serve pprof on the bind address's IP with the pprof port
	go func() {
		if len(*pprofPort) > 0 {
			addr, _ := net.ResolveTCPAddr("tcp4", *bindAddr)
			log.Error(http.ListenAndServe(addr.IP.String()+*pprofPort, nil))
		}
	}()

	//load the log4go configuration
	log.LoadConfiguration(*logxml)

	//build the startup options
	option := server.NewOption(*startMode, *bindAddr, *certPath, *keyPath, *runMode, *storeCap)
	apnsserver := server.NewApnsHttpServer(option)

	ch := make(chan os.Signal, 1)
	//wait for a termination signal; SIGKILL (os.Kill) cannot be trapped,
	//so listen for os.Interrupt instead
	signal.Notify(ch, os.Interrupt)
	<-ch
	apnsserver.Shutdown()
	log.Info("APNS SERVER IS STOPPED!")
}
func (self *StmtPool) enhancedPool(size int) error {

	//pre-fill the pool to its minimum size, pushing the stmts into the idle pool
	for i := 0; i < size; i++ {
		j := 0
		var err error
		var stmt *sql.Stmt
		for ; j < 3; j++ {
			err, stmt = self.dialFunc()
			if nil != err {
				log.Error("POOL_FACTORY|CREATE STMT|INIT|FAIL|%s\n", err)
			} else {
				break
			}
		}
		if j >= 3 {
			return errors.New("POOL_FACTORY|CREATE STMT|INIT|FAIL|" + err.Error())
		}

		idlestmt := &IdleStmt{stmt: stmt, expiredTime: (time.Now().Add(self.idletime))}
		self.idlePool.PushFront(idlestmt)
		self.numActive++
	}

	return nil
}
//listen for data changes
func (self *ZKManager) listenEvent() {
	for !self.isClose {

		//per the zk docs, watches are one-shot and not guaranteed to be
		//reliable, so every watch must be re-registered after it fires
		change := <-self.eventChan
		path := change.Path

		switch change.Type {
		case zk.EventSession:
			if change.State == zk.StateExpired {
				log.Warn("ZKManager|OnSessionExpired!")
				//the zk session expired: reconnect and re-push everything
				self.watcher.OnSessionExpired()
			}
		case zk.EventNodeDeleted:
			self.session.ExistsW(path)
			self.watcher.NodeChange(path, ZkEvent(change.Type), []string{})
			// log.Info("ZKManager|listenEvent|%s|%s\n", path, change)
		case zk.EventNodeCreated, zk.EventNodeChildrenChanged:
			childnodes, _, _, err := self.session.ChildrenW(path)
			if nil != err {
				log.Error("ZKManager|listenEvent|CD|%s|%s|%v\n", err, path, change.Type)
			} else {
				self.watcher.NodeChange(path, ZkEvent(change.Type), childnodes)
				// log.Info("ZKManager|listenEvent|%s|%s|%s\n", path, change, childnodes)
			}
		case zk.EventNodeDataChanged:
			split := strings.Split(path, "/")
			//ignore changes that are not at the bind level
			if len(split) < 5 || strings.LastIndex(split[4], "-bind") <= 0 {
				continue
			}
			//fetch the new binding data
			binds, err := self.getBindData(path)
			if nil != err {
				log.Error("ZKManager|listenEvent|Changed|Get DATA|FAIL|%s|%s\n", err, path)
				//ignore
				continue
			}
			self.watcher.DataChange(path, binds)
			// log.Info("ZKManager|listenEvent|%s|%s|%s\n", path, change, binds)
		}
	}
}
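// A minimal sketch of the one-shot watch pattern above, assuming the
// github.com/samuel/go-zookeeper/zk client (watchChildren and its error
// handling are illustrative): the event channel fires at most once, so
// ChildrenW must be called again after every event to re-arm the watch.
func watchChildren(conn *zk.Conn, path string, onChange func([]string)) {
	for {
		children, _, events, err := conn.ChildrenW(path) //re-arm the watch
		if err != nil {
			return
		}
		onChange(children)
		<-events //block until the next change, then loop to re-register
	}
}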
//append data
func (self *Segment) Append(chunks []*Chunk) error {

	//if closed
	if self.isOpen == 0 {
		return errors.New(fmt.Sprintf("Segment Is Closed!|%s", self.name))
	}

	length := int64(0)
	for _, c := range chunks {
		c.sid = self.sid
		c.offset = self.offset + length
		tmp := c.marshal()
		for {
			l, err := self.bw.Write(tmp)
			length += int64(l)
			if nil != err && err != io.ErrShortWrite {
				log.Error("Segment|Append|FAIL|%s|%d/%d", err, l, len(tmp))
				return err
			} else if nil == err {
				break
			} else {
				//short write: reset the buffered writer and retry with the remainder
				self.bw.Reset(self.wf)
				log.Error("Segment|Append|FAIL|%s", err)
			}
			tmp = tmp[l:]
		}
	}

	//flush
	self.bw.Flush()
	// log.Debug("Segment|Append|SUCC|%d/%d", l, len(buff))

	//cache the chunks
	if nil == self.chunks {
		self.chunks = make([]*Chunk, 0, 1000)
	}
	self.chunks = append(self.chunks, chunks...)
	//sort
	// sort.Sort(self.chunks)

	//move offset
	self.offset += length
	self.byteSize += int32(length)
	return nil
}
func WrapPayLoad(payload *PayLoad) *Item {
	payloadJson := payload.Marshal()
	//the legacy APNs binary protocol caps the payload at 256 bytes
	if nil == payloadJson || len(payloadJson) > 256 {
		log.Error("WRAPPAYLOAD|FAIL|%s|len:%d\n", payloadJson, len(payloadJson))
		return nil
	}
	return &Item{id: PAY_LOAD, length: uint16(len(payloadJson)), data: payloadJson}
}
func (self *Segment) loadChunk(c *Chunk) {
	if c.length-CHUNK_HEADER <= 0 {
		log.Error("Segment|LoadChunk|INVALID HEADER|%s|%d|%d", self.name, c.id, c.length)
		return
	}

	data := make([]byte, c.length-CHUNK_HEADER)
	//seek to the chunk's data, just past the header
	self.rf.Seek(c.offset+CHUNK_HEADER, 0)
	self.br.Reset(self.rf)

	dl, err := io.ReadFull(self.br, data)
	if nil != err || dl != len(data) {
		log.Error("Segment|LoadChunk|Read Data|FAIL|%s|%s|%d|%d/%d", err, self.name, c.id, c.length, dl)
		return
	}
	c.data = data
}
func WrapDeviceToken(token string) *Item {
	decodeToken, err := hex.DecodeString(token)
	if nil != err {
		log.Error("WRAPTOKEN|FAIL|INVALID TOKEN|%s|%s\n", token, err.Error())
		return nil
	}
	return &Item{id: DEVICE_TOKEN, length: uint16(len(decodeToken)), data: decodeToken}
}
func NewPayLoad(sound string, badge int, alert Alert) *PayLoad {
	data, err := json.Marshal(alert)
	if nil != err {
		log.Error("NEWPAYLOAD|FAIL|ERROR|%s\n", err)
		return nil
	}
	aps := Aps{Alert: string(data), Sound: sound, Badge: badge}
	return &PayLoad{aps: aps, extParams: make(map[string]interface{})}
}
//write the stream to the actual network connection
func (self *Session) write0(tlv *packet.Packet) {

	p := packet.MarshalPacket(tlv)
	if nil == p || len(p) <= 0 {
		log.Error("Session|write0|MarshalPacket|FAIL|EMPTY PACKET|%s", tlv)
		//empty packet, nothing to write
		return
	}

	l := 0
	tmp := p
	for {
		length, err := self.bw.Write(tmp)
		if nil != err {
			log.Error("Session|write0|conn|%s|FAIL|%s|%d/%d", self.remoteAddr, err, length, len(tmp))
			//the connection is broken: close the session
			if err != io.ErrShortWrite {
				self.Close()
				return
			}
			//short write: reset the buffered writer and write the rest again
			self.bw.Reset(self.conn)
		}
		l += length
		//write finished
		if l == len(p) {
			break
		}
		tmp = p[l:]
	}

	//flush
	self.bw.Flush()

	if nil != self.rc.FlowStat {
		self.rc.FlowStat.WriteFlow.Incr(1)
		self.rc.FlowStat.WriteBytesFlow.Incr(int32(len(p)))
	}
}