//append data
func (self *SegmentLog) Appends(logs []*oplog) error {

	//if closed
	if self.isOpen == 0 {
		return errors.New(fmt.Sprintf("SegmentLog Is Closed!|%s", self.path))
	}

	length := int64(0)
	for _, lo := range logs {
		tmp := lo.marshal()
		for {
			l, err := self.bw.Write(tmp)
			length += int64(l)
			if nil != err && err != io.ErrShortWrite {
				log.ErrorLog("kite_store", "SegmentLog|Append|FAIL|%s|%d/%d", err, l, len(tmp))
				return err
			} else if nil == err {
				break
			} else {
				self.bw.Reset(self.wf)
				log.ErrorLog("kite_store", "SegmentLog|Append|FAIL|%s", err)
			}
			tmp = tmp[l:]
		}
	}

	//flush
	self.bw.Flush()

	//move offset
	atomic.AddInt64(&self.offset, int64(length))
	return nil
}
//get the subscription bindings and add a watcher
func (self *ZKManager) GetBindAndWatch(topic string) (map[string][]*Binding, error) {

	path := KITEQ_SUB + "/" + topic
	exist, _, _, err := self.session.ExistsW(path)
	if !exist {
		//no bindings exist yet; the topic node still needs to be created
		return make(map[string][]*Binding, 0), err
	}

	//get all qservers (group nodes) under the topic
	groupIds, _, _, err := self.session.ChildrenW(path)
	if nil != err {
		log.ErrorLog("kite_bind", "ZKManager|GetBindAndWatch|GroupID|FAIL|%s|%s\n", err, path)
		return nil, err
	}

	hps := make(map[string][]*Binding, len(groupIds))
	//fetch the bindings of every groupId under the topic
	for _, groupId := range groupIds {
		tmppath := path + "/" + groupId
		binds, err := self.getBindData(tmppath)
		if nil != err {
			log.ErrorLog("kite_bind", "GetBindAndWatch|getBindData|FAIL|%s|%s\n", tmppath, err)
			continue
		}

		//strip the trailing -bind from the group name
		gid := strings.TrimSuffix(groupId, "-bind")
		hps[gid] = binds
	}

	return hps, nil
}
func (self *KiteMysqlStore) Query(messageId string) *MessageEntity {

	var entity *MessageEntity
	s := self.sqlwrapper.hashQuerySQL(messageId)
	rows, err := self.dbshard.FindSlave(messageId).Query(s, messageId)
	if nil != err {
		log.ErrorLog("kite_store", "KiteMysqlStore|Query|FAIL|%s|%s\n", err, messageId)
		return nil
	}
	defer rows.Close()

	if rows.Next() {
		entity = &MessageEntity{}
		fc := self.convertor.convertFields(entity, filternothing)
		err := rows.Scan(fc...)
		if nil != err {
			log.ErrorLog("kite_store", "KiteMysqlStore|Query|SCAN|FAIL|%s|%s\n", err, messageId)
			return nil
		}
		self.convertor.Convert2Entity(fc, entity, filternothing)
		switch entity.MsgType {
		case protocol.CMD_BYTES_MESSAGE:
			//do nothing
		case protocol.CMD_STRING_MESSAGE:
			entity.Body = string(entity.GetBody().([]byte))
		}
	}

	return entity
}
func (self *KiteQServer) startCheckConf() {
	go func() {
		t := time.NewTicker(1 * time.Minute)
		for !self.stop {
			so := ServerOption{}
			err := loadTomlConf(self.kc.so.configPath, self.kc.so.clusterName, self.kc.so.bindHost, self.kc.so.pprofPort, &so)
			if nil != err {
				log.ErrorLog("kite_server", "KiteQServer|startCheckConf|FAIL|%s", err)
			}

			//topics were added or removed
			if len(so.topics) != len(self.kc.so.topics) {
				//push the publishable topic list and fetch the bindings for each topic
				succ := self.exchanger.PushQServer(self.kc.so.bindHost, so.topics)
				if !succ {
					log.ErrorLog("kite_server", "KiteQServer|startCheckConf|PushQServer|FAIL|%s|%s\n", err, so.topics)
				} else {
					log.InfoLog("kite_server", "KiteQServer|startCheckConf|PushQServer|SUCC|%s\n", so.topics)
				}
				//reset the config
				self.kc.so = so
			}

			<-t.C
		}
		t.Stop()
	}()
}
//migrate expired messages
func (self *KiteMysqlStore) migrateMessage(now int64, hashKey string) {

	log.InfoLog("kite_store", "KiteMysqlStore|MoveExpired|START|%s|%d", hashKey, now)

	//expired messages need to be moved into the DLQ
	sql := self.sqlwrapper.hashDLQSQL(DLQ_MOVE_QUERY, hashKey)
	//fetch the ids that need to be imported, then import them
	isql := self.sqlwrapper.hashDLQSQL(DLQ_MOVE_INSERT, hashKey)
	//delete the rows that have been imported
	dsql := self.sqlwrapper.hashDLQSQL(DLQ_MOVE_DELETE, hashKey)

	start := 0
	limit := 50
	for {
		messageIds := make([]interface{}, 1, 50)
		err := func() error {
			rows, err := self.dbshard.FindSlave(hashKey).Query(sql, self.serverName, now, start, limit)
			if err != nil {
				log.ErrorLog("kite_store", "KiteMysqlStore|migrateMessage|Query|FAIL|%s|%s|%s", err, hashKey, sql)
				return err
			}
			defer rows.Close()
			for rows.Next() {
				var id int
				var messageId string
				err = rows.Scan(&id, &messageId)
				if nil != err {
					log.ErrorLog("kite_store", "KiteMysqlStore|MoveExpired|Scan|FAIL|%s|%s|%s", err, hashKey, sql)
				} else {
					start = id
					messageIds = append(messageIds, messageId)
				}
			}
			return nil
		}()

		//migration finished, break and move on to the next shard
		if nil != err || len(messageIds[1:]) <= 0 {
			log.WarnLog("kite_store", "KiteMysqlStore|MoveExpired|SUCC|%s|%d|%s", hashKey, start, err)
			break
		}

		in := strings.Repeat("?,", len(messageIds[1:]))
		in = in[:len(in)-1]

		isqlTmp := strings.Replace(isql, "{ids}", in, 1)
		_, err = self.dbshard.FindMaster(hashKey).Exec(isqlTmp, messageIds[1:]...)
		if err != nil {
			log.ErrorLog("kite_store", "KiteMysqlStore|MoveExpired|Insert|FAIL|%s|%s|%s", err, isqlTmp, messageIds)
			break
		}

		dsqlTmp := strings.Replace(dsql, "{ids}", in, 1)
		messageIds[0] = self.serverName
		_, err = self.dbshard.FindMaster(hashKey).Exec(dsqlTmp, messageIds...)
		if err != nil {
			log.ErrorLog("kite_store", "KiteMysqlStore|MoveExpired|DELETE|FAIL|%s|%s|%s|%s", err, dsql, dsqlTmp, messageIds)
			break
		}
	}
}
//entity without the body
func (self *KiteMysqlStore) PageQueryEntity(hashKey string, kiteServer string, nextDeliveryTime int64, startIdx, limit int) (bool, []*MessageEntity) {

	s := self.sqlwrapper.hashPQSQL(hashKey)
	// log.Println(s)
	rows, err := self.dbshard.FindSlave(hashKey).
		Query(s, kiteServer, time.Now().Unix(), nextDeliveryTime, startIdx, limit+1)
	if err != nil {
		log.ErrorLog("kite_store", "KiteMysqlStore|Query|FAIL|%s|%s\n", err, hashKey)
		return false, nil
	}
	defer rows.Close()

	results := make([]*MessageEntity, 0, limit)
	for rows.Next() {
		entity := &MessageEntity{}
		fc := self.convertor.convertFields(entity, filterbody)
		err := rows.Scan(fc...)
		if err != nil {
			log.ErrorLog("kite_store", "KiteMysqlStore|PageQueryEntity|FAIL|%s|%s|%d|%d\n",
				err, kiteServer, nextDeliveryTime, startIdx)
		} else {
			self.convertor.Convert2Entity(fc, entity, filterbody)
			results = append(results, entity)
		}
	}

	if len(results) > limit {
		return true, results[:limit]
	} else {
		return false, results
	}
}
func (self *Segment) Close() error {
	self.Lock()
	defer self.Unlock()

	if atomic.CompareAndSwapInt32(&self.isOpen, 1, 0) {
		//close segment log
		self.slog.Close()

		//close segment
		err := self.bw.Flush()
		if nil != err {
			log.ErrorLog("kite_store", "Segment|Close|Writer|FLUSH|FAIL|%s|%s|%s\n", err, self.path, self.name)
		}

		//free chunk memory
		self.chunks = nil

		err = self.wf.Close()
		if nil != err {
			log.ErrorLog("kite_store", "Segment|Close|Write FD|FAIL|%s|%s|%s\n", err, self.path, self.name)
			return err
		} else {
			err = self.rf.Close()
			if nil != err {
				log.ErrorLog("kite_store", "Segment|Close|Read FD|FAIL|%s|%s|%s\n", err, self.path, self.name)
			}
			return err
		}
	}

	return nil
}
//publish subscription bindings
func (self *ZKManager) PublishBindings(groupId string, bindings []*Binding) error {

	//group the bindings by topic
	groupBind := make(map[string][]*Binding, 10)
	for _, b := range bindings {
		g, ok := groupBind[b.Topic]
		if !ok {
			g = make([]*Binding, 0, 2)
		}
		b.GroupId = groupId
		g = append(g, b)
		groupBind[b.Topic] = g
	}

	for topic, binds := range groupBind {
		data, err := MarshalBinds(binds)
		if nil != err {
			log.ErrorLog("kite_bind", "ZKManager|PublishBindings|MarshalBind|FAIL|%s|%s|%t\n", err, groupId, binds)
			return err
		}

		createType := zk.CreatePersistent

		path := KITEQ_SUB + "/" + topic
		//register the groupId under the topic
		//register the subscription info
		succpath, err := self.registePath(path, groupId+"-bind", createType, data)
		if nil != err {
			log.ErrorLog("kite_bind", "ZKManager|PublishTopic|Bind|FAIL|%s|%s/%s\n", err, path, binds)
			return err
		} else {
			log.InfoLog("kite_bind", "ZKManager|PublishTopic|Bind|SUCC|%s|%s\n", succpath, binds)
		}
	}

	return nil
}
//internal helper for creating a node
func (self *ZKManager) innerCreatePath(tmppath string, data []byte, createType zk.CreateType) error {
	exist, _, _, err := self.session.ExistsW(tmppath)
	if nil == err && !exist {
		_, err := self.session.Create(tmppath, data, createType, zk.WorldACL(zk.PermAll))
		if nil != err {
			log.ErrorLog("kite_bind", "ZKManager|innerCreatePath|FAIL|%s|%s\n", err, tmppath)
			return err
		}

		//verify the creation, waiting briefly if needed
		for i := 0; i < 5; i++ {
			exist, _, _ = self.session.Exists(tmppath)
			if !exist {
				time.Sleep(time.Duration(i*100) * time.Millisecond)
			} else {
				break
			}
		}

		return err
	} else if nil != err {
		log.ErrorLog("kite_bind", "ZKManager|innerCreatePath|FAIL|%s\n", err)
		return err
	} else if nil != data {
		//the node already exists, push the new data
		_, err := self.session.Set(tmppath, data, -1)
		if nil != err {
			log.ErrorLog("kite_bind", "ZKManager|innerCreatePath|PUSH DATA|FAIL|%s|%s|%s\n", err, tmppath, string(data))
			return err
		}
	}

	return nil
}
func (self convertor) Convert2Params(entity *store.MessageEntity) []interface{} {

	val := reflect.ValueOf(*entity)
	fvs := make([]interface{}, 0, len(self.columns))
	for _, v := range self.columns {

		var fv interface{}
		if v.columnName == "body" {
			if entity.MsgType == protocol.CMD_STRING_MESSAGE {
				fv = []byte(entity.GetBody().(string))
			} else if entity.MsgType == protocol.CMD_BYTES_MESSAGE {
				fv = entity.GetBody().([]byte)
			} else {
				log.ErrorLog("kite_store", "convertor|Convert2Params|UnSupport MESSAGE TYPE|%s\n", entity.MsgType)
			}
		} else {
			f := val.FieldByName(v.fieldName)
			// log.Debug("convertor|Convert2Params|%s|%s\n", v.fieldName, f)
			switch f.Kind() {
			case reflect.Ptr:
				header, ok := f.Interface().(*protocol.Header)
				if ok {
					//the header is serialized with protobuf
					data, err := protocol.MarshalPbMessage(header)
					if err != nil {
						log.ErrorLog("kite_store", "convertor|Convert2Params|Marshal|HEAD|FAIL|%s|%s\n", err, f.Addr().Interface())
						return nil
					}
					fv = data
				} else {
					log.ErrorLog("kite_store", "convertor|Convert2Params|Not protocol.Header PRT |FAIL|%s\n", f.Addr())
					return nil
				}

			case reflect.Slice, reflect.Array:
				if f.Type().Elem().Kind() == reflect.String {
					data, err := json.Marshal(f.Interface())
					if nil != err {
						log.ErrorLog("kite_store", "convertor|Convert2Params|Marshal|Slice|FAIL||%s\n", err)
						return nil
					}
					fv = string(data)
				} else {
					fv = f.Interface()
				}

			default:
				fv = f.Interface()
			}
		}

		fvs = append(fvs, &fv)
	}
	return fvs
}
func (self *KiteMysqlStore) Start() {

	count := SHARD_SEED
	//create a channel per hash shard
	batchDelChan := make([]chan string, 0, count)
	batchUpChan := make([]chan *MessageEntity, 0, count)
	batchComChan := make([]chan string, 0, count)
	for i := 0; i < count; i++ {
		batchUpChan = append(batchUpChan, make(chan *MessageEntity, self.batchUpSize*2))
		batchDelChan = append(batchDelChan, make(chan string, self.batchDelSize*2))
		batchComChan = append(batchComChan, make(chan string, self.batchUpSize*2))
	}

	//batch channels
	self.batchUpChan = batchUpChan
	self.batchDelChan = batchDelChan
	self.batchComChan = batchComChan

	//create the prepared statements for each batch type
	stmts := make(map[batchType][][]*StmtPool, 4)
	for k, v := range self.sqlwrapper.batchSQL {
		btype := k
		pool := make([][]*StmtPool, 0, self.dbshard.ShardNum())
		//build a stmt pool for each shard
		for i := 0; i < self.dbshard.ShardNum(); i++ {
			innerPool := make([]*StmtPool, 0, self.dbshard.HashNum())
			for j, s := range v {
				psql := s
				db := self.dbshard.FindShardById(i*self.dbshard.HashNum() + j).master
				err, p := NewStmtPool(5, 10, 20, 1*time.Minute, func() (error, *sql.Stmt) {
					stmt, err := db.Prepare(psql)
					if nil != err {
						log.ErrorLog("kite_store", "StmtPool|Create Stmt|FAIL|%s|%s\n", err, psql)
						return err, nil
					}
					return nil, stmt
				})
				if nil != err {
					log.ErrorLog("kite_store", "NewKiteMysql|NewStmtPool|FAIL|%s\n", err)
					panic(err)
				}
				innerPool = append(innerPool, p)
			}
			pool = append(pool, innerPool)
		}
		stmts[btype] = pool
	}
	self.stmtPools = stmts

	for i := 0; i < count; i++ {
		// log.Printf("KiteMysqlStore|start|SQL|%s\n|%s\n", sqlu, sqld)
		self.startBatch(i, self.batchUpChan[i], self.batchDelChan[i], self.batchComChan[i])
	}
	log.InfoLog("kite_store", "KiteMysqlStore|Start...")
}
func (self *KiteMysqlStore) batchUpdate(hashId int, entity []*MessageEntity) bool {

	if len(entity) <= 0 {
		return true
	}

	p := self.stmtPool(UPDATE, entity[0].MessageId)
	err, stmt := p.Get()
	if nil != err {
		log.ErrorLog("kite_store", "KiteMysqlStore|batchUpdate|GET STMT|FAIL|%s|%d\n", err, hashId)
		return false
	}
	defer p.Release(stmt)

	args := make([]interface{}, 0, 5)
	var errs error
	for _, e := range entity {
		args = args[:0]

		sg, err := json.Marshal(e.SuccGroups)
		if nil != err {
			log.ErrorLog("kite_store", "KiteMysqlStore|batchUpdate|SUCC GROUP|MARSHAL|FAIL|%s|%s|%s\n", err, e.MessageId, e.SuccGroups)
			errs = err
			continue
		}
		args = append(args, sg)

		fg, err := json.Marshal(e.FailGroups)
		if nil != err {
			log.ErrorLog("kite_store", "KiteMysqlStore|batchUpdate|FAIL GROUP|MARSHAL|FAIL|%s|%s|%s\n", err, e.MessageId, e.FailGroups)
			errs = err
			continue
		}
		args = append(args, fg)

		//set the next delivery time
		args = append(args, e.NextDeliverTime)
		args = append(args, e.DeliverCount)
		args = append(args, e.MessageId)

		_, err = stmt.Exec(args...)
		if nil != err {
			log.ErrorLog("kite_store", "KiteMysqlStore|batchUpdate|FAIL|%s|%s\n", err, e)
			errs = err
		}
	}

	return nil == errs
}
func (self *KiteFileStore) Query(messageId string) *MessageEntity {

	lock, _, el := self.hash(messageId)
	lock.RLock()
	defer lock.RUnlock()
	e, ok := el[messageId]
	if !ok {
		return nil
	}
	v := e.Value.(*opBody)

	//wait save done
	self.waitSaveDone(v)

	data, err := self.snapshot.Query(v.Id)
	if nil != err {
		// log.Error("KiteFileStore|Query|Entity|FAIL|%s|%d", err, v.Id)
		return nil
	}

	var msg interface{}
	msgType := data[0]
	switch msgType {
	case protocol.CMD_BYTES_MESSAGE:
		var bms protocol.BytesMessage
		err = protocol.UnmarshalPbMessage(data[1:], &bms)
		msg = &bms
	case protocol.CMD_STRING_MESSAGE:
		var sms protocol.StringMessage
		err = protocol.UnmarshalPbMessage(data[1:], &sms)
		msg = &sms
	default:
		log.ErrorLog("kite_store", "KiteFileStore|Query|INVALID|MSGTYPE|%d", msgType)
		return nil
	}

	if nil != err {
		log.ErrorLog("kite_store", "KiteFileStore|Query|UnmarshalPbMessage|Entity|FAIL|%s", err)
		return nil
	} else {
		entity := NewMessageEntity(protocol.NewQMessage(msg))
		//merge data
		entity.Commit = v.Commit
		entity.FailGroups = v.FailGroups
		entity.SuccGroups = v.SuccGroups
		entity.NextDeliverTime = v.NextDeliverTime
		entity.DeliverCount = v.DeliverCount
		return entity
	}
}
func (self *Segment) Open(do func(ol *oplog)) error {
	self.Lock()
	defer self.Unlock()

	if atomic.CompareAndSwapInt32(&self.isOpen, 0, 1) {

		// log.Info("Segment|Open|BEGIN|%s|%s", self.path, self.name)

		var rf *os.File
		var wf *os.File

		//create the file if it does not exist
		_, err := os.Stat(self.path)
		if os.IsNotExist(err) {
			_, err := os.Create(self.path)
			if nil != err {
				log.ErrorLog("kite_store", "Segment|Create|FAIL|%s|%s", err, self.path)
				return err
			}
		}

		//open the write fd in append mode
		wf, err = os.OpenFile(self.path, os.O_RDWR|os.O_APPEND, os.ModePerm)
		if nil != err {
			log.ErrorLog("kite_store", "Segment|Open|FAIL|%s|%s", err, self.name)
			return err
		}

		rf, err = os.OpenFile(self.path, os.O_RDWR, os.ModePerm)
		if nil != err {
			log.ErrorLog("kite_store", "Segment|Open|FAIL|%s|%s", err, self.name)
			return err
		}

		self.rf = rf
		self.wf = wf

		//buffer
		self.br = bufio.NewReader(rf)
		//load
		self.loadCheck()
		// //seek
		// self.wf.Seek(self.offset, 0)
		self.bw = bufio.NewWriter(wf)

		//op segment log
		self.slog.Open()

		//recover segment
		self.recover(do)

		total, n, d, e := self.stat()
		log.InfoLog("kite_store", "Segment|Open|SUCC|%s|total:%d,n:%d,d:%d,e:%d", self.name, total, n, d, e)
		return nil
	}
	return nil
}
//traverse oplog
func (self *SegmentLog) Replay(do func(l *oplog)) {

	self.Open()
	offset := int64(0)
	tmp := make([]byte, 1024)
	//seek to head
	self.rf.Seek(0, 0)
	self.br.Reset(self.rf)

	for {
		var length int32
		err := binary.Read(self.br, binary.BigEndian, &length)
		if nil != err {
			if err == io.EOF {
				self.br.Reset(self.rf)
				break
			}
			log.WarnLog("kite_store", "SegmentLog|Replay|LEN|%s|Skip...", err)
			continue
		}

		// log.Debug("SegmentLog|Replay|LEN|%d", length)

		if int(length) > cap(tmp) {
			grow := make([]byte, int(length)-cap(tmp))
			tmp = append(tmp, grow...)
		}

		err = binary.Read(self.br, binary.BigEndian, tmp[:int(length)-4])
		if nil != err {
			self.br.Reset(self.rf)
			log.ErrorLog("kite_store", "SegmentLog|Replay|Data|%s", err)
			break
		}

		var ol oplog
		r := bytes.NewReader(tmp[:int(length)-4])
		deco := gob.NewDecoder(r)
		err = deco.Decode(&ol)
		if nil != err {
			log.ErrorLog("kite_store", "SegmentLog|Replay|unmarshal|oplog|FAIL|%s", err)
			continue
		}
		// log.Debug("SegmentLog|Replay|oplog|%s", ol)
		do(&ol)
		//line
		offset += int64(length)
	}

	self.offset = int64(offset)
}
//a group under a subscription topic has changed
func (self *BindExchanger) NodeChange(path string, eventType ZkEvent, childNode []string) {

	//only handle changes of subscription bindings
	if strings.HasPrefix(path, KITEQ_SUB) {

		//extract the topic
		split := strings.Split(path, "/")
		if len(split) < 4 {
			if eventType == Created {
				//invalid subscription path
				log.ErrorLog("kite_bind", "BindExchanger|NodeChange|INVALID SUB PATH |%s|%t\n", path, childNode)
			}
			return
		}
		//extract the topic
		topic := split[3]
		self.lock.Lock()
		defer self.lock.Unlock()

		//if the topic has no subscription group nodes, drop the topic directly
		if len(childNode) <= 0 {
			self.onBindChanged(topic, "", nil)
			log.ErrorLog("kite_bind", "BindExchanger|NodeChange|NO CHILD NODE|%s|%s\n", path, childNode)
			return
		}

		// //reset the groups for the current topic
		switch eventType {
		case Created, Child:
			bm, err := self.zkmanager.GetBindAndWatch(topic)
			if nil != err {
				log.ErrorLog("kite_bind", "BindExchanger|NodeChange|GetBindAndWatch|FAIL|%s|%s\n", path, childNode)
			}

			//if the topic still has binding groups update them, otherwise clean up
			if len(bm) > 0 {
				for groupId, bs := range bm {
					self.onBindChanged(topic, groupId, bs)
				}
			} else {
				//remove the specific group
				self.onBindChanged(topic, "", nil)
			}
		}
	} else {
		// log.Warn("BindExchanger|NodeChange|NOT A SUB NODE CHANGE|%s|%s\n", path, childNode)
	}
}
//create the physical connection
func dial(hostport string) (*net.TCPConn, error) {
	//resolve and connect
	remoteAddr, err_r := net.ResolveTCPAddr("tcp4", hostport)
	if nil != err_r {
		log.ErrorLog("kite_client", "KiteClientManager|RECONNECT|RESOLVE ADDR |FAIL|remote:%s\n", err_r)
		return nil, err_r
	}
	conn, err := net.DialTCP("tcp4", nil, remoteAddr)
	if nil != err {
		log.ErrorLog("kite_client", "KiteClientManager|RECONNECT|%s|FAIL|%s\n", hostport, err)
		return nil, err
	}

	return conn, nil
}
//publish the server for each topic
func (self *ZKManager) PublishQServer(hostport string, topics []string) error {

	for _, topic := range topics {
		qpath := KITEQ_SERVER + "/" + topic
		spath := KITEQ_SUB + "/" + topic
		ppath := KITEQ_PUB + "/" + topic

		//create the root nodes for publish and subscribe
		self.traverseCreatePath(ppath, nil, zk.CreatePersistent)
		// self.session.ExistsW(ppath)
		self.traverseCreatePath(spath, nil, zk.CreatePersistent)
		self.session.ExistsW(spath)

		//delete the current ephemeral node before re-registering,
		//so watchers do not miss the change event for this node
		self.session.Delete(qpath+"/"+hostport, -1)
		//register the current node
		path, err := self.registePath(qpath, hostport, zk.CreateEphemeral, nil)
		if nil != err {
			log.ErrorLog("kite_bind", "ZKManager|PublishQServer|FAIL|%s|%s/%s\n", err, qpath, hostport)
			return err
		}
		log.InfoLog("kite_bind", "ZKManager|PublishQServer|SUCC|%s\n", path)
	}

	//register the current kiteq server
	self.session.Delete(KITEQ_ALIVE_SERVERS+"/"+hostport, -1)
	self.registePath(KITEQ_ALIVE_SERVERS, hostport, zk.CreateEphemeral, nil)
	self.registePath(KITEQ_ALL_SERVERS, hostport, zk.CreatePersistent, nil)
	return nil
}
func (self *DeliverResultHandler) checkRedelivery(fevent *deliverResultEvent) bool {

	//non-fly messages need their delivery result persisted
	if !fevent.fly && fevent.deliverCount > 3 {
		//persist the delivery result
		self.saveDeliverResult(fevent.messageId, fevent.deliverCount, fevent.succGroups, fevent.deliveryFailGroups)
	}

	//check whether the message TTL/expiry or the delivery limit has been reached;
	//if so, no further redelivery is allowed
	if fevent.expiredTime <= time.Now().Unix() ||
		(fevent.deliverLimit <= fevent.deliverCount && fevent.deliverLimit > 0) {
		//just record this delivery attempt, do not trigger a redelivery
	} else if fevent.deliverCount <= 3 {
		//only the first three failed deliveries trigger an immediate redelivery
		fevent.deliverGroups = fevent.deliveryFailGroups
		fevent.packet.Reset()
		// log.Info("DeliverResultHandler|checkRedelivery|%s\n", fevent.deliverCount, fevent.deliverEvent)
		return true
	} else {
		//after more than three failed deliveries, persist the result and
		//wait for the later recover process to redeliver
		//log deliver fail
		log.ErrorLog("kite_deliver", "messageId:%s|Topic:%s|MessageType:%s|DeliverCount:%d|SUCCGROUPS:%s|FAILGROUPS:%s|",
			fevent.deliverEvent.messageId, fevent.deliverEvent.topic, fevent.deliverEvent.messageType,
			fevent.deliverCount, fevent.deliverEvent.succGroups, fevent.deliveryFailGroups)
	}

	return false
}
func (self *MessageStore) recoverSnapshot() {
	//current segmentid
	if len(self.segments) > 0 {

		//replay log
		for i, s := range self.segments {
			err := s.Open(self.replay)
			if nil != err {
				log.ErrorLog("kite_store", "MessageStore|recoverSnapshot|Fail|%s|%s", err, s.slog.path)
				panic(err)
			}

			//last segment
			if i == len(self.segments)-1 {
				if nil != err {
					panic("MessageStore|Load Last Segment|FAIL|" + err.Error())
				}

				//set snapshot status
				if len(s.chunks) > 0 {
					self.chunkId = s.chunks[len(s.chunks)-1].id
				}
			}
			log.DebugLog("kite_store", "MessageStore|recoverSnapshot|%s", s.name)
		}
	}
}
func (self *KiteQServer) Start() {

	self.remotingServer = server.NewRemotionServer(self.kc.server, self.kc.rc,
		func(rclient *client.RemotingClient, p *packet.Packet) {
			event := pipe.NewPacketEvent(rclient, p)
			err := self.pipeline.FireWork(event)
			if nil != err {
				log.ErrorLog("kite_server", "RemotingServer|onPacketRecieve|FAIL|%s|%t", err, packet.MarshalPacket(p))
			} else {
				// log.Debug("RemotingServer|onPacketRecieve|SUCC|%s|%t\n", rclient.RemoteAddr(), packet)
			}
		})

	err := self.remotingServer.ListenAndServer()
	if nil != err {
		log.Crashf("KiteQServer|RemotionServer|START|FAIL|%s|%s\n", err, self.kc.server)
	} else {
		log.InfoLog("kite_server", "KiteQServer|RemotionServer|START|SUCC|%s\n", self.kc.server)
	}

	//push the publishable topic list and fetch the bindings for each topic
	succ := self.exchanger.PushQServer(self.kc.server, self.kc.topics)
	if !succ {
		log.Crashf("KiteQServer|PushQServer|FAIL|%s|%s\n", err, self.kc.topics)
	} else {
		log.InfoLog("kite_server", "KiteQServer|PushQServer|SUCC|%s\n", self.kc.topics)
	}

	//start flow statistics
	self.startFlow()

	//start the recover manager
	self.recoverManager.Start()
}
func (self *StmtPool) enhancedPool(size int) error {

	//initialize the minimum pool size and put the stmts into the idle pool
	for i := 0; i < size; i++ {
		j := 0
		var err error
		var stmt *sql.Stmt
		for ; j < 3; j++ {
			err, stmt = self.dialFunc()
			if nil != err {
				log.ErrorLog("kite_store", "POOL_FACTORY|CREATE STMT|INIT|FAIL|%s\n", err)
			} else {
				break
			}
		}

		if j >= 3 {
			return errors.New("POOL_FACTORY|CREATE STMT|INIT|FAIL|" + err.Error())
		}

		idlestmt := &IdleStmt{stmt: stmt, expiredTime: (time.Now().Add(self.idletime))}
		self.idlePool.PushFront(idlestmt)
		self.numActive++
	}

	return nil
}
//append data
func (self *SegmentLog) Append(ol *oplog) error {

	//if closed
	if self.isOpen == 0 {
		return errors.New(fmt.Sprintf("SegmentLog Is Closed!|%s", self.path))
	}

	buff := ol.marshal()
	tmp := buff
	for {
		l, err := self.bw.Write(tmp)
		if nil != err && err != io.ErrShortWrite {
			log.ErrorLog("kite_store", "SegmentLog|Append|FAIL|%s|%d/%d", err, l, len(tmp))
			return err
		} else if nil == err {
			break
		} else {
			self.bw.Reset(self.wf)
		}
		tmp = tmp[l:]
	}
	self.bw.Flush()

	//move offset
	atomic.AddInt64(&self.offset, int64(len(buff)))
	return nil
}
func (self *Segment) loadChunk(c *Chunk) {

	if c.length-CHUNK_HEADER <= 0 {
		log.ErrorLog("kite_store", "Segment|LoadChunk|INVALID HEADER|%s|%d|%d", self.name, c.id, c.length)
		return
	}

	data := make([]byte, c.length-CHUNK_HEADER)
	//seek chunk
	self.rf.Seek(c.offset+CHUNK_HEADER, 0)
	self.br.Reset(self.rf)

	dl, err := io.ReadFull(self.br, data)
	if nil != err || dl != cap(data) {
		log.ErrorLog("kite_store", "Segment|LoadChunk|Read Data|FAIL|%s|%s|%d|%d/%d", err, self.name, c.id, c.length, dl)
		return
	}
	c.data = data
}
//append data
func (self *Segment) Append(chunks []*Chunk) error {

	//if closed
	if self.isOpen == 0 {
		return errors.New(fmt.Sprintf("Segment Is Closed!|%s", self.name))
	}

	length := int64(0)
	for _, c := range chunks {
		c.sid = self.sid
		c.offset = self.offset + length
		tmp := c.marshal()
		for {
			l, err := self.bw.Write(tmp)
			length += int64(l)
			if nil != err && err != io.ErrShortWrite {
				log.ErrorLog("kite_store", "Segment|Append|FAIL|%s|%d/%d", err, l, len(tmp))
				return err
			} else if nil == err {
				break
			} else {
				self.bw.Reset(self.wf)
				log.ErrorLog("kite_store", "Segment|Append|FAIL|%s", err)
			}
			tmp = tmp[l:]
		}
	}

	//flush
	self.bw.Flush()

	// log.DebugLog("Segment|Append|SUCC|%d/%d", l, len(buff))
	//tmp cache chunk
	if nil == self.chunks {
		self.chunks = make([]*Chunk, 0, 1000)
	}
	self.chunks = append(self.chunks, chunks...)
	//sort
	// sort.Sort(self.chunks)

	//move offset
	self.offset += int64(length)
	self.byteSize += int32(length)
	return nil
}
//replay the oplog of the current data file to restore message state
func (self *KiteFileStore) replay(ol *oplog) {

	var body opBody
	err := json.Unmarshal(ol.Body, &body)
	if nil != err {
		log.ErrorLog("kite_store", "KiteFileStore|replay|FAIL|%s|%s", err, ol.Body)
		return
	}

	ob := &body
	ob.Id = ol.ChunkId
	l, link, tol := self.hash(ob.MessageId)
	// log.Debug("KiteFileStore|replay|%s|%s", ob, ol.Op)

	//for update or create, just deserialize and overwrite
	if ol.Op == OP_U || ol.Op == OP_C {
		l.Lock()
		//overwrite directly
		e, ok := tol[ob.MessageId]
		if ok {
			e.Value = ob
		} else {
			e = link.PushFront(ob)
			tol[ob.MessageId] = e
		}
		link.MoveToFront(e)
		l.Unlock()
	} else if ol.Op == OP_D || ol.Op == OP_E {
		//for delete/expire operations, remove the saved op_log directly
		l.Lock()
		e, ok := tol[ob.MessageId]
		if ok {
			delete(tol, ob.MessageId)
			link.Remove(e)
		}
		l.Unlock()
	} else {
		log.ErrorLog("kite_store", "KiteFileStore|replay|INVALID|%s|%s", ob, ol.Op)
	}
}
func (self *ApnsClient) sendMessage(msg *entry.Message) error {

	var sendError error
	//retry logic
	for i := 0; i < 3; i++ {
		err, conn := self.factory.Get()
		if nil != err || nil == conn || !conn.IsAlive() {
			log.ErrorLog("push_client", "APNSCLIENT|SEND MESSAGE|FAIL|GET CONN|FAIL|%s|%s", err, *msg)
			sendError = errors.New("GET APNS CONNECTION FAIL")
			continue
		}

		//write the data of the current enhanced send into storage
		if nil != self.storage && msg.MsgType == entry.MESSAGE_TYPE_ENHANCED {
			//just record the normal send
			self.storage.Insert(entry.UmarshalIdentifier(msg), msg)
			// if rand.Intn(100) == 0 {
			// 	log.Printf("APNSCLIENT|sendMessage|RECORD MESSAGE|%s\n", msg)
			// }
		} else {
			//otherwise drop it and do not enable resending
		}

		//a direct send has no return value
		sendError = conn.sendMessage(msg)
		self.sendCounter.Incr(1)
		if nil != sendError {
			self.failCounter.Incr(1)
			//the connection is broken, destroy it
			releaseErr := self.factory.ReleaseBroken(conn)
			log.ErrorLog("push_client", "APNSCLIENT|SEND MESSAGE|FAIL|RELEASE BROKEN CONN|FAIL|%s|%s", sendError, releaseErr)
		} else {
			//send succeeded, return the connection to the pool
			self.factory.Release(conn)
			break
		}
	}

	return sendError
}
func openDb(addr string, shardId int, idleConn, maxConn int) *sql.DB {
	db, err := sql.Open("mysql", addr+"_"+strconv.Itoa(shardId))
	if err != nil {
		log.ErrorLog("kite_store", "NewKiteMysql|CONNECT FAIL|%s|%s\n", err, addr)
		panic(err)
	}

	db.SetMaxIdleConns(idleConn)
	db.SetMaxOpenConns(maxConn)
	return db
}
//fetch the binding data
func (self *ZKManager) getBindData(path string) ([]*Binding, error) {
	bindData, _, _, err := self.session.GetW(path)
	if nil != err {
		log.ErrorLog("kite_bind", "ZKManager|getBindData|Binding|FAIL|%s|%s\n", err, path)
		return nil, err
	}

	// log.Printf("ZKManager|getBindData|Binding|SUCC|%s|%s\n", path, string(bindData))
	if nil == bindData || len(bindData) <= 0 {
		return []*Binding{}, nil
	} else {
		binding, err := UmarshalBinds(bindData)
		if nil != err {
			log.ErrorLog("kite_bind", "ZKManager|getBindData|UmarshalBind|FAIL|%s|%s|%s\n", err, path, string(bindData))
		}
		return binding, err
	}
}
//handle push requests
func (self *ApnsHttpServer) handlePush(out http.ResponseWriter, req *http.Request) {

	resp := &response{}
	resp.Status = RESP_STATUS_SUCC

	if req.Method == "GET" {
		//the GET method is not supported
		resp.Status = RESP_STATUS_INVALID_PROTO
		resp.Error = errors.New("Unsupport Get method Invoke!")
	} else if req.Method == "POST" {
		//pushType
		pushType := req.PostFormValue("pt")
		//default to the Enhanced push type
		//parse the corresponding token and payload
		token, payload := self.decodePayload(req, resp)

		trace := req.PostFormValue("trace")

		expiredSeconds := req.PostFormValue("expiredSeconds")
		expiredTime := self.expiredTime
		if len(expiredSeconds) > 0 {
			t, err := strconv.ParseInt(expiredSeconds, 10, 32)
			if nil == err {
				expiredTime = uint32(t)
			}
		}

		//if the status is still success at this point we can send
		if RESP_STATUS_SUCC == resp.Status {
			func() {
				defer func() {
					if re := recover(); nil != re {
						stack := re.(*errors.Error).ErrorStack()
						log.ErrorLog("push_handler", "ApnsHttpServer|handlePush|SEND|FAIL|%s|%s|%s", stack, payload, trace)
						resp.Status = RESP_STATUS_ERROR
						resp.Error = errors.New(fmt.Sprintf("%s", re))
						self.write(out, resp)
					}
				}()
				self.innerSend(pushType, token, payload, resp, expiredTime)
				self.write(out, resp)
				log.DebugLog("push_handler", "ApnsHttpServer|handlePush|SUCC|%s|%s|%s", resp, payload, trace)
			}()
		} else {
			log.WarnLog("push_handler", "ApnsHttpServer|handlePush|FAIL|%s|%s|%s", resp, payload, trace)
			self.write(out, resp)
		}
	}
}