func (m *Manager) GetTopics() ([]string, error) { topics, err := m.client.Topics() if err != nil { log.Warnf("get topics err : %s", err) } return topics, err }
func (c *httpClient) Send(uri string, data []byte) (err error) { log.Info(string(data)) req, err := http.NewRequest("POST", uri, bytes.NewReader(data)) if err != nil { log.Warnf("new http request err: %v", err) return err } resp, err := c.cli.Do(req) if err != nil { log.Warnf("do http request err: %v", err) return err } defer resp.Body.Close() respData, err := ioutil.ReadAll(resp.Body) if err != nil { log.Warnf("do http request err: %v", err) return err } // TODO check _ = respData return }
func (m *RoamClient) Send(key string, data []byte) (err error) { if len(data) == 0 || string(data) == "[]" { return } sts, err := transToRoamStruct(data) if err != nil { log.Warnf("store profile log err : %v", err) return err } for i := range sts { log.Profile("%s", sts[i]) } return }
func Add(key string, args ...int64) { var pkt *Packet if len(args) == 1 { pkt = &Packet{INCR, key, args[0], 0, 0} } else if len(args) == 2 { pkt = &Packet{INCR_EX, key, args[0], args[1], 0} } else if len(args) == 3 { pkt = &Packet{INCR_EX2, key, args[0], args[1], args[2]} } select { case defaultClient.in <- pkt: default: log.Warnf("metrics chan is full: %s", pkt) } }
func (s *Server) msgReceive(queue string, group string) string { var result string id, data, _, err := s.queue.RecvMessage(queue, group) if err != nil { log.Debugf("msgReceive failed: %s", errors.ErrorStack(err)) result = err.Error() } else { err = s.queue.AckMessage(queue, group, id) if err != nil { log.Warnf("ack message queue:%q group:%q id:%q err:%s", queue, group, id, err) result = err.Error() } else { result = `{"action":"receive","msg":"` + string(data) + `"}` } } return result }
func transToRoamStruct(data []byte) (jsonStrs []string, err error) { var results []*MetricsStat err = json.Unmarshal(data, &results) if err != nil { return nil, err } action := func(st *MetricsStat) []*RoamStruct { ret := make([]*RoamStruct, 0, 2) actions := []string{SENT, RECV} for _, act := range actions { rst := &RoamStruct{ Type: WQS, Queue: st.Queue, Group: st.Group, Action: act, } switch act { case SENT: rst.Total = st.Sent.Total rst.AvgTime = st.Sent.Elapsed case RECV: rst.Total = st.Recv.Total rst.AvgTime = st.Recv.Elapsed } ret = append(ret, rst) } return ret } for _, st := range results { rsts := action(st) for _, rst := range rsts { stData, err := json.Marshal(rst) if err != nil { log.Warnf("transToRoamStruct err : %v", err) continue } jsonStrs = append(jsonStrs, string(stData)) } } return }
// connLoop serves one memcached-protocol client connection: it reads
// newline-terminated commands, dispatches them via the commands table, and
// writes responses until the server starts stopping or an unrecoverable
// error occurs.
func (ms *McServer) connLoop(conn net.Conn) {
	defer func(conn net.Conn) {
		log.Debugf("mc client closed :%s", conn.RemoteAddr())
		// Remove the connection from the pool under the lock, then close it.
		ms.mu.Lock()
		delete(ms.connPool, conn)
		ms.mu.Unlock()
		conn.Close()
		// Recover from any panic in command handling so one bad connection
		// cannot crash the whole server.
		if err := recover(); err != nil {
			log.Errorf("mc connLoop panic error: %s", err)
		}
	}(conn)
	br := bufio.NewReaderSize(conn, ms.recvBuffSize)
	bw := bufio.NewWriterSize(conn, ms.sendBuffSize)
	// Keep serving until the server is flagged as stopping.
	for atomic.LoadInt32(&ms.stopping) == 0 {
		data, err := br.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				return
			}
			log.Warnf("mc server ReadLine err:%s", err)
			return
		}
		// First token is the command word; the rest are its arguments.
		tokens := strings.Split(strings.TrimSpace(data), " ")
		cmd := tokens[0]
		command, exists := commands[cmd]
		if !exists {
			command = commandUnkown
			cmd = "unsupported"
		}
		metrics.Add(cmd, 1)
		err = command(ms.queue, tokens, br, bw)
		// Flush the response before inspecting the error so the client sees
		// whatever the handler managed to write.
		bw.Flush()
		if err != nil {
			// An error returned by a command is always unrecoverable: exit
			// the loop and close the connection, otherwise the framing of
			// subsequent valid data on this connection would be corrupted.
			log.Errorf("mc bad command:%s", errors.ErrorStack(err))
			return
		}
	}
}
func (m *Monitor) storeStatistic() { t := time.Now().Unix() / 10 * 10 time := strconv.FormatInt(int64(t), 10) snapshot := make(map[string]int64) //reduce hoding mutex druation m.mu.Lock() for k, v := range m.statisticMap { snapshot[k] = v m.statisticMap[k] = 0 } m.mu.Unlock() for k, v := range snapshot { key := k + time _, err := m.redisClient.IncrBy(key, v).Result() if err != nil { log.Warnf("storeStatistic HIncrBy err: %s", err) } } }
func NewConsumer(brokerAddrs []string, config *cluster.Config, topic, group string) (*Consumer, error) { //FIXME: consumer的config是否需要支持配置 consumer, err := cluster.NewConsumer(brokerAddrs, group, []string{topic}, config) if err != nil { log.Errorf("kafka consumer init failed, addrs:%s, err:%v", brokerAddrs, err) return nil, err } go func() { for err := range consumer.Errors() { log.Warnf("consumer err : %v", err) } }() return &Consumer{ topic: topic, group: group, consumer: consumer, padding: 0, partitionHeads: make(map[int32]*ackNode), ackMessages: make(map[int32]map[int64]*ackNode), }, nil }
func NewMetadata(config *config.Config, sconfig *sarama.Config) (*Metadata, error) { zkClient, err := zookeeper.NewZkClient(strings.Split(config.MetaDataZKAddr, ",")) if err != nil { return nil, errors.Trace(err) } zkRoot := config.MetaDataZKRoot if strings.EqualFold(zkRoot, root) { zkRoot = "" } groupConfigPath := fmt.Sprintf("%s%s", zkRoot, groupConfigPathSuffix) queuePath := fmt.Sprintf("%s%s", zkRoot, queuePathSuffix) servicePath := fmt.Sprintf("%s%s", zkRoot, servicePathPrefix) operationPath := fmt.Sprintf("%s%s", zkRoot, operationPathPrefix) err = zkClient.CreateRec(groupConfigPath, "", 0) if err != nil && err != zk.ErrNodeExists { return nil, errors.Trace(err) } err = zkClient.CreateRec(queuePath, "", 0) if err != nil && err != zk.ErrNodeExists { return nil, errors.Trace(err) } err = zkClient.CreateRec(servicePath, "", 0) if err != nil && err != zk.ErrNodeExists { return nil, errors.Trace(err) } err = zkClient.CreateRec(operationPath, "", 0) if err != nil && err != zk.ErrNodeExists { return nil, errors.Trace(err) } manager, err := kafka.NewManager(strings.Split(config.KafkaZKAddr, ","), config.KafkaZKRoot, sconfig) if err != nil { return nil, errors.Trace(err) } metadata := &Metadata{ config: config, zkClient: zkClient, manager: manager, groupConfigPath: groupConfigPath, queuePath: queuePath, servicePath: servicePath, operationPath: operationPath, queueConfigs: make(map[string]QueueConfig), closeCh: make(chan struct{}), } err = metadata.RefreshMetadata() if err != nil { return nil, errors.Trace(err) } go func(m *Metadata) { timeout := time.NewTicker(sconfig.Metadata.RefreshFrequency) for { select { case <-timeout.C: err := m.RefreshMetadata() if err != nil { log.Warnf("timeout refresh metadata err : %s", err) } case <-m.closeCh: timeout.Stop() return } } }(metadata) return metadata, nil }
func (m *Metadata) RefreshMetadata() error { queueConfigs := make(map[string]QueueConfig) err := m.manager.RefreshMetadata() if err != nil { return errors.Trace(err) } queues, _, err := m.zkClient.Children(m.queuePath) if err != nil { return errors.Trace(err) } for _, queue := range queues { _, stat, err := m.zkClient.Get(m.buildQueuePath(queue)) if err != nil { log.Errorf("refresh err : %s", err) return errors.Trace(err) } exist, err := m.manager.ExistTopic(queue) if err != nil { log.Errorf("refresh err : %s", err) return errors.Trace(err) } if !exist { log.Errorf("queue : %q has metadata, but has no topic") continue } queueConfigs[queue] = QueueConfig{ Queue: queue, Ctime: stat.Ctime / 1e3, Length: 0, Groups: make(map[string]GroupConfig), } } groupKeys, _, err := m.zkClient.Children(m.groupConfigPath) if err != nil { return errors.Trace(err) } for _, groupKey := range groupKeys { tokens := strings.Split(groupKey, ".") if len(tokens) != 2 { continue } queueName, groupName := tokens[1], tokens[0] queue, ok := queueConfigs[queueName] if !ok { continue } groupDataPath := fmt.Sprintf("%s/%s", m.groupConfigPath, groupKey) groupData, _, err := m.zkClient.Get(groupDataPath) if err != nil { log.Warnf("get %s err: %s", groupDataPath, err) continue } groupConfig := GroupConfig{} err = json.Unmarshal(groupData, &groupConfig) if err != nil { log.Warnf("Unmarshal %s data err: %s", groupDataPath, err) continue } queue.Groups[groupName] = groupConfig } m.mu.Lock() m.queueConfigs = queueConfigs m.mu.Unlock() return nil }