func init() { file, _ := os.Open("conf/app.json") defer file.Close() decoder := json.NewDecoder(file) AppConf = App{} err := decoder.Decode(&AppConf) if err != nil { ara.Logger().Debug("error: %v", err) } ara.Logger().Debug("%v", AppConf) }
func (c *Controller) initKc() (err error) { ara.Logger().Debug("init kafka client") conn := c.getKafkaConn() c.kc, err = kfk.NewClient(conn, kfk.NewConfig()) if err != nil { ara.Logger().Debug(err.Error()) return } c.kc.Config().ClientID = "jackdaw" ara.Logger().Debug("got a kafka client, closed: %v", c.kc.Closed()) return }
// init zookeeper connection func (c *Controller) initZc() (err error) { ara.Logger().Debug("init zookeeper client") if c.zc != nil && c.zc.State() != zk.StateHasSession { c.zc.Close() } c.zc, _, err = zk.Connect(utils.AppConf.Zookeepers, time.Second, c.loggerOption) if err != nil { return } ara.Logger().Debug("new zookeeper client state: %v", c.zc.State()) return }
func (c *Controller) getLogSize(topic string, pid int32) int64 { // from kafka if c.kc == nil || c.kc.Closed() { err := c.initKc() if err != nil { // TODO send a err message ara.Logger().Debug(err.Error()) panic(err) } } latestOffset, err := c.kc.GetOffset(topic, pid, kfk.OffsetNewest) if err != nil { // TODO send a err message ara.Logger().Debug(err.Error()) panic(err) } ara.Logger().Debug("last offset is: %d", latestOffset) return latestOffset }
// close zookeeper and kafka connection while shutdown func (c *Controller) Release() { if c.kc != nil { err := c.kc.Close() if err != nil { ara.Logger().Debug("close kafka connection failed: %s", err.Error()) } } if c.zc != nil { c.zc.Close() } }
func (c *Controller) ListTopics(w http.ResponseWriter, r *http.Request) { path := "/brokers/topics" topics, err := c.lsChildren(path) if err != nil { utils.WriteError(w, err) return } resp := make(map[string]string, len(topics)) for _, topic := range topics { resp[topic] = "/uri/here?abc=123" } encoder := json.NewEncoder(w) err = encoder.Encode(resp) // b, err := json.Marshal(topics) // if err != nil { // fmt.Println("error:", err) // } // w.Write(b) // from kafka client, err := kfk.NewClient([]string{"127.0.0.1:9092"}, kfk.NewConfig()) if err != nil { panic(err) } defer client.Close() client.Config().ClientID = "jackdaw" latestOffset, err := client.GetOffset("tpk001", 0, kfk.OffsetNewest) if err != nil { panic(err) } ara.Logger().Debug("$$$$$$: %d", latestOffset) // bts, stat, ch, err := zc.GetW(path) // if err != nil { // panic(err) // } // fmt.Printf("%s *** %+v\n", string(bts), stat) // e := <-ch // fmt.Printf("--- %+v\n", e) // if e.Type == zk.EventNodeDataChanged { // watchData(zc) // } }
// request url: /brokers // json from zk looks like: //get /brokers/ids/0 //{"jmx_port":-1,"timestamp":"1446347718036","host":"U","version":1,"port":9092} func (c *Controller) ListBrokers(w http.ResponseWriter, r *http.Request) { resp, err := c.getBrokers() if err != nil { utils.WriteError(w, err) return } encoder := json.NewEncoder(w) err = encoder.Encode(resp) ara.Logger().Debug("%v", resp) // b, err := json.Marshal(topics) // if err != nil { // fmt.Println("error:", err) // } // w.Write(b) }
func (c *Controller) getKafkaConn() []string { brokerMap, err := c.getBrokers() if err != nil { return nil } conn := make([]string, len(brokerMap)) for _, brokerJson := range brokerMap { var b Broker err := json.Unmarshal([]byte(brokerJson), &b) if err != nil { ara.Logger().Debug("error: %v", err) continue } conn = append(conn, b.Host+":"+strconv.Itoa(b.Port)) } return conn }
// implement interface zk.Conn.Logger // TODO output not right: /controller.go:69: Authenticated: id=[94863294880088115 6000], timeout=%!d(MISSING) func (l appLogger) Printf(s string, v ...interface{}) { ara.Logger().Debug(s, v) }
// ListGroups writes a JSON document describing every consumer group:
//
//   map[group]map[topic]map[partition]{"offset", "logSize", "lag"}
//
// ZooKeeper layout read here:
//   /consumers                                   -> group names
//   /consumers/<g>/offsets                       -> topic names
//   /consumers/<g>/offsets/<t>                   -> partition ids
//   /consumers/<g>/offsets/<t>/<p>  (node data)  -> committed offset
//
// Responds 404 when no consumer group exists. The log size per partition
// comes from Kafka via getLogSize; lag = logSize - committed offset.
//
// BUG FIX: the original never checked the error from parsing offsetStr,
// so a corrupt offset znode produced a garbage lag of logSize-0; the error
// is now reported to the client. Dead commented-out code removed.
func (c *Controller) ListGroups(w http.ResponseWriter, r *http.Request) {
	gPath := "/consumers"
	groups, err := c.lsChildren(gPath)
	if err != nil {
		utils.WriteError(w, err)
		return
	}
	ara.Logger().Debug("groups: %v", groups)
	// No group means no consumer is running: report 404.
	if len(groups) == 0 {
		ara.Logger().Debug("return 404")
		utils.Write404(w)
		return
	}
	resp := make(map[string]map[string]map[string]map[string]string, len(groups))
	for _, g := range groups {
		topicsPath := path.Join(gPath, g, "offsets")
		topics, err := c.lsChildren(topicsPath)
		if err != nil {
			utils.WriteError(w, err)
			return
		}
		topicMap := make(map[string]map[string]map[string]string, len(topics))
		for _, topic := range topics {
			partitionPath := path.Join(topicsPath, topic)
			partitions, err := c.lsChildren(partitionPath)
			if err != nil {
				utils.WriteError(w, err)
				return
			}
			pMap := make(map[string]map[string]string, len(partitions))
			for _, pidStr := range partitions {
				// The partition znode's data is the committed offset.
				offsetStr, err := c.getChildren(path.Join(partitionPath, pidStr))
				if err != nil {
					utils.WriteError(w, err)
					return
				}
				pid64, err := strconv.ParseInt(pidStr, 10, 32)
				if err != nil {
					utils.WriteError(w, err)
					return
				}
				offset, err := strconv.ParseInt(offsetStr, 10, 64)
				if err != nil {
					utils.WriteError(w, err)
					return
				}
				logSize := c.getLogSize(topic, int32(pid64))
				pMap[pidStr] = map[string]string{
					"offset":  offsetStr,
					"logSize": strconv.FormatInt(logSize, 10),
					"lag":     strconv.FormatInt(logSize-offset, 10),
				}
			}
			topicMap[topic] = pMap
		}
		resp[g] = topicMap
	}
	if err := json.NewEncoder(w).Encode(resp); err != nil {
		ara.Logger().Debug(err.Error())
	}
}