// watcher blocks on reads from the tsdb connection so a server-side close is
// noticed quickly; on error it marks the client disconnected and pushes its
// url back onto t.hosts for reconnection.
func (t *TsdbPipeLine) watcher(c *Client) {
	defer c.wg.Done()
	if c.getStatus() == DISCONNECT {
		t.hosts <- c.url
		return
	}
	for {
		read := make([]byte, 1024)
		n, err := c.conn.Read(read)
		if t.isClosed() {
			return
		}
		if err != nil {
			logger.Printf("read from tsdb server error: %v => %v, close it", c.conn, err)
			c.conn.Close()
			c.setDisconnect()
			t.hosts <- c.url
			break
		}
		// Log only the bytes actually read, not the whole 1024-byte buffer.
		logger.Printf("receive from tsdb server: %s => %s", c.url, string(read[:n]))
	}
}
// NewSub connects to the configured NATS cluster and returns a queue subscriber.
func NewSub() *Subscriber {
	opts := nats.DefaultOptions
	for _, v := range strings.Split(config.NatsServers, ",") {
		opts.Servers = append(opts.Servers, fmt.Sprintf("nats://%s", v))
	}
	opts.MaxReconnect = -1
	opts.ReconnectWait = 5 * time.Second
	opts.PingInterval = 15 * time.Second
	// Register the connection callbacks before connecting so a failed initial
	// connect never dereferences a nil connection.
	opts.DisconnectedCB = func(nc *nats.Conn) {
		logger.Printf("Got disconnected! %v\n", nc.LastError())
	}
	opts.ReconnectedCB = func(nc *nats.Conn) {
		logger.Printf("Got reconnected to %v!\n", nc.ConnectedUrl())
	}
	opts.ClosedCB = func(nc *nats.Conn) {
		logger.Printf("Nats connection closed!! err: %+v\n", nc.LastError())
		os.Exit(1)
	}
	nc, err := opts.Connect()
	if err != nil {
		logger.Printf("Can't connect: %v\n", err)
		os.Exit(1)
	}
	return &Subscriber{
		NatsConn: nc,
		Group:    config.SubGroupName,
		Topic:    config.SubTopicName,
	}
}
// getDomainPath polls config.DomainBindUrl in the background and delivers
// each successfully parsed DomainBindInfo on the returned channel.
func getDomainPath() <-chan DomainBindInfo {
	domainBindChan := make(chan DomainBindInfo)
	go func() {
		for {
			resp, err := http.Get(config.DomainBindUrl)
			if err != nil {
				logger.Printf("get domain bindings failed with url: %s", config.DomainBindUrl)
				time.Sleep(time.Second)
				continue
			}
			body, err := ioutil.ReadAll(resp.Body)
			// Always close the body so the underlying connection can be reused.
			resp.Body.Close()
			if err != nil {
				time.Sleep(time.Second)
				continue
			}
			dp := DomainBindInfo{}
			if err = json.Unmarshal(body, &dp); err != nil {
				logger.Printf("unmarshal domain binding json data failed: %+v\n", err.Error())
				time.Sleep(time.Second)
				continue
			}
			domainBindChan <- dp
			time.Sleep(time.Second * 15)
		}
	}()
	return domainBindChan
}
// PublishToKafka drains the async producer's Successes and Errors channels
// and forwards queued messages from MessageChan to kafka.
func (p *Producer) PublishToKafka() {
	var (
		wg                sync.WaitGroup
		successes, errors int
	)
	wg.Add(1)
	go func() {
		defer wg.Done()
		for range p.Worker.Successes() {
			successes++
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for err := range p.Worker.Errors() {
			logger.Printf("error occurred when sending message to kafka: %s", err)
			errors++
		}
	}()
	for m := range p.MessageChan {
		go p.sendMsg(m)
	}
}
// newConn returns a Client for url, reusing an existing pool entry if there
// is one and honoring ReconnectWait between dial attempts.
func (t *TsdbPipeLine) newConn(url string) *Client {
	c := &Client{}
	t.RLock()
	client, ok := t.pool[url]
	t.RUnlock()
	if ok {
		// Wait for the old upload/watcher goroutines to exit before re-dialing.
		client.wg.Wait()
		c = client
		if past := time.Since(c.lastAttempt); past < t.Opts.ReconnectWait {
			time.Sleep(t.Opts.ReconnectWait - past)
		}
	} else {
		c.url = url
		c.setDisconnect()
	}
	conn, err := net.DialTimeout("tcp", url, t.Opts.ConnectTimeout)
	if err == nil {
		logger.Printf("connect to tsdb server: %v ok!\n", url)
		c.conn = conn
		c.setConnected()
	} else {
		logger.Printf("connect to tsdb server: %v failed: %v\n", url, err)
	}
	c.lastAttempt = time.Now()
	t.Lock()
	defer t.Unlock()
	t.pool[url] = c
	return c
}
// createMetricTags watches the yaml tag file and reloads the domain => tags
// mapping whenever the file has been modified recently.
func (r *Raper) createMetricTags(file string) {
	readLoad := make(chan struct{}, 1)
	// Trigger an initial load, then re-trigger whenever the file's mtime is fresh.
	readLoad <- struct{}{}
	go func() {
		for {
			if fi, err := os.Stat(file); err == nil {
				if time.Since(fi.ModTime()) < 30*time.Second {
					readLoad <- struct{}{}
				}
			}
			time.Sleep(25 * time.Second)
		}
	}()
	for {
		select {
		case <-readLoad:
			var out []AnalyzeTemplate
			content, err := ioutil.ReadFile(file)
			if err == nil {
				if err = yaml.Unmarshal(content, &out); err != nil {
					logger.Errorf("%s is illegal yaml format! %+v", content, err)
				}
			}
			if err != nil {
				time.Sleep(3 * time.Second)
				break
			}
			logger.Printf("load yaml file ok, result: %#v\n", out)
			r.mutexTags.Lock()
			for _, value := range out {
				r.totalDomainTags[value.Domain] = value.Tags
			}
			r.mutexTags.Unlock()
		case <-r.Dying():
			return
		}
	}
}
// NewPub creates an async kafka producer for the configured broker list.
func NewPub() *Producer {
	kafkaservers := strings.Split(config.KafkaServers, ",")
	conf := sarama.NewConfig()
	conf.Producer.Return.Successes = true
	work, err := sarama.NewAsyncProducer(kafkaservers, conf)
	if err != nil {
		logger.Printf("Can't connect to kafka broker: %s", err)
		os.Exit(1)
	}
	return &Producer{
		Worker:      work,
		MessageChan: make(chan []byte, 4096),
	}
}
func (t *TsdbPipeLine) upload(c *Client) {
	defer c.wg.Done()
	if c.getStatus() == DISCONNECT {
		return
	}
	for s := range t.MessageChan {
		if c.getStatus() == DISCONNECT {
			t.MessageChan <- s
			return
		}
		if _, err := c.conn.Write([]byte(s)); err != nil {
			t.MessageChan <- s
			logger.Printf("write:%s to tsdb:%s error:%#v\n", s, c.url, err)
		}
	}
}
func (r *Raper) createAndUploadData(domain string, pathList *domainPathList, dataInfo *specifiledDataInfo) {
	hostName, _ := os.Hostname()
	var conftags []string
	for {
		select {
		case <-time.After(time.Duration(dataInfo.sTime-utils.CurrentMilliSecond()+int64(config.UploadFrequency)*1000) * time.Millisecond):
			dataInfo.mutex.Lock()
			tsdbMap := dataInfo.data
			dataInfo.data = make(map[string]*TsdbPutInfo)
			dataInfo.sTime, _ = utils.CurrentStamp(config.UploadFrequency)
			dataInfo.mutex.Unlock()
			pathList.rmux.Lock()
			countMap := pathList.data
			pathList.data = make(map[string]int)
			pathList.rmux.Unlock()
			finalTsdbMap := make(domainMetricTsdbMap)
			paths := utils.UTF8Filter(AggregationPath(countMap))
			r.mutexBindPath.RLock()
			domainBinds := r.domainBindPath[domain]
			r.mutexBindPath.RUnlock()
			paths = utils.AppendListToList(domainBinds, paths)
			conftags = r.getDomainTags(domain)
			var total float64
			for key, tsdb := range tsdbMap {
				oldPath := tsdb.Tags["path"]
				tsdb.Tags["path"], _ = utils.FindConfigPath(oldPath, paths)
				_, newKey := utils.TagsToKey(conftags, tsdb.Tags)
				parts := strings.Split(key, "|")
				lastPart := parts[len(parts)-1]
				finalKey := newKey + "|" + lastPart
				if lastPart == config.UrlCodeMetric {
					total += tsdb.Value
				}
				if _, ok := finalTsdbMap[finalKey]; ok {
					finalTsdbMap[finalKey].Value += tsdbMap[key].Value
				} else {
					finalTsdbMap[finalKey] = tsdbMap[key]
				}
			}
			logger.Printf("domain: %s, total: %d\n", domain, int64(total))
			go func() {
				for _, v := range finalTsdbMap {
					var data string
					intPart, frac := math.Modf(v.Value)
					if frac == 0 {
						data = fmt.Sprintf("put %s %d %d consumer=%s", v.Metric, v.TimeStamp, int64(intPart), hostName)
					} else {
						data = fmt.Sprintf("put %s %d %.2f consumer=%s", v.Metric, v.TimeStamp, v.Value, hostName)
					}
					for key, value := range v.Tags {
						data += fmt.Sprintf(" %s=%s", key, value)
					}
					// put test.url.code 1440492540000 2000 domain=api.wandoujia.com path=/sre-test code=200
					data += "\n"
					r.tsdb.MessageChan <- data
				}
			}()
		case <-r.Dying():
			return
		}
	}
}
// distDomainMertic turns aggregated access-log records into per-metric
// counters and dispatches them to the per-domain update channel.
func (r *Raper) distDomainMertic() {
	r.domainsChanMap = make(map[string]chan domainMetricTsdbMap)
	for {
		select {
		case AD := <-r.aggreMessageChan:
			domain, data := AD.domain, AD.data
			dataInfo := make(domainMetricTsdbMap)
			r.mutexDomains.Lock()
			if !utils.StrInStrings(domain, r.domains) {
				r.domains = append(r.domains, domain)
			}
			r.mutexDomains.Unlock()
			configTags := r.getDomainTags(domain)
			tags, key := utils.TagsToKey(configTags, data)
			length, err := utils.ParseFloat64(data["length"])
			if err != nil {
				logger.Printf("parse length error: %+v\n", data)
				break
			}
			requestTime, err := utils.ParseFloat64(data["reqtime"])
			if err != nil {
				logger.Printf("request time parse error: %+v", data)
				requestTime = 0
			}
			upstreamTime, err := utils.ParseFloat64(data["upstream_resptime"])
			if err != nil {
				upstreamTime = requestTime
			}
			// Code 408 means the server timed out waiting for the client request,
			// and a fast 499 is the client going away early, so ignore both here.
			if c := data["code"]; c == "408" || (c == "499" && requestTime < config.Code499Timeout) {
				break
			}
			for _, v := range config.TotalUrlMetric {
				k := key + "|" + v
				if _, ok := dataInfo[k]; !ok {
					dataInfo[k] = &TsdbPutInfo{
						Metric: v,
						Tags:   tags,
					}
				}
			}
			dataInfo[key+"|"+config.UrlQpsMetric].Value += 1 / float64(config.UploadFrequency)
			dataInfo[key+"|"+config.UrlCodeMetric].Value += 1
			dataInfo[key+"|"+config.UrlUpstreamMetric].Value += upstreamTime * 1000
			dataInfo[key+"|"+config.UrlTimeMetric].Value += requestTime * 1000
			dataInfo[key+"|"+config.UrlTrafficMetric].Value += length / float64(config.UploadFrequency)
			// Lazily create the per-domain channel and worker, double-checking
			// under the write lock to avoid racing with other senders.
			r.mutexDomainMap.RLock()
			channel, ok := r.domainsChanMap[domain]
			r.mutexDomainMap.RUnlock()
			if !ok {
				r.mutexDomainMap.Lock()
				if _, ok := r.domainsChanMap[domain]; !ok {
					r.domainsChanMap[domain] = make(chan domainMetricTsdbMap)
					go r.updateDomainMetric(domain, r.domainsChanMap[domain])
				}
				channel = r.domainsChanMap[domain]
				r.mutexDomainMap.Unlock()
			}
			channel <- dataInfo
		case <-r.Dying():
			return
		}
	}
}
// analyzeMessage parses one nginx access-log line into a key/value map and
// forwards it for aggregation; lines with status 499 and above are also
// republished to kafka for further inspection.
func (r *Raper) analyzeMessage(JsonObj sub.LogJsonObj) {
	dataMap := make(map[string]string)
	line := strings.SplitN(JsonObj.LineText, "\"", 3)
	if len(line) < 3 {
		logger.Printf("illegal line:%+v from:%+v", JsonObj.LineText, JsonObj.HostName)
		return
	}
	right := strings.Replace(line[2], "\"", "", -1)
	logs := utils.TripStringsBlank(strings.Split(right, " "))
	middle := strings.Split(line[1], " ")
	if len(logs) < 2 {
		logger.Printf("illegal right:%+v from:%+v", JsonObj.LineText, JsonObj.HostName)
		return
	} else if len(middle) < 3 {
		logger.Printf("illegal middle:%+v from:%+v", JsonObj.LineText, JsonObj.HostName)
		return
	}
	method, protocol := middle[0], middle[len(middle)-1]
	if method == "-" {
		return
	}
	path := strings.Split(middle[1], "?")[0]
	code, length := logs[0], logs[1]
	// Republish requests with http code 499 and above to kafka.
	v_code, _ := utils.ParseInt64(code)
	if v_code >= 499 {
		v_data, _ := json.Marshal(JsonObj)
		r.producer.MessageChan <- v_data
	}
	for _, leftPart := range logs[2:] {
		if !strings.Contains(leftPart, "=") {
			continue
		}
		// Split on the first '=' only so values containing '=' stay intact.
		kvPair := strings.SplitN(leftPart, "=", 2)
		if kvPair[1] == "-" {
			continue
		}
		dataMap[kvPair[0]] = kvPair[1]
	}
	if _, ok := dataMap["host"]; !ok {
		// logger.Printf("illegal domain:%+v from:%+v:%+v", v.LineText, v.HostName, v.FileName)
		return
	}
	dataMap["method"] = method
	dataMap["proto"] = protocol
	dataMap["path"] = path
	dataMap["code"] = code
	dataMap["length"] = length
	dataMap["domain"] = dataMap["host"]
	dataMap["source"] = JsonObj.HostName
	delete(dataMap, "host")
	dataMap = utils.CheckLegalChar(dataMap)
	r.aggreMessageChan <- &AggregationData{domain: dataMap["domain"], data: dataMap}
}