func SendMockOnce() int {
	if !sema.TryAcquire() {
		return -1
	}
	defer sema.Release()

	// not enabled
	if !g.Config().Sender.Enabled {
		return 0
	}

	start := time.Now().Unix()
	cnt, _ := sendMock()
	end := time.Now().Unix()
	if g.Config().Debug {
		log.Printf("sender cron, cnt %d, time %ds, start %s", cnt, end-start, ttime.FormatTs(start))
	}

	// statistics
	g.SenderCronCnt.Incr()
	g.SenderLastTs.SetCnt(end - start)
	g.SenderCnt.IncrBy(int64(cnt))

	return cnt
}
// internal
func makeDbConn() (conn *sql.DB, err error) {
	conn, err = sql.Open("mysql", g.Config().Config.Dsn)
	if err != nil {
		return nil, err
	}

	conn.SetMaxIdleConns(int(g.Config().Config.MaxIdle))
	err = conn.Ping()

	return conn, err
}
func getThreshold() int32 {
	lock.RLock()
	defer lock.RUnlock()

	cfg := g.Config().Sender.Block
	if avg < 0 { // gauss not inited
		return cfg.Threshold
	}

	// 3-sigma
	dt_gauss_3sigma_min := cfg.Gauss3SigmaMin
	dt_gauss_3sigma_max := cfg.Gauss3SigmaMax
	threeSigma := 3 * dev
	if threeSigma < dt_gauss_3sigma_min {
		threeSigma = dt_gauss_3sigma_min
	} else if threeSigma > dt_gauss_3sigma_max {
		threeSigma = dt_gauss_3sigma_max
	}

	// threshold
	gaussThreshold := int32(math.Ceil(avg + threeSigma))
	if gaussThreshold < cfg.Threshold {
		gaussThreshold = cfg.Threshold
	}

	return gaussThreshold
}
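A minimal, self-contained sketch of the same clamping logic, with made-up values for avg, dev and the config bounds (none of these are the module's real config defaults), to show how the Gaussian threshold is clamped and then falls back to the static threshold when it is too small:

package main

import (
	"fmt"
	"math"
)

// clampedThreshold mirrors the 3-sigma clamp in getThreshold with plain arguments.
func clampedThreshold(avg, dev, sigmaMin, sigmaMax float64, static int32) int32 {
	threeSigma := 3 * dev
	if threeSigma < sigmaMin {
		threeSigma = sigmaMin
	} else if threeSigma > sigmaMax {
		threeSigma = sigmaMax
	}
	t := int32(math.Ceil(avg + threeSigma))
	if t < static {
		t = static
	}
	return t
}

func main() {
	// avg=12.4, dev=1.8 -> 3*dev=5.4 within [2, 20] -> ceil(12.4+5.4) = 18
	fmt.Println(clampedThreshold(12.4, 1.8, 2, 20, 10)) // 18
	// tiny deviation -> 3*dev clamped up to sigmaMin=2 -> ceil(12.4+2) = 15
	fmt.Println(clampedThreshold(12.4, 0.1, 2, 20, 10)) // 15
}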
func configCommonRoutes() {
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	})

	http.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(fmt.Sprintf("%s\n", g.VERSION)))
	})

	http.HandleFunc("/workdir", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(fmt.Sprintf("%s\n", file.SelfDir())))
	})

	http.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) {
		RenderDataJson(w, g.Config())
	})

	http.HandleFunc("/config/reload", func(w http.ResponseWriter, r *http.Request) {
		if strings.HasPrefix(r.RemoteAddr, "127.0.0.1") {
			g.ParseConfig(g.ConfigFile)
			RenderDataJson(w, "ok")
		} else {
			RenderDataJson(w, "no privilege")
		}
	})
}
func Start() {
	if !g.Config().Sender.Enabled {
		log.Println("sender.Start warning, not enabled")
		return
	}
	startGaussCron()
	log.Println("sender.Start ok")
}
func Start() {
	if !g.Config().Collector.Enabled {
		log.Println("collector.Start warning, not enabled")
		return
	}
	StartCollectorCron()
	log.Println("collector.Start ok")
}
func startGaussCron() {
	if !g.Config().Sender.Block.EnableGauss {
		log.Println("sender.StartGaussCron warning, not enabled")
		return
	}

	// start gauss cron
	gaussCron.AddFuncCC("40 */20 * * * ?", func() {
		start := time.Now().Unix()
		cnt := calcGaussOnce()
		end := time.Now().Unix()
		if g.Config().Debug {
			log.Printf("gauss cron, cnt %d, time %ds, start %s\n", cnt, end-start, ttime.FormatTs(start))
		}
	}, 1)
	gaussCron.Start()

	log.Println("sender.StartGaussCron ok")
}
func Start() {
	if !g.Config().Config.Enabled {
		log.Println("config.Start warning, not enabled")
		return
	}
	service.InitDB()
	StartNdConfigCron()
	log.Println("config.Start ok")
}
func startHttpServer() {
	if !g.Config().Http.Enabled {
		return
	}

	addr := g.Config().Http.Listen
	if addr == "" {
		return
	}

	configRoutes()

	s := &http.Server{
		Addr:           addr,
		MaxHeaderBytes: 1 << 30,
	}

	log.Println("http.startHttpServer ok, listening", addr)
	log.Fatalln(s.ListenAndServe())
}
func collectDataOnce() int {
	keys := config.Keys()
	keysLen := len(keys)

	// concurrency and synchronization control
	cfg := g.Config().Collector
	concurrent := int(cfg.Concurrent)
	if concurrent < 1 || concurrent > 50 {
		concurrent = 10
	}
	sema := tsema.NewSemaphore(concurrent)

	batch := int(cfg.Batch)
	if batch < 100 || batch > 1000 {
		batch = 200 // batch must not be too small, otherwise the result channel gets very large
	}
	batchCnt := (keysLen + batch - 1) / batch
	rch := make(chan int, batchCnt+1)

	i := 0
	for i < keysLen {
		leftLen := keysLen - i
		fetchSize := batch // handle at most batch configs per request
		if leftLen < fetchSize {
			fetchSize = leftLen
		}
		fetchKeys := keys[i : i+fetchSize]

		// collect data concurrently
		sema.Acquire()
		go func(keys []string, keySize int) {
			defer sema.Release()
			size, _ := fetchItemsAndStore(keys, keySize)
			rch <- size
		}(fetchKeys, fetchSize)

		i += fetchSize
	}

	collectCnt := 0
	for i := 0; i < batchCnt; i++ {
		collectCnt += <-rch
	}

	return collectCnt
}
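A self-contained sketch of the same bounded fan-out pattern, using a buffered channel as the semaphore (the module's tsema.Semaphore is assumed to behave like acquire/release below) and a stand-in for fetchItemsAndStore:

package main

import "fmt"

// processBatch stands in for fetchItemsAndStore; here it just reports the batch size.
func processBatch(keys []string) int { return len(keys) }

func main() {
	keys := make([]string, 1050)
	batch, concurrent := 200, 10

	sema := make(chan struct{}, concurrent) // bounded concurrency
	batchCnt := (len(keys) + batch - 1) / batch // ceiling division, as in collectDataOnce
	rch := make(chan int, batchCnt)

	for i := 0; i < len(keys); i += batch {
		end := i + batch
		if end > len(keys) {
			end = len(keys)
		}
		part := keys[i:end]

		sema <- struct{}{} // acquire
		go func(part []string) {
			defer func() { <-sema }() // release
			rch <- processBatch(part)
		}(part)
	}

	total := 0
	for i := 0; i < batchCnt; i++ {
		total += <-rch
	}
	fmt.Println(batchCnt, total) // 6 1050: five full batches plus one of 50
}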
func calcGaussOnce() int {
	values := fetchRawItems()
	size := len(values)
	if size < 100 {
		return size
	}

	// gauss
	myavg, mydev := gaussDistribution(values)

	// filter
	nvals := make([]float64, 0)
	filter := mydev
	if filter < g.Config().Sender.Block.GaussFilter { // avoid filtering too aggressively
		filter = g.Config().Sender.Block.GaussFilter
	}
	for _, val := range values {
		if (val-myavg) > filter || val-myavg < (-filter) {
			continue
		}
		nvals = append(nvals, val)
	}
	if len(nvals) < 100 {
		return size
	}

	// gauss
	myavg, mydev = gaussDistribution(nvals)

	lock.Lock()
	defer lock.Unlock()
	avg = myavg
	dev = mydev
	log.Printf("gauss status, avg %f, dev %f\n", avg, dev)

	return size
}
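gaussDistribution itself is not shown in this section; a minimal sketch of what it is assumed to compute (sample mean and standard deviation) is given below. The real implementation in the module may differ in detail.

package main

import (
	"fmt"
	"math"
)

// gaussDistribution, as assumed here, returns the mean and standard deviation
// of the samples.
func gaussDistribution(values []float64) (avg, dev float64) {
	n := float64(len(values))
	if n == 0 {
		return 0, 0
	}
	for _, v := range values {
		avg += v
	}
	avg /= n

	for _, v := range values {
		dev += (v - avg) * (v - avg)
	}
	dev = math.Sqrt(dev / n)
	return avg, dev
}

func main() {
	fmt.Println(gaussDistribution([]float64{10, 12, 14})) // 12 1.6329931618554518
}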
func StartNdConfigCron() {
	ndconfigCron.AddFuncCC(ndconfigCronSpec, func() {
		start := time.Now().Unix()
		cnt, _ := syncNdConfig()
		end := time.Now().Unix()
		if g.Config().Debug {
			log.Printf("config cron, cnt %d, time %ds, start %s\n", cnt, end-start, ttime.FormatTs(start))
		}

		// statistics
		g.ConfigCronCnt.Incr()
		g.ConfigLastTs.SetCnt(end - start)
		g.ConfigLastCnt.SetCnt(int64(cnt))
	}, 1)
	ndconfigCron.Start()
}
func StartCollectorCron() {
	collectorCron.AddFuncCC("*/20 * * * * ?", func() {
		start := time.Now().Unix()
		cnt := collectDataOnce()
		end := time.Now().Unix()
		if g.Config().Debug {
			log.Printf("collect cron, cnt %d, time %ds, start %s\n", cnt, end-start, ttime.FormatTs(start))
		}

		// statistics
		g.CollectorCronCnt.Incr()
		g.CollectorLastTs.SetCnt(end - start)
		g.CollectorLastCnt.SetCnt(int64(cnt))
		g.CollectorCnt.IncrBy(int64(cnt))
	}, 1)
	collectorCron.Start()
}
func StartJudgeCron() {
	judgeCron.AddFuncCC(judgeCronSpec, func() {
		start := time.Now().Unix()
		judge()
		end := time.Now().Unix()
		if g.Config().Debug {
			log.Printf("judge cron, time %ds, start %s\n", end-start, ttime.FormatTs(start))
		}

		// statistics
		g.JudgeCronCnt.Incr()
		g.JudgeLastTs.SetCnt(end - start)

		// trigger sender
		sender.SendMockOnceAsync()
	}, 1)
	judgeCron.Start()
}
func sendItemsToTransfer(items []*cmodel.JsonMetaData, size int, httpcliname string, connT, reqT time.Duration) (cnt int, errt error) {
	if size < 1 {
		return
	}

	cfg := g.Config()
	transUrl := fmt.Sprintf("http://%s/api/push", cfg.Sender.TransferAddr)
	hcli := thttpclient.GetHttpClient(httpcliname, connT, reqT)

	// form request args
	itemsBody, err := json.Marshal(items)
	if err != nil {
		log.Println(transUrl+", format body error,", err)
		errt = err
		return
	}

	// send items
	req, err := http.NewRequest("POST", transUrl, bytes.NewBuffer(itemsBody))
	if err != nil {
		log.Println(transUrl+", new request error,", err)
		errt = err
		return
	}
	req.Header.Set("Content-Type", "application/json; charset=UTF-8")
	req.Header.Set("Connection", "close")

	postResp, err := hcli.Do(req)
	if err != nil {
		log.Println(transUrl+", post to dest error,", err)
		errt = err
		return
	}
	defer postResp.Body.Close()

	if postResp.StatusCode/100 != 2 {
		log.Println(transUrl+", post to dest, bad response,", postResp.Status)
		errt = fmt.Errorf("request failed, %s", postResp.Status)
		return
	}

	return size, nil
}
func sendMock() (cnt int, errt error) {
	// check nodata flood
	cfgsize := config.Size()
	ndsize := MockMap.Size()
	if cfgsize < 1 {
		return
	}
	rate := int32(100 * ndsize / cfgsize)
	threshold := getThreshold()
	if g.Config().Debug {
		log.Printf("nodata threshold: %d", threshold)
	}

	// statistics
	g.FloodRate.SetCnt(int64(rate))
	g.Threshold.SetCnt(int64(threshold))

	if g.Config().Sender.Block.Enabled {
		if rate > threshold || g.Config().Sender.Block.SetBlock {
			// nodata flooding, blocking
			log.Printf("nodata blocking: flood rate %d, threshold %d", int(rate), int(threshold))
			// statistics
			g.Blocking.SetCnt(1)
			// clear send buffer
			MockMap.Clear()
			return 0, nil
		}
	}
	// statistics
	g.Blocking.SetCnt(0)

	cfg := g.Config().Sender
	batch := int(cfg.Batch)
	connTimeout := cfg.ConnectTimeout
	requTimeout := cfg.RequestTimeout

	// send mock to transfer
	mocks := MockMap.Slice()
	MockMap.Clear()
	mockSize := len(mocks)
	i := 0
	for i < mockSize {
		leftLen := mockSize - i
		sendSize := batch
		if leftLen < sendSize {
			sendSize = leftLen
		}
		fetchMocks := mocks[i : i+sendSize]
		i += sendSize

		items := make([]*cmodel.JsonMetaData, 0)
		for _, val := range fetchMocks {
			if val == nil {
				continue
			}
			items = append(items, val.(*cmodel.JsonMetaData))
		}

		cntonce, err := sendItemsToTransfer(items, len(items), "nodata.mock",
			time.Millisecond*time.Duration(connTimeout), time.Millisecond*time.Duration(requTimeout))
		if err == nil {
			if g.Config().Debug {
				log.Println("send items:", items)
			}
			cnt += cntonce
		}
	}

	return cnt, nil
}
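The blocking decision above boils down to an integer percentage compared against the (possibly Gaussian) threshold. A standalone sketch with made-up sizes, purely for illustration:

package main

import "fmt"

// shouldBlock mirrors the flood check in sendMock: block when the share of
// nodata endpoints (as an integer percentage) exceeds the threshold, or when
// blocking is forced by config.
func shouldBlock(cfgSize, ndSize int, threshold int32, forceBlock bool) bool {
	if cfgSize < 1 {
		return false
	}
	rate := int32(100 * ndSize / cfgSize)
	return rate > threshold || forceBlock
}

func main() {
	fmt.Println(shouldBlock(1000, 50, 10, false))  // false: 5% <= 10
	fmt.Println(shouldBlock(1000, 150, 10, false)) // true: 15% > 10
	fmt.Println(shouldBlock(1000, 50, 10, true))   // true: forced by config
}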
func fetchItemsAndStore(fetchKeys []string, fetchSize int) (size int, errt error) {
	if fetchSize < 1 {
		return
	}

	cfg := g.Config()
	queryUrl := fmt.Sprintf("http://%s/graph/last", cfg.Query.QueryAddr)
	hcli := thttpclient.GetHttpClient("nodata.collector",
		time.Millisecond*time.Duration(cfg.Query.ConnectTimeout),
		time.Millisecond*time.Duration(cfg.Query.RequestTimeout))

	// form request args
	args := make([]*cmodel.GraphLastParam, 0)
	for _, key := range fetchKeys {
		ndcfg, found := config.GetNdConfig(key)
		if !found {
			continue
		}

		endpoint := ndcfg.Endpoint
		counter := cutils.Counter(ndcfg.Metric, ndcfg.Tags)
		arg := &cmodel.GraphLastParam{Endpoint: endpoint, Counter: counter}
		args = append(args, arg)
	}
	if len(args) < 1 {
		return
	}

	argsBody, err := json.Marshal(args)
	if err != nil {
		log.Println(queryUrl+", format body error,", err)
		errt = err
		return
	}

	// fetch items
	req, err := http.NewRequest("POST", queryUrl, bytes.NewBuffer(argsBody))
	if err != nil {
		log.Println(queryUrl+", new request error,", err)
		errt = err
		return
	}
	req.Header.Set("Content-Type", "application/json; charset=UTF-8")
	req.Header.Set("Connection", "close")

	postResp, err := hcli.Do(req)
	if err != nil {
		log.Println(queryUrl+", post to dest error,", err)
		errt = err
		return
	}
	defer postResp.Body.Close()

	if postResp.StatusCode/100 != 2 {
		log.Println(queryUrl+", post to dest, bad response,", postResp.Status)
		errt = fmt.Errorf("request failed, %s", postResp.Status)
		return
	}

	body, err := ioutil.ReadAll(postResp.Body)
	if err != nil {
		log.Println(queryUrl+", read response error,", err)
		errt = err
		return
	}

	resp := []*cmodel.GraphLastResp{}
	err = json.Unmarshal(body, &resp)
	if err != nil {
		log.Println(queryUrl+", unmarshal error,", err)
		errt = err
		return
	}

	// store items
	fts := time.Now().Unix()
	for _, glr := range resp {
		//log.Printf("collect:%v\n", glr)
		if glr == nil || glr.Value == nil {
			continue
		}
		AddItem(cutils.PK2(glr.Endpoint, glr.Counter),
			NewDataItem(glr.Value.Timestamp, float64(glr.Value.Value), "OK", fts))
	}

	return len(resp), nil
}
func fetchRawItems() (values []float64) {
	cfg := g.Config()
	queryUrl := fmt.Sprintf("http://%s/graph/history", cfg.Query.QueryAddr)
	hcli := thttpclient.GetHttpClient("nodata.gauss",
		time.Millisecond*time.Duration(cfg.Query.ConnectTimeout),
		time.Millisecond*time.Duration(cfg.Query.RequestTimeout))

	// form request args
	nowTs := time.Now().Unix()
	endTs := nowTs - nowTs%1200
	startTs := endTs - 24*3600*5 // use 5 days of data for the gaussian fit

	hostname, _ := os.Hostname()
	if len(cfg.Sender.Block.Hostname) > 0 {
		hostname = cfg.Sender.Block.Hostname
	}
	fcounter := "FloodRate/module=nodata,pdl=falcon,port=6090,type=statistics"
	if len(cfg.Sender.Block.FloodCounter) > 0 {
		fcounter = cfg.Sender.Block.FloodCounter
	}

	endpointCounters := make([]cmodel.GraphInfoParam, 0)
	endpointCounters = append(endpointCounters, cmodel.GraphInfoParam{Endpoint: hostname, Counter: fcounter})
	args := GraphHistoryParam{Start: startTs, End: endTs, CF: "AVERAGE", EndpointCounters: endpointCounters}

	argsBody, err := json.Marshal(args)
	if err != nil {
		log.Println(queryUrl+", format body error,", err)
		return
	}

	// fetch items
	req, err := http.NewRequest("POST", queryUrl, bytes.NewBuffer(argsBody))
	if err != nil {
		log.Println(queryUrl+", new request error,", err)
		return
	}
	req.Header.Set("Content-Type", "application/json; charset=UTF-8")
	req.Header.Set("Connection", "close")

	postResp, err := hcli.Do(req)
	if err != nil {
		log.Println(queryUrl+", post to dest error,", err)
		return
	}
	defer postResp.Body.Close()

	if postResp.StatusCode/100 != 2 {
		log.Println(queryUrl+", post to dest, bad response,", postResp.Status)
		return
	}

	body, err := ioutil.ReadAll(postResp.Body)
	if err != nil {
		log.Println(queryUrl+", read response error,", err)
		return
	}

	resp := make([]*cmodel.GraphQueryResponse, 0)
	err = json.Unmarshal(body, &resp)
	if err != nil {
		log.Println(queryUrl+", unmarshal error,", err)
		return
	}
	if len(resp) != 1 || resp[0] == nil {
		return
	}

	// store items
	values = make([]float64, 0)
	for _, glr := range resp[0].Values {
		if glr == nil || math.IsNaN(float64(glr.Value)) {
			continue
		}
		values = append(values, float64(glr.Value))
	}

	return values
}
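The query window above is aligned down to a 1200s (20 minute) boundary and looks back 5 days. A small illustration of that arithmetic, using an arbitrary timestamp:

package main

import (
	"fmt"
	"time"
)

func main() {
	// align the end of the window down to a 20-minute boundary,
	// then look back 5 days, as fetchRawItems does
	nowTs := time.Date(2015, 7, 1, 10, 37, 25, 0, time.UTC).Unix()
	endTs := nowTs - nowTs%1200
	startTs := endTs - 24*3600*5

	fmt.Println(time.Unix(endTs, 0).UTC())   // 2015-07-01 10:20:00 +0000 UTC
	fmt.Println(time.Unix(startTs, 0).UTC()) // 2015-06-26 10:20:00 +0000 UTC
}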