// match matches a metric with the filter rules and returns the matched rules.
// Details:
//	1. If no rules matched, return false.
//	2. If any black pattern matched, return false.
//	3. Else, return true and the matched rules.
func (d *Detector) match(m *models.Metric) (bool, []*models.Rule) {
	// Check rules.
	timer := util.NewTimer() // Filter timer
	rules := d.flt.MatchedRules(m)
	elapsed := timer.Elapsed()
	health.AddFilterCost(elapsed)
	if len(rules) == 0 {
		// Hit no rules.
		return false, rules
	}
	// Check blacklist.
	for _, p := range d.cfg.Detector.BlackList {
		ok, err := filepath.Match(p, m.Name)
		if err != nil {
			// Invalid black pattern.
			log.Errorf("invalid black pattern: %s, %v", p, err)
			continue
		}
		if ok {
			// Hit black pattern.
			log.Debugf("%s hit black pattern %s", m.Name, p)
			return false, rules
		}
	}
	return true, rules // OK
}
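// blacklistMatchExample is an illustrative sketch, not part of the detector:
// it shows how the filepath.Match calls above behave for glob-style black
// patterns. Since '.' is not a path separator, a single '*' can span several
// dot-delimited metric name segments. The patterns and metric names below
// are hypothetical.
func blacklistMatchExample() {
	ok, _ := filepath.Match("timer.count_ps.*", "timer.count_ps.api.login")
	log.Debugf("trailing wildcard spans segments: %v", ok) // true
	ok, _ = filepath.Match("counter.*.errors", "counter.api.errors")
	log.Debugf("middle wildcard: %v", ok) // true
	_, err := filepath.Match("timer.[count_ps", "timer.count_ps.api")
	log.Debugf("malformed pattern: %v", err) // filepath.ErrBadPattern; match above logs it and continues
}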
// Process the input metric.
//
// 1. Match the metric with rules.
// 2. Detect the metric with the matched rules.
//
func (d *Detector) process(m *models.Metric) {
	health.IncrNumMetricIncomed(1)
	timer := util.NewTimer()
	// Match
	ok, rules := d.match(m)
	if !ok {
		// Not matched.
		return
	}
	// Detect
	err := d.detect(m, rules)
	if err != nil {
		log.Errorf("detect: %v, skipping..", err)
		return
	}
	health.IncrNumMetricDetected(1)
	// Output
	if len(m.TestedRules) > 0 {
		// Test ok.
		d.output(m)
	}
	// Time end.
	elapsed := timer.Elapsed()
	if elapsed > timeout {
		log.Warnf("detection is slow: %.2fms", elapsed)
	}
	health.AddDetectionCost(elapsed)
}
// process handles the input metric.
// Steps:
// 1. Match the metric with all rules.
// 2. Detect the metric with the matched rules.
// 3. Output detection results to receivers.
func (d *Detector) process(m *models.Metric, shouldAdjustIdle bool) {
	health.IncrNumMetricIncomed(1)
	timer := util.NewTimer() // Detection cost timer
	// Match
	ok, rules := d.match(m)
	if !ok {
		return
	}
	if shouldAdjustIdle {
		d.adjustIdleM(m, rules)
	}
	// Detect
	evs, err := d.detect(m, rules)
	if err != nil {
		log.Errorf("detect: %v, skipping..", err)
		return
	}
	health.IncrNumMetricDetected(1)
	// Output
	for _, ev := range evs {
		d.output(ev)
	}
	// Time end.
	elapsed := timer.Elapsed()
	if elapsed > float64(d.cfg.Detector.WarningTimeout) {
		log.Warnf("detection is slow: %.2fms", elapsed)
	}
	health.AddDetectionCost(elapsed)
}
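// processLoopSketch is a minimal, hypothetical sketch (not the project's real
// input path) of how process above might be driven: metrics arrive on a
// channel and each one goes through match -> detect -> output in turn. The
// channel parameter and the idle-adjustment flag value are assumptions made
// for illustration only.
func (d *Detector) processLoopSketch(in <-chan *models.Metric) {
	for m := range in {
		d.process(m, true)
	}
}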
// values gets history values for the input metric. It only fetches the
// history values with the same phase around this timestamp, within a filter
// offset.
func (d *Detector) values(m *models.Metric, fz bool) ([]float64, error) {
	timer := util.NewTimer()
	defer func() {
		elapsed := timer.Elapsed()
		health.AddQueryCost(elapsed)
	}()
	offset := uint32(d.cfg.Detector.FilterOffset * float64(d.cfg.Period))
	expiration := d.cfg.Expiration
	period := d.cfg.Period
	ftimes := d.cfg.Detector.FilterTimes
	// Get values with the same phase.
	n := 0 // number of goroutines to launch
	ch := make(chan metricGetResult)
	var stamp uint32
	if d.cfg.Detector.UsingRecentDataPoints {
		stamp = m.Stamp
	} else {
		stamp = m.Stamp - period
	}
	for ; stamp+expiration > m.Stamp; stamp -= period {
		start := stamp - offset
		stop := stamp + offset
		// Range (m.Stamp, m.Stamp+offset) has no data yet as it is in the future.
		if stamp == m.Stamp {
			stop = m.Stamp
		}
		go func() {
			ms, err := d.db.Metric.Get(m.Name, m.Link, start, stop)
			ch <- metricGetResult{err, ms, start, stop}
		}()
		n++
		if n >= ftimes {
			break
		}
	}
	// Concat chunks.
	var vals []float64
	var err error
	for i := 0; i < n; i++ {
		r := <-ch
		if r.err != nil {
			// Record the error but DO NOT return directly: we must receive n
			// times from ch, otherwise the remaining goroutines block forever
			// and the channel is never garbage collected, leaking memory.
			err = r.err
			continue
		}
		if err != nil {
			continue
		}
		// Append to values.
		if !fz {
			for j := 0; j < len(r.ms); j++ {
				vals = append(vals, r.ms[j].Value)
			}
		} else {
			// Fill blanks with zeros.
			vals = append(vals, d.fill0(r.ms, r.start, r.stop)...)
		}
	}
	return vals, err
}
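// phaseWindowsSketch is an illustrative helper, not used by the detector,
// that logs the [start, stop] query windows the loop above would issue for a
// given metric stamp. All settings here are hypothetical stand-ins for
// d.cfg.Period, d.cfg.Expiration, d.cfg.Detector.FilterOffset and
// d.cfg.Detector.FilterTimes; it also assumes UsingRecentDataPoints is false,
// so the first window sits one full period in the past.
func phaseWindowsSketch(stamp uint32) {
	var (
		period     uint32 = 24 * 60 * 60                   // hypothetical Period: 1 day
		expiration uint32 = 7 * 24 * 60 * 60               // hypothetical Expiration: 7 days
		offset            = uint32(0.01 * float64(period)) // FilterOffset * Period
		ftimes            = 4                              // hypothetical FilterTimes
	)
	n := 0
	for s := stamp - period; s+expiration > stamp; s -= period {
		log.Debugf("window %d: [%d, %d]", n, s-offset, s+offset)
		n++
		if n >= ftimes {
			break
		}
	}
}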
// values gets history values for the input metric. It only fetches the
// history values with the same phase around this timestamp, within a filter
// offset.
func (d *Detector) values(m *models.Metric, fz bool) ([]float64, error) {
	timer := util.NewTimer()
	defer func() {
		elapsed := timer.Elapsed()
		health.AddQueryCost(elapsed)
	}()
	offset := uint32(d.cfg.Detector.FilterOffset * float64(d.cfg.Period))
	expiration := d.cfg.Expiration
	period := d.cfg.Period
	ftimes := d.cfg.Detector.FilterTimes
	// Randomly pick ftimes of the k candidate phases to query.
	k := int(math.Ceil(float64(expiration) / float64(period)))
	s := rand.Perm(k)[:ftimes]
	mp := make(map[int]bool, ftimes)
	for _, v := range s {
		mp[v] = true
	}
	// Get values with the same phase.
	n := 0 // number of goroutines to launch
	i := 0 // index of the following loop
	ch := make(chan metricGetResult)
	for stamp := m.Stamp; stamp+expiration > m.Stamp; stamp -= period {
		start := stamp - offset
		stop := stamp + offset
		if mp[i] {
			go func() {
				ms, err := d.db.Metric.Get(m.Name, start, stop)
				ch <- metricGetResult{err, ms, start, stop}
			}()
			n++
		}
		i++
	}
	// Concat chunks.
	var vals []float64
	var err error
	for i := 0; i < n; i++ {
		r := <-ch
		if r.err != nil {
			// Record the error but DO NOT return directly: we must receive n
			// times from ch, otherwise the remaining goroutines block forever
			// and the channel is never garbage collected, leaking memory.
			err = r.err
			continue
		}
		if err != nil {
			continue
		}
		// Append to values.
		if !fz {
			for j := 0; j < len(r.ms); j++ {
				vals = append(vals, r.ms[j].Value)
			}
		} else {
			// Fill blanks with zeros.
			vals = append(vals, d.fill0(r.ms, r.start, r.stop)...)
		}
	}
	if err != nil {
		// Unexpected error.
		return vals, err
	}
	// Append m.
	vals = append(vals, m.Value)
	return vals, nil
}
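// fill0Sketch is a hypothetical illustration of what a zero-filling helper in
// the spirit of fill0 above might do; the real fill0 is defined elsewhere in
// the detector and may differ. It walks [start, stop) in fixed interval steps
// and emits the stored value where a data point exists and 0 otherwise,
// assuming stamps are aligned to that grid. The interval parameter is an
// assumption; the detector would derive it from its own config.
func fill0Sketch(ms []*models.Metric, start, stop, interval uint32) []float64 {
	byStamp := make(map[uint32]float64, len(ms))
	for _, p := range ms {
		byStamp[p.Stamp] = p.Value
	}
	var vals []float64
	for t := start; t < stop; t += interval {
		vals = append(vals, byStamp[t]) // zero value when the stamp is missing
	}
	return vals
}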