// assertNonMatchedMetrics registers specs verifying that valid but non-matching
// metrics are counted, yet never written to the Redis cache.
func assertNonMatchedMetrics(nonMatchingMetrics []string) {
	It("should be properly counted", func() {
		filter.UpdateProcessingMetrics()
		Expect(int(filter.TotalMetricsReceived.Count())).To(Equal(len(nonMatchingMetrics)))
		Expect(int(filter.ValidMetricsReceived.Count())).To(Equal(len(nonMatchingMetrics)))
		Expect(int(filter.MatchingMetricsReceived.Count())).To(Equal(0))
	})

	It("should not appear in cache", func() {
		c := db.Pool.Get()
		defer c.Close()
		for _, metric := range nonMatchingMetrics {
			metricDbKey := filter.GetMetricDbKey(metric)
			count, err := redis.Int(c.Do("ZCOUNT", metricDbKey, "-inf", "+inf"))
			Expect(err).ShouldNot(HaveOccurred())
			Expect(count).To(Equal(0))

			retentionDbKey := filter.GetMetricRetentionDbKey(metric)
			result, err := c.Do("GET", retentionDbKey)
			Expect(err).ShouldNot(HaveOccurred())
			Expect(result).To(BeNil())
		}
	})
}
// serve accepts TCP connections and fans incoming matched metrics into the
// cache-saving goroutine; a second goroutine refreshes processing metrics
// once per second.
func serve(l net.Listener) {
	var wg sync.WaitGroup
	ch := make(chan *filter.MatchedMetric, 10)

	wg.Add(1)
	go func() {
		defer wg.Done()
		cache.Save(ch, func(buffer []*filter.MatchedMetric) {
			if err := cache.SavePoints(buffer, db); err != nil {
				log.Printf("failed to save value in cache: %s", err)
			}
		})
	}()

	go func() {
		for {
			filter.UpdateProcessingMetrics()
			time.Sleep(time.Second)
		}
	}()

	for {
		conn, err := l.Accept()
		if err != nil {
			if goagain.IsErrClosing(err) {
				break
			}
			log.Printf("failed to accept connection: %s", err.Error())
			continue
		}
		go handleConnection(conn, ch)
	}

	close(ch)
	wg.Wait()
}
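// handleConnection is referenced above but not shown in this excerpt. A minimal
// sketch of what it could look like, assuming PatternStorage exposes a
// ProcessIncomingMetric method that parses one plaintext line
// ("name value [timestamp]") and returns a *filter.MatchedMetric, or nil for
// invalid or non-matching input; the actual implementation may differ:
//
//	func handleConnection(conn net.Conn, ch chan *filter.MatchedMetric) {
//		defer conn.Close()
//		scanner := bufio.NewScanner(conn)
//		for scanner.Scan() {
//			// Push every matched metric to the cache-saving goroutine.
//			if m := patterns.ProcessIncomingMetric(scanner.Bytes()); m != nil {
//				ch <- m
//			}
//		}
//		if err := scanner.Err(); err != nil {
//			log.Printf("failed to read from connection: %s", err)
//		}
//	}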
// assertMatchedMetrics registers specs verifying that matching metrics are
// counted and stored in the Redis cache with the expected retention and a
// correctly rounded timestamp.
func assertMatchedMetrics(matchingMetrics []string) {
	It("should be properly counted", func() {
		filter.UpdateProcessingMetrics()
		Expect(int(filter.TotalMetricsReceived.Count())).To(Equal(len(matchingMetrics)))
		Expect(int(filter.ValidMetricsReceived.Count())).To(Equal(len(matchingMetrics)))
		Expect(int(filter.MatchingMetricsReceived.Count())).To(Equal(len(matchingMetrics)))
	})

	It("should appear in cache", func() {
		c := db.Pool.Get()
		defer c.Close()
		for _, metric := range matchingMetrics {
			dbKey := filter.GetMetricDbKey(metric)
			count, err := redis.Int(c.Do("ZCOUNT", dbKey, "-inf", "+inf"))
			Expect(err).ShouldNot(HaveOccurred())
			Expect(count).To(Equal(1))
		}
	})

	It("should have correct retention", func() {
		c := db.Pool.Get()
		defer c.Close()
		for _, metric := range matchingMetrics {
			retention := 120
			if strings.HasPrefix(metric, "Simple") {
				retention = 60
			} else if strings.HasSuffix(metric, "suf") {
				retention = 1200
			}
			dbKey := filter.GetMetricRetentionDbKey(metric)
			result, err := redis.Int(c.Do("GET", dbKey))
			Expect(err).ShouldNot(HaveOccurred())
			Expect(result).To(Equal(retention))
		}
	})

	It("should have timestamp rounded to nearest retention", func() {
		c := db.Pool.Get()
		defer c.Close()
		for _, metric := range matchingMetrics {
			timestamp := "1234567920"
			if strings.HasPrefix(metric, "Simple") {
				timestamp = "1234567920"
			} else if strings.HasSuffix(metric, "suf") {
				timestamp = "1234568400"
			}
			dbKey := filter.GetMetricDbKey(metric)
			values, err := redis.Strings(c.Do("ZRANGE", dbKey, 0, -1, "WITHSCORES"))
			Expect(err).ShouldNot(HaveOccurred())
			Expect(len(values)).To(Equal(2))
			Expect(values[1]).To(Equal(timestamp))
		}
	})
}
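// The expected values above are consistent with rounding the incoming
// timestamp to the nearest multiple of the metric's retention, assuming the
// metrics were sent with timestamp 1234567890 (the raw input is not shown in
// this excerpt): 1234567890 rounds to 1234567920 for a 60s or 120s retention
// and to 1234568400 for a 1200s retention. A minimal sketch of that
// arithmetic (the helper name is hypothetical, not part of the filter package):
//
//	func roundToNearestRetention(timestamp, retention int64) int64 {
//		return (timestamp + retention/2) / retention * retention
//	}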
	patterns = filter.NewPatternStorage()
	patterns.DoRefresh(db)
	cache = &filter.CacheStorage{}
	cache.BuildRetentions(bufio.NewScanner(strings.NewReader(testRetentions)))
})

Context("When invalid metric arrives", func() {
	BeforeEach(func() {
		for _, metric := range invalidRawMetrics {
			process(metric)
		}
	})

	It("should be properly counted", func() {
		filter.UpdateProcessingMetrics()
		Expect(int(filter.TotalMetricsReceived.Count())).To(Equal(len(invalidRawMetrics)))
		Expect(int(filter.ValidMetricsReceived.Count())).To(Equal(0))
		Expect(int(filter.MatchingMetricsReceived.Count())).To(Equal(0))
	})
})

Context("When valid non-matching metric arrives", func() {
	Context("When metric arrives without timestamp", func() {
		BeforeEach(func() {
			for _, metric := range nonMatchingMetrics {
				process(metric + " 12")
			}
		})

		assertNonMatchedMetrics(nonMatchingMetrics)