// checkPointBatcherStats verifies the batcher's statistics against the
// expected totals. Passing -1 for any expected value skips that check.
func checkPointBatcherStats(t *testing.T, b *tsdb.PointBatcher, batchTotal, pointTotal, sizeTotal, timeoutTotal int) {
	stats := b.Stats()

	if batchTotal != -1 && stats.BatchTotal != uint64(batchTotal) {
		t.Errorf("batch total stat is incorrect: %d", stats.BatchTotal)
	}
	if pointTotal != -1 && stats.PointTotal != uint64(pointTotal) {
		t.Errorf("point total stat is incorrect: %d", stats.PointTotal)
	}
	if sizeTotal != -1 && stats.SizeTotal != uint64(sizeTotal) {
		t.Errorf("size total stat is incorrect: %d", stats.SizeTotal)
	}
	if timeoutTotal != -1 && stats.TimeoutTotal != uint64(timeoutTotal) {
		t.Errorf("timeout total stat is incorrect: %d", stats.TimeoutTotal)
	}
}
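// A minimal usage sketch for the helper above, assuming the usual batcher
// test flow. The NewPointBatcher and NewPoint signatures, the In/Out/Start
// methods, and the extra imports (testing, time) are assumptions drawn from
// how the helper is written, not a confirmed tsdb API.
func TestPointBatcher_SizeFlush(t *testing.T) {
	// Assumed constructor: batch size of 3 with a one-second flush timeout.
	b := tsdb.NewPointBatcher(3, time.Second)
	b.Start()

	// Feed exactly one batch worth of points so a size-triggered flush occurs.
	for i := 0; i < 3; i++ {
		b.In() <- tsdb.NewPoint("cpu", nil, map[string]interface{}{"value": float64(i)}, time.Now())
	}
	<-b.Out()

	// Expect 1 batch, 3 points, 1 size-triggered flush, and no timeout flushes.
	checkPointBatcherStats(t, b, 1, 3, 1, 0)
}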
// processBatches continually drains the given batcher and writes the batches to the database.
func (s *Service) processBatches(batcher *tsdb.PointBatcher) {
	defer s.wg.Done()
	for {
		select {
		case batch := <-batcher.Out():
			if err := s.PointsWriter.WritePoints(&cluster.WritePointsRequest{
				Database:         s.database,
				RetentionPolicy:  "",
				ConsistencyLevel: s.consistencyLevel,
				Points:           batch,
			}); err != nil {
				s.logger.Printf("failed to write point batch to database %q: %s", s.database, err)
			}
		case <-s.done:
			return
		}
	}
}
// processBatches continually drains the given batcher and writes the batches to the database.
func (s *Service) processBatches(batcher *tsdb.PointBatcher) {
	defer s.wg.Done()
	for {
		select {
		case batch := <-batcher.Out():
			if err := s.PointsWriter.WritePoints(&cluster.WritePointsRequest{
				Database:         s.database,
				RetentionPolicy:  "",
				ConsistencyLevel: s.consistencyLevel,
				Points:           batch,
			}); err == nil {
				s.statMap.Add(statBatchesTransmitted, 1)
				s.statMap.Add(statPointsTransmitted, int64(len(batch)))
			} else {
				s.logger.Printf("failed to write point batch to database %q: %s", s.database, err)
				s.statMap.Add(statBatchesTransmitFail, 1)
			}
		case <-s.done:
			return
		}
	}
}
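// A sketch of how processBatches is typically wired into the service
// lifecycle. The startBatching/stopBatching helpers and the s.batcher field
// are hypothetical names used for illustration; only s.wg, s.done, and the
// batcher argument appear in the code above, and the PointBatcher Start/Stop
// methods are assumed.
func (s *Service) startBatching() {
	s.done = make(chan struct{})
	s.batcher.Start()

	// One goroutine drains batches for the life of the service; the WaitGroup
	// lets shutdown block until it has exited.
	s.wg.Add(1)
	go s.processBatches(s.batcher)
}

func (s *Service) stopBatching() {
	s.batcher.Stop() // assumed to stop the batcher's internal goroutine
	close(s.done)    // signals processBatches to return
	s.wg.Wait()      // wait for processBatches to exit
}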