// findUnindexedMetrics scours the metric membership index for each given Metric
// in the keyspace and returns a map of Fingerprint-Metric pairs that are
// absent.
func (l *LevelDBMetricPersistence) findUnindexedMetrics(candidates map[model.Fingerprint]model.Metric) (unindexed map[model.Fingerprint]model.Metric, err error) {
	begin := time.Now()
	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: findUnindexedMetrics, result: success}, map[string]string{operation: findUnindexedMetrics, result: failure})
	}()

	unindexed = make(map[model.Fingerprint]model.Metric)

	// Determine which metrics are unknown in the database.
	for fingerprint, metric := range candidates {
		var (
			dto           = model.MetricToDTO(metric)
			indexHas, err = l.hasIndexMetric(dto)
		)
		if err != nil {
			panic(err)
		}
		if !indexHas {
			unindexed[fingerprint] = metric
		}
	}

	return
}

// indexFingerprints updates all of the Fingerprint to Metric reverse lookups
// in the index and then bulk updates.
//
// This operation is idempotent.
func (l *LevelDBMetricPersistence) indexFingerprints(metrics map[model.Fingerprint]model.Metric) (err error) {
	begin := time.Now()
	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: indexFingerprints, result: success}, map[string]string{operation: indexFingerprints, result: failure})
	}()

	batch := leveldb.NewBatch()
	defer batch.Close()

	for fingerprint, metric := range metrics {
		key := coding.NewProtocolBuffer(fingerprint.ToDTO())
		value := coding.NewProtocolBuffer(model.MetricToDTO(metric))
		batch.Put(key, value)
	}

	err = l.fingerprintToMetrics.Commit(batch)
	if err != nil {
		panic(err)
	}

	return
}

// indexMetrics takes groups of samples, determines which ones contain metrics
// that are unknown to the storage stack, and then proceeds to update all
// affected indices.
func (l *LevelDBMetricPersistence) indexMetrics(fingerprints map[model.Fingerprint]model.Metric) (err error) {
	begin := time.Now()
	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: indexMetrics, result: success}, map[string]string{operation: indexMetrics, result: failure})
	}()

	var (
		absentMetrics map[model.Fingerprint]model.Metric
	)

	absentMetrics, err = l.findUnindexedMetrics(fingerprints)
	if err != nil {
		panic(err)
	}

	if len(absentMetrics) == 0 {
		return
	}

	// TODO: For the missing fingerprints, determine what label names and pairs
	// are absent and act accordingly and append fingerprints.
	var (
		doneBuildingLabelNameIndex   = make(chan error)
		doneBuildingLabelPairIndex   = make(chan error)
		doneBuildingFingerprintIndex = make(chan error)
	)

	go func() {
		doneBuildingLabelNameIndex <- l.indexLabelNames(absentMetrics)
	}()

	go func() {
		doneBuildingLabelPairIndex <- l.indexLabelPairs(absentMetrics)
	}()

	go func() {
		doneBuildingFingerprintIndex <- l.indexFingerprints(absentMetrics)
	}()

	makeTopLevelIndex := true

	err = <-doneBuildingLabelNameIndex
	if err != nil {
		panic(err)
		makeTopLevelIndex = false
	}
	err = <-doneBuildingLabelPairIndex
	if err != nil {
		panic(err)
		makeTopLevelIndex = false
	}
	err = <-doneBuildingFingerprintIndex
	if err != nil {
		panic(err)
		makeTopLevelIndex = false
	}

	// If any of the preceding operations failed, we will have inconsistent
	// indices. Thusly, the Metric membership index should NOT be updated, as
	// its state is used to determine whether to bulk update the other indices.
	// Given that those operations are idempotent, it is OK to repeat them;
	// however, it will consume considerable amounts of time.
	if makeTopLevelIndex {
		batch := leveldb.NewBatch()
		defer batch.Close()

		// WART: We should probably encode simple fingerprints.
		for _, metric := range absentMetrics {
			key := coding.NewProtocolBuffer(model.MetricToDTO(metric))
			batch.Put(key, key)
		}

		err := l.metricMembershipIndex.Commit(batch)
		if err != nil {
			panic(err)
			// Not critical.
			log.Println(err)
		}
	}

	return
}
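
// Illustrative usage sketch, not part of the original file: a write path in
// this persistence layer that has already grouped incoming samples into a
// fingerprint-to-metric map could bring every index up to date with a single
// call to indexMetrics. The names `persistence` and `metricsByFingerprint`
// below are assumptions made for the example, not existing identifiers.
//
//	var metricsByFingerprint map[model.Fingerprint]model.Metric // built by the caller
//	if err := persistence.indexMetrics(metricsByFingerprint); err != nil {
//		log.Println(err)
//	}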