Example #1
// Apply implements the Processor interface.
func (p *CompactionProcessor) Apply(sampleIterator leveldb.Iterator, samplesPersistence raw.Persistence, stopAt clientmodel.Timestamp, fingerprint *clientmodel.Fingerprint) (lastCurated clientmodel.Timestamp, err error) {
	var pendingBatch raw.Batch

	defer func() {
		if pendingBatch != nil {
			pendingBatch.Close()
		}
	}()

	var pendingMutations = 0
	var pendingSamples metric.Values
	var unactedSamples metric.Values
	var lastTouchedTime clientmodel.Timestamp
	var keyDropped bool

	sampleKey, _ := p.sampleKeys.Get()
	defer p.sampleKeys.Give(sampleKey)

	sampleKeyDto, _ := p.dtoSampleKeys.Get()
	defer p.dtoSampleKeys.Give(sampleKeyDto)

	if err = sampleIterator.Key(sampleKeyDto); err != nil {
		return
	}

	sampleKey.Load(sampleKeyDto)

	unactedSamples = unmarshalValues(sampleIterator.RawValue(), nil)

	for lastCurated.Before(stopAt) && lastTouchedTime.Before(stopAt) && sampleKey.Fingerprint.Equal(fingerprint) {
		switch {
		// Furnish a new pending batch operation if none is available.
		case pendingBatch == nil:
			pendingBatch = leveldb.NewBatch()

		// If there are no sample values left for the current key, advance the
		// iterator and load the next key and its values.  The Before() checks
		// in the loop condition keep us from reading past stopAt.
		case len(unactedSamples) == 0:
			if !sampleIterator.Next() {
				return lastCurated, fmt.Errorf("illegal condition: invalid iterator on continuation")
			}

			keyDropped = false

			if err = sampleIterator.Key(sampleKeyDto); err != nil {
				return
			}
			sampleKey.Load(sampleKeyDto)
			if !sampleKey.Fingerprint.Equal(fingerprint) {
				break
			}

			unactedSamples = unmarshalValues(sampleIterator.RawValue(), nil)

		// If the number of pending mutations exceeds the allowed batch amount,
		// commit to disk and delete the batch.  A new one will be recreated if
		// necessary.
		case pendingMutations >= p.maximumMutationPoolBatch:
			err = samplesPersistence.Commit(pendingBatch)
			if err != nil {
				return
			}

			pendingMutations = 0

			pendingBatch.Close()
			pendingBatch = nil

		// A group that is already at least the target size, with nothing
		// pending ahead of it, is left on disk untouched.
		case len(pendingSamples) == 0 && len(unactedSamples) >= p.minimumGroupSize:
			lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
			unactedSamples = metric.Values{}

		// The pending and unacted samples together still fall short of the
		// target group size: drop the original key and accumulate them all
		// into the pending group.
		case len(pendingSamples)+len(unactedSamples) < p.minimumGroupSize:
			if !keyDropped {
				k := &dto.SampleKey{}
				sampleKey.Dump(k)
				pendingBatch.Drop(k)

				keyDropped = true
			}
			pendingSamples = append(pendingSamples, unactedSamples...)
			lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
			unactedSamples = metric.Values{}
			pendingMutations++

		// If the number of pending samples equals the target group size,
		// write them out as a new group and start refilling from any
		// remaining unacted samples.
		case len(pendingSamples) == p.minimumGroupSize:
			k := &dto.SampleKey{}
			newSampleKey := buildSampleKey(fingerprint, pendingSamples)
			newSampleKey.Dump(k)
			b := marshalValues(pendingSamples, nil)
			pendingBatch.PutRaw(k, b)

			pendingMutations++
			lastCurated = newSampleKey.FirstTimestamp
			if len(unactedSamples) > 0 {
				if !keyDropped {
					sampleKey.Dump(k)
					pendingBatch.Drop(k)
					keyDropped = true
				}

				if len(unactedSamples) > p.minimumGroupSize {
					pendingSamples = unactedSamples[:p.minimumGroupSize]
					unactedSamples = unactedSamples[p.minimumGroupSize:]
					lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
				} else {
					pendingSamples = unactedSamples
					lastTouchedTime = pendingSamples[len(pendingSamples)-1].Timestamp
					unactedSamples = metric.Values{}
				}
			}

		// Together the pending and unacted samples meet or exceed the target
		// group size: top the pending group up to exactly that size and carry
		// the remainder forward.
		case len(pendingSamples)+len(unactedSamples) >= p.minimumGroupSize:
			if !keyDropped {
				k := &dto.SampleKey{}
				sampleKey.Dump(k)
				pendingBatch.Drop(k)
				keyDropped = true
			}
			remainder := p.minimumGroupSize - len(pendingSamples)
			pendingSamples = append(pendingSamples, unactedSamples[:remainder]...)
			unactedSamples = unactedSamples[remainder:]
			if len(unactedSamples) == 0 {
				lastTouchedTime = pendingSamples[len(pendingSamples)-1].Timestamp
			} else {
				lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
			}
			pendingMutations++
		default:
			err = fmt.Errorf("unhandled processing case")
		}
	}

	if len(unactedSamples) > 0 || len(pendingSamples) > 0 {
		pendingSamples = append(pendingSamples, unactedSamples...)
		k := &dto.SampleKey{}
		newSampleKey := buildSampleKey(fingerprint, pendingSamples)
		newSampleKey.Dump(k)
		b := marshalValues(pendingSamples, nil)
		pendingBatch.PutRaw(k, b)
		pendingSamples = metric.Values{}
		pendingMutations++
		lastCurated = newSampleKey.FirstTimestamp
	}

	// This commit is deliberately not deferred: if an earlier commit failed,
	// the leftover batch must not be committed again on the way out.
	if pendingBatch != nil && pendingMutations > 0 {
		err = samplesPersistence.Commit(pendingBatch)
		if err != nil {
			return
		}
	}

	return
}
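
The switch above interleaves batch management with the actual regrouping policy. Stripped of the LevelDB plumbing, that policy is: leave groups that are already at least minimumGroupSize samples alone, merge smaller runs until they reach exactly that size, and flush whatever is left over as one final smaller group. The sketch below illustrates that policy in isolation; the sample type and the compactChunks helper are hypothetical names used only for this illustration and are not part of the Prometheus storage API.

// A minimal, self-contained sketch of the regrouping idea behind
// CompactionProcessor.Apply.  The sample type and compactChunks are
// hypothetical illustrations, not the Prometheus storage API.
package main

import "fmt"

type sample struct {
	timestamp int64
	value     float64
}

// compactChunks merges adjacent runs of samples so that each emitted group
// holds minimumGroupSize samples; the final group may be smaller.
func compactChunks(chunks [][]sample, minimumGroupSize int) [][]sample {
	var out [][]sample
	var pending []sample
	for _, c := range chunks {
		// A chunk that is already large enough, with nothing pending in
		// front of it, is left untouched (the len(pendingSamples) == 0 &&
		// len(unactedSamples) >= p.minimumGroupSize case).
		if len(pending) == 0 && len(c) >= minimumGroupSize {
			out = append(out, c)
			continue
		}
		pending = append(pending, c...)
		// Emit full groups of exactly minimumGroupSize samples and carry the
		// remainder forward, as the >= p.minimumGroupSize case does.
		for len(pending) >= minimumGroupSize {
			group := append([]sample(nil), pending[:minimumGroupSize]...)
			out = append(out, group)
			pending = pending[minimumGroupSize:]
		}
	}
	// Whatever is left over becomes one final, smaller group, mirroring the
	// code after Apply's main loop.
	if len(pending) > 0 {
		out = append(out, pending)
	}
	return out
}

func main() {
	chunks := [][]sample{
		{{1, 0.1}, {2, 0.2}},
		{{3, 0.3}},
		{{4, 0.4}, {5, 0.5}, {6, 0.6}, {7, 0.7}, {8, 0.8}},
		{{9, 0.9}},
	}
	for _, group := range compactChunks(chunks, 5) {
		fmt.Printf("group of %d samples starting at %d\n", len(group), group[0].timestamp)
	}
}
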
Example #2
// Apply implements the Processor interface.
func (p *DeletionProcessor) Apply(sampleIterator leveldb.Iterator, samplesPersistence raw.Persistence, stopAt clientmodel.Timestamp, fingerprint *clientmodel.Fingerprint) (lastCurated clientmodel.Timestamp, err error) {
	var pendingBatch raw.Batch

	defer func() {
		if pendingBatch != nil {
			pendingBatch.Close()
		}
	}()

	sampleKeyDto, _ := p.dtoSampleKeys.Get()
	defer p.dtoSampleKeys.Give(sampleKeyDto)

	sampleKey, _ := p.sampleKeys.Get()
	defer p.sampleKeys.Give(sampleKey)

	if err = sampleIterator.Key(sampleKeyDto); err != nil {
		return
	}
	sampleKey.Load(sampleKeyDto)

	sampleValues := unmarshalValues(sampleIterator.RawValue(), nil)

	pendingMutations := 0

	for lastCurated.Before(stopAt) && sampleKey.Fingerprint.Equal(fingerprint) {
		switch {
		// Furnish a new pending batch operation if none is available.
		case pendingBatch == nil:
			pendingBatch = leveldb.NewBatch()

		// If there are no sample values left for the current key, advance
		// the iterator and load the next key and its values.  The Before()
		// check in the loop condition keeps us from reading past stopAt.
		case len(sampleValues) == 0:
			if !sampleIterator.Next() {
				return lastCurated, fmt.Errorf("illegal condition: invalid iterator on continuation")
			}

			if err = sampleIterator.Key(sampleKeyDto); err != nil {
				return
			}
			sampleKey.Load(sampleKeyDto)

			sampleValues = unmarshalValues(sampleIterator.RawValue(), nil)

		// If the number of pending mutations exceeds the allowed batch
		// amount, commit to disk and delete the batch.  A new one will
		// be recreated if necessary.
		case pendingMutations >= p.maximumMutationPoolBatch:
			err = samplesPersistence.Commit(pendingBatch)
			if err != nil {
				return
			}

			pendingMutations = 0

			pendingBatch.Close()
			pendingBatch = nil

		// stopAt lies outside this chunk's time range, so the chunk is
		// dropped whole.
		case !sampleKey.MayContain(stopAt):
			k := &dto.SampleKey{}
			sampleKey.Dump(k)
			pendingBatch.Drop(k)
			lastCurated = sampleKey.LastTimestamp
			sampleValues = metric.Values{}
			pendingMutations++

		// stopAt falls within this chunk: drop it and, if any samples survive
		// the truncation, write them back under a new key.
		case sampleKey.MayContain(stopAt):
			k := &dto.SampleKey{}
			sampleKey.Dump(k)
			pendingBatch.Drop(k)
			pendingMutations++

			sampleValues = sampleValues.TruncateBefore(stopAt)
			if len(sampleValues) > 0 {
				k := &dto.SampleKey{}
				sampleKey = buildSampleKey(fingerprint, sampleValues)
				sampleKey.Dump(k)
				lastCurated = sampleKey.FirstTimestamp
				v := marshalValues(sampleValues, nil)
				pendingBatch.PutRaw(k, v)
				pendingMutations++
			} else {
				lastCurated = sampleKey.LastTimestamp
			}

		default:
			err = fmt.Errorf("unhandled processing case")
		}
	}

	// This commit is deliberately not deferred: if an earlier commit failed,
	// the leftover batch must not be committed again on the way out.
	if pendingBatch != nil && pendingMutations > 0 {
		err = samplesPersistence.Commit(pendingBatch)
		if err != nil {
			return
		}
	}

	return
}
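
Ignoring the batch management, the per-chunk decision made in this switch boils down to: drop every chunk whose time range does not reach the cutoff, and rewrite a chunk that straddles the cutoff with only the samples that survive truncation. The sketch below shows that decision on plain slices; the sample type and the truncateBefore and deleteBefore helpers are hypothetical names used only for this illustration and are not part of the Prometheus storage API.

// A minimal, self-contained sketch of the deletion idea behind
// DeletionProcessor.Apply.  The sample type and the helpers below are
// hypothetical illustrations, not the Prometheus storage API.
package main

import "fmt"

type sample struct {
	timestamp int64
	value     float64
}

// truncateBefore drops all samples with a timestamp strictly before cutoff,
// assuming the slice is sorted by timestamp.
func truncateBefore(values []sample, cutoff int64) []sample {
	i := 0
	for i < len(values) && values[i].timestamp < cutoff {
		i++
	}
	return values[i:]
}

// deleteBefore applies the per-chunk decision made inside Apply's switch:
// either drop the chunk, or truncate it and keep the rewritten tail.
func deleteBefore(chunks [][]sample, cutoff int64) [][]sample {
	var kept [][]sample
	for _, c := range chunks {
		if len(c) == 0 || c[len(c)-1].timestamp < cutoff {
			continue // chunk lies entirely before the cutoff: drop it
		}
		if rest := truncateBefore(c, cutoff); len(rest) > 0 {
			kept = append(kept, rest) // straddling chunk: keep only the tail
		}
	}
	return kept
}

func main() {
	chunks := [][]sample{
		{{1, 0.1}, {2, 0.2}},
		{{3, 0.3}, {4, 0.4}, {5, 0.5}},
		{{6, 0.6}},
	}
	for _, c := range deleteBefore(chunks, 4) {
		fmt.Printf("kept %d samples starting at %d\n", len(c), c[0].timestamp)
	}
}
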