// testDumpToWriter verifies that a Registry's dumpToWriter emits the
// expected JSON for zero, one, and two registered metrics.  The two-metric
// scenario also pins the ordering of the dumped entries.
func testDumpToWriter(t test.Tester) {
	type input struct {
		metrics map[string]metrics.Metric
	}

	scenarios := []struct {
		in  input
		out []byte
	}{
		{
			// An empty registry dumps as an empty JSON array.
			out: []byte("[]"),
		},
		{
			in: input{
				metrics: map[string]metrics.Metric{
					"foo": metrics.NewCounter(),
				},
			},
			out: []byte("[{\"baseLabels\":{\"label_foo\":\"foo\",\"name\":\"foo\"},\"docstring\":\"metric foo\",\"metric\":{\"type\":\"counter\",\"value\":[]}}]"),
		},
		{
			in: input{
				metrics: map[string]metrics.Metric{
					"foo": metrics.NewCounter(),
					"bar": metrics.NewCounter(),
				},
			},
			out: []byte("[{\"baseLabels\":{\"label_bar\":\"bar\",\"name\":\"bar\"},\"docstring\":\"metric bar\",\"metric\":{\"type\":\"counter\",\"value\":[]}},{\"baseLabels\":{\"label_foo\":\"foo\",\"name\":\"foo\"},\"docstring\":\"metric foo\",\"metric\":{\"type\":\"counter\",\"value\":[]}}]"),
		},
	}

	for i, scenario := range scenarios {
		reg := NewRegistry()

		for name, metric := range scenario.in.metrics {
			labels := map[string]string{fmt.Sprintf("label_%s", name): name}
			if err := reg.Register(name, fmt.Sprintf("metric %s", name), labels, metric); err != nil {
				t.Errorf("%d. encountered error while registering metric %s", i, err)
			}
		}

		var buf bytes.Buffer
		if err := reg.dumpToWriter(&buf); err != nil {
			t.Errorf("%d. encountered error while dumping %s", i, err)
		}

		if got := buf.Bytes(); !bytes.Equal(scenario.out, got) {
			t.Errorf("%d. expected %q for dumping, got %q", i, scenario.out, got)
		}
	}
}
// main runs a small demo exporter: it registers an RPC latency histogram and
// an RPC call counter, feeds them with synthetic samples from a background
// goroutine, and serves the registry's JSON dump on /metrics.json.
func main() {
	flag.Parse()

	// Histogram of simulated RPC latencies: four equally sized buckets over
	// [0, 200), reporting the listed percentiles.
	rpcLatency := metrics.NewHistogram(&metrics.HistogramSpecification{
		Starts:                metrics.EquallySizedBucketsFor(0, 200, 4),
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 50),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	})

	rpcCalls := metrics.NewCounter()

	// Named "reg" rather than "metrics": the original name shadowed the
	// imported metrics package for the rest of the function, which is
	// error-prone.
	reg := registry.NewRegistry()

	reg.Register("rpc_latency_microseconds", "RPC latency.", registry.NilLabels, rpcLatency)
	reg.Register("rpc_calls_total", "RPC calls.", registry.NilLabels, rpcCalls)

	// Synthetic load generator: every 100ms record latencies for three
	// services drawn from different distributions.  It runs for the life of
	// the process, so no shutdown signalling is required.
	go func() {
		for {
			rpcLatency.Add(map[string]string{"service": "foo"}, rand.Float64()*200)
			rpcCalls.Increment(map[string]string{"service": "foo"})

			rpcLatency.Add(map[string]string{"service": "bar"}, (rand.NormFloat64()*10.0)+100.0)
			rpcCalls.Increment(map[string]string{"service": "bar"})

			rpcLatency.Add(map[string]string{"service": "zed"}, rand.ExpFloat64())
			rpcCalls.Increment(map[string]string{"service": "zed"})

			time.Sleep(100 * time.Millisecond)
		}
	}()

	exporter := reg.YieldExporter()

	http.Handle("/metrics.json", exporter)
	http.ListenAndServe(listeningAddress, nil)
}
package registry

import (
	"github.com/prometheus/client_golang/maths"
	"github.com/prometheus/client_golang/metrics"
	"time"
)

/*
Boilerplate metrics about the metrics reporting subservice.  These are only
exposed if the DefaultRegistry's exporter is hooked into the HTTP request
handler.
*/
var (
	// Error counters for the exporter itself; their increment sites are not
	// visible in this chunk — presumably marshalling and dump failures.
	marshalErrorCount = metrics.NewCounter()
	dumpErrorCount    = metrics.NewCounter()

	// Request volume and latency of the exporter's HTTP handler.  The
	// latency histogram uses logarithmic buckets over [0, 1000).
	requestCount          = metrics.NewCounter()
	requestLatencyBuckets = metrics.LogarithmicSizedBucketsFor(0, 1000)
	requestLatency        = metrics.NewHistogram(&metrics.HistogramSpecification{
		Starts:                requestLatencyBuckets,
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(50, maths.Average), 1000),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.9, 0.99},
	})

	// startTime holds the process start time as a Unix timestamp; it is set
	// once from init.
	startTime = metrics.NewGauge()
)

func init() {
	startTime.Set(nil, float64(time.Now().Unix()))
	alive       = "alive"
	failure     = "failure"
	outcome     = "outcome"
	state       = "state"
	success     = "success"
	unreachable = "unreachable"
)

var (
	// networkLatencyHistogram is the shared bucketing specification for the
	// target-operation latency histogram: logarithmic buckets over [0, 1000)
	// reporting the listed percentiles.
	networkLatencyHistogram = &metrics.HistogramSpecification{
		Starts:                metrics.LogarithmicSizedBucketsFor(0, 1000),
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 100),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	}

	// Registered below as prometheus_target_operation_latency_ms.
	targetOperationLatencies = metrics.NewHistogram(networkLatencyHistogram)

	// Registered below as prometheus_targetpool_duration_ms; uses a wider
	// logarithmic range [0, 10000) than the per-operation histogram.
	retrievalDurations = metrics.NewHistogram(&metrics.HistogramSpecification{
		Starts:                metrics.LogarithmicSizedBucketsFor(0, 10000),
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 100),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99}})

	// Registered below as prometheus_target_operations_total.
	targetOperations = metrics.NewCounter()
)

// init registers the target-monitoring instrumentation metrics with the
// shared registry under their exported Prometheus names.
func init() {
	var (
		operationsDoc = "The total numbers of operations of the various targets that are being monitored."
		latencyDoc    = "The latencies for various target operations."
		durationsDoc  = "The durations for each TargetPool to retrieve state from all included entities."
	)

	registry.Register("prometheus_target_operations_total", operationsDoc, registry.NilLabels, targetOperations)
	registry.Register("prometheus_target_operation_latency_ms", latencyDoc, registry.NilLabels, targetOperationLatencies)
	registry.Register("prometheus_targetpool_duration_ms", durationsDoc, registry.NilLabels, retrievalDurations)
}
	rebuildDiskFrontier         = "rebuild_disk_frontier"
	refreshHighWatermarks       = "refresh_high_watermarks"
	renderView                  = "render_view"
	setLabelNameFingerprints    = "set_label_name_fingerprints"
	setLabelPairFingerprints    = "set_label_pair_fingerprints"
	writeMemory                 = "write_memory"
)

var (
	// diskLatencyHistogram is the bucketing specification shared by the
	// disk-facing latency histograms below: logarithmic buckets over
	// [0, 5000).
	diskLatencyHistogram = &metrics.HistogramSpecification{
		Starts:                metrics.LogarithmicSizedBucketsFor(0, 5000),
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 100),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	}

	// Storage instrumentation.  storageOperations and
	// storageOperationDurations are fed by recordOutcome below; the
	// increment sites of the curation and queue metrics are not visible in
	// this chunk.
	curationDuration          = metrics.NewCounter()
	curationDurations         = metrics.NewHistogram(diskLatencyHistogram)
	storageOperations         = metrics.NewCounter()
	storageOperationDurations = metrics.NewCounter()
	storageLatency            = metrics.NewHistogram(diskLatencyHistogram)
	queueSizes                = metrics.NewGauge()
)

func recordOutcome(duration time.Duration, err error, success, failure map[string]string) {
	labels := success
	if err != nil {
		labels = failure
	}

	storageOperations.Increment(labels)
	asFloat := float64(duration / time.Microsecond)