func main() {
	flag.Parse()

	rpc_latency := metrics.NewHistogram(&metrics.HistogramSpecification{
		Starts:                metrics.EquallySizedBucketsFor(0, 200, 4),
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 50),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	})

	rpc_calls := metrics.NewCounter()

	metrics := registry.NewRegistry()

	metrics.Register("rpc_latency_microseconds", "RPC latency.", registry.NilLabels, rpc_latency)
	metrics.Register("rpc_calls_total", "RPC calls.", registry.NilLabels, rpc_calls)

	go func() {
		for {
			rpc_latency.Add(map[string]string{"service": "foo"}, rand.Float64()*200)
			rpc_calls.Increment(map[string]string{"service": "foo"})

			rpc_latency.Add(map[string]string{"service": "bar"}, (rand.NormFloat64()*10.0)+100.0)
			rpc_calls.Increment(map[string]string{"service": "bar"})

			rpc_latency.Add(map[string]string{"service": "zed"}, rand.ExpFloat64())
			rpc_calls.Increment(map[string]string{"service": "zed"})

			time.Sleep(100 * time.Millisecond)
		}
	}()

	exporter := metrics.YieldExporter()

	http.Handle("/metrics.json", exporter)
	http.ListenAndServe(listeningAddress, nil)
}
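
// The example above assumes a package-level listeningAddress, most likely a
// command-line flag given the flag.Parse() call, along with imports of flag,
// net/http, math/rand, and time next to the metrics and registry packages.
// A minimal sketch; the flag name and default below are assumptions, not
// taken from the original source:
var listeningAddress string

func init() {
	flag.StringVar(&listeningAddress, "listeningAddress", ":8080",
		"Address on which to expose and serve the JSON metrics.")
}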

/*
Boilerplate metrics about the metrics reporting subservice.  These are only
exposed if the DefaultRegistry's exporter is hooked into the HTTP request
handler; see the sketch following the init function below.
*/
var (
	marshalErrorCount = metrics.NewCounter()
	dumpErrorCount    = metrics.NewCounter()

	requestCount          = metrics.NewCounter()
	requestLatencyBuckets = metrics.LogarithmicSizedBucketsFor(0, 1000)
	requestLatency        = metrics.NewHistogram(&metrics.HistogramSpecification{
		Starts:                requestLatencyBuckets,
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(50, maths.Average), 1000),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.9, 0.99},
	})

	startTime = metrics.NewGauge()
)

func init() {
	startTime.Set(nil, float64(time.Now().Unix()))

	DefaultRegistry.Register("telemetry_requests_metrics_total", "A counter of the total requests made against the telemetry system.", NilLabels, requestCount)
	DefaultRegistry.Register("telemetry_requests_metrics_latency_microseconds", "A histogram of the response latency for requests made against the telemetry system.", NilLabels, requestLatency)

	DefaultRegistry.Register("instance_start_time_seconds", "The time at which the current instance started (UTC).", NilLabels, startTime)
}
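
// As noted above, these boilerplate metrics surface only when the
// DefaultRegistry's exporter is wired into an HTTP handler.  A minimal
// sketch, assuming DefaultRegistry offers the same YieldExporter() method
// used in the main example; the helper name and the "/metrics.json" path
// are illustrative:
func exposeDefaultRegistry() {
	http.Handle("/metrics.json", DefaultRegistry.YieldExporter())
}
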
const (
	alive       = "alive"
	failure     = "failure"
	outcome     = "outcome"
	state       = "state"
	success     = "success"
	unreachable = "unreachable"
)

var (
	networkLatencyHistogram = &metrics.HistogramSpecification{
		Starts:                metrics.LogarithmicSizedBucketsFor(0, 1000),
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 100),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	}

	targetOperationLatencies = metrics.NewHistogram(networkLatencyHistogram)

	retrievalDurations = metrics.NewHistogram(&metrics.HistogramSpecification{
		Starts:                metrics.LogarithmicSizedBucketsFor(0, 10000),
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 100),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99}})

	targetOperations = metrics.NewCounter()
)

func init() {
	registry.Register("prometheus_target_operations_total", "The total numbers of operations of the various targets that are being monitored.", registry.NilLabels, targetOperations)
	registry.Register("prometheus_target_operation_latency_ms", "The latencies for various target operations.", registry.NilLabels, targetOperationLatencies)
	registry.Register("prometheus_targetpool_duration_ms", "The durations for each TargetPool to retrieve state from all included entities.", registry.NilLabels, retrievalDurations)
}
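
// A hypothetical sketch of how the retrieval metrics above could be fed,
// following the Counter.Increment / Histogram.Add pattern from the main
// example.  The outcome, success, and failure label constants come from the
// const block above; recordScrape itself and the millisecond conversion are
// illustrative assumptions:
func recordScrape(duration time.Duration, err error) {
	labels := map[string]string{outcome: success}
	if err != nil {
		labels = map[string]string{outcome: failure}
	}

	targetOperations.Increment(labels)
	targetOperationLatencies.Add(labels, float64(duration/time.Millisecond))
}
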
const (
	refreshHighWatermarks       = "refresh_high_watermarks"
	renderView                  = "render_view"
	setLabelNameFingerprints    = "set_label_name_fingerprints"
	setLabelPairFingerprints    = "set_label_pair_fingerprints"
	writeMemory                 = "write_memory"
)

var (
	diskLatencyHistogram = &metrics.HistogramSpecification{
		Starts:                metrics.LogarithmicSizedBucketsFor(0, 5000),
		BucketBuilder:         metrics.AccumulatingBucketBuilder(metrics.EvictAndReplaceWith(10, maths.Average), 100),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	}

	curationDuration          = metrics.NewCounter()
	curationDurations         = metrics.NewHistogram(diskLatencyHistogram)
	storageOperations         = metrics.NewCounter()
	storageOperationDurations = metrics.NewCounter()
	storageLatency            = metrics.NewHistogram(diskLatencyHistogram)
	queueSizes                = metrics.NewGauge()
)
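
// A hypothetical caller of the recordOutcome helper defined below: time a
// storage operation and record it under success or failure labels.  The
// "operation" and "result" label keys and the wrapper itself are illustrative
// assumptions; writeMemory comes from the const block above:
func instrumentedWriteMemory(write func() error) error {
	begin := time.Now()
	err := write()

	recordOutcome(time.Since(begin), err,
		map[string]string{"operation": writeMemory, "result": "success"},
		map[string]string{"operation": writeMemory, "result": "failure"})

	return err
}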

func recordOutcome(duration time.Duration, err error, success, failure map[string]string) {
	labels := success
	if err != nil {
		labels = failure
	}

	storageOperations.Increment(labels)
	asFloat := float64(duration / time.Microsecond)
	storageLatency.Add(labels, asFloat)