Example #1
File: metrics.go  Project: CliffYuan/etcd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package backend

import "github.com/prometheus/client_golang/prometheus"

var (
	commitDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "etcd",
		Subsystem: "disk",
		Name:      "backend_commit_duration_seconds",
		Help:      "The latency distributions of commit called by backend.",
		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
	})
)

func init() {
	prometheus.MustRegister(commitDurations)
}
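Here ExponentialBuckets(0.001, 2, 14) produces 14 upper bounds that double from 1 ms up to 8.192 s. A minimal sketch of how such a histogram is typically fed; commitAndMeasure is a hypothetical helper, not part of the etcd source, and assumes "time" is added to the imports:

func commitAndMeasure(commit func() error) error {
	start := time.Now()
	err := commit()
	// A metric named *_seconds should be observed in seconds.
	commitDurations.Observe(time.Since(start).Seconds())
	return err
}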
Example #2
File: metrics.go  Project: pingcap/tidb
			Namespace: "tidb",
			Subsystem: "ddl",
			Name:      "waiting_jobs",
			Help:      "Gauge of jobs.",
		}, []string{"type", "action"})

	// handle job result state.
	handleJobSucc      = "handle_job_succ"
	handleJobFailed    = "handle_job_failed"
	handleJobHistogram = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "ddl",
			Name:      "handle_job_duration_seconds",
			Help:      "Bucketed histogram of processing time (s) of handle jobs",
			Buckets:   prometheus.ExponentialBuckets(0.01, 2, 20),
		}, []string{"type", "action", "result_state"})

	// handle batch data type.
	batchAddCol              = "batch_add_col"
	batchAddIdx              = "batch_add_idx"
	batchDelData             = "batch_del_data"
	batchHandleDataHistogram = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "ddl",
			Name:      "batch_add_or_del_data_succ",
			Help:      "Bucketed histogram of processing time (s) of batch handle data",
			Buckets:   prometheus.ExponentialBuckets(0.001, 2, 20),
		}, []string{"handle_data_type"})
)
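A sketch of how the result-state constants and handleJobHistogram above are typically combined when a DDL job finishes; observeHandleJob is a hypothetical helper, not part of the TiDB listing, and assumes a "time" import:

func observeHandleJob(jobType, action string, err error, start time.Time) {
	result := handleJobSucc
	if err != nil {
		result = handleJobFailed
	}
	// Label order must match []string{"type", "action", "result_state"}.
	handleJobHistogram.WithLabelValues(jobType, action, result).Observe(time.Since(start).Seconds())
}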
Example #3
File: file.go  Project: coreos/torus
	promFileSyncs = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "torus_server_file_syncs",
		Help: "Number of times a file has been synced on this server",
	}, []string{"volume"})
	promFileChangedSyncs = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "torus_server_file_changed_syncs",
		Help: "Number of times a file has been synced on this server, and the file has changed underneath it",
	}, []string{"volume"})
	promFileWrittenBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "torus_server_file_written_bytes",
		Help: "Number of bytes written to a file on this server",
	}, []string{"volume"})
	promFileBlockRead = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "torus_server_file_block_read_us",
		Help:    "Histogram of ms taken to read a block through the layers and into the file abstraction",
		Buckets: prometheus.ExponentialBuckets(50.0, 2, 20),
	})
	promFileBlockWrite = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "torus_server_file_block_write_us",
		Help:    "Histogram of ms taken to write a block through the layers and into the file abstraction",
		Buckets: prometheus.ExponentialBuckets(50.0, 2, 20),
	})
)

func init() {
	prometheus.MustRegister(promOpenINodes)
	prometheus.MustRegister(promOpenFiles)
	prometheus.MustRegister(promFileSyncs)
	prometheus.MustRegister(promFileChangedSyncs)
	prometheus.MustRegister(promFileWrittenBytes)
	prometheus.MustRegister(promFileBlockRead)
	prometheus.MustRegister(promFileBlockWrite)
}
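Because the block read/write histograms carry a _us suffix, observations should be converted to microseconds. A minimal sketch; observeBlockRead is a hypothetical helper, not from the torus source, and assumes a "time" import:

func observeBlockRead(start time.Time) {
	// Convert the elapsed duration to microseconds to match the _us metric name.
	promFileBlockRead.Observe(float64(time.Since(start)) / float64(time.Microsecond))
}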
Example #4
var (
	// TODO(a-robinson): Add unit tests for the handling of these metrics once
	// the upstream library supports it.
	requestCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "apiserver_request_count",
			Help: "Counter of apiserver requests broken out for each verb, API resource, client, and HTTP response code.",
		},
		[]string{"verb", "resource", "client", "code"},
	)
	requestLatencies = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: "apiserver_request_latencies",
			Help: "Response latency distribution in microseconds for each verb, resource and client.",
			// Use buckets ranging from 125 ms to 8 seconds.
			Buckets: prometheus.ExponentialBuckets(125000, 2.0, 7),
		},
		[]string{"verb", "resource"},
	)
	requestLatenciesSummary = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name: "apiserver_request_latencies_summary",
			Help: "Response latency summary in microseconds for each verb and resource.",
		},
		[]string{"verb", "resource"},
	)
)

// Register all metrics.
func Register() {
	prometheus.MustRegister(requestCounter)
	prometheus.MustRegister(requestLatencies)
	prometheus.MustRegister(requestLatenciesSummary)
}
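The latency metrics here are in microseconds (the buckets run from 125 ms to 8 s). A sketch of an instrumentation point; monitor is a hypothetical helper, not part of the original source, and assumes a "time" import:

func monitor(verb, resource, client, code string, elapsed time.Duration) {
	requestCounter.WithLabelValues(verb, resource, client, code).Inc()
	// Both latency metrics are expressed in microseconds.
	micros := float64(elapsed / time.Microsecond)
	requestLatencies.WithLabelValues(verb, resource).Observe(micros)
	requestLatenciesSummary.WithLabelValues(verb, resource).Observe(micros)
}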
Example #5
File: s.go  Project: jaqx0r/blts
	"github.com/prometheus/client_golang/prometheus"
)

var (
	port = flag.String("port", "8000", "Port to listen on.")
)

var (
	requests = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "requests", Help: "total requests received"})
	errors = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "errors", Help: "total errors served"}, []string{"code"})
	latency_ms = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "latency_ms",
		Help:    "request latency in milliseconds",
		Buckets: prometheus.ExponentialBuckets(1, 2, 20)})
	backend_latency_ms = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "backend_latency_ms",
		Help:    "request latency in milliseconds",
		Buckets: prometheus.ExponentialBuckets(1, 2, 20)})
)

func init() {
	prometheus.MustRegister(requests)
	prometheus.MustRegister(errors)
	prometheus.MustRegister(latency_ms)
	prometheus.MustRegister(backend_latency_ms)
}
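A sketch of how requests and latency_ms would typically be driven from an HTTP handler; instrument is a hypothetical wrapper, not part of the blts source, and assumes net/http and time are among the elided imports:

func instrument(h http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		requests.Inc()
		h(w, r)
		// The histogram name ends in _ms, so record milliseconds.
		latency_ms.Observe(float64(time.Since(start)) / float64(time.Millisecond))
	}
}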

var (
	randLock sync.Mutex
Example #6
File: metrics.go  Project: XuHuaiyu/tidb
// See the License for the specific language governing permissions and
// limitations under the License.

package metrics

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	queryMetric = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "query",
			Name:      "handle_query_duration_seconds",
			Help:      "Bucketed histogram of processing time (s) of handled queries.",
			Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 13),
		})
)

// Query adds the cost time of a handled query to the metrics.
func Query(costTime time.Duration) {
	// The histogram is in seconds, so convert the duration explicitly.
	queryMetric.Observe(costTime.Seconds())
}

func init() {
	prometheus.MustRegister(queryMetric)
}
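A sketch of a call site for Query; timedExecute is hypothetical, not part of the TiDB listing, and is placed in the same package for simplicity:

func timedExecute(run func() error) error {
	start := time.Now()
	err := run()
	// Record the wall-clock time of the query regardless of the outcome.
	Query(time.Since(start))
	return err
}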
Example #7
File: metrics.go  Project: 40a/bootkube
	pendingEventsGauge = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: "etcd",
			Subsystem: "storage",
			Name:      "pending_events_total",
			Help:      "Total number of pending events to be sent.",
		})

	indexCompactionPauseDurations = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "etcd",
			Subsystem: "storage",
			Name:      "index_compaction_pause_duration_milliseconds",
			Help:      "Bucketed histogram of index compaction pause duration.",
			// 0.5ms -> 1second
			Buckets: prometheus.ExponentialBuckets(0.5, 2, 12),
		})

	dbCompactionPauseDurations = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "etcd",
			Subsystem: "storage",
			Name:      "db_compaction_pause_duration_milliseconds",
			Help:      "Bucketed histogram of db compaction pause duration.",
			// 1ms -> 4second
			Buckets: prometheus.ExponentialBuckets(1, 2, 13),
		})

	dbCompactionTotalDurations = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "etcd",
Example #8
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

const schedulerSubsystem = "scheduler"

var BindingSaturationReportInterval = 1 * time.Second

var (
	E2eSchedulingLatency = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Subsystem: schedulerSubsystem,
			Name:      "e2e_scheduling_latency_microseconds",
			Help:      "E2e scheduling latency (scheduling algorithm + binding)",
			Buckets:   prometheus.ExponentialBuckets(1000, 2, 15),
		},
	)
	SchedulingAlgorithmLatency = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Subsystem: schedulerSubsystem,
			Name:      "scheduling_algorithm_latency_microseconds",
			Help:      "Scheduling algorithm latency",
			Buckets:   prometheus.ExponentialBuckets(1000, 2, 15),
		},
	)
	BindingLatency = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Subsystem: schedulerSubsystem,
			Name:      "binding_latency_microseconds",
			Help:      "Binding latency",
Example #9
File: metrics.go  Project: prepor/lucky
package lucky

import (
	"github.com/prometheus/client_golang/prometheus"
)

var (
	RequestsHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "lucky_requests_ms",
		Help:    "Lucky requests",
		Buckets: prometheus.ExponentialBuckets(1, 5, 6),
	},
		[]string{"backend", "method"})
	BackendsGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "lucky_backends",
		Help: "Lucky backends",
	},
		[]string{"backend"})

	FrontendsGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "lucky_frontends",
		Help: "Lucky frontends",
	},
		[]string{"frontend", "type"})
)

func init() {
	prometheus.MustRegister(RequestsHistogram)
	prometheus.MustRegister(BackendsGauge)
	prometheus.MustRegister(FrontendsGauge)
}
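RequestsHistogram carries a _ms suffix with buckets from 1 ms to 3125 ms, so observations should be in milliseconds. A minimal sketch; observeRequest is a hypothetical helper, not part of the lucky source, and assumes a "time" import:

func observeRequest(backend, method string, d time.Duration) {
	// Label order must match []string{"backend", "method"}.
	RequestsHistogram.WithLabelValues(backend, method).Observe(float64(d) / float64(time.Millisecond))
}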
Example #10
File: metrics.go  Project: pingcap/tidb
	txnCmdCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "txn_cmd_total",
			Help:      "Counter of txn commands.",
		}, []string{"type"})

	txnCmdHistogram = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "txn_cmd_seconds",
			Help:      "Bucketed histogram of processing time of txn cmds.",
			Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 18),
		}, []string{"type"})

	backoffCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "backoff_total",
			Help:      "Counter of backoff.",
		}, []string{"type"})

	backoffHistogram = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "backoff_seconds",
Example #11
File: metrics.go  Project: jmptrader/tidb
	copBuildTaskHistogram = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "cop_buildtask_seconds",
			Help:      "Coprocessor buildTask cost time.",
		})

	copTaskLenHistogram = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "cop_task_len",
			Help:      "Coprocessor task length.",
			Buckets:   prometheus.ExponentialBuckets(1, 2, 11),
		})

	coprocessorCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "coprocessor_actions_total",
			Help:      "Counter of coprocessor actions.",
		}, []string{"type"})

	gcWorkerCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "gc_worker_actions_total",
Example #12
	"github.com/tylerb/graceful"

	"github.com/chihaya/chihaya/frontend"
	"github.com/chihaya/chihaya/middleware"
)

func init() {
	prometheus.MustRegister(promResponseDurationMilliseconds)
	recordResponseDuration("action", nil, time.Second)
}

var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "chihaya_http_response_duration_milliseconds",
		Help:    "The duration of time it takes to receive and write a response to an API request",
		Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
	},
	[]string{"action", "error"},
)

// recordResponseDuration records the time taken to respond to a request,
// in milliseconds.
func recordResponseDuration(action string, err error, duration time.Duration) {
	var errString string
	if err != nil {
		errString = err.Error()
	}

	promResponseDurationMilliseconds.
		WithLabelValues(action, errString).
		Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}

var (
	// apiRequestDuration tracks the request duration separately for each HTTP
	// status class (1xx, 2xx, ...). This creates a fair amount of time series on
	// the Prometheus server. Usually, you would track the duration of
	// serving HTTP request without partitioning by outcome. Do something
	// like this only if needed. Also note how only status classes are
	// tracked, not every single status code. The latter would create an
	// even larger amount of time series. Request counters partitioned by
	// status code are usually OK as each counter only creates one time
	// series. Histograms are way more expensive, so partition with care and
	// only where you really need separate latency tracking. Partitioning by
	// status class is only an example. In concrete cases, other partitions
	// might make more sense.
	apiRequestDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "api_request_duration_seconds",
			Help:    "Histogram for the request duration of the public API, partitioned by status class.",
			Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),
		},
		[]string{"status_class"},
	)
)

func handler(w http.ResponseWriter, r *http.Request) {
	status := http.StatusOK
	// The ObserverFunc gets called by the deferred ObserveDuration and
	// decides which Histogram's Observe method is called.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
		switch {
		case status >= 500: // Server error.
			apiRequestDuration.WithLabelValues("5xx").Observe(v)
		case status >= 400: // Client error.
			apiRequestDuration.WithLabelValues("4xx").Observe(v)