Code Example #1
File: examples_test.go  Project: djbarber/ipfs-hack
func ExampleGaugeVec() {
	binaryVersion := flag.String("binary_version", "debug", "Version of the binary: debug, canary, production.")
	flag.Parse()

	opsQueued := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   "our_company",
			Subsystem:   "blob_storage",
			Name:        "ops_queued",
			Help:        "Number of blob storage operations waiting to be processed, partitioned by user and type.",
			ConstLabels: prometheus.Labels{"binary_version": *binaryVersion},
		},
		[]string{
			// Which user has requested the operation?
			"user",
			// Of what type is the operation?
			"type",
		},
	)
	prometheus.MustRegister(opsQueued)

	// Increase a value using compact (but order-sensitive!) WithLabelValues().
	opsQueued.WithLabelValues("bob", "put").Add(4)
	// Increase a value with a map using With(). More verbose, but order
	// doesn't matter anymore.
	opsQueued.With(prometheus.Labels{"type": "delete", "user": "******"}).Inc()
}
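
Metrics registered with MustRegister, as in the example above, are typically exposed over HTTP so a Prometheus server can scrape them. A minimal sketch, assuming the promhttp sub-package of client_golang is available (the vendored copy in this project may instead expose the older prometheus.Handler()):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Serve every metric registered with the default registry on /metrics.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}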
Code Example #2
// NewClusterManager creates the two metric vectors OOMCount and RAMUsage. Note
// that the zone is set as a ConstLabel. (It's different in each instance of the
// ClusterManager, but constant over the lifetime of an instance.) The reported
// values are partitioned by host, which is therefore a variable label.
func NewClusterManager(zone string) *ClusterManager {
	return &ClusterManager{
		Zone: zone,
		OOMCount: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Subsystem:   "clustermanager",
				Name:        "oom_count",
				Help:        "number of OOM crashes",
				ConstLabels: prometheus.Labels{"zone": zone},
			},
			[]string{"host"},
		),
		RAMUsage: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Subsystem:   "clustermanager",
				Name:        "ram_usage_bytes",
				Help:        "RAM usage as reported to the cluster manager",
				ConstLabels: prometheus.Labels{"zone": zone},
			},
			[]string{"host"},
		),
	}
}
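
The ClusterManager struct itself is not part of this snippet. A minimal sketch of the fields the constructor above assumes (a hypothetical reconstruction; the type in the upstream example may carry additional fields):

import "github.com/prometheus/client_golang/prometheus"

// ClusterManager as implied by NewClusterManager: a zone name plus the two
// metric vectors created above. NewCounterVec and NewGaugeVec return
// *prometheus.CounterVec and *prometheus.GaugeVec respectively.
type ClusterManager struct {
	Zone     string
	OOMCount *prometheus.CounterVec
	RAMUsage *prometheus.GaugeVec
}

Note that the constructor only creates the vectors; they still have to be registered (for example with prometheus.MustRegister or a per-instance registry) before they can be scraped.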
Code Example #3
File: swarm.go  Project: djbarber/ipfs-hack
	pst "github.com/djbarber/ipfs-hack/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer"
	psy "github.com/djbarber/ipfs-hack/Godeps/_workspace/src/github.com/jbenet/go-stream-muxer/yamux"
	"github.com/djbarber/ipfs-hack/Godeps/_workspace/src/github.com/jbenet/goprocess"
	goprocessctx "github.com/djbarber/ipfs-hack/Godeps/_workspace/src/github.com/jbenet/goprocess/context"
	prom "github.com/djbarber/ipfs-hack/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus"
	mafilter "github.com/djbarber/ipfs-hack/Godeps/_workspace/src/github.com/whyrusleeping/multiaddr-filter"
	context "github.com/djbarber/ipfs-hack/Godeps/_workspace/src/golang.org/x/net/context"
)

var log = logging.Logger("swarm2")

var PSTransport pst.Transport

var peersTotal = prom.NewGaugeVec(prom.GaugeOpts{
	Namespace: "ipfs",
	Subsystem: "p2p",
	Name:      "peers_total",
	Help:      "Number of connected peers",
}, []string{"peer_id"})

func init() {
	tpt := *psy.DefaultTransport
	tpt.MaxStreamWindowSize = 512 * 1024
	PSTransport = &tpt
}
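
// A hedged illustration, not part of the original swarm.go excerpt: the
// peersTotal gauge vector is only scraped once it has been registered, and
// its per-peer series are updated as connections come and go. The function
// name and peer ID below are hypothetical.
func trackPeerExample() {
	prom.MustRegister(peersTotal)

	// Hypothetical update on a connection event: one series per peer ID.
	peersTotal.With(prom.Labels{"peer_id": "QmExamplePeer"}).Set(1)
}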

// Swarm is a connection muxer, allowing connections to other peers to
// be opened and closed, while still using the same Chan for all
// communication. The Chan sends/receives Messages, which note the
// destination or source Peer.
//
// Uses peerstream.Swarm