func makeServerMetrics() ServerMetrics {
	return ServerMetrics{
		Conns:         metric.NewCounter(MetaConns),
		BytesInCount:  metric.NewCounter(MetaBytesIn),
		BytesOutCount: metric.NewCounter(MetaBytesOut),
	}
}
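// Hedged usage sketch, not part of the original file: it shows how the
// ServerMetrics returned above might be registered and bumped, using only
// calls that appear elsewhere in this section (metric.NewRegistry,
// Registry.AddMetricStruct, Counter.Inc). The function name and the wiring
// are assumptions for illustration.
func exampleServerMetricsUsage() {
	reg := metric.NewRegistry()
	sm := makeServerMetrics()
	reg.AddMetricStruct(sm) // exposes Conns, BytesInCount, BytesOutCount

	// Hypothetical accounting for a single accepted connection.
	sm.Conns.Inc(1)
	sm.BytesInCount.Inc(128)
	sm.BytesOutCount.Inc(256)
}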
func BenchmarkDecodeBinaryDecimal(b *testing.B) {
	// NewCounter takes a Metadata argument elsewhere in this package; pass an
	// empty one here since the benchmark does not inspect the counter.
	wbuf := writeBuffer{bytecount: metric.NewCounter(metric.Metadata{})}

	expected := new(parser.DDecimal)
	expected.SetString("-1728718718271827121233.1212121212")
	wbuf.writeBinaryDatum(expected)

	rbuf := readBuffer{msg: wbuf.wrapped.Bytes()}
	plen, err := rbuf.getUint32()
	if err != nil {
		b.Fatal(err)
	}
	bytes, err := rbuf.getBytes(int(plen))
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		got, err := decodeOidDatum(oid.T_numeric, formatBinary, bytes)
		b.StopTimer()
		if err != nil {
			b.Fatal(err)
		} else if got.Compare(expected) != 0 {
			b.Fatalf("expected %s, got %s", expected, got)
		}
	}
}
func BenchmarkWriteBinaryDecimal(b *testing.B) {
	// As above, pass an empty Metadata to NewCounter for consistency with the
	// rest of the package.
	buf := writeBuffer{bytecount: metric.NewCounter(metric.Metadata{})}

	dec := new(parser.DDecimal)
	dec.SetString("-1728718718271827121233.1212121212")

	// Warm up the buffer.
	buf.writeBinaryDatum(dec)
	buf.wrapped.Reset()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		buf.writeBinaryDatum(dec)
		b.StopTimer()
		buf.wrapped.Reset()
	}
}
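// Hedged sketch, not in the original sources: the two benchmarks above time
// the write and decode halves separately; this helper shows the full round
// trip they exercise, using only calls taken from BenchmarkDecodeBinaryDecimal.
// The helper name and its signature are assumptions for illustration.
func exampleBinaryDecimalRoundTrip(d *parser.DDecimal) (parser.Datum, error) {
	wbuf := writeBuffer{bytecount: metric.NewCounter(metric.Metadata{})}
	wbuf.writeBinaryDatum(d)

	rbuf := readBuffer{msg: wbuf.wrapped.Bytes()}
	plen, err := rbuf.getUint32()
	if err != nil {
		return nil, err
	}
	bytes, err := rbuf.getBytes(int(plen))
	if err != nil {
		return nil, err
	}
	return decodeOidDatum(oid.T_numeric, formatBinary, bytes)
}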
// NewExecutor creates an Executor and registers a callback on the
// system config.
func NewExecutor(cfg ExecutorConfig, stopper *stop.Stopper) *Executor {
	exec := &Executor{
		cfg:     cfg,
		reCache: parser.NewRegexpCache(512),

		Latency:          metric.NewLatency(MetaLatency),
		TxnBeginCount:    metric.NewCounter(MetaTxnBegin),
		TxnCommitCount:   metric.NewCounter(MetaTxnCommit),
		TxnAbortCount:    metric.NewCounter(MetaTxnAbort),
		TxnRollbackCount: metric.NewCounter(MetaTxnRollback),
		SelectCount:      metric.NewCounter(MetaSelect),
		UpdateCount:      metric.NewCounter(MetaUpdate),
		InsertCount:      metric.NewCounter(MetaInsert),
		DeleteCount:      metric.NewCounter(MetaDelete),
		DdlCount:         metric.NewCounter(MetaDdl),
		MiscCount:        metric.NewCounter(MetaMisc),
		QueryCount:       metric.NewCounter(MetaQuery),
	}
	exec.systemConfigCond = sync.NewCond(exec.systemConfigMu.RLocker())

	gossipUpdateC := cfg.Gossip.RegisterSystemConfigChannel()
	stopper.RunWorker(func() {
		for {
			select {
			case <-gossipUpdateC:
				sysCfg, _ := cfg.Gossip.GetSystemConfig()
				exec.updateSystemConfig(sysCfg)
			case <-stopper.ShouldStop():
				return
			}
		}
	})

	return exec
}
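// Hedged sketch, not actual Executor code: one plausible way the per-statement
// counters created above could be bumped after each statement executes. The
// method name, the parser statement types in the switch, and the call site are
// assumptions for illustration; only the counter fields and Counter.Inc come
// from this section.
func (e *Executor) exampleUpdateStmtCounts(stmt parser.Statement) {
	e.QueryCount.Inc(1)
	switch stmt.(type) {
	case *parser.Select:
		e.SelectCount.Inc(1)
	case *parser.Insert:
		e.InsertCount.Inc(1)
	case *parser.Update:
		e.UpdateCount.Inc(1)
	case *parser.Delete:
		e.DeleteCount.Inc(1)
	default:
		e.MiscCount.Inc(1)
	}
}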
func newStoreMetrics() *StoreMetrics {
	storeRegistry := metric.NewRegistry()
	sm := &StoreMetrics{
		registry: storeRegistry,

		// Replica metrics.
		ReplicaCount:                  metric.NewCounter(metaReplicaCount),
		ReservedReplicaCount:          metric.NewCounter(metaReservedReplicaCount),
		RaftLeaderCount:               metric.NewGauge(metaRaftLeaderCount),
		RaftLeaderNotLeaseHolderCount: metric.NewGauge(metaRaftLeaderNotLeaseHolderCount),
		LeaseHolderCount:              metric.NewGauge(metaLeaseHolderCount),

		// Range metrics.
		AvailableRangeCount: metric.NewGauge(metaAvailableRangeCount),

		// Replication metrics.
		ReplicaAllocatorNoopCount:       metric.NewGauge(metaReplicaAllocatorNoopCount),
		ReplicaAllocatorRemoveCount:     metric.NewGauge(metaReplicaAllocatorRemoveCount),
		ReplicaAllocatorAddCount:        metric.NewGauge(metaReplicaAllocatorAddCount),
		ReplicaAllocatorRemoveDeadCount: metric.NewGauge(metaReplicaAllocatorRemoveDeadCount),

		// Lease request metrics.
		LeaseRequestSuccessCount: metric.NewCounter(metaLeaseRequestSuccessCount),
		LeaseRequestErrorCount:   metric.NewCounter(metaLeaseRequestErrorCount),

		// Storage metrics.
		LiveBytes:       metric.NewGauge(metaLiveBytes),
		KeyBytes:        metric.NewGauge(metaKeyBytes),
		ValBytes:        metric.NewGauge(metaValBytes),
		IntentBytes:     metric.NewGauge(metaIntentBytes),
		LiveCount:       metric.NewGauge(metaLiveCount),
		KeyCount:        metric.NewGauge(metaKeyCount),
		ValCount:        metric.NewGauge(metaValCount),
		IntentCount:     metric.NewGauge(metaIntentCount),
		IntentAge:       metric.NewGauge(metaIntentAge),
		GcBytesAge:      metric.NewGauge(metaGcBytesAge),
		LastUpdateNanos: metric.NewGauge(metaLastUpdateNanos),
		Capacity:        metric.NewGauge(metaCapacity),
		Available:       metric.NewGauge(metaAvailable),
		Reserved:        metric.NewCounter(metaReserved),
		SysBytes:        metric.NewGauge(metaSysBytes),
		SysCount:        metric.NewGauge(metaSysCount),

		// RocksDB metrics.
		RdbBlockCacheHits:           metric.NewGauge(metaRdbBlockCacheHits),
		RdbBlockCacheMisses:         metric.NewGauge(metaRdbBlockCacheMisses),
		RdbBlockCacheUsage:          metric.NewGauge(metaRdbBlockCacheUsage),
		RdbBlockCachePinnedUsage:    metric.NewGauge(metaRdbBlockCachePinnedUsage),
		RdbBloomFilterPrefixChecked: metric.NewGauge(metaRdbBloomFilterPrefixChecked),
		RdbBloomFilterPrefixUseful:  metric.NewGauge(metaRdbBloomFilterPrefixUseful),
		RdbMemtableHits:             metric.NewGauge(metaRdbMemtableHits),
		RdbMemtableMisses:           metric.NewGauge(metaRdbMemtableMisses),
		RdbMemtableTotalSize:        metric.NewGauge(metaRdbMemtableTotalSize),
		RdbFlushes:                  metric.NewGauge(metaRdbFlushes),
		RdbCompactions:              metric.NewGauge(metaRdbCompactions),
		RdbTableReadersMemEstimate:  metric.NewGauge(metaRdbTableReadersMemEstimate),
		RdbReadAmplification:        metric.NewGauge(metaRdbReadAmplification),

		// Range event metrics.
		RangeSplits:                     metric.NewCounter(metaRangeSplits),
		RangeAdds:                       metric.NewCounter(metaRangeAdds),
		RangeRemoves:                    metric.NewCounter(metaRangeRemoves),
		RangeSnapshotsGenerated:         metric.NewCounter(metaRangeSnapshotsGenerated),
		RangeSnapshotsNormalApplied:     metric.NewCounter(metaRangeSnapshotsNormalApplied),
		RangeSnapshotsPreemptiveApplied: metric.NewCounter(metaRangeSnapshotsPreemptiveApplied),

		// Raft processing metrics.
		RaftTicks:                metric.NewCounter(metaRaftTicks),
		RaftSelectDurationNanos:  metric.NewCounter(metaRaftSelectDurationNanos),
		RaftWorkingDurationNanos: metric.NewCounter(metaRaftWorkingDurationNanos),
		RaftTickingDurationNanos: metric.NewCounter(metaRaftTickingDurationNanos),

		// Raft message metrics.
		RaftRcvdMsgProp:           metric.NewCounter(metaRaftRcvdProp),
		RaftRcvdMsgApp:            metric.NewCounter(metaRaftRcvdApp),
		RaftRcvdMsgAppResp:        metric.NewCounter(metaRaftRcvdAppResp),
		RaftRcvdMsgVote:           metric.NewCounter(metaRaftRcvdVote),
		RaftRcvdMsgVoteResp:       metric.NewCounter(metaRaftRcvdVoteResp),
		RaftRcvdMsgSnap:           metric.NewCounter(metaRaftRcvdSnap),
		RaftRcvdMsgHeartbeat:      metric.NewCounter(metaRaftRcvdHeartbeat),
		RaftRcvdMsgHeartbeatResp:  metric.NewCounter(metaRaftRcvdHeartbeatResp),
		RaftRcvdMsgTransferLeader: metric.NewCounter(metaRaftRcvdTransferLeader),
		RaftRcvdMsgTimeoutNow:     metric.NewCounter(metaRaftRcvdTimeoutNow),
		raftRcvdMessages:          make(map[raftpb.MessageType]*metric.Counter, len(raftpb.MessageType_name)),

		RaftEnqueuedPending: metric.NewGauge(metaRaftEnqueuedPending),
	}

	sm.raftRcvdMessages[raftpb.MsgProp] = sm.RaftRcvdMsgProp
	sm.raftRcvdMessages[raftpb.MsgApp] = sm.RaftRcvdMsgApp
	sm.raftRcvdMessages[raftpb.MsgAppResp] = sm.RaftRcvdMsgAppResp
	sm.raftRcvdMessages[raftpb.MsgVote] = sm.RaftRcvdMsgVote
	sm.raftRcvdMessages[raftpb.MsgVoteResp] = sm.RaftRcvdMsgVoteResp
	sm.raftRcvdMessages[raftpb.MsgSnap] = sm.RaftRcvdMsgSnap
	sm.raftRcvdMessages[raftpb.MsgHeartbeat] = sm.RaftRcvdMsgHeartbeat
	sm.raftRcvdMessages[raftpb.MsgHeartbeatResp] = sm.RaftRcvdMsgHeartbeatResp
	sm.raftRcvdMessages[raftpb.MsgTransferLeader] = sm.RaftRcvdMsgTransferLeader
	sm.raftRcvdMessages[raftpb.MsgTimeoutNow] = sm.RaftRcvdMsgTimeoutNow

	storeRegistry.AddMetricStruct(sm)

	return sm
}
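// Hedged sketch, not part of the original file: how the raftRcvdMessages map
// built above could be consulted to count an incoming Raft message by type.
// The method name and its call site are assumptions; the map and Counter.Inc
// come from newStoreMetrics above.
func (sm *StoreMetrics) exampleCountRaftRcvdMessage(typ raftpb.MessageType) {
	if counter, ok := sm.raftRcvdMessages[typ]; ok {
		counter.Inc(1)
	}
}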
// TestMetricsRecorder verifies that the metrics recorder properly formats the
// statistics from various registries, both for Time Series and for Status
// Summaries.
func TestMetricsRecorder(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// ========================================
	// Construct a series of fake descriptors for use in test.
	// ========================================
	nodeDesc := roachpb.NodeDescriptor{
		NodeID: roachpb.NodeID(1),
	}
	storeDesc1 := roachpb.StoreDescriptor{
		StoreID: roachpb.StoreID(1),
		Capacity: roachpb.StoreCapacity{
			Capacity:  100,
			Available: 50,
		},
	}
	storeDesc2 := roachpb.StoreDescriptor{
		StoreID: roachpb.StoreID(2),
		Capacity: roachpb.StoreCapacity{
			Capacity:  200,
			Available: 75,
		},
	}

	// ========================================
	// Create registries and add them to the recorder (two node-level, two
	// store-level).
	// ========================================
	reg1 := metric.NewRegistry()
	store1 := fakeStore{
		storeID:  roachpb.StoreID(1),
		desc:     storeDesc1,
		registry: metric.NewRegistry(),
	}
	store2 := fakeStore{
		storeID:  roachpb.StoreID(2),
		desc:     storeDesc2,
		registry: metric.NewRegistry(),
	}
	manual := hlc.NewManualClock(100)
	recorder := NewMetricsRecorder(hlc.NewClock(manual.UnixNano))
	recorder.AddStore(store1)
	recorder.AddStore(store2)
	recorder.AddNode(reg1, nodeDesc, 50)

	// Ensure the metric system's view of time does not advance during this test,
	// as the test expects time to not advance too far, which would age the actual
	// data (e.g. in histograms) unexpectedly.
	defer metric.TestingSetNow(func() time.Time {
		return time.Unix(0, manual.UnixNano()).UTC()
	})()

	// ========================================
	// Generate Metrics Data & Expected Results
	// ========================================

	// Flatten the four registries into an array for ease of use.
	regList := []struct {
		reg    *metric.Registry
		prefix string
		source int64
		isNode bool
	}{
		{
			reg:    reg1,
			prefix: "one.",
			source: 1,
			isNode: true,
		},
		{
			reg:    reg1,
			prefix: "two.",
			source: 1,
			isNode: true,
		},
		{
			reg:    store1.registry,
			prefix: "",
			source: int64(store1.storeID),
			isNode: false,
		},
		{
			reg:    store2.registry,
			prefix: "",
			source: int64(store2.storeID),
			isNode: false,
		},
	}

	// Every registry will have a copy of the following metrics.
	metricNames := []struct {
		name string
		typ  string
		val  int64
	}{
		{"testGauge", "gauge", 20},
		{"testGaugeFloat64", "floatgauge", 20},
		{"testCounter", "counter", 5},
		{"testRate", "rate", 2},
		{"testHistogram", "histogram", 10},
		{"testLatency", "latency", 10},

		// Stats needed for store summaries.
		{"ranges", "counter", 1},
		{"replicas.leaders", "gauge", 1},
		{"replicas.leaseholders", "gauge", 1},
		{"ranges.available", "gauge", 1},
	}

	// Add the metrics to each registry and set their values. At the same time,
	// generate expected time series results and status summary metric values.
	var expected []tspb.TimeSeriesData
	expectedNodeSummaryMetrics := make(map[string]float64)
	expectedStoreSummaryMetrics := make(map[string]float64)

	// addExpected generates expected data for a single metric data point.
	addExpected := func(prefix, name string, source, time, val int64, isNode bool) {
		// Generate time series data.
		tsPrefix := "cr.node."
		if !isNode {
			tsPrefix = "cr.store."
		}
		expect := tspb.TimeSeriesData{
			Name:   tsPrefix + prefix + name,
			Source: strconv.FormatInt(source, 10),
			Datapoints: []tspb.TimeSeriesDatapoint{
				{
					TimestampNanos: time,
					Value:          float64(val),
				},
			},
		}
		expected = append(expected, expect)

		// Generate status summary data.
		if isNode {
			expectedNodeSummaryMetrics[prefix+name] = float64(val)
		} else {
			// This can overwrite the previous value, but this is expected as
			// all stores in our tests have identical values; when comparing
			// status summaries, the same map is used as expected data for all
			// stores.
			expectedStoreSummaryMetrics[prefix+name] = float64(val)
		}
	}

	for _, reg := range regList {
		for _, data := range metricNames {
			switch data.typ {
			case "gauge":
				g := metric.NewGauge(metric.Metadata{Name: reg.prefix + data.name})
				reg.reg.AddMetric(g)
				g.Update(data.val)
				addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
			case "floatgauge":
				g := metric.NewGaugeFloat64(metric.Metadata{Name: reg.prefix + data.name})
				reg.reg.AddMetric(g)
				g.Update(float64(data.val))
				addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
			case "counter":
				c := metric.NewCounter(metric.Metadata{Name: reg.prefix + data.name})
				reg.reg.AddMetric(c)
				c.Inc(data.val)
				addExpected(reg.prefix, data.name, reg.source, 100, data.val, reg.isNode)
			case "rate":
				r := metric.NewRates(metric.Metadata{Name: reg.prefix + data.name})
				reg.reg.AddMetricGroup(r)
				r.Add(data.val)
				addExpected(reg.prefix, data.name+"-count", reg.source, 100, data.val, reg.isNode)
				for _, scale := range metric.DefaultTimeScales {
					// Rate data is subject to timing errors in tests. Zero out
					// these values.
					addExpected(reg.prefix, data.name+sep+scale.Name(), reg.source, 100, 0, reg.isNode)
				}
			case "histogram":
				h := metric.NewHistogram(metric.Metadata{Name: reg.prefix + data.name}, time.Second, 1000, 2)
				reg.reg.AddMetric(h)
				h.RecordValue(data.val)
				for _, q := range recordHistogramQuantiles {
					addExpected(reg.prefix, data.name+q.suffix, reg.source, 100, data.val, reg.isNode)
				}
			case "latency":
				l := metric.NewLatency(metric.Metadata{Name: reg.prefix + data.name})
				reg.reg.AddMetricGroup(l)
				l.RecordValue(data.val)
				// Latency is simply three histograms (at different resolution
				// time scales).
				for _, scale := range metric.DefaultTimeScales {
					for _, q := range recordHistogramQuantiles {
						addExpected(reg.prefix, data.name+sep+scale.Name()+q.suffix, reg.source, 100, data.val, reg.isNode)
					}
				}
			}
		}
	}

	// ========================================
	// Verify time series data
	// ========================================
	actual := recorder.GetTimeSeriesData()

	// Zero out timing-sensitive rate values from actual data.
	for _, act := range actual {
		match, err := regexp.MatchString(`testRate-\d+m`, act.Name)
		if err != nil {
			t.Fatal(err)
		}
		if match {
			act.Datapoints[0].Value = 0.0
		}
	}

	// Actual comparison is simple: sort the resulting arrays by time and name,
	// and use reflect.DeepEqual.
	sort.Sort(byTimeAndName(actual))
	sort.Sort(byTimeAndName(expected))
	if a, e := actual, expected; !reflect.DeepEqual(a, e) {
		t.Errorf("recorder did not yield expected time series collection; diff:\n %v", pretty.Diff(e, a))
	}

	// ========================================
	// Verify node summary generation
	// ========================================
	expectedNodeSummary := &NodeStatus{
		Desc:      nodeDesc,
		BuildInfo: build.GetInfo(),
		StartedAt: 50,
		UpdatedAt: 100,
		Metrics:   expectedNodeSummaryMetrics,
		StoreStatuses: []StoreStatus{
			{
				Desc:    storeDesc1,
				Metrics: expectedStoreSummaryMetrics,
			},
			{
				Desc:    storeDesc2,
				Metrics: expectedStoreSummaryMetrics,
			},
		},
	}

	nodeSummary := recorder.GetStatusSummary()
	if nodeSummary == nil {
		t.Fatalf("recorder did not return nodeSummary.")
	}

	sort.Sort(byStoreDescID(nodeSummary.StoreStatuses))
	if a, e := nodeSummary, expectedNodeSummary; !reflect.DeepEqual(a, e) {
		t.Errorf("recorder did not produce expected NodeSummary; diff:\n %v", pretty.Diff(e, a))
	}
}
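// Hedged sketch, not the package's actual definition: byTimeAndName (used in
// the comparison above) presumably implements sort.Interface over
// []tspb.TimeSeriesData, ordering by name and then by the first datapoint's
// timestamp. The type name below and the exact ordering of the real adapter
// are assumptions for illustration.
type exampleByTimeAndName []tspb.TimeSeriesData

func (a exampleByTimeAndName) Len() int      { return len(a) }
func (a exampleByTimeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a exampleByTimeAndName) Less(i, j int) bool {
	if a[i].Name != a[j].Name {
		return a[i].Name < a[j].Name
	}
	return a[i].Datapoints[0].TimestampNanos < a[j].Datapoints[0].TimestampNanos
}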