func newConsumerMetrics(consumerName string) *consumerMetrics { kafkaMetrics := &consumerMetrics{ registry: metrics.NewRegistry(), } kafkaMetrics.fetchersIdleTimer = metrics.NewRegisteredTimer(fmt.Sprintf("FetchersIdleTime-%s", consumerName), kafkaMetrics.registry) kafkaMetrics.fetchDurationTimer = metrics.NewRegisteredTimer(fmt.Sprintf("FetchDuration-%s", consumerName), kafkaMetrics.registry) kafkaMetrics.numWorkerManagersGauge = metrics.NewRegisteredGauge(fmt.Sprintf("NumWorkerManagers-%s", consumerName), kafkaMetrics.registry) kafkaMetrics.activeWorkersCounter = metrics.NewRegisteredCounter(fmt.Sprintf("WMsActiveWorkers-%s", consumerName), kafkaMetrics.registry) kafkaMetrics.pendingWMsTasksCounter = metrics.NewRegisteredCounter(fmt.Sprintf("WMsPendingTasks-%s", consumerName), kafkaMetrics.registry) kafkaMetrics.wmsBatchDurationTimer = metrics.NewRegisteredTimer(fmt.Sprintf("WMsBatchDuration-%s", consumerName), kafkaMetrics.registry) kafkaMetrics.wmsIdleTimer = metrics.NewRegisteredTimer(fmt.Sprintf("WMsIdleTime-%s", consumerName), kafkaMetrics.registry) return kafkaMetrics }
func newConsumerMetrics(consumerName, prefix string) *ConsumerMetrics { kafkaMetrics := &ConsumerMetrics{ registry: metrics.DefaultRegistry, } // Ensure prefix ends with a dot (.) so it plays nice with statsd/graphite prefix = strings.Trim(prefix, " ") if prefix != "" && prefix[len(prefix)-1:] != "." { prefix += "." } kafkaMetrics.consumerName = consumerName kafkaMetrics.prefix = prefix kafkaMetrics.fetchersIdleTimer = metrics.NewRegisteredTimer(fmt.Sprintf("%sFetchersIdleTime-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.fetchDurationTimer = metrics.NewRegisteredTimer(fmt.Sprintf("%sFetchDuration-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.numWorkerManagersGauge = metrics.NewRegisteredGauge(fmt.Sprintf("%sNumWorkerManagers-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.activeWorkersCounter = metrics.NewRegisteredCounter(fmt.Sprintf("%sWMsActiveWorkers-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.pendingWMsTasksCounter = metrics.NewRegisteredCounter(fmt.Sprintf("%sWMsPendingTasks-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.taskTimeoutCounter = metrics.NewRegisteredCounter(fmt.Sprintf("%sTaskTimeouts-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.wmsBatchDurationTimer = metrics.NewRegisteredTimer(fmt.Sprintf("%sWMsBatchDuration-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.wmsIdleTimer = metrics.NewRegisteredTimer(fmt.Sprintf("%sWMsIdleTime-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.numFetchedMessagesCounter = metrics.NewRegisteredCounter(fmt.Sprintf("%sFetchedMessages-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.numConsumedMessagesCounter = metrics.NewRegisteredCounter(fmt.Sprintf("%sConsumedMessages-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.numAcksCounter = metrics.NewRegisteredCounter(fmt.Sprintf("%sAcks-%s", prefix, consumerName), kafkaMetrics.registry) 
kafkaMetrics.topicPartitionLag = make(map[TopicAndPartition]metrics.Gauge) kafkaMetrics.reportingStopChannels = make([]chan struct{}, 0) return kafkaMetrics }
func newConsumerMetrics(consumerName, prefix string) *ConsumerMetrics { kafkaMetrics := &ConsumerMetrics{ registry: metrics.DefaultRegistry, } // Ensure prefix ends with a dot (.) so it plays nice with statsd/graphite prefix = strings.Trim(prefix, " ") if prefix != "" && prefix[len(prefix)-1:] != "." { prefix += "." } kafkaMetrics.fetchersIdleTimer = metrics.NewRegisteredTimer(fmt.Sprintf("%sFetchersIdleTime-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.fetchDurationTimer = metrics.NewRegisteredTimer(fmt.Sprintf("%sFetchDuration-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.numWorkerManagersGauge = metrics.NewRegisteredGauge(fmt.Sprintf("%sNumWorkerManagers-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.activeWorkersCounter = metrics.NewRegisteredCounter(fmt.Sprintf("%sWMsActiveWorkers-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.pendingWMsTasksCounter = metrics.NewRegisteredCounter(fmt.Sprintf("%sWMsPendingTasks-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.taskTimeoutCounter = metrics.NewRegisteredCounter(fmt.Sprintf("%sTaskTimeouts-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.wmsBatchDurationTimer = metrics.NewRegisteredTimer(fmt.Sprintf("%sWMsBatchDuration-%s", prefix, consumerName), kafkaMetrics.registry) kafkaMetrics.wmsIdleTimer = metrics.NewRegisteredTimer(fmt.Sprintf("%sWMsIdleTime-%s", prefix, consumerName), kafkaMetrics.registry) return kafkaMetrics }
// StartBinaryGenerator starts up 1 goroutine running a "master" afl-fuzz and n-1 "slave" // afl-fuzz processes, where n is specified by config.Generator.NumFuzzProcesses. // Output goes to config.Generator.AflOutputPath func StartBinaryGenerator() error { executable, err := setup() if err != nil { return fmt.Errorf("Failed binary generator setup: %s", err) } masterCmd := &exec.Command{ Name: "./afl-fuzz", Args: []string{"-i", config.Generator.FuzzSamples, "-o", config.Generator.AflOutputPath, "-m", "5000", "-t", "3000", "-M", "fuzzer0", "--", executable, "--src", "skp", "--skps", "@@", "--config", "8888"}, Dir: config.Generator.AflRoot, LogStdout: true, LogStderr: true, Env: []string{"AFL_SKIP_CPUFREQ=true"}, // Avoids a warning afl-fuzz spits out about dynamic scaling of cpu frequency } if config.Generator.WatchAFL { masterCmd.Stdout = os.Stdout } fuzzProcesses = append(fuzzProcesses, run(masterCmd)) fuzzCount := config.Generator.NumFuzzProcesses if fuzzCount <= 0 { // TODO(kjlubick): Make this actually an intelligent number based on the number of cores. fuzzCount = 10 } fuzzProcessCount = go_metrics.NewRegisteredCounter("afl_fuzz_process_count", go_metrics.DefaultRegistry) fuzzProcessCount.Inc(int64(fuzzCount)) for i := 1; i < fuzzCount; i++ { fuzzerName := fmt.Sprintf("fuzzer%d", i) slaveCmd := &exec.Command{ Name: "./afl-fuzz", Args: []string{"-i", config.Generator.FuzzSamples, "-o", config.Generator.AflOutputPath, "-m", "5000", "-t", "3000", "-S", fuzzerName, "--", executable, "--src", "skp", "--skps", "@@", "--config", "8888"}, Dir: config.Generator.AflRoot, LogStdout: true, LogStderr: true, Env: []string{"AFL_SKIP_CPUFREQ=true"}, // Avoids a warning afl-fuzz spits out about dynamic scaling of cpu frequency } fuzzProcesses = append(fuzzProcesses, run(slaveCmd)) } return nil }
func TestParsing(t *testing.T) { tile := tiling.NewTile() offset := 1 dm := loadDMResults(t) metricsProcessed := metrics.NewRegisteredCounter("testing.ingestion.processed", metrics.DefaultRegistry) addResultToTile(dm, tile, offset, metricsProcessed) if got, want := len(tile.Traces), 2; got != want { t.Errorf("Wrong number of Traces: Got %v Want %v", got, want) } tr := tile.Traces["x86_64:565:Debug:HD7770:ShuttleA:varied_text_clipped_no_lcd:Win8"].(*types.GoldenTrace) if got, want := tr.Values[1], "445aa63b2200baaba9b37fd5f80c0447"; got != want { t.Errorf("Digest wrong: Got %v Want %v", got, want) } if got, want := len(tr.Params()), 9; got != want { t.Errorf("Params wrong: Got %v Want %v", got, want) } if got, want := tr.Params()["ext"], "png"; got != want { t.Errorf("Extension not injected:: Got %v Want %v", got, want) } if got, want := int64(2), metricsProcessed.Count(); got != want { t.Errorf("Wrong number of points ingested: Got %v Want %v", got, want) } }
// Init initializes the module. func Init() { downloadSuccessCount = metrics.NewRegisteredCounter("gold.gsdownload.success", metrics.DefaultRegistry) downloadFailureCount = metrics.NewRegisteredCounter("gold.gsdownload.failiure", metrics.DefaultRegistry) }
// newCounter registers and returns a counter named
// "ingester.<name>.gauge.<suffix>" on the default metrics registry.
func newCounter(name, suffix string) metrics.Counter {
	// NOTE(review): the name embeds "gauge" although this registers a Counter;
	// kept unchanged since downstream dashboards likely key off the name.
	metricName := "ingester." + name + ".gauge." + suffix
	return metrics.NewRegisteredCounter(metricName, metrics.DefaultRegistry)
}
// tryInfoLink is the regex that matches URLs paths that are direct links to data about a single try. tryInfoLink = regexp.MustCompile("^/json/([a-f0-9]+)$") // workspaceLink is the regex that matches URLs paths for workspaces. workspaceLink = regexp.MustCompile("^/w/([a-z0-9-]+)$") // errorRE is the regex that matches compiler errors and extracts the line / column information. errorRE = regexp.MustCompile("^.*.cpp:(\\d+):(\\d+):\\s*(.*)") // paintDeclRE is the regex that matches paint declarations so we can set up fonts for it paintDeclRE = regexp.MustCompile("^\\s+SkPaint\\s+(\\S+)\\s*;") gitHash = "" gitInfo = "" requestsCounter = metrics.NewRegisteredCounter("requests", metrics.DefaultRegistry) ) // Command line flags. var ( configFilename = flag.String("config", "webtry.toml", "Configuration filename") graphiteServer = flag.String("graphite_server", "skia-monitoring:2003", "Where is Graphite metrics ingestion server running.") ) // lineNumbers adds #line numbering to the user's code. func LineNumbers(c string) string { lines := strings.Split(c, "\n") ret := []string{} for i, line := range lines { ret = append(ret, fmt.Sprintf("#line %d", i+1)) ret = append(ret, line)
func TestAddBenchDataToTile(t *testing.T) { // Load the sample data file as BenchData. _, filename, _, _ := runtime.Caller(0) r, err := os.Open(filepath.Join(filepath.Dir(filename), "testdata", "nano.json")) if err != nil { t.Fatal("Failed to open test file: ", err) } benchData, err := ParseBenchDataFromReader(r) if err != nil { t.Fatal("Failed to parse test file: ", err) } metricsProcessed := metrics.NewRegisteredCounter("testing.ingestion.processed", metrics.DefaultRegistry) // Create an empty Tile. tile := tiling.NewTile() tile.Scale = 0 tile.TileIndex = 0 offset := 1 testcases := []struct { key string value float64 subResult string }{ { key: "x86:GTX660:ShuttleA:Ubuntu12:DeferredSurfaceCopy_discardable_640_480:gpu", value: 0.1157132745098039, subResult: "min_ms", }, { key: "x86:GTX660:ShuttleA:Ubuntu12:memory_usage_0_0:meta:max_rss_mb", value: 858, subResult: "max_rss_mb", }, { key: "x86:GTX660:ShuttleA:Ubuntu12:src_pipe_global_weak_symbol:memory:bytes", value: 158, subResult: "bytes", }, { key: "x86:GTX660:ShuttleA:Ubuntu12:DeferredSurfaceCopy_nonDiscardable_640_480:8888", value: 2.855735, subResult: "min_ms", }, { key: "x86:GTX660:ShuttleA:Ubuntu12:DeferredSurfaceCopy_nonDiscardable_640_480:8888:bytes", value: 298888, subResult: "bytes", }, { key: "x86:GTX660:ShuttleA:Ubuntu12:DeferredSurfaceCopy_nonDiscardable_640_480:8888:ops", value: 3333, subResult: "ops", }, } // Do everything twice to ensure that we are idempotent. for i := 0; i < 2; i++ { // Add the BenchData to the Tile. addBenchDataToTile(benchData, tile, offset, metricsProcessed) // Test that the Tile has the right data. 
if got, want := len(tile.Traces), 13; got != want { t.Errorf("Wrong number of traces: Got %d Want %d", got, want) } for _, tc := range testcases { trace, ok := tile.Traces[tc.key] if !ok { t.Errorf("Missing expected key: %s", tc.key) } if got, want := trace.(*types.PerfTrace).Values[offset], tc.value; got != want { t.Errorf("Wrong value in trace: Got %v Want %v", got, want) } } trace := tile.Traces[testcases[0].key] // Validate the traces Params. expected := map[string]string{ "arch": "x86", "gpu": "GTX660", "model": "ShuttleA", "os": "Ubuntu12", "system": "UNIX", "test": "DeferredSurfaceCopy_discardable_640_480", "config": "gpu", "GL_RENDERER": "GeForce GTX 660/PCIe/SSE2", "GL_SHADING_LANGUAGE_VERSION": "4.40 NVIDIA via Cg compiler", "GL_VENDOR": "NVIDIA Corporation", "GL_VERSION": "4.4.0 NVIDIA 331.49", "source_type": "bench", "sub_result": "min_ms", } if got, want := len(trace.Params()), len(expected); got != want { t.Errorf("Params wrong length: Got %v Want %v", got, want) } for k, v := range expected { if got, want := trace.Params()[k], v; got != want { t.Errorf("Wrong params: Got %v Want %v", got, want) } } // Validate the Tiles ParamSet. if got, want := len(tile.ParamSet), len(expected)+2; got != want { t.Errorf("Wrong ParamSet length: Got %v Want %v", got, want) } for k, _ := range expected { if _, ok := tile.ParamSet[k]; !ok { t.Errorf("Missing from ParamSet: %s", k) } } // The new symbol table size options values should also show up in the ParamSet. 
for _, k := range []string{"path", "symbol"} { if _, ok := tile.ParamSet[k]; !ok { t.Errorf("Missing from ParamSet: %s", k) } } if got, want := len(tile.ParamSet["source_type"]), 1; got != want { t.Errorf("Wrong ParamSet for source_type: Got %v Want %v", got, want) } if got, want := tile.ParamSet["source_type"][0], "bench"; got != want { t.Errorf("Wrong ParamSet value: Got %v Want %v", got, want) } } if got, want := metricsProcessed.Count(), int64(26); got != want { t.Errorf("Wrong number of points ingested: Got %v Want %v", got, want) } // Now update one of the params for a trace and reingest and confirm that the // trace params get updated. benchData.Options["system"] = "Linux" addBenchDataToTile(benchData, tile, offset, metricsProcessed) if got, want := "Linux", tile.Traces[testcases[0].key].Params()["system"]; got != want { t.Errorf("Failed to update params: Got %v Want %v", got, want) } if got, want := metricsProcessed.Count(), int64(39); got != want { t.Errorf("Wrong number of points ingested: Got %v Want %v", got, want) } }
CLUSTER_SIZE = 50 CLUSTER_STDDEV = 0.001 // TRACKED_ITEM_URL_TEMPLATE is used to generate the URL that is // embedded in an issue. It is also used to search for issues linked to a // specific item (cluster). The format verb is to be replaced with the ID // of the tracked item. TRACKED_ITEM_URL_TEMPLATE = "https://perf.skia.org/cl/%d" ) var ( // The number of clusters with a status of "New". newClustersGauge = metrics.NewRegisteredGauge("alerting.new", metrics.DefaultRegistry) // The number of times we've successfully done alert clustering. runsCounter = metrics.NewRegisteredCounter("alerting.runs", metrics.DefaultRegistry) // How long it takes to do a clustering run. alertingLatency = metrics.NewRegisteredTimer("alerting.latency", metrics.DefaultRegistry) // tileBuilder is the tracedb.Builder where we load Tiles from. tileBuilder *tracedb.Builder ) // CombineClusters combines freshly found clusters with existing clusters. // // Algorithm: // Run clustering and pick out the "Interesting" clusters. // Compare all the Interesting clusters to all the existing relevant clusters, // where "relevant" clusters are ones whose Hash/timestamp of the step // exists in the current tile.