// NewTempRepo assumes the repo is called testrepo.zip and is in a directory
// called testdata under the directory of the unit test that is calling it.
//
// The directory that was created is stored in TempRepo.Dir.
func NewTempRepo() *TempRepo {
	tmpdir, err := ioutil.TempDir("", "skiaperf")
	if err != nil {
		glog.Fatalln("Failed to create testing Git repo:", err)
	}
	_, filename, _, _ := runtime.Caller(1)
	if err := unzip(filepath.Join(filepath.Dir(filename), "testdata", "testrepo.zip"), tmpdir); err != nil {
		glog.Fatalln("Failed to unzip testing Git repo:", err)
	}
	return &TempRepo{Dir: tmpdir}
}
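// unzip is called above but not shown in this excerpt. Below is a minimal
// sketch of what it could look like, using archive/zip, io, os, and
// path/filepath; it assumes the archive is trusted test data (no
// path-traversal hardening) and may differ from the real helper.
func unzip(src, dir string) error {
	r, err := zip.OpenReader(src)
	if err != nil {
		return err
	}
	defer util.Close(r)
	for _, f := range r.File {
		dst := filepath.Join(dir, f.Name)
		if f.FileInfo().IsDir() {
			if err := os.MkdirAll(dst, 0755); err != nil {
				return err
			}
			continue
		}
		if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
			return err
		}
		rc, err := f.Open()
		if err != nil {
			return err
		}
		out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
		if err != nil {
			util.Close(rc)
			return err
		}
		_, err = io.Copy(out, rc)
		util.Close(rc)
		util.Close(out)
		if err != nil {
			return err
		}
	}
	return nil
}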
// monitorIssueTracker reads the counts for all the types of issues in the Skia
// issue tracker (code.google.com/p/skia) and stuffs the counts into Graphite.
func monitorIssueTracker() {
	c := &http.Client{
		Transport: &http.Transport{
			Dial: dialTimeout,
		},
	}
	if *useMetadata {
		*apikey = metadata.Must(metadata.ProjectGet(metadata.APIKEY))
	}

	// Create a new metrics registry for the issue tracker metrics.
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	issueRegistry := metrics.NewRegistry()
	go graphite.Graphite(issueRegistry, common.SAMPLE_PERIOD, "issues", addr)

	// IssueStatus has all the info we need to capture and record a single issue status. I.e. capture
	// the count of all issues with a status of "New".
	type IssueStatus struct {
		Name   string
		Metric metrics.Gauge
		URL    string
	}

	allIssueStatusLabels := []string{
		"New", "Accepted", "Unconfirmed", "Started", "Fixed", "Verified", "Invalid", "WontFix", "Done", "Available", "Assigned",
	}

	issueStatus := []*IssueStatus{}
	for _, issueName := range allIssueStatusLabels {
		issueStatus = append(issueStatus, &IssueStatus{
			Name:   issueName,
			Metric: metrics.NewRegisteredGauge(strings.ToLower(issueName), issueRegistry),
			URL:    "https://www.googleapis.com/projecthosting/v2/projects/skia/issues?fields=totalResults&key=" + *apikey + "&status=" + issueName,
		})
	}

	liveness := imetrics.NewLiveness("issue-tracker")
	for range time.Tick(ISSUE_TRACKER_PERIOD) {
		for _, issue := range issueStatus {
			resp, err := c.Get(issue.URL)
			if err != nil {
				// Without this check a failed Get would panic below when
				// dereferencing a nil resp.
				glog.Errorf("Failed to retrieve response from %s: %s", issue.URL, err)
				continue
			}
			jsonResp := map[string]int64{}
			dec := json.NewDecoder(resp.Body)
			if err := dec.Decode(&jsonResp); err != nil {
				glog.Warningf("Failed to decode JSON response: %s", err)
				util.Close(resp.Body)
				continue
			}
			issue.Metric.Update(jsonResp["totalResults"])
			glog.Infof("Num Issues: %s - %d", issue.Name, jsonResp["totalResults"])
			util.Close(resp.Body)
		}
		liveness.Update()
	}
}
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("probeserver", graphiteServer)

	client, err := auth.NewDefaultJWTServiceAccountClient("https://www.googleapis.com/auth/userinfo.email")
	if err != nil {
		glog.Fatalf("Failed to create client for talking to the issue tracker: %s", err)
	}
	go monitorIssueTracker(client)

	glog.Infoln("Looking for Graphite server.")
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	glog.Infoln("Found Graphite server.")

	liveness := imetrics.NewLiveness("probes")

	// We have two sets of metrics, one for the probes and one for the probe
	// server itself. The server's metrics are handled by common.InitWithMetrics().
	probeRegistry := metrics.NewRegistry()
	go graphite.Graphite(probeRegistry, common.SAMPLE_PERIOD, *prefix, addr)

	// TODO(jcgregorio) Monitor config file and reload if it changes.
	cfg, err := readConfigFiles(*config)
	if err != nil {
		glog.Fatalln("Failed to read config file: ", err)
	}
	glog.Infoln("Successfully read config file.")

	// Register gauges for each probe.
	for name, probe := range cfg {
		probe.failure = metrics.NewRegisteredGauge(name+".failure", probeRegistry)
		probe.latency = metrics.NewRegisteredGauge(name+".latency", probeRegistry)
	}

	// Create a client that uses our dialer with a timeout.
	c := &http.Client{
		Transport: &http.Transport{
			Dial: dialTimeout,
		},
	}
	probeOneRound(cfg, c)
	for range time.Tick(*runEvery) {
		probeOneRound(cfg, c)
		liveness.Update()
	}
}
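// dialTimeout is wired into the http.Transport instances above but not shown
// in this excerpt. Below is a minimal sketch using net and time; the 5 second
// connect timeout is an assumption, not the real configuration.
func dialTimeout(network, addr string) (net.Conn, error) {
	return net.DialTimeout(network, addr, 5*time.Second)
}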
// monitorIssueTracker reads the counts for all the types of issues in the Skia
// issue tracker (bugs.chromium.org/p/skia) and stuffs the counts into Graphite.
func monitorIssueTracker(c *http.Client) {
	// Create a new metrics registry for the issue tracker metrics.
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	issueRegistry := metrics.NewRegistry()
	go graphite.Graphite(issueRegistry, common.SAMPLE_PERIOD, "issues", addr)

	// IssueStatus has all the info we need to capture and record a single issue status. I.e. capture
	// the count of all issues with a status of "New".
	type IssueStatus struct {
		Name   string
		Metric metrics.Gauge
		URL    string
	}

	allIssueStatusLabels := []string{
		"New", "Accepted", "Unconfirmed", "Started", "Fixed", "Verified", "Invalid", "WontFix", "Done", "Available", "Assigned",
	}

	issueStatus := []*IssueStatus{}
	for _, issueName := range allIssueStatusLabels {
		q := url.Values{}
		q.Set("fields", "totalResults")
		q.Set("status", issueName)
		issueStatus = append(issueStatus, &IssueStatus{
			Name:   issueName,
			Metric: metrics.NewRegisteredGauge(strings.ToLower(issueName), issueRegistry),
			URL:    issues.MONORAIL_BASE_URL + "?" + q.Encode(),
		})
	}

	liveness := imetrics.NewLiveness("issue-tracker")
	for range time.Tick(ISSUE_TRACKER_PERIOD) {
		for _, issue := range issueStatus {
			resp, err := c.Get(issue.URL)
			if err != nil {
				glog.Errorf("Failed to retrieve response from %s: %s", issue.URL, err)
				continue
			}
			jsonResp := map[string]int64{}
			dec := json.NewDecoder(resp.Body)
			if err := dec.Decode(&jsonResp); err != nil {
				glog.Warningf("Failed to decode JSON response: %s", err)
				util.Close(resp.Body)
				continue
			}
			issue.Metric.Update(jsonResp["totalResults"])
			glog.Infof("Num Issues: %s - %d", issue.Name, jsonResp["totalResults"])
			// err is always nil here since the error cases above continue,
			// so just close the body.
			util.Close(resp.Body)
		}
		liveness.Update()
	}
}
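// For reference, the query above asks Monorail for only the totalResults
// field, so each response decodes from a one-key JSON object (illustrative
// payload, not captured from the live service):
//
//	{"totalResults": 42}
//
// which is why a map[string]int64 is enough to hold it.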
// Migrate migrates the database to the specified target version. Use
// DBVersion() to retrieve the current version of the database.
func (vdb *VersionedDB) Migrate(targetVersion int) (rv error) {
	if (targetVersion < 0) || (targetVersion > vdb.MaxDBVersion()) {
		glog.Fatalf("Target db version must be in range: [0 .. %d]", vdb.MaxDBVersion())
	}

	currentVersion, err := vdb.DBVersion()
	if err != nil {
		return err
	}

	if currentVersion > vdb.MaxDBVersion() {
		glog.Fatalf("Version table is out of date with current DB version.")
	}

	if targetVersion == currentVersion {
		return nil
	}

	// Start a transaction.
	txn, err := vdb.DB.Begin()
	if err != nil {
		return err
	}

	defer func() { rv = CommitOrRollback(txn, rv) }()

	// Run through the migration steps.
	runSteps := vdb.getMigrations(currentVersion, targetVersion)
	if len(runSteps) == 0 {
		glog.Fatalln("Unable to find migration steps.")
	}

	for _, step := range runSteps {
		for _, stmt := range step {
			glog.Infoln("EXECUTING: \n", stmt)
			if _, err = txn.Exec(stmt); err != nil {
				return err
			}
		}
	}

	// Update the dbversion table.
	if err = vdb.setDBVersion(txn, targetVersion); err != nil {
		return err
	}

	return nil
}
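// CommitOrRollback, used in the deferred function above, is defined elsewhere
// in the package. Below is a minimal sketch of the pattern using database/sql
// and fmt: commit when the migration succeeded (rv == nil), roll back
// otherwise, and surface whichever error results. The real helper may differ.
func CommitOrRollback(txn *sql.Tx, rv error) error {
	if rv != nil {
		if err := txn.Rollback(); err != nil {
			return fmt.Errorf("migration failed (%v) and rollback also failed: %v", rv, err)
		}
		return rv
	}
	return txn.Commit()
}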
func BenchmarkBigDataset(b *testing.B) {
	// Download the testdata and remove the testdata directory at the end.
	err := testutils.DownloadTestDataArchive(b, TEST_DATA_STORAGE_PATH, TEST_DATA_DIR)
	assert.Nil(b, err, "Unable to download testdata.")
	defer func() {
		util.LogErr(os.RemoveAll(TEST_DATA_DIR))
	}()

	// Load the data.
	fileInfos, err := ioutil.ReadDir(TEST_DATA_DIR)
	assert.Nil(b, err)

	results := make(chan interface{}, len(fileInfos))
	var codec TestStructCodec
	counter := 0
	for _, fi := range fileInfos {
		if strings.HasSuffix(fi.Name(), ".json") {
			go func(fName string) {
				f, err := os.Open(fName)
				if err != nil {
					glog.Fatalf("Unable to open file %s", fName)
				}
				// Close the file once it has been read; the original
				// leaked the handle.
				defer util.Close(f)
				content, err := ioutil.ReadAll(f)
				if err != nil {
					glog.Fatalf("Unable to read file %s", fName)
				}
				v, err := codec.Decode(content)
				if err != nil {
					glog.Fatalf("Unable to decode file %s", fName)
				}
				if _, ok := v.(*TestStruct); !ok {
					glog.Fatalln("Expected to get instance of TestStruct")
				}
				// Store the filepath in this field to use as cache key.
				ts := v.(*TestStruct)
				ts.PixelDiffFilePath = fName
				results <- ts
			}(filepath.Join("./benchdata", fi.Name()))
			counter++
		}
	}

	groundTruth := make(map[string]interface{}, counter)
	cache := NewRedisLRUCache("localhost:6379", 1, "di", TestStructCodec(0))
	rlruCache := cache.(*RedisLRUCache)
	rlruCache.Purge()

	for i := 0; i < counter; i++ {
		ret := <-results
		ts := ret.(*TestStruct)
		groundTruth[ts.PixelDiffFilePath] = ret
	}
	glog.Infof("Done importing %d files. Starting bench.", len(groundTruth))

	b.ResetTimer()
	for k, v := range groundTruth {
		cache.Add(k, v)
	}
	assert.Equal(b, len(groundTruth), cache.Len())

	counter = 0
	for k, v := range groundTruth {
		found, ok := cache.Get(k)
		assert.True(b, ok)
		assert.Equal(b, v, found)
		counter++
		// if (counter % 1000) == 0 {
		// 	glog.Infof("Checked %d records.", counter)
		// }
	}
	b.StopTimer()

	// Cleanup code that should not be timed but deserves to be tested.
	rlruCache.Purge()
	assert.Equal(b, 0, cache.Len())
}
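// TestStruct and TestStructCodec are exercised above but defined elsewhere.
// Below is a hedged sketch of a codec that would satisfy the benchmark,
// assuming the cache round-trips values through JSON via encoding/json; only
// the PixelDiffFilePath field of TestStruct is known from this excerpt.
type TestStruct struct {
	PixelDiffFilePath string
	// Remaining fields are not visible in this excerpt.
}

type TestStructCodec int

// Encode serializes a value for storage in the Redis-backed cache.
func (c TestStructCodec) Encode(v interface{}) ([]byte, error) {
	return json.Marshal(v)
}

// Decode deserializes bytes back into a *TestStruct.
func (c TestStructCodec) Decode(b []byte) (interface{}, error) {
	ts := &TestStruct{}
	if err := json.Unmarshal(b, ts); err != nil {
		return nil, err
	}
	return ts, nil
}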
// Fatalln forwards its arguments to glog.Fatalln.
func (g *logger) Fatalln(args ...interface{}) {
	glog.Fatalln(args...)
}
// Cleanup cleans up the temporary repo.
func (t *TempRepo) Cleanup() {
	if err := os.RemoveAll(t.Dir); err != nil {
		glog.Fatalln("Failed to clean up after test:", err)
	}
}
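// A minimal usage sketch tying NewTempRepo and Cleanup together; the test
// name and body are hypothetical. Because NewTempRepo uses runtime.Caller(1),
// testdata/testrepo.zip must sit next to the calling test file.
func TestWithTempRepo(t *testing.T) {
	tr := NewTempRepo()
	defer tr.Cleanup()
	// tr.Dir now holds the unzipped contents of testdata/testrepo.zip and can
	// be handed to whatever code under test needs a Git checkout.
}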