func main() { flag.Usage = printUsage common.Init() args := flag.Args() if len(args) == 0 { printUsage() os.Exit(1) } store := filetilestore.NewFileTileStore(*tileDir, *dataset, 0) switch args[0] { case VALIDATE: if !validator.ValidateDataset(store, *verbose, *echoHashes) { glog.Fatal("FAILED Validation.") } case DUMP_COMMITS: checkArgs(args, DUMP_COMMITS, 1) nCommits := parseInt(args[1]) dumpCommits(store, nCommits) case MD5: checkArgs(args, MD5, 2) hash := args[1] nCommits := parseInt(args[2]) md5Commits(store, hash, nCommits) case JSON: checkArgs(args, JSON, 3) nCommits := parseInt(args[1]) nTraces := parseInt(args[2]) fname := args[3] dumpTileToJSON(store, nCommits, nTraces, fname) default: glog.Fatalf("Unknow command: %s", args[0]) } }
// GetTileStoreFromEnv looks at the TEST_TILE_DIR environment variable for the
// name of directory that contains tiles. If it's defined it will return a
// TileStore instance. If not, the calling test will fail.
func GetTileStoreFromEnv(t assert.TestingT) tiling.TileStore {
	// Get the TEST_TILE environment variable that points to the
	// tile to read.
	tileDir := os.Getenv("TEST_TILE_DIR")
	assert.NotEqual(t, "", tileDir, "Please define the TEST_TILE_DIR environment variable to point to a live tile store.")
	// The 2 minute value is the store's refresh/caching period.
	return filetilestore.NewFileTileStore(tileDir, config.DATASET_GOLD, 2*time.Minute)
}
func Init() { rand.Seed(time.Now().UnixNano()) if *resourcesDir == "" { _, filename, _, _ := runtime.Caller(0) *resourcesDir = filepath.Join(filepath.Dir(filename), "../..") } indexTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/index.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) clusterTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/clusters.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) alertsTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/alerting.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) clTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/cl.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) activityTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/activitylog.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) compareTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/compare.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) helpTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/help.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) nanoTileStore = filetilestore.NewFileTileStore(*tileStoreDir, config.DATASET_NANO, 2*time.Minute) var err error git, err = gitinfo.CloneOrUpdate(*gitRepoURL, *gitRepoDir, false) if err != nil { glog.Fatal(err) } }
// TestIngestCommits exercises the full ingestion path: it ingests commit
// info from a checked-in test repo into a fresh file tile store, validates
// the resulting tiles, and then checks TileTracker.Move against two known
// commit hashes.
func TestIngestCommits(t *testing.T) {
	// Get a known Git repo with 34 commits in it setup.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	// Create a temporary place for a filetilestore.
	tileDir, err := ioutil.TempDir("", "skiaperf")
	if err != nil {
		t.Fatal("Failed to create testing Tile dir: ", err)
	}
	defer testutils.RemoveAll(t, tileDir)

	git, err := gitinfo.NewGitInfo(filepath.Join(tr.Dir, "testrepo"), false, false)
	if err != nil {
		glog.Fatalf("Failed loading Git info: %s\n", err)
	}

	// Construct an Ingestor and have it UpdateCommitInfo.
	i, err := ingester.NewIngester(git, tileDir, config.DATASET_NANO, NewNanoBenchIngester(), 1, time.Second, map[string]string{}, "", "")
	if err != nil {
		t.Fatal("Failed to create ingester:", err)
	}
	if err := i.UpdateCommitInfo(false); err != nil {
		t.Fatal("Failed to ingest commits:", err)
	}

	// Validate the generated Tiles.
	store := filetilestore.NewFileTileStore(tileDir, config.DATASET_NANO, 0)
	if !validator.ValidateDataset(store, false, false) {
		t.Error("Failed to validate the created Tiles:", err)
	}

	// Test TileTracker while were here.
	tt := ingester.NewTileTracker(store, i.HashToNumber())
	// Both hashes below are expected to land in tile number 2.
	err = tt.Move("7a6fe813047d1a84107ef239e81f310f27861473")
	if err != nil {
		t.Fatal(err)
	}
	if got, want := tt.LastTileNum(), 2; got != want {
		t.Errorf("Move failed, wrong tile: Got %d Want %d", got, want)
	}
	err = tt.Move("87709bc360f35de52c2f2bc2fc70962fb234db2d")
	if err != nil {
		t.Fatal(err)
	}
	if got, want := tt.LastTileNum(), 2; got != want {
		t.Errorf("Move failed, wrong tile: Got %d Want %d", got, want)
	}
}
func main() { common.Init() grpclog.Init() // Load the 0,-1 tile. tilestore := filetilestore.NewFileTileStore(*tilestore, *dataset, time.Hour) tile, err := tilestore.Get(0, -1) if err != nil { glog.Fatalf("Failed to load tile: %s", err) } // Trim to the last 50 commits. begin := 0 end := tile.LastCommitIndex() if end >= 49 { begin = end - 49 } glog.Infof("Loaded Tile") tile, err = tile.Trim(begin, end) // Set up a connection to the server. conn, err := grpc.Dial(*address, grpc.WithInsecure()) if err != nil { glog.Fatalf("did not connect: %v", err) } defer util.Close(conn) // Build a TraceService client. builder := ptypes.PerfTraceBuilder if *gold { builder = types.GoldenTraceBuilder } ts, err := db.NewTraceServiceDB(conn, builder) if err != nil { log.Fatalf("Failed to create db.DB: %s", err) } glog.Infof("Opened tracedb") if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { glog.Fatalf("Failed to open profiling file: %s", err) } if err := pprof.StartCPUProfile(f); err != nil { glog.Fatalf("Failed to start profiling: %s", err) } defer pprof.StopCPUProfile() _main(tile, ts) } else { _main(tile, ts) } }
func BenchmarkStatusWatcher(b *testing.B) { // Get the TEST_TILE environment variable that points to the // tile to read. tileDir := os.Getenv("TEST_TILE_DIR") assert.NotEqual(b, "", tileDir, "Please define the TEST_TILE_DIR environment variable to point to a live tile store.") tileStore := filetilestore.NewFileTileStore(tileDir, config.DATASET_GOLD, 2*time.Minute) storages := &storage.Storage{ TileStore: tileStore, } // Load the tile into memory and reset the timer to avoid measuring // disk load time. _, err := storages.GetLastTileTrimmed(true) assert.Nil(b, err) b.ResetTimer() testStatusWatcher(b, tileStore) }
// NewIngester creates an Ingester given the repo and tilestore specified. func NewIngester(git *gitinfo.GitInfo, tileStoreDir string, datasetName string, ri ResultIngester, nCommits int, minDuration time.Duration, config map[string]string, statusDir, metricName string) (*Ingester, error) { var storageService *storage.Service = nil var err error = nil // check if the ingestion source is coming from Google Storage if config["GSDir"] != "" { storageService, err = storage.New(client) if err != nil { return nil, fmt.Errorf("Failed to create interace to Google Storage: %s\n", err) } } var processedFiles *leveldb.DB = nil if statusDir != "" { statusDir = fileutil.Must(fileutil.EnsureDirExists(filepath.Join(statusDir, datasetName))) processedFiles, err = leveldb.OpenFile(filepath.Join(statusDir, "processed_files.ldb"), nil) if err != nil { glog.Fatalf("Unable to open status db at %s: %s", filepath.Join(statusDir, "processed_files.ldb"), err) } } i := &Ingester{ git: git, tileStore: filetilestore.NewFileTileStore(tileStoreDir, datasetName, -1), storage: storageService, hashToNumber: map[string]int{}, resultIngester: ri, config: config, datasetName: datasetName, elapsedTimePerUpdate: newGauge(metricName, "update"), metricsProcessed: newCounter(metricName, "processed"), lastSuccessfulUpdate: time.Now(), timeSinceLastSucceessfulUpdate: newGauge(metricName, "time-since-last-successful-update"), nCommits: nCommits, minDuration: minDuration, processedFiles: processedFiles, } i.timeSinceLastSucceessfulUpdate.Update(int64(time.Since(i.lastSuccessfulUpdate).Seconds())) go func() { for _ = range time.Tick(time.Minute) { i.timeSinceLastSucceessfulUpdate.Update(int64(time.Since(i.lastSuccessfulUpdate).Seconds())) } }() return i, nil }
func main() { common.Init() grpclog.Init() // Load the 0,-1 tile. fileTilestore := filetilestore.NewFileTileStore(*tilestore, *dataset, time.Hour) tile, err := fileTilestore.Get(0, -1) if err != nil { glog.Fatalf("Failed to load tile: %s", err) } // Trim to the last 50 commits. begin := 0 end := tile.LastCommitIndex() if end >= 49 { begin = end - 49 } glog.Infof("Loaded Tile") tile, err = tile.Trim(begin, end) // Set up a connection to the server. conn, err := grpc.Dial(*address, grpc.WithInsecure()) if err != nil { glog.Fatalf("did not connect: %v", err) } defer util.Close(conn) // Build a TraceService client. builder := ptypes.PerfTraceBuilder isGold := *dataset == gconfig.DATASET_GOLD if isGold { builder = gtypes.GoldenTraceBuilder } glog.Infof("START load tracedb.") ts, err := db.NewTraceServiceDB(conn, builder) if err != nil { log.Fatalf("Failed to create db.DB: %s", err) } glog.Infof("DONE load tracedb.") if err = diff(tile, ts, isGold); err != nil { glog.Fatalf("Diff error: %s", err) } }
func TestMerging(t *testing.T) { randomPath, err := ioutil.TempDir("", "mergedtiles_test") if err != nil { t.Fatalf("Failing to create temporary directory: %s", err) return } defer testutils.RemoveAll(t, randomPath) // The test file needs to be created in the 0/ subdirectory of the path. randomFullPath := filepath.Join(randomPath, "test", "0") if err := os.MkdirAll(randomFullPath, 0775); err != nil { t.Fatalf("Failing to create temporary subdirectory: %s", err) return } fileName := filepath.Join(randomFullPath, "0000.gob") makeFakeTile(t, fileName, &tiling.Tile{ Traces: map[string]tiling.Trace{ "test": &types.PerfTrace{ Values: []float64{0.0, 1.4, -2}, Params_: map[string]string{"test": "parameter"}, }, }, ParamSet: map[string][]string{ "test": []string{"parameter"}, }, Commits: []*tiling.Commit{ &tiling.Commit{ CommitTime: 42, Hash: "ffffffffffffffffffffffffffffffffffffffff", Author: "*****@*****.**", }, &tiling.Commit{ CommitTime: 43, Hash: "eeeeeeeeeee", Author: "*****@*****.**", }, &tiling.Commit{ CommitTime: 44, Hash: "aaaaaaaaaaa", Author: "*****@*****.**", }, }, Scale: 0, TileIndex: 0, }) ts := filetilestore.NewFileTileStore(randomPath, "test", 10*time.Millisecond) m := NewMergedTiles(ts, 2) _, err = m.Get(0, 0, 1) if err == nil { t.Fatalf("Failed to error when requesting a merged tile that doesn't exist: %s", err) } fileName = filepath.Join(randomFullPath, "0001.gob") makeFakeTile(t, fileName, &tiling.Tile{ Traces: map[string]tiling.Trace{}, ParamSet: map[string][]string{ "test": []string{"parameter"}, }, Commits: []*tiling.Commit{ &tiling.Commit{ CommitTime: 45, Hash: "0000000000000000000000000000000000000000", Author: "*****@*****.**", }, }, Scale: 0, TileIndex: 0, }) _, err = m.Get(0, 0, 1) if err != nil { t.Fatalf("Failed to error when requesting a merged tile that doesn't exist: %s", err) } }
// main wires together the full skiacorrectness server: DB config, metrics,
// optional memory/CPU profiling, login, the diff/expectations/tile stores,
// the analysis submodules (history, blame, ignore, tallies, summaries,
// status), the HTTP routes, and finally serves on *port. Fatal on any
// initialization error. (Ordering below matters: stores must exist before
// the modules built on top of them.)
func main() {
	defer common.LogPanic()
	var err error
	mainTimer := timer.New("main init")

	// Setup DB flags.
	dbConf := database.ConfigFromFlags(db.PROD_DB_HOST, db.PROD_DB_PORT, database.USER_RW, db.PROD_DB_NAME, db.MigrationSteps())

	// Global init to initialize logging/metrics (reports to graphiteServer).
	common.InitWithMetrics("skiacorrectness", graphiteServer)

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatalf("Unable to retrieve version: %s", err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	// Enable the memory profiler if memProfile was set.
	// TODO(stephana): This should be moved to a HTTP endpoint that
	// only responds to internal IP addresses/ports.
	if *memProfile > 0 {
		time.AfterFunc(*memProfile, func() {
			glog.Infof("Writing Memory Profile")
			f, err := ioutil.TempFile("./", "memory-profile")
			if err != nil {
				glog.Fatalf("Unable to create memory profile file: %s", err)
			}
			if err := pprof.WriteHeapProfile(f); err != nil {
				glog.Fatalf("Unable to write memory profile file: %v", err)
			}
			util.Close(f)
			glog.Infof("Memory profile written to %s", f.Name())
			os.Exit(0)
		})
	}

	// Same idea for the CPU profiler: start now, stop and exit after the
	// configured duration.
	if *cpuProfile > 0 {
		glog.Infof("Writing CPU Profile")
		f, err := ioutil.TempFile("./", "cpu-profile")
		if err != nil {
			glog.Fatalf("Unable to create cpu profile file: %s", err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			glog.Fatalf("Unable to write cpu profile file: %v", err)
		}
		time.AfterFunc(*cpuProfile, func() {
			pprof.StopCPUProfile()
			util.Close(f)
			glog.Infof("CPU profile written to %s", f.Name())
			os.Exit(0)
		})
	}

	// Init this module.
	Init()

	// Initialize submodules.
	filediffstore.Init()

	// Set up login.
	// TODO(stephana): Factor out to go/login/login.go and remove hard coded
	// values.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-ubjke2f3staq6ouas64r31h8f8tcbiqp.apps.googleusercontent.com"
	var clientSecret = "rK-kRY71CXmcg0v9I9KIgWci"
	var useRedirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	// In production, pull the real secrets from project metadata instead of
	// the local development defaults above.
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		useRedirectURL = *redirectURL
	}
	login.Init(clientID, clientSecret, useRedirectURL, cookieSalt, login.DEFAULT_SCOPE, *authWhiteList, *local)

	// Get the OAuth client if necessary.
	client := getOAuthClient(*oauthCacheFile)

	// Set up the cache implementation to use: in-memory by default,
	// Redis-backed if a Redis host was configured.
	cacheFactory := filediffstore.MemCacheFactory
	if *redisHost != "" {
		cacheFactory = func(uniqueId string, codec util.LRUCodec) util.LRUCache {
			return redisutil.NewRedisLRUCache(*redisHost, *redisDB, uniqueId, codec)
		}
	}

	// Get the expectations storage, the filediff storage and the tilestore.
	diffStore, err := filediffstore.NewFileDiffStore(client, *imageDir, *gsBucketName, filediffstore.DEFAULT_GS_IMG_DIR_NAME, cacheFactory, filediffstore.RECOMMENDED_WORKER_POOL_SIZE)
	if err != nil {
		glog.Fatalf("Allocating DiffStore failed: %s", err)
	}

	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}
	if !vdb.IsLatestVersion() {
		glog.Fatal("Wrong DB version. Please updated to latest version.")
	}

	digestStore, err := digeststore.New(*storageDir)
	if err != nil {
		glog.Fatal(err)
	}

	eventBus := eventbus.New(nil)

	// Central storage bundle shared by all the modules initialized below.
	storages = &storage.Storage{
		DiffStore:         diffStore,
		ExpectationsStore: expstorage.NewCachingExpectationStore(expstorage.NewSQLExpectationStore(vdb), eventBus),
		IgnoreStore:       ignore.NewSQLIgnoreStore(vdb),
		TileStore:         filetilestore.NewFileTileStore(*tileStoreDir, config.DATASET_GOLD, 2*time.Minute),
		DigestStore:       digestStore,
		NCommits:          *nCommits,
		EventBus:          eventBus,
		TrybotResults:     trybot.NewTrybotResultStorage(vdb),
		RietveldAPI:       rietveld.New(*rietveldURL, nil),
	}

	if err := history.Init(storages, *nTilesToBackfill); err != nil {
		glog.Fatalf("Unable to initialize history package: %s", err)
	}

	if blamer, err = blame.New(storages); err != nil {
		glog.Fatalf("Unable to create blamer: %s", err)
	}

	if err := ignore.Init(storages.IgnoreStore); err != nil {
		glog.Fatalf("Failed to start monitoring for expired ignore rules: %s", err)
	}

	// Enable the experimental features.
	tallies, err = tally.New(storages)
	if err != nil {
		glog.Fatalf("Failed to build tallies: %s", err)
	}

	paramsetSum = paramsets.New(tallies, storages)

	if !*local {
		*issueTrackerKey = metadata.Must(metadata.ProjectGet(metadata.APIKEY))
	}
	issueTracker = issues.NewIssueTracker(*issueTrackerKey)

	summaries, err = summary.New(storages, tallies, blamer)
	if err != nil {
		glog.Fatalf("Failed to build summary: %s", err)
	}

	statusWatcher, err = status.New(storages)
	if err != nil {
		glog.Fatalf("Failed to initialize status watcher: %s", err)
	}

	imgFS := NewURLAwareFileServer(*imageDir, IMAGE_URL_PREFIX)
	pathToURLConverter = imgFS.GetURL

	if err := warmer.Init(storages, summaries, tallies); err != nil {
		glog.Fatalf("Failed to initialize the warmer: %s", err)
	}
	mainTimer.Stop()

	// Everything is wired up at this point. Run the self tests if requested.
	if *selfTest {
		search.SelfTest(storages, tallies, blamer, paramsetSum)
	}

	router := mux.NewRouter()

	// Set up the resource to serve the image files.
	router.PathPrefix(IMAGE_URL_PREFIX).Handler(imgFS.Handler)

	// New Polymer based UI endpoints.
	router.PathPrefix("/res/").HandlerFunc(makeResourceHandler())

	// TODO(stephana): Remove the 'poly' prefix from all the handlers and clean
	// up main2.go by either merging it into main.go or making it clearer that
	// it contains all the handlers. Make it clearer what variables are shared
	// between the different files.

	// All the handlers will be prefixed with poly to differentiate it from the
	// angular code until the angular code is removed.
	router.HandleFunc(OAUTH2_CALLBACK_PATH, login.OAuth2CallbackHandler)
	router.HandleFunc("/", byBlameHandler).Methods("GET")
	router.HandleFunc("/list", templateHandler("list.html")).Methods("GET")
	router.HandleFunc("/_/details", polyDetailsHandler).Methods("GET")
	router.HandleFunc("/_/diff", polyDiffJSONDigestHandler).Methods("GET")
	router.HandleFunc("/_/hashes", polyAllHashesHandler).Methods("GET")
	router.HandleFunc("/_/ignores", polyIgnoresJSONHandler).Methods("GET")
	router.HandleFunc("/_/ignores/add/", polyIgnoresAddHandler).Methods("POST")
	router.HandleFunc("/_/ignores/del/{id}", polyIgnoresDeleteHandler).Methods("POST")
	router.HandleFunc("/_/ignores/save/{id}", polyIgnoresUpdateHandler).Methods("POST")
	router.HandleFunc("/_/list", polyListTestsHandler).Methods("GET")
	router.HandleFunc("/_/paramset", polyParamsHandler).Methods("GET")
	router.HandleFunc("/_/nxn", nxnJSONHandler).Methods("GET")

	// TODO(stephana): Once /_/search3 is stable it will replace /_/search and the
	// /search*.html pages will be consolidated into one.
	router.HandleFunc("/_/search", polySearchJSONHandler).Methods("GET")
	router.HandleFunc("/_/search3", search3JSONHandler).Methods("GET")
	router.HandleFunc("/_/status/{test}", polyTestStatusHandler).Methods("GET")
	router.HandleFunc("/_/test", polyTestHandler).Methods("POST")
	router.HandleFunc("/_/triage", polyTriageHandler).Methods("POST")
	router.HandleFunc("/_/triagelog", polyTriageLogHandler).Methods("GET")
	router.HandleFunc("/_/triagelog/undo", triageUndoHandler).Methods("POST")
	router.HandleFunc("/_/failure", failureListJSONHandler).Methods("GET")
	router.HandleFunc("/_/failure/clear", failureClearJSONHandler).Methods("POST")
	router.HandleFunc("/_/trybot", listTrybotsJSONHandler).Methods("GET")
	router.HandleFunc("/byblame", byBlameHandler).Methods("GET")
	router.HandleFunc("/cluster", templateHandler("cluster.html")).Methods("GET")
	router.HandleFunc("/search2", search2Handler).Methods("GET")
	router.HandleFunc("/cmp/{test}", templateHandler("compare.html")).Methods("GET")
	router.HandleFunc("/detail", templateHandler("single.html")).Methods("GET")
	router.HandleFunc("/diff", templateHandler("diff.html")).Methods("GET")
	router.HandleFunc("/help", templateHandler("help.html")).Methods("GET")
	router.HandleFunc("/ignores", templateHandler("ignores.html")).Methods("GET")
	router.HandleFunc("/loginstatus/", login.StatusHandler)
	router.HandleFunc("/logout/", login.LogoutHandler)
	router.HandleFunc("/search", templateHandler("search.html")).Methods("GET")
	router.HandleFunc("/triagelog", templateHandler("triagelog.html")).Methods("GET")
	router.HandleFunc("/trybot", templateHandler("trybot.html")).Methods("GET")
	router.HandleFunc("/failures", templateHandler("failures.html")).Methods("GET")

	// Add the necessary middleware and have the router handle all requests.
	// By structuring the middleware this way we only log requests that are
	// authenticated.
	rootHandler := util.LoggingGzipRequestResponse(router)
	if *forceLogin {
		rootHandler = login.ForceAuth(rootHandler, OAUTH2_CALLBACK_PATH)
	}

	// The polyStatusHandler is being polled, so we exclude it from logging.
	http.HandleFunc("/_/trstatus", polyStatusHandler)
	http.Handle("/", rootHandler)

	// Start the server.
	glog.Infoln("Serving on http://127.0.0.1" + *port)
	glog.Fatal(http.ListenAndServe(*port, nil))
}