func main() {
	defer common.LogPanic()
	common.Init()
	if flag.NArg() != 2 {
		log.Fatal("Usage: imagediff [--out filename] imagepath1.png imagepath2.png\n")
	}
	a, err := diff.OpenImage(flag.Arg(0))
	if err != nil {
		log.Fatal(err)
	}
	b, err := diff.OpenImage(flag.Arg(1))
	if err != nil {
		log.Fatal(err)
	}
	metrics, d := diff.Diff(a, b)
	fmt.Printf("Dimensions are different: %v\n", metrics.DimDiffer)
	fmt.Printf("Number of pixels different: %v\n", metrics.NumDiffPixels)
	fmt.Printf("Pixel diff percent: %v\n", metrics.PixelDiffPercent)
	if *out == "" {
		return
	}
	fmt.Println("Writing image diff.")
	f, err := os.Create(*out)
	if err != nil {
		log.Fatal(err)
	}
	if err := png.Encode(f, d); err != nil {
		log.Fatal(err)
	}
	// Close the output file and surface any deferred write error.
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}
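// Example invocation (the file names are hypothetical):
//
//	imagediff --out diff.png before.png after.png
//
// This prints the three diff metrics and, because --out is set, also writes
// the computed diff image to diff.png.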
func main() {
	defer common.LogPanic()
	common.Init()
	if logDirFlag := flag.Lookup("log_dir"); logDirFlag != nil {
		logDir = logDirFlag.Value.String()
	}
	if *dryRun {
		exec.SetRunForTesting(func(command *exec.Command) error {
			glog.Infof("dry_run: %s", exec.DebugString(command))
			return nil
		})
	}
	if *local {
		frontend.InitForTesting("http://localhost:8000/")
	} else {
		frontend.MustInit()
	}
	workerHealthTick := time.Tick(*workerHealthCheckInterval)
	pollTick := time.Tick(*pollInterval)
	// Run immediately, since pollTick will not fire until after pollInterval.
	pollAndExecOnce()
	for {
		select {
		case <-workerHealthTick:
			doWorkerHealthCheck()
		case <-pollTick:
			pollAndExecOnce()
		}
	}
}
func main() {
	defer common.LogPanic()
	common.Init()
	args := flag.Args()
	if len(args) != 2 {
		flag.Usage()
		os.Exit(1)
	}
	bucket, prefix := args[0], args[1]
	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)
	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}
	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}
	eventBus := eventbus.New(globalEventBus)
	eventBus.SubscribeAsync(event.StorageEvent(bucket, prefix), func(evData interface{}) {
		data := evData.(*event.GoogleStorageEventData)
		glog.Infof("Google Storage notification from bucket\n %s: %s : %s", data.Updated, data.Bucket, data.Name)
	})
	// Block forever while the async subscription handles events.
	select {}
}
func main() {
	common.Init()
	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)
	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}
	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}
	eventBus := eventbus.New(globalEventBus)
	// Send events every so often.
	for range time.Tick(2 * time.Second) {
		evData := &event.GoogleStorageEventData{
			Bucket:  "test-bucket",
			Name:    "test-name",
			Updated: time.Now().String(),
		}
		eventBus.Publish(event.GLOBAL_GOOGLE_STORAGE, evData)
		glog.Infof("Sent Event: %#v", evData)
	}
}
func main() {
	flag.Usage = printUsage
	common.Init()
	args := flag.Args()
	if len(args) == 0 {
		printUsage()
		os.Exit(1)
	}
	store := filetilestore.NewFileTileStore(*tileDir, *dataset, 0)
	switch args[0] {
	case VALIDATE:
		if !validator.ValidateDataset(store, *verbose, *echoHashes) {
			glog.Fatal("FAILED Validation.")
		}
	case DUMP_COMMITS:
		checkArgs(args, DUMP_COMMITS, 1)
		nCommits := parseInt(args[1])
		dumpCommits(store, nCommits)
	case MD5:
		checkArgs(args, MD5, 2)
		hash := args[1]
		nCommits := parseInt(args[2])
		md5Commits(store, hash, nCommits)
	case JSON:
		checkArgs(args, JSON, 3)
		nCommits := parseInt(args[1])
		nTraces := parseInt(args[2])
		fname := args[3]
		dumpTileToJSON(store, nCommits, nTraces, fname)
	default:
		glog.Fatalf("Unknown command: %s", args[0])
	}
}
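// checkArgs and parseInt are helpers defined elsewhere in this file. A
// minimal sketch of their assumed behavior (illustrative, not the actual
// implementations):
//
//	// checkArgs fails fatally unless cmd received exactly expected
//	// positional arguments after the command name.
//	func checkArgs(args []string, cmd string, expected int) {
//		if len(args) != expected+1 {
//			glog.Fatalf("%s expects %d arguments.", cmd, expected)
//		}
//	}
//
//	// parseInt converts a positional argument to an int, failing fatally
//	// on malformed input.
//	func parseInt(s string) int {
//		n, err := strconv.Atoi(s)
//		if err != nil {
//			glog.Fatalf("Not a valid integer: %q", s)
//		}
//		return n
//	}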
func main() {
	// Set up flags.
	dbConf := database.ConfigFromFlags(buildbot.PROD_DB_HOST, buildbot.PROD_DB_PORT, database.USER_ROOT, buildbot.PROD_DB_NAME, buildbot.MigrationSteps())
	// Global init to initialize glog and parse arguments.
	common.Init()
	if err := dbConf.PromptForPassword(); err != nil {
		glog.Fatal(err)
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}
	// Get the current database version.
	maxDBVersion := vdb.MaxDBVersion()
	glog.Infof("Latest database version: %d", maxDBVersion)
	dbVersion, err := vdb.DBVersion()
	if err != nil {
		glog.Fatalf("Unable to retrieve database version. Error: %s", err)
	}
	glog.Infof("Current database version: %d", dbVersion)
	if dbVersion < maxDBVersion {
		glog.Infof("Migrating to version: %d", maxDBVersion)
		err = vdb.Migrate(maxDBVersion)
		if err != nil {
			glog.Fatalf("Unable to migrate database. Error: %s", err)
		}
	}
	glog.Infoln("Database migration finished.")
}
func main() {
	defer common.LogPanic()
	common.Init()
	args := flag.Args()
	if len(args) != 3 {
		glog.Errorf("Expected arguments: branch target buildID")
		glog.Errorf("i.e.: git_master-skia razor-userdebug 1772442")
		os.Exit(1)
	}
	// Set the arguments necessary to look up the git hash.
	branch := args[0]
	target := args[1]
	buildID := args[2]
	glog.Infof("Branch, target, buildID: %s, %s, %s", branch, target, buildID)
	// Set up the oauth client.
	var client *http.Client
	var err error
	// In this case we don't want a backoff transport since the Apiary backend
	// seems to fail a lot, so we basically want to fall back to polling if a
	// call fails.
	transport := &http.Transport{
		Dial: util.DialTimeout,
	}
	if *local {
		// Use a local client secret file to load data.
		client, err = auth.InstalledAppClient(OAUTH_CACHE_FILEPATH, CLIENT_SECRET_FILEPATH, transport, androidbuildinternal.AndroidbuildInternalScope, storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create installed app oauth client: %s", err)
		}
	} else {
		// Use compute engine service account.
		client = auth.GCEServiceAccountClient(transport)
	}
	f, err := androidbuild.New("/tmp/android-gold-ingest", client)
	if err != nil {
		glog.Fatalf("Failed to construct client: %s", err)
	}
	for {
		r, err := f.Get(branch, target, buildID)
		if err != nil {
			glog.Errorf("Failed to get requested info: %s", err)
			time.Sleep(1 * time.Minute)
			continue
		}
		if r != nil {
			glog.Infof("Successfully found: %#v", *r)
		}
		time.Sleep(1 * time.Minute)
	}
}
func main() {
	rand.Seed(time.Now().Unix())
	// Grab the first argument off of os.Args, the command, before we call flag.Parse.
	if len(os.Args) < 2 {
		Usage()
		return
	}
	cmd := os.Args[1]
	// Splice the command out of os.Args so flag parsing only sees the flags.
	os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
	// Now parse the flags.
	common.Init()
	// Set up a connection to the server.
	conn, err := grpc.Dial(*address, grpc.WithInsecure())
	if err != nil {
		glog.Fatalf("did not connect: %v", err)
	}
	defer util.Close(conn)
	client := traceservice.NewTraceServiceClient(conn)
	switch cmd {
	case "ls":
		list(client)
	case "ping":
		ping(client)
	case "count":
		count(client)
	case "sample":
		sample(client)
	default:
		fmt.Printf("Unknown command: %s\n", cmd)
		Usage()
	}
}
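// Example invocations (the binary name is hypothetical; --address is the
// flag dereferenced above):
//
//	tracetool --address=localhost:9090 ls
//	tracetool --address=localhost:9090 sample
//
// The sub-command is spliced out of os.Args before common.Init so that
// flag.Parse only ever sees the flags.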
func main() {
	common.Init()
	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)
	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}
	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}
	eventBus := eventbus.New(globalEventBus)
	eventBus.SubscribeAsync(event.GLOBAL_GOOGLE_STORAGE, func(evData interface{}) {
		data := evData.(*event.GoogleStorageEventData)
		glog.Infof("Google Storage notification from bucket\n %s: %s : %s", data.Updated, data.Bucket, data.Name)
	})
	// Block forever while the async subscription handles events.
	select {}
}
func main() {
	common.Init()
	grpclog.Init()
	// Set up a connection to the server.
	conn, err := grpc.Dial(*address, grpc.WithInsecure())
	if err != nil {
		glog.Fatalf("did not connect: %v", err)
	}
	defer util.Close(conn)
	// Build a TraceService client.
	builder := ptypes.PerfTraceBuilder
	if *gold {
		builder = types.GoldenTraceBuilder
	}
	ts, err := db.NewTraceServiceDB(conn, builder)
	if err != nil {
		glog.Fatalf("Failed to create db.DB: %s", err)
	}
	glog.Infof("Opened tracedb")
	// Optionally profile the run.
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			glog.Fatalf("Failed to open profiling file: %s", err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			glog.Fatalf("Failed to start profiling: %s", err)
		}
		defer pprof.StopCPUProfile()
	}
	_main(ts)
}
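// The resulting CPU profile can be inspected with the standard Go tooling,
// e.g.:
//
//	go tool pprof <binary> <cpuprofile-file>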
func main() {
	common.Init()
	args := flag.Args()
	if err := RunApp(ADBFromFlags(), *app, args); err != nil {
		glog.Fatal(err)
	}
}
func main() {
	defer common.LogPanic()
	common.Init()
	if *emailTokenPath == "" {
		glog.Error("Must specify --email_token_path")
		return
	}
	defer glog.Flush()
	for _, shiftType := range allShiftTypes {
		res, err := http.Get(shiftType.nextSheriffEndpoint)
		if err != nil {
			glog.Fatalf("Could not HTTP Get: %s", err)
		}
		var jsonType map[string]interface{}
		err = json.NewDecoder(res.Body).Decode(&jsonType)
		// Close the body here rather than with defer, which would hold every
		// response open until main returns.
		util.Close(res.Body)
		if err != nil {
			glog.Fatalf("Could not unmarshal JSON: %s", err)
		}
		sheriffEmail, _ := jsonType["username"].(string)
		if sheriffEmail == NO_SHERIFF {
			glog.Infof("Skipping emailing %s because %s was specified", shiftType.shiftName, NO_SHERIFF)
			continue
		}
		sheriffUsername := strings.Split(sheriffEmail, "@")[0]
		emailTemplateParsed := template.Must(template.New("sheriff_email").Parse(EMAIL_TEMPLATE))
		emailBytes := new(bytes.Buffer)
		if err := emailTemplateParsed.Execute(emailBytes, struct {
			SheriffName      string
			SheriffType      string
			SheriffSchedules string
			SheriffDoc       string
			ScheduleStart    string
			ScheduleEnd      string
		}{
			SheriffName:      sheriffUsername,
			SheriffType:      shiftType.shiftName,
			SheriffSchedules: shiftType.schedulesLink,
			SheriffDoc:       shiftType.documentationLink,
			ScheduleStart:    jsonType["schedule_start"].(string),
			ScheduleEnd:      jsonType["schedule_end"].(string),
		}); err != nil {
			glog.Errorf("Failed to execute template: %s", err)
			return
		}
		emailSubject := fmt.Sprintf("%s is the next %s", sheriffUsername, shiftType.shiftName)
		if err := sendEmail([]string{sheriffEmail, EXTRA_RECIPIENT}, emailSubject, emailBytes.String()); err != nil {
			glog.Fatalf("Error sending email to sheriff: %s", err)
		}
	}
}
func main() {
	common.Init()
	out, err := util.SSH(*cmd, util.Slaves, *timeout)
	if err != nil {
		glog.Fatal(err)
	}
	if *printOutput {
		for k, v := range out {
			fmt.Printf("\n=====%s=====\n%s\n", k, v)
		}
	}
}
func main() {
	defer common.LogPanic()
	// Set up flags.
	dbConf := db.DBConfigFromFlags()
	// Global init to initialize glog and parse arguments.
	common.Init()
	if *promptPassword {
		if err := dbConf.PromptForPassword(); err != nil {
			glog.Fatal(err)
		}
	}
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}
	if *targetVersion < 0 {
		// Get the current database version.
		maxDBVersion := vdb.MaxDBVersion()
		glog.Infof("Latest database version: %d", maxDBVersion)
		dbVersion, err := vdb.DBVersion()
		if err != nil {
			glog.Fatalf("Unable to retrieve database version. Error: %s", err)
		}
		glog.Infof("Current database version: %d", dbVersion)
		if dbVersion < maxDBVersion {
			glog.Infof("Migrating to version: %d", maxDBVersion)
			err = vdb.Migrate(maxDBVersion)
			if err != nil {
				glog.Fatalf("Unable to migrate database. Error: %s", err)
			}
		}
	} else {
		glog.Infof("Migrating to version: %d", *targetVersion)
		err = vdb.Migrate(*targetVersion)
		if err != nil {
			glog.Fatalf("Unable to migrate database. Error: %s", err)
		}
	}
	glog.Infoln("Database migration finished.")
}
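// Example invocations (the binary name is hypothetical; the flags are those
// dereferenced above):
//
//	db_migrate --prompt_password             # migrate to the latest version
//	db_migrate --target_version=7            # migrate to version 7 explicitly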
func main() {
	common.Init()
	grpclog.Init()
	// Load the 0,-1 tile. The local variable is named fileTilestore to avoid
	// shadowing the tilestore flag.
	fileTilestore := filetilestore.NewFileTileStore(*tilestore, *dataset, time.Hour)
	tile, err := fileTilestore.Get(0, -1)
	if err != nil {
		glog.Fatalf("Failed to load tile: %s", err)
	}
	// Trim to the last 50 commits.
	begin := 0
	end := tile.LastCommitIndex()
	if end >= 49 {
		begin = end - 49
	}
	glog.Infof("Loaded Tile")
	tile, err = tile.Trim(begin, end)
	if err != nil {
		glog.Fatalf("Failed to trim tile: %s", err)
	}
	// Set up a connection to the server.
	conn, err := grpc.Dial(*address, grpc.WithInsecure())
	if err != nil {
		glog.Fatalf("did not connect: %v", err)
	}
	defer util.Close(conn)
	// Build a TraceService client.
	builder := ptypes.PerfTraceBuilder
	if *gold {
		builder = types.GoldenTraceBuilder
	}
	ts, err := db.NewTraceServiceDB(conn, builder)
	if err != nil {
		glog.Fatalf("Failed to create db.DB: %s", err)
	}
	glog.Infof("Opened tracedb")
	// Optionally profile the run.
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			glog.Fatalf("Failed to open profiling file: %s", err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			glog.Fatalf("Failed to start profiling: %s", err)
		}
		defer pprof.StopCPUProfile()
	}
	_main(tile, ts)
}
func main() {
	defer common.LogPanic()
	common.Init()
	frontend.MustInit()
	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&admin_tasks.RecreateWebpageArchivesUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Capture archives", util.GetMasterLogLink(*runID), ""))
	// Ensure webapp is updated and completion email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Capture archives on Workers")
	defer glog.Flush()
	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}
	cmd := []string{
		fmt.Sprintf("cd %s;", util.CtTreeDir),
		"git pull;",
		"make all;",
		// The main command that runs capture_archives on all workers.
		fmt.Sprintf("DISPLAY=:0 capture_archives --worker_num=%s --log_dir=%s --log_id=%s --pageset_type=%s --chromium_build=%s;", util.WORKER_NUM_KEYWORD, util.GLogDir, *runID, *pagesetType, *chromiumBuild),
	}
	_, err := util.SSH(strings.Join(cmd, " "), util.Slaves, util.CAPTURE_ARCHIVES_TIMEOUT)
	if err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}
	*taskCompletedSuccessfully = true
}
func main() {
	defer common.LogPanic()
	common.Init()
	var err error
	var wg sync.WaitGroup
	// Make a channel to deliver work.
	targetCh := make(chan Target)
	// Record the latency measurements in millis for each request.
	latencySamples := make([]float64, 0)
	latencies := make(chan float64)
	// Collect latency samples; done is closed once the channel drains, so the
	// final read of latencySamples below does not race with this goroutine.
	done := make(chan struct{})
	go func() {
		for m := range latencies {
			latencySamples = append(latencySamples, m)
		}
		close(done)
	}()
	err = startWorkers(targetCh, latencies, &wg)
	if err != nil {
		log.Fatalf("Failure starting workers: %v\n", err)
	}
	b0 := time.Now()
	// Pump requests out to all the workers to do.
	for i := 0; i < *numFetches; i++ {
		t := targets[rand.Int()%len(targets)]
		targetCh <- t
	}
	close(targetCh)
	// Wait for all HTTP requests to complete.
	wg.Wait()
	b1 := time.Now()
	// All workers have exited, so no more latencies will be sent.
	close(latencies)
	<-done
	fmt.Print("\n")
	fmt.Printf("Total time of run: %v\n", b1.Sub(b0))
	fmt.Printf("Average QPS: %.2f\n", float64(*numFetches)/b1.Sub(b0).Seconds())
	fmt.Println(NewSimpleStats(latencySamples, "Latency").String())
}
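// startWorkers is defined elsewhere in this file. A minimal sketch of the
// assumed worker-pool shape (illustrative only; the worker-count flag and
// the request logic are assumptions, not the actual implementation):
//
//	func startWorkers(targetCh <-chan Target, latencies chan<- float64, wg *sync.WaitGroup) error {
//		for i := 0; i < *numWorkers; i++ { // hypothetical flag
//			wg.Add(1)
//			go func() {
//				defer wg.Done()
//				for t := range targetCh {
//					begin := time.Now()
//					// Issue the HTTP request described by t here.
//					latencies <- float64(time.Since(begin).Nanoseconds()) / 1e6 // millis
//				}
//			}()
//		}
//		return nil
//	}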
func main() {
	common.Init()
	grpclog.Init()
	// Load the 0,-1 tile.
	fileTilestore := filetilestore.NewFileTileStore(*tilestore, *dataset, time.Hour)
	tile, err := fileTilestore.Get(0, -1)
	if err != nil {
		glog.Fatalf("Failed to load tile: %s", err)
	}
	// Trim to the last 50 commits.
	begin := 0
	end := tile.LastCommitIndex()
	if end >= 49 {
		begin = end - 49
	}
	glog.Infof("Loaded Tile")
	tile, err = tile.Trim(begin, end)
	if err != nil {
		glog.Fatalf("Failed to trim tile: %s", err)
	}
	// Set up a connection to the server.
	conn, err := grpc.Dial(*address, grpc.WithInsecure())
	if err != nil {
		glog.Fatalf("did not connect: %v", err)
	}
	defer util.Close(conn)
	// Build a TraceService client.
	builder := ptypes.PerfTraceBuilder
	isGold := *dataset == gconfig.DATASET_GOLD
	if isGold {
		builder = gtypes.GoldenTraceBuilder
	}
	glog.Infof("START load tracedb.")
	ts, err := db.NewTraceServiceDB(conn, builder)
	if err != nil {
		glog.Fatalf("Failed to create db.DB: %s", err)
	}
	glog.Infof("DONE load tracedb.")
	if err = diff(tile, ts, isGold); err != nil {
		glog.Fatalf("Diff error: %s", err)
	}
}
func main() {
	common.Init()
	webhook.MustInitRequestSaltFromFile(util.WebhookRequestSaltPath)
	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&frontend.RecreatePageSetsUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Creating pagesets"))
	// Ensure webapp is updated and completion email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Creating Pagesets on Workers")
	defer glog.Flush()
	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	cmd := []string{
		fmt.Sprintf("cd %s;", util.CtTreeDir),
		"git pull;",
		"make all;",
		// The main command that runs create_pagesets on all workers.
		fmt.Sprintf("create_pagesets --worker_num=%s --log_dir=%s --pageset_type=%s;", util.WORKER_NUM_KEYWORD, util.GLogDir, *pagesetType),
	}
	// Setting a 4 hour timeout since it may take a while to upload page sets to
	// Google Storage when doing 10k page sets per worker.
	if _, err := util.SSH(strings.Join(cmd, " "), util.Slaves, 4*time.Hour); err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}
	*taskCompletedSuccessfully = true
}
func main() {
	defer common.LogPanic()
	common.Init()
	frontend.MustInit()
	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&chromium_builds.UpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Build chromium", util.GetMasterLogLink(*runID), ""))
	// Ensure webapp is updated and completion email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running build chromium")
	defer glog.Flush()
	if *chromiumHash == "" {
		glog.Error("Must specify --chromium_hash")
		return
	}
	if *skiaHash == "" {
		glog.Error("Must specify --skia_hash")
		return
	}
	if _, _, err := util.CreateChromiumBuild("", *targetPlatform, *chromiumHash, *skiaHash, *applyPatches); err != nil {
		glog.Errorf("Error while creating the Chromium build: %s", err)
		return
	}
	taskCompletedSuccessfully = true
}
func main() {
	defer common.LogPanic()
	common.Init()
	client := util.NewTimeoutClient()
	retVal := 255
	// Populate certs based on cmd-line args.
	for _, metadata := range flag.Args() {
		c := &cert{
			metadata: metadata,
			file:     fileFromMetadata(metadata),
			etag:     "",
		}
		err := get(client, c)
		if err != nil {
			glog.Fatalf("Failed to retrieve the cert %s: %s", c, err)
		}
		retVal = 0
	}
	os.Exit(retVal)
}
func main() {
	if len(os.Args) < 2 {
		fmt.Println("Usage: query_issues <query> [OPTIONS]")
		return
	}
	// Grab the query off of os.Args before flag parsing, then splice it out.
	query := os.Args[1]
	os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
	common.Init()
	client, err := auth.NewDefaultJWTServiceAccountClient("https://www.googleapis.com/auth/userinfo.email")
	if err != nil {
		glog.Fatalf("Unable to create JWT service account client: %s", err)
	}
	tracker := issues.NewMonorailIssueTracker(client)
	iss, err := tracker.FromQuery(query)
	if err != nil {
		fmt.Printf("Failed to retrieve issues: %s\n", err)
		return
	}
	fmt.Printf("Found: %d\n", len(iss))
	for _, issue := range iss {
		fmt.Printf("%20d %10s %s\n", issue.ID, issue.State, issue.Title)
	}
}
func main() {
	defer common.LogPanic()
	common.Init()
	fileInfos, err := ioutil.ReadDir(".")
	if err != nil {
		glog.Fatalf("Unable to read directory: %s", err)
	}
	// Move each file in the current directory to its two-level radix path.
	for _, info := range fileInfos {
		if info.IsDir() {
			continue
		}
		fileName := info.Name()
		outFileName := fileutil.TwoLevelRadixPath(fileName)
		if fileName == outFileName {
			glog.Infof("Excluding %s -> %s", fileName, outFileName)
			continue
		}
		if !fileutil.FileExists(outFileName) {
			// Create the path if it doesn't exist.
			targetDir, _ := filepath.Split(outFileName)
			if err = os.MkdirAll(targetDir, 0700); err != nil {
				glog.Errorf("Unable to create path %s: %s", targetDir, err)
			}
			if err := os.Rename(fileName, outFileName); err != nil {
				glog.Errorf("Unable to run mv %s %s: %s", fileName, outFileName, err)
			}
		}
	}
}
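// For reference, fileutil.TwoLevelRadixPath fans a flat directory out into
// two levels of subdirectories keyed off the leading characters of the file
// name, along the lines of (illustrative; the exact mapping is defined by
// fileutil):
//
//	abcdefg.png -> ab/cd/abcdefg.png
//
// Names it cannot split come back unchanged, which is why the loop above
// skips the fileName == outFileName case.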
func main() {
	common.Init()
	webhook.MustInitRequestSaltFromFile(util.WebhookRequestSaltPath)
	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&frontend.ChromiumPerfUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Chromium perf"))
	// Ensure webapp is updated and email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup dirs after run completes.
	defer skutil.RemoveAll(filepath.Join(util.StorageDir, util.ChromiumPerfRunsDir))
	defer skutil.RemoveAll(filepath.Join(util.StorageDir, util.BenchmarkRunsDir))
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running chromium perf task on workers")
	defer glog.Flush()
	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *benchmarkName == "" {
		glog.Error("Must specify --benchmark_name")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}
	// Instantiate GsUtil object.
	gs, err := util.NewGsUtil(nil)
	if err != nil {
		glog.Errorf("Could not instantiate gsutil object: %s", err)
		return
	}
	remoteOutputDir := filepath.Join(util.ChromiumPerfRunsDir, *runID)
	// Copy the patches to Google Storage.
	skiaPatchName := *runID + ".skia.patch"
	blinkPatchName := *runID + ".blink.patch"
	chromiumPatchName := *runID + ".chromium.patch"
	for _, patchName := range []string{skiaPatchName, blinkPatchName, chromiumPatchName} {
		if err := gs.UploadFile(patchName, os.TempDir(), remoteOutputDir); err != nil {
			glog.Errorf("Could not upload %s to %s: %s", patchName, remoteOutputDir, err)
			return
		}
	}
	skiaPatchLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, remoteOutputDir, skiaPatchName)
	blinkPatchLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, remoteOutputDir, blinkPatchName)
	chromiumPatchLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, remoteOutputDir, chromiumPatchName)
	// Create the two required chromium builds (with and without the patch).
	chromiumHash, skiaHash, err := util.CreateChromiumBuild(*runID, *targetPlatform, "", "", true)
	if err != nil {
		glog.Errorf("Could not create chromium build: %s", err)
		return
	}
	// Reboot all workers to start from a clean slate.
	util.RebootWorkers()
	// Run the run_chromium_perf script on all workers.
	runIDNoPatch := *runID + "-nopatch"
	runIDWithPatch := *runID + "-withpatch"
	chromiumBuildNoPatch := fmt.Sprintf("try-%s-%s-%s", chromiumHash, skiaHash, runIDNoPatch)
	chromiumBuildWithPatch := fmt.Sprintf("try-%s-%s-%s", chromiumHash, skiaHash, runIDWithPatch)
	runChromiumPerfCmdTemplate := "DISPLAY=:0 run_chromium_perf " +
		"--worker_num={{.WorkerNum}} --log_dir={{.LogDir}} --pageset_type={{.PagesetType}} " +
		"--chromium_build_nopatch={{.ChromiumBuildNoPatch}} --chromium_build_withpatch={{.ChromiumBuildWithPatch}} " +
		"--run_id_nopatch={{.RunIDNoPatch}} --run_id_withpatch={{.RunIDWithPatch}} " +
		"--benchmark_name={{.BenchmarkName}} --benchmark_extra_args=\"{{.BenchmarkExtraArgs}}\" " +
		"--browser_extra_args_nopatch=\"{{.BrowserExtraArgsNoPatch}}\" --browser_extra_args_withpatch=\"{{.BrowserExtraArgsWithPatch}}\" " +
		"--repeat_benchmark={{.RepeatBenchmark}} --target_platform={{.TargetPlatform}};"
	runChromiumPerfTemplateParsed := template.Must(template.New("run_chromium_perf_cmd").Parse(runChromiumPerfCmdTemplate))
	runChromiumPerfCmdBytes := new(bytes.Buffer)
	if err := runChromiumPerfTemplateParsed.Execute(runChromiumPerfCmdBytes, struct {
		WorkerNum                 string
		LogDir                    string
		PagesetType               string
		ChromiumBuildNoPatch      string
		ChromiumBuildWithPatch    string
		RunIDNoPatch              string
		RunIDWithPatch            string
		BenchmarkName             string
		BenchmarkExtraArgs        string
		BrowserExtraArgsNoPatch   string
		BrowserExtraArgsWithPatch string
		RepeatBenchmark           int
		TargetPlatform            string
	}{
		WorkerNum:                 util.WORKER_NUM_KEYWORD,
		LogDir:                    util.GLogDir,
		PagesetType:               *pagesetType,
		ChromiumBuildNoPatch:      chromiumBuildNoPatch,
		ChromiumBuildWithPatch:    chromiumBuildWithPatch,
		RunIDNoPatch:              runIDNoPatch,
		RunIDWithPatch:            runIDWithPatch,
		BenchmarkName:             *benchmarkName,
		BenchmarkExtraArgs:        *benchmarkExtraArgs,
		BrowserExtraArgsNoPatch:   *browserExtraArgsNoPatch,
		BrowserExtraArgsWithPatch: *browserExtraArgsWithPatch,
		RepeatBenchmark:           *repeatBenchmark,
		TargetPlatform:            *targetPlatform,
	}); err != nil {
		glog.Errorf("Failed to execute template: %s", err)
		return
	}
	cmd := []string{
		fmt.Sprintf("cd %s;", util.CtTreeDir),
		"git pull;",
		"make all;",
		// The main command that runs run_chromium_perf on all workers.
		runChromiumPerfCmdBytes.String(),
	}
	// Setting a 1 day timeout since it may take a while to run benchmarks with
	// many repeats.
	if _, err := util.SSH(strings.Join(cmd, " "), util.Slaves, 1*24*time.Hour); err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}
	// If "--output-format=csv-pivot-table" was specified then merge all CSV files and upload.
	if strings.Contains(*benchmarkExtraArgs, "--output-format=csv-pivot-table") {
		for _, runID := range []string{runIDNoPatch, runIDWithPatch} {
			if err := mergeUploadCSVFiles(runID, gs); err != nil {
				glog.Errorf("Unable to merge and upload CSV files for %s: %s", runID, err)
			}
		}
	}
	// Compare the resultant CSV files using csv_comparer.py.
	noPatchCSVPath := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runIDNoPatch, runIDNoPatch+".output")
	withPatchCSVPath := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runIDWithPatch, runIDWithPatch+".output")
	htmlOutputDir := filepath.Join(util.StorageDir, util.ChromiumPerfRunsDir, *runID, "html")
	skutil.MkdirAll(htmlOutputDir, 0700)
	htmlRemoteDir := filepath.Join(remoteOutputDir, "html")
	htmlOutputLinkBase := util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, htmlRemoteDir) + "/"
	htmlOutputLink = htmlOutputLinkBase + "index.html"
	noPatchOutputLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, util.BenchmarkRunsDir, runIDNoPatch, "consolidated_outputs", runIDNoPatch+".output")
	withPatchOutputLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, util.BenchmarkRunsDir, runIDWithPatch, "consolidated_outputs", runIDWithPatch+".output")
	// Construct path to the csv_comparer python script.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")
	pathToCsvComparer := filepath.Join(pathToPyFiles, "csv_comparer.py")
	args := []string{
		pathToCsvComparer,
		"--csv_file1=" + noPatchCSVPath,
		"--csv_file2=" + withPatchCSVPath,
		"--output_html=" + htmlOutputDir,
		"--variance_threshold=" + strconv.FormatFloat(*varianceThreshold, 'f', 2, 64),
		"--discard_outliers=" + strconv.FormatFloat(*discardOutliers, 'f', 2, 64),
		"--absolute_url=" + htmlOutputLinkBase,
		"--requester_email=" + *emails,
		"--skia_patch_link=" + skiaPatchLink,
		"--blink_patch_link=" + blinkPatchLink,
		"--chromium_patch_link=" + chromiumPatchLink,
		"--raw_csv_nopatch=" + noPatchOutputLink,
		"--raw_csv_withpatch=" + withPatchOutputLink,
		"--num_repeated=" + strconv.Itoa(*repeatBenchmark),
		"--target_platform=" + *targetPlatform,
		"--browser_args_nopatch=" + *browserExtraArgsNoPatch,
		"--browser_args_withpatch=" + *browserExtraArgsWithPatch,
		"--pageset_type=" + *pagesetType,
		"--chromium_hash=" + chromiumHash,
		"--skia_hash=" + skiaHash,
	}
	if err := util.ExecuteCmd("python", args, []string{}, 2*time.Hour, nil, nil); err != nil {
		glog.Errorf("Error running csv_comparer.py: %s", err)
		return
	}
	// Copy the HTML files to Google Storage.
	if err := gs.UploadDir(htmlOutputDir, htmlRemoteDir, true); err != nil {
		glog.Errorf("Could not upload %s to %s: %s", htmlOutputDir, htmlRemoteDir, err)
		return
	}
	taskCompletedSuccessfully = true
}
func main() { defer common.LogPanic() common.Init() defer util.TimeTrack(time.Now(), "Creating Pagesets") defer glog.Flush() // Create the task file so that the master knows this worker is still busy. skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_CREATING_PAGESETS)) defer util.DeleteTaskFile(util.ACTIVITY_CREATING_PAGESETS) // Delete and remake the local pagesets directory. pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType) skutil.RemoveAll(pathToPagesets) skutil.MkdirAll(pathToPagesets, 0700) // Get info about the specified pageset type. pagesetTypeInfo := util.PagesetTypeToInfo[*pagesetType] csvSource := pagesetTypeInfo.CSVSource numPages := pagesetTypeInfo.NumPages userAgent := pagesetTypeInfo.UserAgent // Download the CSV file from Google Storage to a tmp location. gs, err := util.NewGsUtil(nil) if err != nil { glog.Error(err) return } respBody, err := gs.GetRemoteFileContents(csvSource) if err != nil { glog.Error(err) return } defer skutil.Close(respBody) csvFile := filepath.Join(os.TempDir(), filepath.Base(csvSource)) out, err := os.Create(csvFile) if err != nil { glog.Errorf("Unable to create file %s: %s", csvFile, err) return } defer skutil.Close(out) defer skutil.Remove(csvFile) if _, err = io.Copy(out, respBody); err != nil { glog.Error(err) return } // Figure out which pagesets this worker should generate. numPagesPerSlave := numPages / util.NUM_WORKERS startNum := (*workerNum-1)*numPagesPerSlave + 1 endNum := *workerNum * numPagesPerSlave // Construct path to the create_page_set.py python script. _, currentFile, _, _ := runtime.Caller(0) createPageSetScript := filepath.Join( filepath.Dir((filepath.Dir(filepath.Dir(filepath.Dir(currentFile))))), "py", "create_page_set.py") // Execute the create_page_set.py python script. timeoutSecs := util.PagesetTypeToInfo[*pagesetType].CreatePagesetsTimeoutSecs for currNum := startNum; currNum <= endNum; currNum++ { args := []string{ createPageSetScript, "-s", strconv.Itoa(currNum), "-e", strconv.Itoa(currNum), "-c", csvFile, "-p", *pagesetType, "-u", userAgent, "-o", pathToPagesets, } if err := util.ExecuteCmd("python", args, []string{}, time.Duration(timeoutSecs)*time.Second, nil, nil); err != nil { glog.Error(err) return } } // Write timestamp to the pagesets dir. skutil.LogErr(util.CreateTimestampFile(pathToPagesets)) // Upload pagesets dir to Google Storage. if err := gs.UploadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil { glog.Error(err) return } }
func Init() {
	common.Init()
	initRest()
}
func main() { defer common.LogPanic() common.Init() defer util.TimeTrack(time.Now(), "Running Lua Scripts") defer glog.Flush() if *chromiumBuild == "" { glog.Error("Must specify --chromium_build") return } if *runID == "" { glog.Error("Must specify --run_id") return } // Create the task file so that the master knows this worker is still busy. skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_RUNNING_LUA_SCRIPTS)) defer util.DeleteTaskFile(util.ACTIVITY_RUNNING_LUA_SCRIPTS) // Sync Skia tree. skutil.LogErr(util.SyncDir(util.SkiaTreeDir)) // Build tools. skutil.LogErr(util.BuildSkiaTools()) // Instantiate GsUtil object. gs, err := util.NewGsUtil(nil) if err != nil { glog.Error(err) return } // Download SKPs if they do not exist locally. if err := gs.DownloadWorkerArtifacts(util.SKPS_DIR_NAME, filepath.Join(*pagesetType, *chromiumBuild), *workerNum); err != nil { glog.Error(err) return } localSkpsDir := filepath.Join(util.SkpsDir, *pagesetType, *chromiumBuild) // Download the lua script for this run from Google storage. luaScriptName := *runID + ".lua" luaScriptLocalPath := filepath.Join(os.TempDir(), luaScriptName) remoteDir := filepath.Join(util.LuaRunsDir, *runID) luaScriptRemotePath := filepath.Join(remoteDir, "scripts", luaScriptName) respBody, err := gs.GetRemoteFileContents(luaScriptRemotePath) if err != nil { glog.Errorf("Could not fetch %s: %s", luaScriptRemotePath, err) return } defer skutil.Close(respBody) out, err := os.Create(luaScriptLocalPath) if err != nil { glog.Errorf("Unable to create file %s: %s", luaScriptLocalPath, err) return } defer skutil.Close(out) defer skutil.Remove(luaScriptLocalPath) if _, err = io.Copy(out, respBody); err != nil { glog.Error(err) return } // Run lua_pictures and save stdout and stderr in files. stdoutFileName := *runID + ".output" stdoutFilePath := filepath.Join(os.TempDir(), stdoutFileName) stdoutFile, err := os.Create(stdoutFilePath) defer skutil.Close(stdoutFile) defer skutil.Remove(stdoutFilePath) if err != nil { glog.Errorf("Could not create %s: %s", stdoutFilePath, err) return } stderrFileName := *runID + ".err" stderrFilePath := filepath.Join(os.TempDir(), stderrFileName) stderrFile, err := os.Create(stderrFilePath) defer skutil.Close(stderrFile) defer skutil.Remove(stderrFilePath) if err != nil { glog.Errorf("Could not create %s: %s", stderrFilePath, err) return } args := []string{ "--skpPath", localSkpsDir, "--luaFile", luaScriptLocalPath, } err = util.ExecuteCmd( filepath.Join(util.SkiaTreeDir, "out", "Release", util.BINARY_LUA_PICTURES), args, []string{}, util.LUA_PICTURES_TIMEOUT, stdoutFile, stderrFile) if err != nil { glog.Error(err) return } // Copy stdout and stderr files to Google Storage. skutil.LogErr( gs.UploadFile(stdoutFileName, os.TempDir(), filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum), "outputs"))) skutil.LogErr( gs.UploadFile(stderrFileName, os.TempDir(), filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum), "errors"))) }
// Find and run tests.
func main() {
	defer common.LogPanic()
	common.Init()
	defer timer.New("Finished").Stop()
	_, filename, _, _ := runtime.Caller(0)
	rootDir := path.Dir(filename)
	// If we are running full tests make sure we have the latest
	// pdfium_test installed.
	if !*short {
		glog.Info("Installing pdfium_test if necessary.")
		pdfiumInstall := path.Join(rootDir, "pdfium", "install_pdfium.sh")
		if err := exec.Command(pdfiumInstall).Run(); err != nil {
			glog.Fatalf("Failed to install pdfium_test: %v", err)
		}
		glog.Info("Latest pdfium_test installed successfully.")
	}
	// Gather all of the tests to run.
	glog.Info("Searching for tests.")
	tests := []*test{}
	// Search for Python tests in the repo.
	if err := filepath.Walk(rootDir, func(p string, info os.FileInfo, err error) error {
		basename := path.Base(p)
		// Skip some directories.
		if info.IsDir() {
			for _, skip := range NO_CRAWL_DIR_NAMES {
				if basename == skip {
					return filepath.SkipDir
				}
			}
			for _, skip := range NO_CRAWL_REL_PATHS {
				if p == path.Join(rootDir, skip) {
					return filepath.SkipDir
				}
			}
		}
		if strings.HasSuffix(basename, "_test.py") {
			tests = append(tests, pythonTest(p))
		}
		return nil
	}); err != nil {
		glog.Fatal(err)
	}
	// Go tests.
	for goDir, extraLog := range GO_TEST_DIRS_AND_VERBOSITY {
		tests = append(tests, goTest(goDir, extraLog))
	}
	// Other tests.
	tests = append(tests, cmdTest([]string{"go", "vet", "./..."}, ".", "go vet"))
	tests = append(tests, cmdTest([]string{"errcheck", "go.skia.org/infra/..."}, ".", "errcheck"))
	tests = append(tests, polylintTests()...)
	goimportsCmd := []string{"goimports", "-l", "."}
	tests = append(tests, &test{
		Name: "goimports",
		Cmd:  strings.Join(goimportsCmd, " "),
		run: func() (error, string) {
			command := exec.Command(goimportsCmd[0], goimportsCmd[1:]...)
			output, err := command.Output()
			outStr := strings.Trim(string(output), "\n")
			if err != nil {
				if _, err2 := exec.LookPath(goimportsCmd[0]); err2 != nil {
					return fmt.Errorf(ERR_NEED_INSTALL, goimportsCmd[0], err), outStr
				}
				return err, outStr
			}
			diffFiles := strings.Split(outStr, "\n")
			if len(diffFiles) > 0 && !(len(diffFiles) == 1 && diffFiles[0] == "") {
				return fmt.Errorf("goimports found diffs in the following files:\n - %s", strings.Join(diffFiles, ",\n - ")), outStr
			}
			return nil, ""
		},
	})
	// Run the tests.
	glog.Infof("Found %d tests.", len(tests))
	var mutex sync.Mutex
	errors := map[string]error{}
	var wg sync.WaitGroup
	for _, t := range tests {
		wg.Add(1)
		go func(t *test) {
			defer wg.Done()
			if err := t.Run(); err != nil {
				mutex.Lock()
				errors[t.Name] = err
				mutex.Unlock()
			}
		}(t)
	}
	wg.Wait()
	if len(errors) > 0 {
		for _, e := range errors {
			glog.Error(e)
		}
		os.Exit(1)
	}
	glog.Info("All tests succeeded.")
}
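// pythonTest, goTest, and cmdTest construct *test values and are defined
// elsewhere in this file. A rough sketch of the assumed shape of cmdTest
// (illustrative, not the actual implementation):
//
//	func cmdTest(cmd []string, cwd, name string) *test {
//		return &test{
//			Name: name,
//			Cmd:  strings.Join(cmd, " "),
//			run: func() (error, string) {
//				command := exec.Command(cmd[0], cmd[1:]...)
//				command.Dir = cwd
//				output, err := command.CombinedOutput()
//				return err, string(output)
//			},
//		}
//	}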
func main() { defer common.LogPanic() common.Init() defer util.TimeTrack(time.Now(), "Capturing Archives") defer glog.Flush() // Create the task file so that the master knows this worker is still busy. skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_CAPTURING_ARCHIVES)) defer util.DeleteTaskFile(util.ACTIVITY_CAPTURING_ARCHIVES) if *chromiumBuild == "" { glog.Error("Must specify --chromium_build") return } // Reset the local chromium checkout. if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil { glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err) return } // Sync the local chromium checkout. if err := util.SyncDir(util.ChromiumSrcDir); err != nil { glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir, err) return } // Delete and remake the local webpage archives directory. pathToArchives := filepath.Join(util.WebArchivesDir, *pagesetType) skutil.RemoveAll(pathToArchives) skutil.MkdirAll(pathToArchives, 0700) // Instantiate GsUtil object. gs, err := util.NewGsUtil(nil) if err != nil { glog.Error(err) return } // Download the specified chromium build if it does not exist locally. if err := gs.DownloadChromiumBuild(*chromiumBuild); err != nil { glog.Error(err) return } // Download pagesets if they do not exist locally. if err := gs.DownloadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil { glog.Error(err) return } pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType) chromiumBinary := filepath.Join(util.ChromiumBuildsDir, *chromiumBuild, util.BINARY_CHROME) recordWprBinary := filepath.Join(util.TelemetryBinariesDir, util.BINARY_RECORD_WPR) timeoutSecs := util.PagesetTypeToInfo[*pagesetType].CaptureArchivesTimeoutSecs // Loop through all pagesets. fileInfos, err := ioutil.ReadDir(pathToPagesets) if err != nil { glog.Errorf("Unable to read the pagesets dir %s: %s", pathToPagesets, err) return } // TODO(rmistry): Remove this hack once the 1M webpage archives have been captured. glog.Infof("The length of fileInfos is: %s", len(fileInfos)) fileInfos = fileInfos[18500:20000] glog.Infof("The fileInfos are: %s", fileInfos) for _, fileInfo := range fileInfos { pagesetBaseName := filepath.Base(fileInfo.Name()) if pagesetBaseName == util.TIMESTAMP_FILE_NAME || filepath.Ext(pagesetBaseName) == ".pyc" { // Ignore timestamp files and .pyc files. continue } // Convert the filename into a format consumable by the record_wpr binary. pagesetArchiveName := strings.TrimSuffix(pagesetBaseName, filepath.Ext(pagesetBaseName)) pagesetPath := filepath.Join(pathToPagesets, fileInfo.Name()) glog.Infof("===== Processing %s =====", pagesetPath) args := []string{ "--extra-browser-args=--disable-setuid-sandbox", "--browser=exact", "--browser-executable=" + chromiumBinary, fmt.Sprintf("%s_page_set", pagesetArchiveName), "--page-set-base-dir=" + pathToPagesets, } env := []string{ fmt.Sprintf("PYTHONPATH=%s:$PYTHONPATH", pathToPagesets), "DISPLAY=:0", } skutil.LogErr(util.ExecuteCmd(recordWprBinary, args, env, time.Duration(timeoutSecs)*time.Second, nil, nil)) } // Write timestamp to the webpage archives dir. skutil.LogErr(util.CreateTimestampFile(pathToArchives)) // Upload webpage archives dir to Google Storage. if err := gs.UploadWorkerArtifacts(util.WEB_ARCHIVES_DIR_NAME, *pagesetType, *workerNum); err != nil { glog.Error(err) return } }
func main() {
	common.Init()
	webhook.MustInitRequestSaltFromFile(util.WebhookRequestSaltPath)
	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&frontend.CaptureSkpsUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Capture SKPs"))
	// Ensure webapp is updated and completion email is sent even if task
	// fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running capture skps task on workers")
	defer glog.Flush()
	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}
	// Run the capture_skps script on all workers.
	captureSKPsCmdTemplate := "DISPLAY=:0 capture_skps --worker_num={{.WorkerNum}} --log_dir={{.LogDir}} " +
		"--pageset_type={{.PagesetType}} --chromium_build={{.ChromiumBuild}} --run_id={{.RunID}} " +
		"--target_platform={{.TargetPlatform}};"
	captureSKPsTemplateParsed := template.Must(template.New("capture_skps_cmd").Parse(captureSKPsCmdTemplate))
	captureSKPsCmdBytes := new(bytes.Buffer)
	if err := captureSKPsTemplateParsed.Execute(captureSKPsCmdBytes, struct {
		WorkerNum      string
		LogDir         string
		PagesetType    string
		ChromiumBuild  string
		RunID          string
		TargetPlatform string
	}{
		WorkerNum:      util.WORKER_NUM_KEYWORD,
		LogDir:         util.GLogDir,
		PagesetType:    *pagesetType,
		ChromiumBuild:  *chromiumBuild,
		RunID:          *runID,
		TargetPlatform: *targetPlatform,
	}); err != nil {
		glog.Errorf("Failed to execute template: %s", err)
		return
	}
	cmd := []string{
		fmt.Sprintf("cd %s;", util.CtTreeDir),
		"git pull;",
		"make all;",
		// The main command that runs capture_skps on all workers.
		captureSKPsCmdBytes.String(),
	}
	// Setting a 2 day timeout since it may take a while to capture 1M SKPs.
	if _, err := util.SSH(strings.Join(cmd, " "), util.Slaves, 2*24*time.Hour); err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}
	taskCompletedSuccessfully = true
}