func runBenchmark(fileInfoName, pathToPagesets, pathToPyFiles, localOutputDir, chromiumBuildName, chromiumBinary, runID, browserExtraArgs string) error {
	pagesetBaseName := filepath.Base(fileInfoName)
	if pagesetBaseName == util.TIMESTAMP_FILE_NAME || filepath.Ext(pagesetBaseName) == ".pyc" {
		// Ignore timestamp files and .pyc files.
		return nil
	}

	// Convert the filename into a format consumable by the run_benchmarks
	// binary.
	pagesetName := strings.TrimSuffix(pagesetBaseName, filepath.Ext(pagesetBaseName))
	pagesetPath := filepath.Join(pathToPagesets, fileInfoName)

	glog.Infof("===== Processing %s for %s =====", pagesetPath, runID)

	skutil.LogErr(os.Chdir(pathToPyFiles))
	args := []string{
		util.BINARY_RUN_BENCHMARK,
		fmt.Sprintf("%s.%s", *benchmarkName, util.BenchmarksToPagesetName[*benchmarkName]),
		"--page-set-name=" + pagesetName,
		"--page-set-base-dir=" + pathToPagesets,
		"--also-run-disabled-tests",
	}

	// Need to capture output for all benchmarks.
	outputDirArgValue := filepath.Join(localOutputDir, pagesetName)
	args = append(args, "--output-dir="+outputDirArgValue)

	// Figure out which browser should be used.
	if *targetPlatform == util.PLATFORM_ANDROID {
		if err := installChromeAPK(chromiumBuildName); err != nil {
			return fmt.Errorf("Error while installing APK: %s", err)
		}
		args = append(args, "--browser=android-chrome-shell")
	} else {
		args = append(args, "--browser=exact", "--browser-executable="+chromiumBinary)
	}

	// Split benchmark args if not empty and append to args.
	if *benchmarkExtraArgs != "" {
		args = append(args, strings.Split(*benchmarkExtraArgs, " ")...)
	}

	// Add the number of times to repeat.
	args = append(args, fmt.Sprintf("--page-repeat=%d", *repeatBenchmark))

	// Add browserArgs if not empty to args.
	if browserExtraArgs != "" {
		args = append(args, "--extra-browser-args="+browserExtraArgs)
	}

	// Set the PYTHONPATH to the pagesets and the telemetry dirs.
	env := []string{
		fmt.Sprintf("PYTHONPATH=%s:%s:%s:$PYTHONPATH", pathToPagesets, util.TelemetryBinariesDir, util.TelemetrySrcDir),
		"DISPLAY=:0",
	}

	timeoutSecs := util.PagesetTypeToInfo[*pagesetType].RunChromiumPerfTimeoutSecs
	if err := util.ExecuteCmd("python", args, env, time.Duration(timeoutSecs)*time.Second, nil, nil); err != nil {
		glog.Errorf("Run benchmark command failed with: %s", err)
		glog.Errorf("Killing all running chrome processes in case there is a non-recoverable error.")
		skutil.LogErr(util.ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, 5*time.Minute, nil, nil))
	}
	return nil
}
func installChromeAPK(chromiumBuildName string) error {
	// Install the APK on the Android device.
	chromiumApk := filepath.Join(util.ChromiumBuildsDir, chromiumBuildName, util.ApkName)
	glog.Infof("Installing the APK at %s", chromiumApk)
	if err := util.ExecuteCmd(util.BINARY_ADB, []string{"install", "-r", chromiumApk}, []string{}, 15*time.Minute, nil, nil); err != nil {
		return fmt.Errorf("Could not install the chromium APK at %s: %s", chromiumApk, err)
	}
	return nil
}
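// NOTE: util.ExecuteCmd is used throughout these files but is defined
// elsewhere in the util package. Inferred purely from its call sites
// (binary, args, env, timeout, optional stdout/stderr writers), a minimal
// sketch might look like the following, using only the standard context,
// os, os/exec, io and time packages. This illustrates the assumed signature;
// it is not the actual util implementation.
func executeCmdSketch(binary string, args, env []string, timeout time.Duration, stdout, stderr io.Writer) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, binary, args...)
	// Append the provided env entries on top of the inherited environment.
	cmd.Env = append(os.Environ(), env...)
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	return cmd.Run()
}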
// SKPs are captured in parallel, which brings up multiple chrome instances at
// the same time. When there are crashes, chrome processes stick around, which
// can severely impact the machine's performance. To stop this from happening,
// zombie chrome processes are periodically killed.
func chromeProcessesCleaner(mutex *sync.RWMutex) {
	for range time.Tick(*chromeCleanerTimer) {
		glog.Info("The chromeProcessesCleaner goroutine has started")
		glog.Info("Waiting for all existing tasks to complete before killing zombie chrome processes")
		mutex.Lock()
		skutil.LogErr(util.ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, util.PKILL_TIMEOUT, nil, nil))
		mutex.Unlock()
	}
}
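// A minimal usage sketch for the cleaner above (all names other than
// chromeProcessesCleaner are hypothetical): workers hold the read lock while
// a task is in flight, so the cleaner's write lock can only be acquired
// between tasks and never kills a chrome instance doing real work.
func exampleWorkerLoop(tasks <-chan string, mutex *sync.RWMutex) {
	go chromeProcessesCleaner(mutex)
	for task := range tasks {
		mutex.RLock()
		processTask(task) // hypothetical per-task work that may spawn chrome
		mutex.RUnlock()
	}
}

func processTask(task string) {
	// Placeholder for per-task work (e.g. capturing one SKP).
	glog.Infof("processing %s", task)
}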
func mergeUploadCSVFiles(localOutputDir, pathToPyFiles, runID, remoteDir string, gs *util.GsUtil) error {
	// Move all results into a single directory.
	fileInfos, err := ioutil.ReadDir(localOutputDir)
	if err != nil {
		return fmt.Errorf("Unable to read %s: %s", localOutputDir, err)
	}
	for _, fileInfo := range fileInfos {
		if !fileInfo.IsDir() {
			continue
		}
		outputFile := filepath.Join(localOutputDir, fileInfo.Name(), "results-pivot-table.csv")
		newFile := filepath.Join(localOutputDir, fmt.Sprintf("%s.csv", fileInfo.Name()))
		if err := os.Rename(outputFile, newFile); err != nil {
			glog.Errorf("Could not rename %s to %s: %s", outputFile, newFile, err)
			continue
		}
		// Add the rank of the page to the CSV file.
		headers, values, err := getRowsFromCSV(newFile)
		if err != nil {
			glog.Errorf("Could not read %s: %s", newFile, err)
			continue
		}
		pageRank := strings.Split(fileInfo.Name(), "_")[1]
		for i := range headers {
			if headers[i] != "page" {
				continue
			}
			for j := range values {
				values[j][i] = fmt.Sprintf("%s (#%s)", values[j][i], pageRank)
			}
		}
		if err := writeRowsToCSV(newFile, headers, values); err != nil {
			glog.Errorf("Could not write to %s: %s", newFile, err)
			continue
		}
	}

	// Call csv_pivot_table_merger.py to merge all results into a single results CSV.
	pathToCsvMerger := filepath.Join(pathToPyFiles, "csv_pivot_table_merger.py")
	outputFileName := runID + ".output"
	args := []string{
		pathToCsvMerger,
		"--csv_dir=" + localOutputDir,
		"--output_csv_name=" + filepath.Join(localOutputDir, outputFileName),
	}
	err = util.ExecuteCmd("python", args, []string{}, util.CSV_PIVOT_TABLE_MERGER_TIMEOUT, nil, nil)
	if err != nil {
		return fmt.Errorf("Error running csv_pivot_table_merger.py: %s", err)
	}

	// Copy the output file to Google Storage.
	remoteOutputDir := filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum), "outputs")
	if err := gs.UploadFile(outputFileName, localOutputDir, remoteOutputDir); err != nil {
		return fmt.Errorf("Unable to upload %s to %s: %s", outputFileName, remoteOutputDir, err)
	}
	return nil
}
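// NOTE: getRowsFromCSV and writeRowsToCSV are called above but are not part
// of this excerpt. A minimal sketch of what they might look like, assuming
// the first CSV record is the header row and every following record is a row
// of values (uses the standard encoding/csv package):
func getRowsFromCSVSketch(csvPath string) ([]string, [][]string, error) {
	f, err := os.Open(csvPath)
	if err != nil {
		return nil, nil, err
	}
	defer f.Close()
	records, err := csv.NewReader(f).ReadAll()
	if err != nil {
		return nil, nil, err
	}
	if len(records) == 0 {
		return nil, nil, fmt.Errorf("no data in %s", csvPath)
	}
	return records[0], records[1:], nil
}

func writeRowsToCSVSketch(csvPath string, headers []string, values [][]string) error {
	f, err := os.Create(csvPath)
	if err != nil {
		return err
	}
	defer f.Close()
	w := csv.NewWriter(f)
	if err := w.Write(headers); err != nil {
		return err
	}
	// WriteAll flushes the writer and returns any error from the Flush.
	return w.WriteAll(values)
}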
func mergeUploadCSVFiles(runID string, gs *util.GsUtil) error {
	localOutputDir := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runID)
	skutil.MkdirAll(localOutputDir, 0700)
	// Copy outputs from all slaves locally.
	for i := 0; i < util.NUM_WORKERS; i++ {
		workerNum := i + 1
		workerLocalOutputPath := filepath.Join(localOutputDir, fmt.Sprintf("slave%d", workerNum)+".csv")
		workerRemoteOutputPath := filepath.Join(util.BenchmarkRunsDir, runID, fmt.Sprintf("slave%d", workerNum), "outputs", runID+".output")
		respBody, err := gs.GetRemoteFileContents(workerRemoteOutputPath)
		if err != nil {
			glog.Errorf("Could not fetch %s: %s", workerRemoteOutputPath, err)
			// TODO(rmistry): Should we instead return here? We can only return
			// here if all 100 slaves reliably run without any failures which
			// they really should.
			continue
		}
		defer skutil.Close(respBody)
		out, err := os.Create(workerLocalOutputPath)
		if err != nil {
			return fmt.Errorf("Unable to create file %s: %s", workerLocalOutputPath, err)
		}
		defer skutil.Close(out)
		defer skutil.Remove(workerLocalOutputPath)
		if _, err = io.Copy(out, respBody); err != nil {
			return fmt.Errorf("Unable to copy to file %s: %s", workerLocalOutputPath, err)
		}
	}

	// Call csv_merger.py to merge all results into a single results CSV.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")
	pathToCsvMerger := filepath.Join(pathToPyFiles, "csv_merger.py")
	outputFileName := runID + ".output"
	args := []string{
		pathToCsvMerger,
		"--csv_dir=" + localOutputDir,
		"--output_csv_name=" + filepath.Join(localOutputDir, outputFileName),
	}
	if err := util.ExecuteCmd("python", args, []string{}, 1*time.Hour, nil, nil); err != nil {
		return fmt.Errorf("Error running csv_merger.py: %s", err)
	}

	// Copy the output file to Google Storage.
	remoteOutputDir := filepath.Join(util.BenchmarkRunsDir, runID, "consolidated_outputs")
	if err := gs.UploadFile(outputFileName, localOutputDir, remoteOutputDir); err != nil {
		return fmt.Errorf("Unable to upload %s to %s: %s", outputFileName, remoteOutputDir, err)
	}
	return nil
}
func mergeUploadCSVFiles(runID string, gs *util.GsUtil) ([]string, error) {
	localOutputDir := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runID)
	skutil.MkdirAll(localOutputDir, 0700)
	noOutputSlaves := []string{}
	// Copy outputs from all slaves locally.
	for i := 0; i < util.NumWorkers(); i++ {
		workerNum := i + 1
		workerLocalOutputPath := filepath.Join(localOutputDir, fmt.Sprintf("slave%d", workerNum)+".csv")
		workerRemoteOutputPath := filepath.Join(util.BenchmarkRunsDir, runID, fmt.Sprintf("slave%d", workerNum), "outputs", runID+".output")
		respBody, err := gs.GetRemoteFileContents(workerRemoteOutputPath)
		if err != nil {
			glog.Errorf("Could not fetch %s: %s", workerRemoteOutputPath, err)
			noOutputSlaves = append(noOutputSlaves, fmt.Sprintf(util.WORKER_NAME_TEMPLATE, workerNum))
			continue
		}
		defer skutil.Close(respBody)
		out, err := os.Create(workerLocalOutputPath)
		if err != nil {
			return noOutputSlaves, fmt.Errorf("Unable to create file %s: %s", workerLocalOutputPath, err)
		}
		defer skutil.Close(out)
		defer skutil.Remove(workerLocalOutputPath)
		if _, err = io.Copy(out, respBody); err != nil {
			return noOutputSlaves, fmt.Errorf("Unable to copy to file %s: %s", workerLocalOutputPath, err)
		}
	}

	// Call csv_merger.py to merge all results into a single results CSV.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")
	pathToCsvMerger := filepath.Join(pathToPyFiles, "csv_merger.py")
	outputFileName := runID + ".output"
	args := []string{
		pathToCsvMerger,
		"--csv_dir=" + localOutputDir,
		"--output_csv_name=" + filepath.Join(localOutputDir, outputFileName),
	}
	err := util.ExecuteCmd("python", args, []string{}, util.CSV_MERGER_TIMEOUT, nil, nil)
	if err != nil {
		return noOutputSlaves, fmt.Errorf("Error running csv_merger.py: %s", err)
	}

	// Copy the output file to Google Storage.
	remoteOutputDir := filepath.Join(util.BenchmarkRunsDir, runID, "consolidated_outputs")
	if err := gs.UploadFile(outputFileName, localOutputDir, remoteOutputDir); err != nil {
		return noOutputSlaves, fmt.Errorf("Unable to upload %s to %s: %s", outputFileName, remoteOutputDir, err)
	}
	return noOutputSlaves, nil
}
func runRenderPictures(localSkpsDir, localOutputDir, remoteOutputDir string, runGpu bool) error {
	picturesArgs := *renderPicturesArgs
	if runGpu {
		glog.Info("Run with GPU has been specified. Using --config gpu.")
		reg := regexp.MustCompile("--config [a-zA-Z0-9]+")
		picturesArgs = reg.ReplaceAllString(picturesArgs, "--config gpu")
	}
	skutil.MkdirAll(localOutputDir, 0700)
	args := []string{
		"-r", localSkpsDir,
		"-w", localOutputDir,
		"--writeJsonSummaryPath", filepath.Join(localOutputDir, "summary.json"),
		"--imageBaseGSUrl", remoteOutputDir,
	}
	args = append(args, strings.Split(picturesArgs, " ")...)
	if err := util.ExecuteCmd(filepath.Join(util.SkiaTreeDir, "out", "Release", util.BINARY_RENDER_PICTURES), args, []string{"DISPLAY=:0"}, 15*time.Minute, nil, nil); err != nil {
		return fmt.Errorf("Failure when running render_pictures: %s", err)
	}
	return nil
}
// VerifyLocalDevice returns nil if an Android device is connected and online.
// An error is returned if either "adb" is not installed or if the Android
// device is offline or missing.
func VerifyLocalDevice() error {
	// Run "adb version". The command should return without an error.
	err := util.ExecuteCmd(util.BINARY_ADB, []string{"version"}, []string{}, util.ADB_VERSION_TIMEOUT, nil, nil)
	if err != nil {
		return fmt.Errorf("adb not installed or not found: %s", err)
	}

	// Run "adb devices | grep offline". The command should return with an error.
	devicesCmd := exec.Command(util.BINARY_ADB, "devices")
	offlineCmd := exec.Command("grep", "offline")
	offlineCmd.Stdin, _ = devicesCmd.StdoutPipe()
	offlineCmd.Stdout = skexec.WriteInfoLog
	skutil.LogErr(offlineCmd.Start())
	skutil.LogErr(devicesCmd.Run())
	if err := offlineCmd.Wait(); err == nil {
		// A nil error here means that an offline device was found.
		return fmt.Errorf("Android device is offline")
	}

	// Run "adb devices | grep device$". The command should return without an error.
	devicesCmd = exec.Command(util.BINARY_ADB, "devices")
	missingCmd := exec.Command("grep", "device$")
	missingCmd.Stdin, _ = devicesCmd.StdoutPipe()
	missingCmd.Stdout = skexec.WriteInfoLog
	skutil.LogErr(missingCmd.Start())
	skutil.LogErr(devicesCmd.Run())
	if err := missingCmd.Wait(); err != nil {
		// An error here means that the device is missing.
		return fmt.Errorf("Android device is missing: %s", err)
	}
	return nil
}
func main() { defer common.LogPanic() common.Init() defer util.TimeTrack(time.Now(), "Creating Pagesets") defer glog.Flush() // Create the task file so that the master knows this worker is still busy. skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_CREATING_PAGESETS)) defer util.DeleteTaskFile(util.ACTIVITY_CREATING_PAGESETS) // Delete and remake the local pagesets directory. pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType) skutil.RemoveAll(pathToPagesets) skutil.MkdirAll(pathToPagesets, 0700) // Get info about the specified pageset type. pagesetTypeInfo := util.PagesetTypeToInfo[*pagesetType] csvSource := pagesetTypeInfo.CSVSource numPages := pagesetTypeInfo.NumPages userAgent := pagesetTypeInfo.UserAgent // Download the CSV file from Google Storage to a tmp location. gs, err := util.NewGsUtil(nil) if err != nil { glog.Error(err) return } respBody, err := gs.GetRemoteFileContents(csvSource) if err != nil { glog.Error(err) return } defer skutil.Close(respBody) csvFile := filepath.Join(os.TempDir(), filepath.Base(csvSource)) out, err := os.Create(csvFile) if err != nil { glog.Errorf("Unable to create file %s: %s", csvFile, err) return } defer skutil.Close(out) defer skutil.Remove(csvFile) if _, err = io.Copy(out, respBody); err != nil { glog.Error(err) return } // Figure out which pagesets this worker should generate. numPagesPerSlave := numPages / util.NUM_WORKERS startNum := (*workerNum-1)*numPagesPerSlave + 1 endNum := *workerNum * numPagesPerSlave // Construct path to the create_page_set.py python script. _, currentFile, _, _ := runtime.Caller(0) createPageSetScript := filepath.Join( filepath.Dir((filepath.Dir(filepath.Dir(filepath.Dir(currentFile))))), "py", "create_page_set.py") // Execute the create_page_set.py python script. timeoutSecs := util.PagesetTypeToInfo[*pagesetType].CreatePagesetsTimeoutSecs for currNum := startNum; currNum <= endNum; currNum++ { args := []string{ createPageSetScript, "-s", strconv.Itoa(currNum), "-e", strconv.Itoa(currNum), "-c", csvFile, "-p", *pagesetType, "-u", userAgent, "-o", pathToPagesets, } if err := util.ExecuteCmd("python", args, []string{}, time.Duration(timeoutSecs)*time.Second, nil, nil); err != nil { glog.Error(err) return } } // Write timestamp to the pagesets dir. skutil.LogErr(util.CreateTimestampFile(pathToPagesets)) // Upload pagesets dir to Google Storage. if err := gs.UploadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil { glog.Error(err) return } }
func main() {
	defer common.LogPanic()
	worker_common.Init()
	if !*worker_common.Local {
		defer util.CleanTmpDir()
	}
	defer util.TimeTrack(time.Now(), "Capturing SKPs")
	defer glog.Flush()

	// Validate required arguments.
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}
	if *targetPlatform == util.PLATFORM_ANDROID {
		glog.Error("Android is not yet supported for capturing SKPs.")
		return
	}

	// Reset the local chromium checkout.
	if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil {
		glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err)
		return
	}
	// Sync the local chromium checkout.
	if err := util.SyncDir(util.ChromiumSrcDir); err != nil {
		glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir, err)
		return
	}

	// Create the task file so that the master knows this worker is still busy.
	skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_CAPTURING_SKPS))
	defer util.DeleteTaskFile(util.ACTIVITY_CAPTURING_SKPS)

	// Instantiate GsUtil object.
	gs, err := util.NewGsUtil(nil)
	if err != nil {
		glog.Error(err)
		return
	}

	// Download the specified chromium build.
	if err := gs.DownloadChromiumBuild(*chromiumBuild); err != nil {
		glog.Error(err)
		return
	}
	// Delete the chromium build to save space when we are done.
	defer skutil.RemoveAll(filepath.Join(util.ChromiumBuildsDir, *chromiumBuild))
	chromiumBinary := filepath.Join(util.ChromiumBuildsDir, *chromiumBuild, util.BINARY_CHROME)
	if *targetPlatform == util.PLATFORM_ANDROID {
		// Install the APK on the Android device.
		if err := util.InstallChromeAPK(*chromiumBuild); err != nil {
			glog.Errorf("Could not install the chromium APK: %s", err)
			return
		}
	}

	// Download pagesets if they do not exist locally.
	if err := gs.DownloadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil {
		glog.Error(err)
		return
	}
	pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType)

	// Download archives if they do not exist locally.
	if err := gs.DownloadWorkerArtifacts(util.WEB_ARCHIVES_DIR_NAME, *pagesetType, *workerNum); err != nil {
		glog.Error(err)
		return
	}

	// Create the dir that SKPs will be stored in.
	pathToSkps := filepath.Join(util.SkpsDir, *pagesetType, *chromiumBuild)
	// Delete and remake the local SKPs directory.
	skutil.RemoveAll(pathToSkps)
	skutil.MkdirAll(pathToSkps, 0700)

	// Establish output paths.
	localOutputDir := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, *runID)
	skutil.RemoveAll(localOutputDir)
	skutil.MkdirAll(localOutputDir, 0700)
	defer skutil.RemoveAll(localOutputDir)

	// Construct path to the ct_run_benchmark python script.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")

	timeoutSecs := util.PagesetTypeToInfo[*pagesetType].CaptureSKPsTimeoutSecs
	fileInfos, err := ioutil.ReadDir(pathToPagesets)
	if err != nil {
		glog.Errorf("Unable to read the pagesets dir %s: %s", pathToPagesets, err)
		return
	}

	// Create channel that contains all pageset file names. This channel will
	// be consumed by the worker pool.
	pagesetRequests := util.GetClosedChannelOfPagesets(fileInfos)

	var wg sync.WaitGroup
	// Use a RWMutex for the chromeProcessesCleaner goroutine to communicate to
	// the workers (acting as "readers") when it wants to be the "writer" and
	// kill all zombie chrome processes.
	var mutex sync.RWMutex

	// Loop through workers in the worker pool.
	for i := 0; i < WORKER_POOL_SIZE; i++ {
		// Increment the WaitGroup counter.
		wg.Add(1)
		// Create and run a goroutine closure that captures SKPs.
		go func() {
			// Decrement the WaitGroup counter when the goroutine completes.
			defer wg.Done()

			for pagesetName := range pagesetRequests {
				mutex.RLock()
				// Read the pageset.
				pagesetPath := filepath.Join(pathToPagesets, pagesetName)
				decodedPageset, err := util.ReadPageset(pagesetPath)
				if err != nil {
					glog.Errorf("Could not read %s: %s", pagesetPath, err)
					// Release the read lock before moving on so that the
					// cleaner is not blocked forever.
					mutex.RUnlock()
					continue
				}

				glog.Infof("===== Processing %s =====", pagesetPath)
				skutil.LogErr(os.Chdir(pathToPyFiles))
				args := []string{
					filepath.Join(util.TelemetryBinariesDir, util.BINARY_RUN_BENCHMARK),
					util.BenchmarksToTelemetryName[util.BENCHMARK_SKPICTURE_PRINTER],
					"--also-run-disabled-tests",
					"--page-repeat=1", // Only need one run for SKPs.
					"--skp-outdir=" + pathToSkps,
					"--extra-browser-args=" + util.DEFAULT_BROWSER_ARGS,
					"--user-agent=" + decodedPageset.UserAgent,
					"--urls-list=" + decodedPageset.UrlsList,
					"--archive-data-file=" + decodedPageset.ArchiveDataFile,
				}
				// Figure out which browser should be used.
				if *targetPlatform == util.PLATFORM_ANDROID {
					args = append(args, "--browser=android-chromium")
				} else {
					args = append(args, "--browser=exact", "--browser-executable="+chromiumBinary)
				}
				// Set the PYTHONPATH to the pagesets and the telemetry dirs.
				env := []string{
					fmt.Sprintf("PYTHONPATH=%s:%s:%s:$PYTHONPATH", pathToPagesets, util.TelemetryBinariesDir, util.TelemetrySrcDir),
					"DISPLAY=:0",
				}
				skutil.LogErr(
					util.ExecuteCmd("python", args, env, time.Duration(timeoutSecs)*time.Second, nil, nil))
				mutex.RUnlock()
			}
		}()
	}

	if !*worker_common.Local {
		// Start the cleaner.
		go util.ChromeProcessesCleaner(&mutex, *chromeCleanerTimer)
	}

	// Wait for all spawned goroutines to complete.
	wg.Wait()

	// Move, validate and upload all SKP files.
	// List all directories in pathToSkps and copy out the skps.
	skpFileInfos, err := ioutil.ReadDir(pathToSkps)
	if err != nil {
		glog.Errorf("Unable to read %s: %s", pathToSkps, err)
		return
	}
	for _, fileInfo := range skpFileInfos {
		if !fileInfo.IsDir() {
			// We are only interested in directories.
			continue
		}
		skpName := fileInfo.Name()
		// Find the largest layer in this directory.
		layerInfos, err := ioutil.ReadDir(filepath.Join(pathToSkps, skpName))
		if err != nil {
			glog.Errorf("Unable to read %s: %s", filepath.Join(pathToSkps, skpName), err)
		}
		if len(layerInfos) > 0 {
			largestLayerInfo := layerInfos[0]
			for _, layerInfo := range layerInfos {
				if layerInfo.Size() > largestLayerInfo.Size() {
					largestLayerInfo = layerInfo
				}
			}
			// Only save SKPs greater than 6000 bytes. Anything less is
			// probably malformed.
			if largestLayerInfo.Size() > 6000 {
				layerPath := filepath.Join(pathToSkps, skpName, largestLayerInfo.Name())
				skutil.Rename(layerPath, filepath.Join(pathToSkps, skpName+".skp"))
			} else {
				glog.Warningf("Skipping %s because size was less than 6000 bytes", skpName)
			}
		}
		// We extracted what we needed from the directory, now delete it.
		skutil.RemoveAll(filepath.Join(pathToSkps, skpName))
	}

	glog.Info("Calling remove_invalid_skps.py")
	// Sync Skia tree.
	skutil.LogErr(util.SyncDir(util.SkiaTreeDir))
	// Build tools.
	skutil.LogErr(util.BuildSkiaTools())
	// Run remove_invalid_skps.py
	pathToRemoveSKPs := filepath.Join(pathToPyFiles, "remove_invalid_skps.py")
	pathToSKPInfo := filepath.Join(util.SkiaTreeDir, "out", "Release", "skpinfo")
	args := []string{
		pathToRemoveSKPs,
		"--skp_dir=" + pathToSkps,
		"--path_to_skpinfo=" + pathToSKPInfo,
	}
	skutil.LogErr(util.ExecuteCmd("python", args, []string{}, util.REMOVE_INVALID_SKPS_TIMEOUT, nil, nil))

	// Write timestamp to the SKPs dir.
	skutil.LogErr(util.CreateTimestampFile(pathToSkps))

	// Upload SKPs dir to Google Storage.
	if err := gs.UploadWorkerArtifacts(util.SKPS_DIR_NAME, filepath.Join(*pagesetType, *chromiumBuild), *workerNum); err != nil {
		glog.Error(err)
		return
	}
}
func main() { defer common.LogPanic() common.Init() defer util.TimeTrack(time.Now(), "Running Lua Scripts") defer glog.Flush() if *chromiumBuild == "" { glog.Error("Must specify --chromium_build") return } if *runID == "" { glog.Error("Must specify --run_id") return } // Create the task file so that the master knows this worker is still busy. skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_RUNNING_LUA_SCRIPTS)) defer util.DeleteTaskFile(util.ACTIVITY_RUNNING_LUA_SCRIPTS) // Sync Skia tree. skutil.LogErr(util.SyncDir(util.SkiaTreeDir)) // Build tools. skutil.LogErr(util.BuildSkiaTools()) // Instantiate GsUtil object. gs, err := util.NewGsUtil(nil) if err != nil { glog.Error(err) return } // Download SKPs if they do not exist locally. if err := gs.DownloadWorkerArtifacts(util.SKPS_DIR_NAME, filepath.Join(*pagesetType, *chromiumBuild), *workerNum); err != nil { glog.Error(err) return } localSkpsDir := filepath.Join(util.SkpsDir, *pagesetType, *chromiumBuild) // Download the lua script for this run from Google storage. luaScriptName := *runID + ".lua" luaScriptLocalPath := filepath.Join(os.TempDir(), luaScriptName) remoteDir := filepath.Join(util.LuaRunsDir, *runID) luaScriptRemotePath := filepath.Join(remoteDir, "scripts", luaScriptName) respBody, err := gs.GetRemoteFileContents(luaScriptRemotePath) if err != nil { glog.Errorf("Could not fetch %s: %s", luaScriptRemotePath, err) return } defer skutil.Close(respBody) out, err := os.Create(luaScriptLocalPath) if err != nil { glog.Errorf("Unable to create file %s: %s", luaScriptLocalPath, err) return } defer skutil.Close(out) defer skutil.Remove(luaScriptLocalPath) if _, err = io.Copy(out, respBody); err != nil { glog.Error(err) return } // Run lua_pictures and save stdout and stderr in files. stdoutFileName := *runID + ".output" stdoutFilePath := filepath.Join(os.TempDir(), stdoutFileName) stdoutFile, err := os.Create(stdoutFilePath) defer skutil.Close(stdoutFile) defer skutil.Remove(stdoutFilePath) if err != nil { glog.Errorf("Could not create %s: %s", stdoutFilePath, err) return } stderrFileName := *runID + ".err" stderrFilePath := filepath.Join(os.TempDir(), stderrFileName) stderrFile, err := os.Create(stderrFilePath) defer skutil.Close(stderrFile) defer skutil.Remove(stderrFilePath) if err != nil { glog.Errorf("Could not create %s: %s", stderrFilePath, err) return } args := []string{ "--skpPath", localSkpsDir, "--luaFile", luaScriptLocalPath, } err = util.ExecuteCmd( filepath.Join(util.SkiaTreeDir, "out", "Release", util.BINARY_LUA_PICTURES), args, []string{}, util.LUA_PICTURES_TIMEOUT, stdoutFile, stderrFile) if err != nil { glog.Error(err) return } // Copy stdout and stderr files to Google Storage. skutil.LogErr( gs.UploadFile(stdoutFileName, os.TempDir(), filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum), "outputs"))) skutil.LogErr( gs.UploadFile(stderrFileName, os.TempDir(), filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum), "errors"))) }
func main() {
	common.Init()
	defer util.CleanTmpDir()
	defer util.TimeTrack(time.Now(), "Running Chromium Perf")
	defer glog.Flush()

	// Validate required arguments.
	if *chromiumBuildNoPatch == "" {
		glog.Error("Must specify --chromium_build_nopatch")
		return
	}
	if *chromiumBuildWithPatch == "" {
		glog.Error("Must specify --chromium_build_withpatch")
		return
	}
	if *runIDNoPatch == "" {
		glog.Error("Must specify --run_id_nopatch")
		return
	}
	if *runIDWithPatch == "" {
		glog.Error("Must specify --run_id_withpatch")
		return
	}
	if *benchmarkName == "" {
		glog.Error("Must specify --benchmark_name")
		return
	}

	// Reset the local chromium checkout.
	if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil {
		glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err)
		return
	}
	// Sync the local chromium checkout.
	if err := util.SyncDir(util.ChromiumSrcDir); err != nil {
		glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir, err)
		return
	}

	// Create the task file so that the master knows this worker is still busy.
	skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_RUNNING_CHROMIUM_PERF))
	defer util.DeleteTaskFile(util.ACTIVITY_RUNNING_CHROMIUM_PERF)

	if *targetPlatform == util.PLATFORM_ANDROID {
		if err := adb.VerifyLocalDevice(); err != nil {
			// Android device missing or offline.
			glog.Errorf("Could not find Android device: %s", err)
			return
		}
		// Make sure adb shell is running as root.
		skutil.LogErr(
			util.ExecuteCmd(util.BINARY_ADB, []string{"root"}, []string{}, 5*time.Minute, nil, nil))
	}

	// Instantiate GsUtil object.
	gs, err := util.NewGsUtil(nil)
	if err != nil {
		glog.Error(err)
		return
	}

	// Download the specified chromium builds.
	for _, chromiumBuild := range []string{*chromiumBuildNoPatch, *chromiumBuildWithPatch} {
		if err := gs.DownloadChromiumBuild(chromiumBuild); err != nil {
			glog.Error(err)
			return
		}
		// Delete the chromium build to save space when we are done.
		defer skutil.RemoveAll(filepath.Join(util.ChromiumBuildsDir, chromiumBuild))
	}
	chromiumBinaryNoPatch := filepath.Join(util.ChromiumBuildsDir, *chromiumBuildNoPatch, util.BINARY_CHROME)
	chromiumBinaryWithPatch := filepath.Join(util.ChromiumBuildsDir, *chromiumBuildWithPatch, util.BINARY_CHROME)

	// Download pagesets if they do not exist locally.
	if err := gs.DownloadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil {
		glog.Error(err)
		return
	}
	pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType)

	// Download archives if they do not exist locally.
	if err := gs.DownloadWorkerArtifacts(util.WEB_ARCHIVES_DIR_NAME, *pagesetType, *workerNum); err != nil {
		glog.Error(err)
		return
	}

	// Establish nopatch output paths.
	localOutputDirNoPatch := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, *runIDNoPatch)
	skutil.RemoveAll(localOutputDirNoPatch)
	skutil.MkdirAll(localOutputDirNoPatch, 0700)
	defer skutil.RemoveAll(localOutputDirNoPatch)
	remoteDirNoPatch := filepath.Join(util.BenchmarkRunsDir, *runIDNoPatch)

	// Establish withpatch output paths.
	localOutputDirWithPatch := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, *runIDWithPatch)
	skutil.RemoveAll(localOutputDirWithPatch)
	skutil.MkdirAll(localOutputDirWithPatch, 0700)
	defer skutil.RemoveAll(localOutputDirWithPatch)
	remoteDirWithPatch := filepath.Join(util.BenchmarkRunsDir, *runIDWithPatch)

	// Construct path to the ct_run_benchmark python script.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")

	fileInfos, err := ioutil.ReadDir(pathToPagesets)
	if err != nil {
		glog.Errorf("Unable to read the pagesets dir %s: %s", pathToPagesets, err)
		return
	}

	// Loop through all pagesets and run once without the patch and once with
	// the patch.
	for _, fileInfo := range fileInfos {
		if err := runBenchmark(fileInfo.Name(), pathToPagesets, pathToPyFiles, localOutputDirNoPatch, *chromiumBuildNoPatch, chromiumBinaryNoPatch, *runIDNoPatch, *browserExtraArgsNoPatch); err != nil {
			glog.Errorf("Error while running nopatch benchmark: %s", err)
			return
		}
		if err := runBenchmark(fileInfo.Name(), pathToPagesets, pathToPyFiles, localOutputDirWithPatch, *chromiumBuildWithPatch, chromiumBinaryWithPatch, *runIDWithPatch, *browserExtraArgsWithPatch); err != nil {
			glog.Errorf("Error while running withpatch benchmark: %s", err)
			return
		}
	}

	// If "--output-format=csv-pivot-table" was specified then merge all CSV
	// files and upload.
	if strings.Contains(*benchmarkExtraArgs, "--output-format=csv-pivot-table") {
		if err := mergeUploadCSVFiles(localOutputDirNoPatch, pathToPyFiles, *runIDNoPatch, remoteDirNoPatch, gs); err != nil {
			glog.Errorf("Error while processing nopatch CSV files: %s", err)
			return
		}
		if err := mergeUploadCSVFiles(localOutputDirWithPatch, pathToPyFiles, *runIDWithPatch, remoteDirWithPatch, gs); err != nil {
			glog.Errorf("Error while processing withpatch CSV files: %s", err)
			return
		}
	}
}
func main() {
	common.Init()
	webhook.MustInitRequestSaltFromFile(util.WebhookRequestSaltPath)

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Skia correctness"))
	// Ensure webapp is updated and email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running skia correctness task on workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}

	// Instantiate GsUtil object.
	gs, err := util.NewGsUtil(nil)
	if err != nil {
		glog.Error(err)
		return
	}

	remoteOutputDir := filepath.Join(util.SkiaCorrectnessRunsDir, *runID)

	// Copy the patch to Google Storage.
	patchName := *runID + ".patch"
	patchRemoteDir := filepath.Join(remoteOutputDir, "patches")
	if err := gs.UploadFile(patchName, os.TempDir(), patchRemoteDir); err != nil {
		glog.Errorf("Could not upload %s to %s: %s", patchName, patchRemoteDir, err)
		return
	}
	skiaPatchLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, patchRemoteDir, patchName)

	// Run the run_skia_correctness script on all workers.
	runSkiaCorrCmdTemplate := "DISPLAY=:0 run_skia_correctness --worker_num={{.WorkerNum}} --log_dir={{.LogDir}} " +
		"--pageset_type={{.PagesetType}} --chromium_build={{.ChromiumBuild}} --run_id={{.RunID}} " +
		"--render_pictures_args=\"{{.RenderPicturesArgs}}\" --gpu_nopatch_run={{.GpuNoPatchRun}} " +
		"--gpu_withpatch_run={{.GpuWithPatchRun}};"
	runSkiaCorrTemplateParsed := template.Must(template.New("run_skia_correctness_cmd").Parse(runSkiaCorrCmdTemplate))
	runSkiaCorrCmdBytes := new(bytes.Buffer)
	if err := runSkiaCorrTemplateParsed.Execute(runSkiaCorrCmdBytes, struct {
		WorkerNum          string
		LogDir             string
		PagesetType        string
		ChromiumBuild      string
		RunID              string
		RenderPicturesArgs string
		GpuNoPatchRun      string
		GpuWithPatchRun    string
	}{
		WorkerNum:          util.WORKER_NUM_KEYWORD,
		LogDir:             util.GLogDir,
		PagesetType:        *pagesetType,
		ChromiumBuild:      *chromiumBuild,
		RunID:              *runID,
		RenderPicturesArgs: *renderPicturesArgs,
		GpuNoPatchRun:      strconv.FormatBool(*gpuNoPatchRun),
		GpuWithPatchRun:    strconv.FormatBool(*gpuWithPatchRun),
	}); err != nil {
		glog.Errorf("Failed to execute template: %s", err)
		return
	}
	cmd := []string{
		fmt.Sprintf("cd %s;", util.CtTreeDir),
		"git pull;",
		"make all;",
		// The main command that runs run_skia_correctness on all workers.
		runSkiaCorrCmdBytes.String(),
	}
	if _, err := util.SSH(strings.Join(cmd, " "), util.Slaves, 4*time.Hour); err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}

	localOutputDir := filepath.Join(util.StorageDir, util.SkiaCorrectnessRunsDir, *runID)
	localSummariesDir := filepath.Join(localOutputDir, "summaries")
	skutil.MkdirAll(localSummariesDir, 0700)
	defer skutil.RemoveAll(filepath.Join(util.StorageDir, util.SkiaCorrectnessRunsDir))

	// Copy outputs from all slaves locally.
	for i := 0; i < util.NUM_WORKERS; i++ {
		workerNum := i + 1
		workerLocalOutputPath := filepath.Join(localSummariesDir, fmt.Sprintf("slave%d", workerNum)+".json")
		workerRemoteOutputPath := filepath.Join(remoteOutputDir, fmt.Sprintf("slave%d", workerNum), fmt.Sprintf("slave%d", workerNum)+".json")
		respBody, err := gs.GetRemoteFileContents(workerRemoteOutputPath)
		if err != nil {
			glog.Errorf("Could not fetch %s: %s", workerRemoteOutputPath, err)
			// TODO(rmistry): Should we instead return here? We can only return
			// here if all 100 slaves reliably run without any failures which
			// they really should.
			continue
		}
		defer skutil.Close(respBody)
		out, err := os.Create(workerLocalOutputPath)
		if err != nil {
			glog.Errorf("Unable to create file %s: %s", workerLocalOutputPath, err)
			return
		}
		defer skutil.Close(out)
		defer skutil.Remove(workerLocalOutputPath)
		if _, err = io.Copy(out, respBody); err != nil {
			glog.Errorf("Unable to copy to file %s: %s", workerLocalOutputPath, err)
			return
		}
	}

	// Call json_summary_combiner.py to merge all the slave JSON summaries into
	// a single HTML report.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")
	pathToJsonCombiner := filepath.Join(pathToPyFiles, "json_summary_combiner.py")
	localHtmlDir := filepath.Join(localOutputDir, "html")
	remoteHtmlDir := filepath.Join(remoteOutputDir, "html")
	baseHtmlLink := util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, remoteHtmlDir) + "/"
	htmlOutputLink = baseHtmlLink + "index.html"
	skutil.MkdirAll(localHtmlDir, 0700)
	args := []string{
		pathToJsonCombiner,
		"--json_summaries_dir=" + localSummariesDir,
		"--output_html_dir=" + localHtmlDir,
		"--absolute_url=" + baseHtmlLink,
		"--render_pictures_args=" + *renderPicturesArgs,
		"--nopatch_gpu=" + strconv.FormatBool(*gpuNoPatchRun),
		"--withpatch_gpu=" + strconv.FormatBool(*gpuWithPatchRun),
	}
	if err := util.ExecuteCmd("python", args, []string{}, 1*time.Hour, nil, nil); err != nil {
		glog.Errorf("Error running json_summary_combiner.py: %s", err)
		return
	}

	// Copy the HTML files to Google Storage.
	if err := gs.UploadDir(localHtmlDir, remoteHtmlDir, true); err != nil {
		glog.Errorf("Could not upload %s to %s: %s", localHtmlDir, remoteHtmlDir, err)
		return
	}

	taskCompletedSuccessfully = true
}
func main() {
	common.Init()
	webhook.MustInitRequestSaltFromFile(util.WebhookRequestSaltPath)

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&frontend.ChromiumPerfUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Chromium perf"))
	// Ensure webapp is updated and email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup dirs after run completes.
	defer skutil.RemoveAll(filepath.Join(util.StorageDir, util.ChromiumPerfRunsDir))
	defer skutil.RemoveAll(filepath.Join(util.StorageDir, util.BenchmarkRunsDir))
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running chromium perf task on workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *benchmarkName == "" {
		glog.Error("Must specify --benchmark_name")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}

	// Instantiate GsUtil object.
	gs, err := util.NewGsUtil(nil)
	if err != nil {
		glog.Errorf("Could not instantiate gsutil object: %s", err)
		return
	}

	remoteOutputDir := filepath.Join(util.ChromiumPerfRunsDir, *runID)

	// Copy the patches to Google Storage.
	skiaPatchName := *runID + ".skia.patch"
	blinkPatchName := *runID + ".blink.patch"
	chromiumPatchName := *runID + ".chromium.patch"
	for _, patchName := range []string{skiaPatchName, blinkPatchName, chromiumPatchName} {
		if err := gs.UploadFile(patchName, os.TempDir(), remoteOutputDir); err != nil {
			glog.Errorf("Could not upload %s to %s: %s", patchName, remoteOutputDir, err)
			return
		}
	}
	skiaPatchLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, remoteOutputDir, skiaPatchName)
	blinkPatchLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, remoteOutputDir, blinkPatchName)
	chromiumPatchLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, remoteOutputDir, chromiumPatchName)

	// Create the two required chromium builds (with patch and without the patch).
	chromiumHash, skiaHash, err := util.CreateChromiumBuild(*runID, *targetPlatform, "", "", true)
	if err != nil {
		glog.Errorf("Could not create chromium build: %s", err)
		return
	}

	// Reboot all workers to start from a clean slate.
	util.RebootWorkers()

	// Run the run_chromium_perf script on all workers.
	runIDNoPatch := *runID + "-nopatch"
	runIDWithPatch := *runID + "-withpatch"
	chromiumBuildNoPatch := fmt.Sprintf("try-%s-%s-%s", chromiumHash, skiaHash, runIDNoPatch)
	chromiumBuildWithPatch := fmt.Sprintf("try-%s-%s-%s", chromiumHash, skiaHash, runIDWithPatch)
	runChromiumPerfCmdTemplate := "DISPLAY=:0 run_chromium_perf " +
		"--worker_num={{.WorkerNum}} --log_dir={{.LogDir}} --pageset_type={{.PagesetType}} " +
		"--chromium_build_nopatch={{.ChromiumBuildNoPatch}} --chromium_build_withpatch={{.ChromiumBuildWithPatch}} " +
		"--run_id_nopatch={{.RunIDNoPatch}} --run_id_withpatch={{.RunIDWithPatch}} " +
		"--benchmark_name={{.BenchmarkName}} --benchmark_extra_args=\"{{.BenchmarkExtraArgs}}\" " +
		"--browser_extra_args_nopatch=\"{{.BrowserExtraArgsNoPatch}}\" --browser_extra_args_withpatch=\"{{.BrowserExtraArgsWithPatch}}\" " +
		"--repeat_benchmark={{.RepeatBenchmark}} --target_platform={{.TargetPlatform}};"
	runChromiumPerfTemplateParsed := template.Must(template.New("run_chromium_perf_cmd").Parse(runChromiumPerfCmdTemplate))
	runChromiumPerfCmdBytes := new(bytes.Buffer)
	if err := runChromiumPerfTemplateParsed.Execute(runChromiumPerfCmdBytes, struct {
		WorkerNum                 string
		LogDir                    string
		PagesetType               string
		ChromiumBuildNoPatch      string
		ChromiumBuildWithPatch    string
		RunIDNoPatch              string
		RunIDWithPatch            string
		BenchmarkName             string
		BenchmarkExtraArgs        string
		BrowserExtraArgsNoPatch   string
		BrowserExtraArgsWithPatch string
		RepeatBenchmark           int
		TargetPlatform            string
	}{
		WorkerNum:                 util.WORKER_NUM_KEYWORD,
		LogDir:                    util.GLogDir,
		PagesetType:               *pagesetType,
		ChromiumBuildNoPatch:      chromiumBuildNoPatch,
		ChromiumBuildWithPatch:    chromiumBuildWithPatch,
		RunIDNoPatch:              runIDNoPatch,
		RunIDWithPatch:            runIDWithPatch,
		BenchmarkName:             *benchmarkName,
		BenchmarkExtraArgs:        *benchmarkExtraArgs,
		BrowserExtraArgsNoPatch:   *browserExtraArgsNoPatch,
		BrowserExtraArgsWithPatch: *browserExtraArgsWithPatch,
		RepeatBenchmark:           *repeatBenchmark,
		TargetPlatform:            *targetPlatform,
	}); err != nil {
		glog.Errorf("Failed to execute template: %s", err)
		return
	}
	cmd := []string{
		fmt.Sprintf("cd %s;", util.CtTreeDir),
		"git pull;",
		"make all;",
		// The main command that runs run_chromium_perf on all workers.
		runChromiumPerfCmdBytes.String(),
	}
	// Setting a 1 day timeout since it may take a while to run benchmarks
	// with many repeats.
	if _, err := util.SSH(strings.Join(cmd, " "), util.Slaves, 1*24*time.Hour); err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}

	// If "--output-format=csv-pivot-table" was specified then merge all CSV
	// files and upload.
	if strings.Contains(*benchmarkExtraArgs, "--output-format=csv-pivot-table") {
		for _, runID := range []string{runIDNoPatch, runIDWithPatch} {
			if err := mergeUploadCSVFiles(runID, gs); err != nil {
				glog.Errorf("Unable to merge and upload CSV files for %s: %s", runID, err)
			}
		}
	}

	// Compare the resultant CSV files using csv_comparer.py.
	noPatchCSVPath := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runIDNoPatch, runIDNoPatch+".output")
	withPatchCSVPath := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runIDWithPatch, runIDWithPatch+".output")
	htmlOutputDir := filepath.Join(util.StorageDir, util.ChromiumPerfRunsDir, *runID, "html")
	skutil.MkdirAll(htmlOutputDir, 0700)
	htmlRemoteDir := filepath.Join(remoteOutputDir, "html")
	htmlOutputLinkBase := util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, htmlRemoteDir) + "/"
	htmlOutputLink = htmlOutputLinkBase + "index.html"
	noPatchOutputLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, util.BenchmarkRunsDir, runIDNoPatch, "consolidated_outputs", runIDNoPatch+".output")
	withPatchOutputLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, util.BenchmarkRunsDir, runIDWithPatch, "consolidated_outputs", runIDWithPatch+".output")

	// Construct path to the csv_comparer python script.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")
	pathToCsvComparer := filepath.Join(pathToPyFiles, "csv_comparer.py")
	args := []string{
		pathToCsvComparer,
		"--csv_file1=" + noPatchCSVPath,
		"--csv_file2=" + withPatchCSVPath,
		"--output_html=" + htmlOutputDir,
		"--variance_threshold=" + strconv.FormatFloat(*varianceThreshold, 'f', 2, 64),
		"--discard_outliers=" + strconv.FormatFloat(*discardOutliers, 'f', 2, 64),
		"--absolute_url=" + htmlOutputLinkBase,
		"--requester_email=" + *emails,
		"--skia_patch_link=" + skiaPatchLink,
		"--blink_patch_link=" + blinkPatchLink,
		"--chromium_patch_link=" + chromiumPatchLink,
		"--raw_csv_nopatch=" + noPatchOutputLink,
		"--raw_csv_withpatch=" + withPatchOutputLink,
		"--num_repeated=" + strconv.Itoa(*repeatBenchmark),
		"--target_platform=" + *targetPlatform,
		"--browser_args_nopatch=" + *browserExtraArgsNoPatch,
		"--browser_args_withpatch=" + *browserExtraArgsWithPatch,
		"--pageset_type=" + *pagesetType,
		"--chromium_hash=" + chromiumHash,
		"--skia_hash=" + skiaHash,
	}
	if err := util.ExecuteCmd("python", args, []string{}, 2*time.Hour, nil, nil); err != nil {
		glog.Errorf("Error running csv_comparer.py: %s", err)
		return
	}

	// Copy the HTML files to Google Storage.
	if err := gs.UploadDir(htmlOutputDir, htmlRemoteDir, true); err != nil {
		glog.Errorf("Could not upload %s to %s: %s", htmlOutputDir, htmlRemoteDir, err)
		return
	}

	taskCompletedSuccessfully = true
}
func main() {
	defer common.LogPanic()
	worker_common.Init()
	defer util.TimeTrack(time.Now(), "Fixing archives")
	defer glog.Flush()

	// Create the task file so that the master knows this worker is still busy.
	skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_FIXING_ARCHIVES))
	defer util.DeleteTaskFile(util.ACTIVITY_FIXING_ARCHIVES)

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}

	// Reset the local chromium checkout.
	if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil {
		glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err)
		return
	}
	// Sync the local chromium checkout.
	if err := util.SyncDir(util.ChromiumSrcDir); err != nil {
		glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir, err)
		return
	}

	// Instantiate GsUtil object.
	gs, err := util.NewGsUtil(nil)
	if err != nil {
		glog.Error(err)
		return
	}

	// Download the specified chromium build.
	if err := gs.DownloadChromiumBuild(*chromiumBuild); err != nil {
		glog.Error(err)
		return
	}
	// Delete the chromium build to save space when we are done.
	defer skutil.RemoveAll(filepath.Join(util.ChromiumBuildsDir, *chromiumBuild))
	chromiumBinary := filepath.Join(util.ChromiumBuildsDir, *chromiumBuild, util.BINARY_CHROME)

	// Download pagesets if they do not exist locally.
	if err := gs.DownloadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil {
		glog.Error(err)
		return
	}
	pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType)

	// Download archives if they do not exist locally.
	if err := gs.DownloadWorkerArtifacts(util.WEB_ARCHIVES_DIR_NAME, *pagesetType, *workerNum); err != nil {
		glog.Error(err)
		return
	}

	// Establish output paths.
	localOutputDir := filepath.Join(util.StorageDir, util.FixArchivesRunsDir, *runID)
	skutil.RemoveAll(localOutputDir)
	skutil.MkdirAll(localOutputDir, 0700)
	defer skutil.RemoveAll(localOutputDir)

	// Construct path to the ct_run_benchmark python script.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")

	timeoutSecs := util.PagesetTypeToInfo[*pagesetType].RunChromiumPerfTimeoutSecs
	fileInfos, err := ioutil.ReadDir(pathToPagesets)
	if err != nil {
		glog.Errorf("Unable to read the pagesets dir %s: %s", pathToPagesets, err)
		return
	}

	// Location of the WPR logs.
	wprLogs := filepath.Join(util.ChromiumSrcDir, "webpagereplay_logs", "logs.txt")
	// Slice that will contain the names of all archives found to be inconsistent.
	inconsistentArchives := []string{}

	// Loop through all pagesets.
	for _, fileInfo := range fileInfos {
		benchmarkResults := []float64{}
		resourceMissingCounts := []int{}
		pagesetBaseName := filepath.Base(fileInfo.Name())
		if pagesetBaseName == util.TIMESTAMP_FILE_NAME || filepath.Ext(pagesetBaseName) == ".pyc" {
			// Ignore timestamp files and .pyc files.
			continue
		}

		// Convert the filename into a format consumable by the run_benchmarks
		// binary.
		pagesetName := strings.TrimSuffix(pagesetBaseName, filepath.Ext(pagesetBaseName))
		pagesetPath := filepath.Join(pathToPagesets, fileInfo.Name())

		glog.Infof("===== Processing %s =====", pagesetPath)

		// Repeat runs the specified number of times.
		for repeatNum := 1; repeatNum <= *repeatBenchmark; repeatNum++ {
			// Delete webpagereplay_logs before every run.
			skutil.RemoveAll(wprLogs)

			skutil.LogErr(os.Chdir(pathToPyFiles))
			args := []string{
				util.BINARY_RUN_BENCHMARK,
				fmt.Sprintf("%s.%s", *benchmarkName, util.BenchmarksToTelemetryName[*benchmarkName]),
				"--page-set-name=" + pagesetName,
				"--page-set-base-dir=" + pathToPagesets,
				"--also-run-disabled-tests",
			}
			// Add output dir.
			outputDirArgValue := filepath.Join(localOutputDir, pagesetName, strconv.Itoa(repeatNum))
			args = append(args, "--output-dir="+outputDirArgValue)
			// Figure out which browser should be used.
			args = append(args, "--browser=exact", "--browser-executable="+chromiumBinary)
			// Split benchmark args if not empty and append to args.
			if *benchmarkArgs != "" {
				args = append(args, strings.Split(*benchmarkArgs, " ")...)
			}
			// Add browserArgs if not empty to args.
			if *browserArgs != "" {
				args = append(args, "--extra-browser-args="+*browserArgs)
			}
			// Set the PYTHONPATH to the pagesets and the telemetry dirs.
			env := []string{
				fmt.Sprintf("PYTHONPATH=%s:%s:%s:$PYTHONPATH", pathToPagesets, util.TelemetryBinariesDir, util.TelemetrySrcDir),
				"DISPLAY=:0",
			}
			skutil.LogErr(
				util.ExecuteCmd("python", args, env, time.Duration(timeoutSecs)*time.Second, nil, nil))

			// Examine the results-pivot-table.csv file and store the mean frame time.
			resultsCSV := filepath.Join(outputDirArgValue, "results-pivot-table.csv")
			// TODO(rmistry): The format has changed from results.csv to results-pivot-table.csv
			headers, values, err := getRowsFromCSV(resultsCSV)
			if err != nil {
				glog.Errorf("Could not read %s: %s", resultsCSV, err)
				continue
			}
			for i := range headers {
				if headers[i] == *benchmarkHeaderToCheck {
					value, _ := strconv.ParseFloat(values[i], 64)
					benchmarkResults = append(benchmarkResults, value)
					break
				}
			}

			// Find how many times "Could not replay" showed up in wprLogs.
			content, err := ioutil.ReadFile(wprLogs)
			if err != nil {
				glog.Errorf("Could not read %s: %s", wprLogs, err)
				continue
			}
			resourceMissingCount := strings.Count(string(content), "Could not replay")
			resourceMissingCounts = append(resourceMissingCounts, resourceMissingCount)
		}

		glog.Infof("Benchmark results for %s are: %v", fileInfo.Name(), benchmarkResults)
		percentageChange := getPercentageChange(benchmarkResults)
		glog.Infof("Percentage change of results is: %f", percentageChange)
		glog.Infof("\"Could not replay\" showed up %v times in %s", resourceMissingCounts, wprLogs)
		maxResourceMissingCount := 0
		for _, count := range resourceMissingCounts {
			if maxResourceMissingCount < count {
				maxResourceMissingCount = count
			}
		}
		if percentageChange > *percentageChangeThreshold || maxResourceMissingCount > *resourceMissingCountThreshold {
			glog.Infof("The archive for %s is inconsistent!", fileInfo.Name())
			inconsistentArchives = append(inconsistentArchives, fmt.Sprintf("%s percentageChange: %f maxResourceMissingCount: %v", fileInfo.Name(), percentageChange, maxResourceMissingCount))
			if *deletePageset {
				// Delete the pageset.
				skutil.RemoveAll(pagesetPath)
			}
		}
	}

	if len(inconsistentArchives) > 0 {
		glog.Infof("%d archives are inconsistent!", len(inconsistentArchives))
		glog.Infof("The list of inconsistentArchives is: %v", inconsistentArchives)
		if *deletePageset {
			// Write new timestamp to the pagesets dir.
			skutil.RemoveAll(filepath.Join(pathToPagesets, util.TIMESTAMP_FILE_NAME))
			skutil.LogErr(util.CreateTimestampFile(pathToPagesets))
			// Inconsistent pagesets were deleted locally. Upload local
			// pagesets dir to Google Storage.
			if err := gs.UploadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil {
				glog.Error(err)
				return
			}
		}
	}
}
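// NOTE: getPercentageChange is called above but not defined in this excerpt.
// A plausible sketch, assuming it reports the spread between the smallest and
// largest benchmark results as a percentage of the smallest:
func getPercentageChangeSketch(values []float64) float64 {
	if len(values) == 0 {
		return 0
	}
	min, max := values[0], values[0]
	for _, v := range values[1:] {
		if v < min {
			min = v
		}
		if v > max {
			max = v
		}
	}
	if min == 0 {
		return 0
	}
	return (max - min) / min * 100
}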
func runBenchmark(fileInfoName, pathToPagesets, pathToPyFiles, localOutputDir, chromiumBuildName, chromiumBinary, runID, browserExtraArgs string) error {
	pagesetBaseName := filepath.Base(fileInfoName)
	if pagesetBaseName == util.TIMESTAMP_FILE_NAME || filepath.Ext(pagesetBaseName) == ".pyc" {
		// Ignore timestamp files and .pyc files.
		return nil
	}

	// Read the pageset.
	pagesetName := strings.TrimSuffix(pagesetBaseName, filepath.Ext(pagesetBaseName))
	pagesetPath := filepath.Join(pathToPagesets, fileInfoName)
	decodedPageset, err := util.ReadPageset(pagesetPath)
	if err != nil {
		return fmt.Errorf("Could not read %s: %s", pagesetPath, err)
	}

	glog.Infof("===== Processing %s for %s =====", pagesetPath, runID)

	benchmark, present := util.BenchmarksToTelemetryName[*benchmarkName]
	if !present {
		// If it is a custom benchmark, use the entered benchmark name.
		benchmark = *benchmarkName
	}
	args := []string{
		filepath.Join(util.TelemetryBinariesDir, util.BINARY_RUN_BENCHMARK),
		benchmark,
		"--also-run-disabled-tests",
		"--user-agent=" + decodedPageset.UserAgent,
		"--urls-list=" + decodedPageset.UrlsList,
		"--archive-data-file=" + decodedPageset.ArchiveDataFile,
	}

	// Need to capture output for all benchmarks.
	outputDirArgValue := filepath.Join(localOutputDir, pagesetName)
	args = append(args, "--output-dir="+outputDirArgValue)

	// Figure out which browser should be used.
	if *targetPlatform == util.PLATFORM_ANDROID {
		if err := util.InstallChromeAPK(chromiumBuildName); err != nil {
			return fmt.Errorf("Error while installing APK: %s", err)
		}
		args = append(args, "--browser=android-chromium")
	} else {
		args = append(args, "--browser=exact", "--browser-executable="+chromiumBinary)
	}

	// Split benchmark args if not empty and append to args.
	if *benchmarkExtraArgs != "" {
		args = append(args, strings.Split(*benchmarkExtraArgs, " ")...)
	}

	// Add the number of times to repeat.
	args = append(args, fmt.Sprintf("--page-repeat=%d", *repeatBenchmark))

	// Add browserArgs if not empty to args.
	if browserExtraArgs != "" {
		args = append(args, "--extra-browser-args="+browserExtraArgs)
	}

	// Set the PYTHONPATH to the pagesets and the telemetry dirs.
	env := []string{
		fmt.Sprintf("PYTHONPATH=%s:%s:%s:$PYTHONPATH", pathToPagesets, util.TelemetryBinariesDir, util.TelemetrySrcDir),
		"DISPLAY=:0",
	}

	timeoutSecs := util.PagesetTypeToInfo[*pagesetType].RunChromiumPerfTimeoutSecs
	if err := util.ExecuteCmd("python", args, env, time.Duration(timeoutSecs)*time.Second, nil, nil); err != nil {
		glog.Errorf("Run benchmark command failed with: %s", err)
	}
	return nil
}
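// NOTE: util.ReadPageset and the type it returns are not part of this
// excerpt. Inferred from the fields used above (UserAgent, UrlsList,
// ArchiveDataFile), a minimal sketch, assuming pagesets are stored as JSON.
// The type name and JSON keys here are assumptions for illustration only.
type pagesetSketch struct {
	UserAgent       string `json:"user_agent"`
	UrlsList        string `json:"urls_list"`
	ArchiveDataFile string `json:"archive_data_file"`
}

func readPagesetSketch(pagesetPath string) (*pagesetSketch, error) {
	b, err := ioutil.ReadFile(pagesetPath)
	if err != nil {
		return nil, err
	}
	p := &pagesetSketch{}
	if err := json.Unmarshal(b, p); err != nil {
		return nil, err
	}
	return p, nil
}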
func main() {
	common.Init()
	webhook.MustInitRequestSaltFromFile(util.WebhookRequestSaltPath)

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&frontend.ChromiumBuildUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Build chromium"))
	// Ensure webapp is updated and completion email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running build chromium")
	defer glog.Flush()

	if *chromiumHash == "" {
		glog.Error("Must specify --chromium_hash")
		return
	}
	if *skiaHash == "" {
		glog.Error("Must specify --skia_hash")
		return
	}

	if _, _, err := util.CreateChromiumBuild("", *targetPlatform, *chromiumHash, *skiaHash, *applyPatches); err != nil {
		glog.Errorf("Error while creating the Chromium build: %s", err)
		return
	}

	// Find when the requested Chromium revision was submitted.
	stdoutFileName := *runID + ".out"
	stdoutFilePath := filepath.Join(os.TempDir(), stdoutFileName)
	stdoutFile, err := os.Create(stdoutFilePath)
	if err != nil {
		glog.Errorf("Could not create %s: %s", stdoutFilePath, err)
		return
	}
	defer skutil.Close(stdoutFile)
	defer skutil.Remove(stdoutFilePath)
	var chromiumBuildDir string
	if *targetPlatform == "Android" {
		chromiumBuildDir = filepath.Join(util.ChromiumBuildsDir, "android_base")
	} else if *targetPlatform == "Linux" {
		chromiumBuildDir = filepath.Join(util.ChromiumBuildsDir, "linux_base")
	}
	if err := os.Chdir(filepath.Join(chromiumBuildDir, "src")); err != nil {
		glog.Errorf("Could not chdir to %s: %s", chromiumBuildDir, err)
		return
	}
	// Run "git log --pretty=format:%at -1".
	if err := util.ExecuteCmd(util.BINARY_GIT, []string{"log", "--pretty=format:%at", "-1"}, []string{},
		5*time.Minute, stdoutFile, nil); err != nil {
		glog.Errorf("Could not run git log cmd: %s", err)
		return
	}
	content, err := ioutil.ReadFile(stdoutFilePath)
	if err != nil {
		glog.Errorf("Could not read %s: %s", stdoutFilePath, err)
		return
	}
	chromiumBuildTimestamp = string(content)

	taskCompletedSuccessfully = true
}
func main() { common.Init() defer util.TimeTrack(time.Now(), "Capturing Archives") defer glog.Flush() // Create the task file so that the master knows this worker is still busy. skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_CAPTURING_ARCHIVES)) defer util.DeleteTaskFile(util.ACTIVITY_CAPTURING_ARCHIVES) if *chromiumBuild == "" { glog.Error("Must specify --chromium_build") return } // Reset the local chromium checkout. if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil { glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err) return } // Sync the local chromium checkout. if err := util.SyncDir(util.ChromiumSrcDir); err != nil { glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir, err) return } // Delete and remake the local webpage archives directory. pathToArchives := filepath.Join(util.WebArchivesDir, *pagesetType) skutil.RemoveAll(pathToArchives) skutil.MkdirAll(pathToArchives, 0700) // Instantiate GsUtil object. gs, err := util.NewGsUtil(nil) if err != nil { glog.Error(err) return } // Download the specified chromium build if it does not exist locally. if err := gs.DownloadChromiumBuild(*chromiumBuild); err != nil { glog.Error(err) return } // Download pagesets if they do not exist locally. if err := gs.DownloadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil { glog.Error(err) return } pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType) chromiumBinary := filepath.Join(util.ChromiumBuildsDir, *chromiumBuild, util.BINARY_CHROME) recordWprBinary := filepath.Join(util.TelemetryBinariesDir, util.BINARY_RECORD_WPR) timeoutSecs := util.PagesetTypeToInfo[*pagesetType].CaptureArchivesTimeoutSecs // Loop through all pagesets. fileInfos, err := ioutil.ReadDir(pathToPagesets) if err != nil { glog.Errorf("Unable to read the pagesets dir %s: %s", pathToPagesets, err) return } // TODO(rmistry): Remove this hack once the 1M webpage archives have been captured. glog.Infof("The length of fileInfos is: %s", len(fileInfos)) fileInfos = fileInfos[18500:20000] glog.Infof("The fileInfos are: %s", fileInfos) for _, fileInfo := range fileInfos { pagesetBaseName := filepath.Base(fileInfo.Name()) if pagesetBaseName == util.TIMESTAMP_FILE_NAME || filepath.Ext(pagesetBaseName) == ".pyc" { // Ignore timestamp files and .pyc files. continue } // Convert the filename into a format consumable by the record_wpr binary. pagesetArchiveName := strings.TrimSuffix(pagesetBaseName, filepath.Ext(pagesetBaseName)) pagesetPath := filepath.Join(pathToPagesets, fileInfo.Name()) glog.Infof("===== Processing %s =====", pagesetPath) args := []string{ "--extra-browser-args=--disable-setuid-sandbox", "--browser=exact", "--browser-executable=" + chromiumBinary, fmt.Sprintf("%s_page_set", pagesetArchiveName), "--page-set-base-dir=" + pathToPagesets, } env := []string{ fmt.Sprintf("PYTHONPATH=%s:$PYTHONPATH", pathToPagesets), "DISPLAY=:0", } skutil.LogErr(util.ExecuteCmd(recordWprBinary, args, env, time.Duration(timeoutSecs)*time.Second, nil, nil)) } // Write timestamp to the webpage archives dir. skutil.LogErr(util.CreateTimestampFile(pathToArchives)) // Upload webpage archives dir to Google Storage. if err := gs.UploadWorkerArtifacts(util.WEB_ARCHIVES_DIR_NAME, *pagesetType, *workerNum); err != nil { glog.Error(err) return } }
func main() { common.Init() defer util.TimeTrack(time.Now(), "Running Skia Correctness") defer glog.Flush() if *chromiumBuild == "" { glog.Error("Must specify --chromium_build") return } if *runID == "" { glog.Error("Must specify --run_id") return } // Create the task file so that the master knows this worker is still busy. skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_RUNNING_SKIA_CORRECTNESS)) defer util.DeleteTaskFile(util.ACTIVITY_RUNNING_SKIA_CORRECTNESS) // Establish output paths. localOutputDir := filepath.Join(util.StorageDir, util.SkiaCorrectnessRunsDir, *runID) skutil.RemoveAll(filepath.Join(util.StorageDir, util.SkiaCorrectnessRunsDir)) skutil.MkdirAll(localOutputDir, 0700) defer skutil.RemoveAll(localOutputDir) remoteOutputDir := filepath.Join(util.SkiaCorrectnessRunsDir, *runID, fmt.Sprintf("slave%d", *workerNum)) // Instantiate GsUtil object. gs, err := util.NewGsUtil(nil) if err != nil { glog.Error(err) return } // Download SKPs if they do not exist locally. if err := gs.DownloadWorkerArtifacts(util.SKPS_DIR_NAME, filepath.Join(*pagesetType, *chromiumBuild), *workerNum); err != nil { glog.Error(err) return } localSkpsDir := filepath.Join(util.SkpsDir, *pagesetType, *chromiumBuild) // Download the Skia patch for this run from Google storage. patchName := *runID + ".patch" patchLocalPath := filepath.Join(os.TempDir(), patchName) remoteDir := filepath.Join(util.SkiaCorrectnessRunsDir, *runID) patchRemotePath := filepath.Join(remoteDir, "patches", patchName) respBody, err := gs.GetRemoteFileContents(patchRemotePath) if err != nil { glog.Errorf("Could not fetch %s: %s", patchRemotePath, err) return } defer skutil.Close(respBody) out, err := os.Create(patchLocalPath) if err != nil { glog.Errorf("Unable to create file %s: %s", patchLocalPath, err) return } defer skutil.Close(out) defer skutil.Remove(patchLocalPath) if _, err = io.Copy(out, respBody); err != nil { glog.Error(err) return } // Apply the patch to a clean checkout and run render_pictures. // Reset Skia tree. skutil.LogErr(util.ResetCheckout(util.SkiaTreeDir)) // Sync Skia tree. skutil.LogErr(util.SyncDir(util.SkiaTreeDir)) // Apply Skia patch. file, _ := os.Open(patchLocalPath) fileInfo, _ := file.Stat() // It is a valid patch only if it is more than 10 bytes. if fileInfo.Size() > 10 { glog.Info("Attempting to apply %s to %s", patchLocalPath, util.SkiaTreeDir) if err := util.ApplyPatch(patchLocalPath, util.SkiaTreeDir); err != nil { glog.Errorf("Could not apply patch %s to %s: %s", patchLocalPath, util.SkiaTreeDir, err) return } glog.Info("Patch successfully applied") } else { glog.Info("Patch is empty or invalid. Skipping the patch.") } // Build tools. skutil.LogErr(util.BuildSkiaTools()) // Run render_pictures. if err := runRenderPictures(localSkpsDir, filepath.Join(localOutputDir, "withpatch"), filepath.Join(remoteOutputDir, "withpatch"), *gpuWithPatchRun); err != nil { glog.Errorf("Error while running withpatch render_pictures: %s", err) return } // Remove the patch and run render_pictures. // Reset Skia tree. skutil.LogErr(util.ResetCheckout(util.SkiaTreeDir)) // Build tools. skutil.LogErr(util.BuildSkiaTools()) // Run render_pictures. if err := runRenderPictures(localSkpsDir, filepath.Join(localOutputDir, "nopatch"), filepath.Join(remoteOutputDir, "nopatch"), *gpuNoPatchRun); err != nil { glog.Errorf("Error while running nopatch render_pictures: %s", err) return } // Comparing pictures and saving differences in JSON output file. 
jsonSummaryDir := filepath.Join(localOutputDir, "json_summary") skutil.MkdirAll(jsonSummaryDir, 0700) jsonSummaryPath := filepath.Join(jsonSummaryDir, fmt.Sprintf("slave%d", *workerNum)+".json") // Construct path to the write_json_summary python script. _, currentFile, _, _ := runtime.Caller(0) pathToPyFiles := filepath.Join( filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))), "py") summaryArgs := []string{ filepath.Join(pathToPyFiles, "write_json_summary.py"), "--img_root=" + localOutputDir, "--withpatch_json=" + filepath.Join(localOutputDir, "withpatch", "summary.json"), "--withpatch_images_base_url=file://" + filepath.Join(localOutputDir, "withpatch"), "--nopatch_json=" + filepath.Join(localOutputDir, "nopatch", "summary.json"), "--nopatch_images_base_url=file://" + filepath.Join(localOutputDir, "nopatch"), "--output_file_path=" + jsonSummaryPath, "--gs_output_dir=gs://" + filepath.Join(util.GS_BUCKET_NAME, remoteDir), "--gs_skp_dir=gs://" + filepath.Join(util.GS_BUCKET_NAME, util.SKPS_DIR_NAME, *pagesetType, *chromiumBuild, fmt.Sprintf("slave%d", *workerNum)), "--slave_num=" + strconv.Itoa(*workerNum), } if err := util.ExecuteCmd("python", summaryArgs, []string{}, 15*time.Minute, nil, nil); err != nil { glog.Error(err) return } // Upload artifacts to Google Storage. // Get list of failed file names and upload only those to Google Storage. // Read the JSON file. (A typed alternative to the interface{} assertions below is sketched after this function.) summaryFile, err := ioutil.ReadFile(jsonSummaryPath) if err != nil { glog.Errorf("Unable to read %s: %s", jsonSummaryPath, err) return } var jsontype map[string]interface{} skutil.LogErr(json.Unmarshal(summaryFile, &jsontype)) if jsontype[fmt.Sprintf("slave%d", *workerNum)] != nil { failedFiles := jsontype[fmt.Sprintf("slave%d", *workerNum)].(map[string]interface{})["failedFiles"].([]interface{}) for i := range failedFiles { failedFile := failedFiles[i].(map[string]interface{})["fileName"].(string) // TODO(rmistry): Use goroutines to do the below in parallel. skutil.LogErr( gs.UploadFile(failedFile, filepath.Join(localOutputDir, "withpatch"), filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum), "withpatch-images"))) skutil.LogErr( gs.UploadFile(failedFile, filepath.Join(localOutputDir, "nopatch"), filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum), "nopatch-images"))) } // Copy the diffs and whitediffs to Google Storage. skutil.LogErr( gs.UploadDir(filepath.Join(localOutputDir, "diffs"), filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum), "diffs"), true)) skutil.LogErr( gs.UploadDir(filepath.Join(localOutputDir, "whitediffs"), filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum), "whitediffs"), true)) } // Upload the summary file. skutil.LogErr( gs.UploadFile(fmt.Sprintf("slave%d", *workerNum)+".json", jsonSummaryDir, filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum)))) }
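// NOTE: A minimal sketch of a typed alternative to the interface{} type assertions
// above. The JSON shape (slave key -> "failedFiles" -> "fileName") is inferred from
// those assertions; the struct names are assumptions, not an existing API.
type failedFileEntry struct {
	FileName string `json:"fileName"`
}

type slaveSummary struct {
	FailedFiles []failedFileEntry `json:"failedFiles"`
}

func parseFailedFiles(data []byte, slaveKey string) ([]string, error) {
	var summary map[string]slaveSummary
	if err := json.Unmarshal(data, &summary); err != nil {
		return nil, err
	}
	// A missing slave key yields the zero value, whose nil slice ranges as empty.
	fileNames := []string{}
	for _, f := range summary[slaveKey].FailedFiles {
		fileNames = append(fileNames, f.FileName)
	}
	return fileNames, nil
}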
func main() { defer common.LogPanic() worker_common.Init() defer util.TimeTrack(time.Now(), "Capturing Archives") defer glog.Flush() // Create the task file so that the master knows this worker is still busy. skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_CAPTURING_ARCHIVES)) defer util.DeleteTaskFile(util.ACTIVITY_CAPTURING_ARCHIVES) if *chromiumBuild == "" { glog.Error("Must specify --chromium_build") return } // Reset the local chromium checkout. if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil { glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err) return } // Sync the local chromium checkout. if err := util.SyncDir(util.ChromiumSrcDir); err != nil { glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir, err) return } // Delete and remake the local webpage archives directory. pathToArchives := filepath.Join(util.WebArchivesDir, *pagesetType) skutil.RemoveAll(pathToArchives) skutil.MkdirAll(pathToArchives, 0700) // Instantiate GsUtil object. gs, err := util.NewGsUtil(nil) if err != nil { glog.Error(err) return } // Download the specified chromium build if it does not exist locally. if err := gs.DownloadChromiumBuild(*chromiumBuild); err != nil { glog.Error(err) return } // Download pagesets if they do not exist locally. if err := gs.DownloadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil { glog.Error(err) return } pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType) chromiumBinary := filepath.Join(util.ChromiumBuildsDir, *chromiumBuild, util.BINARY_CHROME) recordWprBinary := filepath.Join(util.TelemetryBinariesDir, util.BINARY_RECORD_WPR) timeoutSecs := util.PagesetTypeToInfo[*pagesetType].CaptureArchivesTimeoutSecs // Loop through all pagesets. fileInfos, err := ioutil.ReadDir(pathToPagesets) if err != nil { glog.Errorf("Unable to read the pagesets dir %s: %s", pathToPagesets, err) return } glog.Infof("The %d fileInfos are: %v", len(fileInfos), fileInfos) for _, fileInfo := range fileInfos { pagesetBaseName := filepath.Base(fileInfo.Name()) if pagesetBaseName == util.TIMESTAMP_FILE_NAME || filepath.Ext(pagesetBaseName) == ".pyc" { // Ignore timestamp files and .pyc files. continue } // Read the pageset. (A sketch of the assumed decoding is after this function.) pagesetPath := filepath.Join(pathToPagesets, fileInfo.Name()) decodedPageset, err := util.ReadPageset(pagesetPath) if err != nil { glog.Errorf("Could not read %s: %s", pagesetPath, err) return } glog.Infof("===== Processing %s =====", pagesetPath) args := []string{ util.CAPTURE_ARCHIVES_DEFAULT_CT_BENCHMARK, "--extra-browser-args=--disable-setuid-sandbox", "--browser=exact", "--browser-executable=" + chromiumBinary, "--user-agent=" + decodedPageset.UserAgent, "--urls-list=" + decodedPageset.UrlsList, "--archive-data-file=" + decodedPageset.ArchiveDataFile, } env := []string{ fmt.Sprintf("PYTHONPATH=%s:$PYTHONPATH", pathToPagesets), "DISPLAY=:0", } skutil.LogErr(util.ExecuteCmd(recordWprBinary, args, env, time.Duration(timeoutSecs)*time.Second, nil, nil)) } // Write timestamp to the webpage archives dir. skutil.LogErr(util.CreateTimestampFile(pathToArchives)) // Upload webpage archives dir to Google Storage. if err := gs.UploadWorkerArtifacts(util.WEB_ARCHIVES_DIR_NAME, *pagesetType, *workerNum); err != nil { glog.Error(err) return } }
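// NOTE: A minimal sketch of the pageset decoding assumed above. The fields
// (UserAgent, UrlsList, ArchiveDataFile) are inferred from how decodedPageset is
// used; the JSON keys and the idea that pagesets are stored as JSON files are
// assumptions, not the canonical util.ReadPageset.
type pagesetSketch struct {
	UserAgent       string `json:"user_agent"`
	UrlsList        string `json:"urls_list"`
	ArchiveDataFile string `json:"archive_data_file"`
}

func readPagesetSketch(path string) (*pagesetSketch, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	p := &pagesetSketch{}
	if err := json.Unmarshal(b, p); err != nil {
		return nil, fmt.Errorf("could not decode pageset %s: %s", path, err)
	}
	return p, nil
}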
func main() { defer common.LogPanic() worker_common.Init() if !*worker_common.Local { defer util.CleanTmpDir() } defer util.TimeTrack(time.Now(), "Running Chromium Perf") defer glog.Flush() // Validate required arguments. if *chromiumBuildNoPatch == "" { glog.Error("Must specify --chromium_build_nopatch") return } if *chromiumBuildWithPatch == "" { glog.Error("Must specify --chromium_build_withpatch") return } if *runID == "" { glog.Error("Must specify --run_id") return } if *runIDNoPatch == "" { glog.Error("Must specify --run_id_nopatch") return } if *runIDWithPatch == "" { glog.Error("Must specify --run_id_withpatch") return } if *benchmarkName == "" { glog.Error("Must specify --benchmark_name") return } // Reset the local chromium checkout. if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil { glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err) return } // Sync the local chromium checkout. if err := util.SyncDir(util.ChromiumSrcDir); err != nil { glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir, err) return } // Create the task file so that the master knows this worker is still busy. skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_RUNNING_CHROMIUM_PERF)) defer util.DeleteTaskFile(util.ACTIVITY_RUNNING_CHROMIUM_PERF) if *targetPlatform == util.PLATFORM_ANDROID { if err := adb.VerifyLocalDevice(); err != nil { // Android device missing or offline. glog.Errorf("Could not find Android device: %s", err) return } // Kill adb server to make sure we start from a clean slate. skutil.LogErr(util.ExecuteCmd(util.BINARY_ADB, []string{"kill-server"}, []string{}, util.ADB_ROOT_TIMEOUT, nil, nil)) // Make sure adb shell is running as root. skutil.LogErr(util.ExecuteCmd(util.BINARY_ADB, []string{"root"}, []string{}, util.ADB_ROOT_TIMEOUT, nil, nil)) } // Instantiate GsUtil object. gs, err := util.NewGsUtil(nil) if err != nil { glog.Error(err) return } // Download the benchmark patch for this run from Google storage. benchmarkPatchName := *runID + ".benchmark.patch" benchmarkPatchLocalPath := filepath.Join(os.TempDir(), benchmarkPatchName) remoteDir := filepath.Join(util.ChromiumPerfRunsDir, *runID) benchmarkPatchRemotePath := filepath.Join(remoteDir, benchmarkPatchName) respBody, err := gs.GetRemoteFileContents(benchmarkPatchRemotePath) if err != nil { glog.Errorf("Could not fetch %s: %s", benchmarkPatchRemotePath, err) return } defer skutil.Close(respBody) buf := new(bytes.Buffer) if _, err := buf.ReadFrom(respBody); err != nil { glog.Errorf("Could not read from %s: %s", benchmarkPatchRemotePath, err) return } if err := ioutil.WriteFile(benchmarkPatchLocalPath, buf.Bytes(), 0666); err != nil { glog.Errorf("Unable to create file %s: %s", benchmarkPatchLocalPath, err) return } defer skutil.Remove(benchmarkPatchLocalPath) // Apply benchmark patch to the local chromium checkout. It is a valid patch only if it is more than 10 bytes. if buf.Len() > 10 { if err := util.ApplyPatch(benchmarkPatchLocalPath, util.ChromiumSrcDir); err != nil { glog.Errorf("Could not apply Telemetry's patch in %s: %s", util.ChromiumSrcDir, err) return } } // Download the specified chromium builds. for _, chromiumBuild := range []string{*chromiumBuildNoPatch, *chromiumBuildWithPatch} { if err := gs.DownloadChromiumBuild(chromiumBuild); err != nil { glog.Error(err) return } // Delete the chromium build to save space when we are done. 
defer skutil.RemoveAll(filepath.Join(util.ChromiumBuildsDir, chromiumBuild)) } chromiumBinaryNoPatch := filepath.Join(util.ChromiumBuildsDir, *chromiumBuildNoPatch, util.BINARY_CHROME) chromiumBinaryWithPatch := filepath.Join(util.ChromiumBuildsDir, *chromiumBuildWithPatch, util.BINARY_CHROME) // Download pagesets if they do not exist locally. if err := gs.DownloadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil { glog.Error(err) return } pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType) // Download archives if they do not exist locally. if err := gs.DownloadWorkerArtifacts(util.WEB_ARCHIVES_DIR_NAME, *pagesetType, *workerNum); err != nil { glog.Error(err) return } // Establish nopatch output paths. localOutputDirNoPatch := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, *runIDNoPatch) skutil.RemoveAll(localOutputDirNoPatch) skutil.MkdirAll(localOutputDirNoPatch, 0700) defer skutil.RemoveAll(localOutputDirNoPatch) remoteDirNoPatch := filepath.Join(util.BenchmarkRunsDir, *runIDNoPatch) // Establish withpatch output paths. localOutputDirWithPatch := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, *runIDWithPatch) skutil.RemoveAll(localOutputDirWithPatch) skutil.MkdirAll(localOutputDirWithPatch, 0700) defer skutil.RemoveAll(localOutputDirWithPatch) remoteDirWithPatch := filepath.Join(util.BenchmarkRunsDir, *runIDWithPatch) // Construct path to the ct_run_benchmark python script. _, currentFile, _, _ := runtime.Caller(0) pathToPyFiles := filepath.Join( filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))), "py") fileInfos, err := ioutil.ReadDir(pathToPagesets) if err != nil { glog.Errorf("Unable to read the pagesets dir %s: %s", pathToPagesets, err) return } numWorkers := WORKER_POOL_SIZE if *targetPlatform == util.PLATFORM_ANDROID || !*runInParallel { // Do not run page sets in parallel if the target platform is Android. // This is because the nopatch/withpatch APK needs to be installed prior to // each run, which would interfere with parallel runs. Instead of trying // to find a complicated solution to this, it makes sense for Android to // continue to be serial because it will help guard against // crashes/flakiness/inconsistencies which are more prevalent in mobile runs. numWorkers = 1 glog.Infoln("===== Going to run the task serially =====") } else { glog.Infoln("===== Going to run the task with parallel chrome processes =====") } // Create channel that contains all pageset file names. This channel will // be consumed by the worker pool. (A sketch of this helper appears after this function.) pagesetRequests := util.GetClosedChannelOfPagesets(fileInfos) var wg sync.WaitGroup // Use a RWMutex for the chromeProcessesCleaner goroutine to communicate to // the workers (acting as "readers") when it wants to be the "writer" and // kill all zombie chrome processes. var mutex sync.RWMutex // Loop through workers in the worker pool. for i := 0; i < numWorkers; i++ { // Increment the WaitGroup counter. wg.Add(1) // Create and run a goroutine closure that runs the benchmarks. go func() { // Decrement the WaitGroup counter when the goroutine completes. 
defer wg.Done() for pagesetName := range pagesetRequests { mutex.RLock() // Release the read lock before any early return so that the cleaner goroutine's Lock is never starved by an exiting worker. if err := runBenchmark(pagesetName, pathToPagesets, pathToPyFiles, localOutputDirNoPatch, *chromiumBuildNoPatch, chromiumBinaryNoPatch, *runIDNoPatch, *browserExtraArgsNoPatch); err != nil { glog.Errorf("Error while running nopatch benchmark: %s", err) mutex.RUnlock() return } if err := runBenchmark(pagesetName, pathToPagesets, pathToPyFiles, localOutputDirWithPatch, *chromiumBuildWithPatch, chromiumBinaryWithPatch, *runIDWithPatch, *browserExtraArgsWithPatch); err != nil { glog.Errorf("Error while running withpatch benchmark: %s", err) mutex.RUnlock() return } mutex.RUnlock() } }() } if !*worker_common.Local { // Start the cleaner. go util.ChromeProcessesCleaner(&mutex, *chromeCleanerTimer) } // Wait for all spawned goroutines to complete. wg.Wait() // If "--output-format=csv-pivot-table" was specified, merge all CSV files and upload them. if strings.Contains(*benchmarkExtraArgs, "--output-format=csv-pivot-table") { if err := mergeUploadCSVFiles(localOutputDirNoPatch, pathToPyFiles, *runIDNoPatch, remoteDirNoPatch, gs); err != nil { glog.Errorf("Error while processing nopatch CSV files: %s", err) return } if err := mergeUploadCSVFiles(localOutputDirWithPatch, pathToPyFiles, *runIDWithPatch, remoteDirWithPatch, gs); err != nil { glog.Errorf("Error while processing withpatch CSV files: %s", err) return } } }
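// NOTE: A minimal sketch of the closed-channel worker feed used above.
// util.GetClosedChannelOfPagesets is assumed to behave roughly like this: buffer
// every pageset file name, then close the channel so that the workers' range loops
// drain the remaining names and terminate on their own.
func getClosedChannelOfPagesetsSketch(fileInfos []os.FileInfo) chan string {
	pagesetRequests := make(chan string, len(fileInfos))
	for _, fileInfo := range fileInfos {
		pagesetRequests <- fileInfo.Name()
	}
	// Closing does not discard the buffered names; receivers still drain them.
	close(pagesetRequests)
	return pagesetRequests
}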
func main() { common.Init() webhook.MustInitRequestSaltFromFile(util.WebhookRequestSaltPath) // Send start email. emailsArr := util.ParseEmails(*emails) emailsArr = append(emailsArr, util.CtAdmins...) if len(emailsArr) == 0 { glog.Error("At least one email address must be specified") return } skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&frontend.LuaScriptUpdateVars{}, *gaeTaskID)) skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Lua script")) // Ensure webapp is updated and email is sent even if task fails. defer updateWebappTask() defer sendEmail(emailsArr) // Cleanup tmp files after the run. defer util.CleanTmpDir() // Finish with glog flush and how long the task took. defer util.TimeTrack(time.Now(), "Running Lua script on workers") defer glog.Flush() if *pagesetType == "" { glog.Error("Must specify --pageset_type") return } if *chromiumBuild == "" { glog.Error("Must specify --chromium_build") return } if *runID == "" { glog.Error("Must specify --run_id") return } // Instantiate GsUtil object. gs, err := util.NewGsUtil(nil) if err != nil { glog.Error(err) return } // Upload the lua script for this run to Google storage. luaScriptName := *runID + ".lua" defer skutil.Remove(filepath.Join(os.TempDir(), luaScriptName)) luaScriptRemoteDir := filepath.Join(util.LuaRunsDir, *runID, "scripts") luaScriptRemoteLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, luaScriptRemoteDir, luaScriptName) if err := gs.UploadFile(luaScriptName, os.TempDir(), luaScriptRemoteDir); err != nil { glog.Errorf("Could not upload %s to %s: %s", luaScriptName, luaScriptRemoteDir, err) return } // Run the run_lua script on all workers. runLuaCmdTemplate := "DISPLAY=:0 run_lua --worker_num={{.WorkerNum}} --log_dir={{.LogDir}} --pageset_type={{.PagesetType}} --chromium_build={{.ChromiumBuild}} --run_id={{.RunID}};" runLuaTemplateParsed := template.Must(template.New("run_lua_cmd").Parse(runLuaCmdTemplate)) luaCmdBytes := new(bytes.Buffer) if err := runLuaTemplateParsed.Execute(luaCmdBytes, struct { WorkerNum string LogDir string PagesetType string ChromiumBuild string RunID string }{ WorkerNum: util.WORKER_NUM_KEYWORD, LogDir: util.GLogDir, PagesetType: *pagesetType, ChromiumBuild: *chromiumBuild, RunID: *runID, }); err != nil { glog.Errorf("Failed to execute template: %s", err) return } cmd := []string{ fmt.Sprintf("cd %s;", util.CtTreeDir), "git pull;", "make all;", // The main command that runs run_lua on all workers. luaCmdBytes.String(), } if _, err := util.SSH(strings.Join(cmd, " "), util.Slaves, 2*time.Hour); err != nil { glog.Errorf("Error while running cmd %s: %s", cmd, err) return } // Copy outputs from all slaves locally and combine them into one file. consolidatedFileName := "lua-output" consolidatedLuaOutput := filepath.Join(os.TempDir(), consolidatedFileName) if err := ioutil.WriteFile(consolidatedLuaOutput, []byte{}, 0660); err != nil { glog.Errorf("Could not create %s: %s", consolidatedLuaOutput, err) return } // Remove the consolidated file once main returns; it is uploaded below. defer skutil.Remove(consolidatedLuaOutput) for i := 0; i < util.NUM_WORKERS; i++ { workerNum := i + 1 workerRemoteOutputPath := filepath.Join(util.LuaRunsDir, *runID, fmt.Sprintf("slave%d", workerNum), "outputs", *runID+".output") respBody, err := gs.GetRemoteFileContents(workerRemoteOutputPath) if err != nil { glog.Errorf("Could not fetch %s: %s", workerRemoteOutputPath, err) // TODO(rmistry): Should we instead return here? We can only return // here if all 100 slaves reliably run without any failures, which they // really should. 
continue } // Close the handles at the end of each iteration rather than deferring inside the loop; deferred calls would otherwise pile up until main returns. out, err := os.OpenFile(consolidatedLuaOutput, os.O_RDWR|os.O_APPEND, 0660) if err != nil { skutil.Close(respBody) glog.Errorf("Unable to open file %s: %s", consolidatedLuaOutput, err) return } if _, err = io.Copy(out, respBody); err != nil { skutil.Close(out) skutil.Close(respBody) glog.Errorf("Unable to write out %s to %s: %s", workerRemoteOutputPath, consolidatedLuaOutput, err) return } skutil.Close(out) skutil.Close(respBody) } // Copy the consolidated file into Google Storage. consolidatedOutputRemoteDir := filepath.Join(util.LuaRunsDir, *runID, "consolidated_outputs") luaOutputRemoteLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, consolidatedOutputRemoteDir, consolidatedFileName) if err := gs.UploadFile(consolidatedFileName, os.TempDir(), consolidatedOutputRemoteDir); err != nil { glog.Errorf("Unable to upload %s to %s: %s", consolidatedLuaOutput, consolidatedOutputRemoteDir, err) return } // Upload the lua aggregator (if specified) for this run to Google storage. luaAggregatorName := *runID + ".aggregator" luaAggregatorPath := filepath.Join(os.TempDir(), luaAggregatorName) defer skutil.Remove(luaAggregatorPath) luaAggregatorRemoteLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, luaScriptRemoteDir, luaAggregatorName) // The stat must succeed before luaAggregatorFileInfo can be dereferenced; any stat error (not just a missing file) means there is no usable aggregator. luaAggregatorFileInfo, err := os.Stat(luaAggregatorPath) if err == nil && luaAggregatorFileInfo.Size() > 10 { if err := gs.UploadFile(luaAggregatorName, os.TempDir(), luaScriptRemoteDir); err != nil { glog.Errorf("Could not upload %s to %s: %s", luaAggregatorName, luaScriptRemoteDir, err) return } // Run the aggregator and save stdout. luaAggregatorOutputFileName := *runID + ".agg.output" luaAggregatorOutputFilePath := filepath.Join(os.TempDir(), luaAggregatorOutputFileName) luaAggregatorOutputFile, err := os.Create(luaAggregatorOutputFilePath) if err != nil { glog.Errorf("Could not create %s: %s", luaAggregatorOutputFilePath, err) return } defer skutil.Close(luaAggregatorOutputFile) defer skutil.Remove(luaAggregatorOutputFilePath) if err := util.ExecuteCmd(util.BINARY_LUA, []string{luaAggregatorPath}, []string{}, time.Hour, luaAggregatorOutputFile, nil); err != nil { glog.Errorf("Could not execute the lua aggregator %s: %s", luaAggregatorPath, err) return } // Copy the aggregator output into Google Storage. luaAggregatorOutputRemoteLink = util.GS_HTTP_LINK + filepath.Join(util.GS_BUCKET_NAME, consolidatedOutputRemoteDir, luaAggregatorOutputFileName) if err := gs.UploadFile(luaAggregatorOutputFileName, os.TempDir(), consolidatedOutputRemoteDir); err != nil { glog.Errorf("Unable to upload %s to %s: %s", luaAggregatorOutputFileName, consolidatedOutputRemoteDir, err) return } } else { glog.Info("A lua aggregator has not been specified.") } taskCompletedSuccessfully = true }
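// NOTE: A minimal sketch of a helper that could factor out the ">10 bytes means a
// real patch/aggregator" heuristic that appears in several places above. The name
// isNontrivialFile is hypothetical, not an existing util function.
func isNontrivialFile(path string) bool {
	fileInfo, err := os.Stat(path)
	// Treat missing or unreadable files the same as trivially small ones.
	return err == nil && fileInfo.Size() > 10
}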
func runBenchmark(fileInfoName, pathToPagesets, localOutputDir, chromiumBuildName, chromiumBinary, browserExtraArgs string) error { pagesetBaseName := filepath.Base(fileInfoName) if pagesetBaseName == util.TIMESTAMP_FILE_NAME || filepath.Ext(pagesetBaseName) == ".pyc" { // Ignore timestamp files and .pyc files. return nil } // Read the pageset. pagesetName := strings.TrimSuffix(pagesetBaseName, filepath.Ext(pagesetBaseName)) pagesetPath := filepath.Join(pathToPagesets, fileInfoName) decodedPageset, err := util.ReadPageset(pagesetPath) if err != nil { return fmt.Errorf("Could not read %s: %s", pagesetPath, err) } glog.Infof("===== Processing %s =====", pagesetPath) benchmark, present := util.BenchmarksToTelemetryName[*benchmarkName] if !present { // If it is a custom benchmark, use the entered benchmark name. benchmark = *benchmarkName } args := []string{ filepath.Join(*telemetryBinariesDir, util.BINARY_RUN_BENCHMARK), benchmark, "--also-run-disabled-tests", "--user-agent=" + decodedPageset.UserAgent, "--urls-list=" + decodedPageset.UrlsList, "--archive-data-file=" + filepath.Join(*pageSetsDir, decodedPageset.ArchiveDataFile), // Output in chartjson. Needed for uploading to the performance dashboard. // Documentation is here: http://www.chromium.org/developers/speed-infra/performance-dashboard/sending-data-to-the-performance-dashboard "--output-format=chartjson", // Upload traces. "--upload-results", "--upload-bucket=output", } // Need to capture output for all benchmarks. outputDirArgValue := filepath.Join(localOutputDir, pagesetName) args = append(args, "--output-dir="+outputDirArgValue) // Figure out which browser should be used. if *targetPlatform == util.PLATFORM_ANDROID { if err := util.InstallChromeAPK(chromiumBuildName); err != nil { return fmt.Errorf("Error while installing APK: %s", err) } args = append(args, "--browser=android-chromium") } else { args = append(args, "--browser=exact", "--browser-executable="+chromiumBinary) } // Split benchmark args if not empty and append to args. (A whitespace-safe variant is sketched after this function.) if *benchmarkExtraArgs != "" { args = append(args, strings.Split(*benchmarkExtraArgs, " ")...) } // Add the number of times to repeat. args = append(args, fmt.Sprintf("--page-repeat=%d", *repeatBenchmark)) // Add browserArgs if not empty to args. if browserExtraArgs != "" { args = append(args, "--extra-browser-args="+browserExtraArgs) } // Set the PYTHONPATH to the pagesets and the telemetry dirs. env := []string{ fmt.Sprintf("PYTHONPATH=%s:$PYTHONPATH", *telemetryBinariesDir), "DISPLAY=:0", } timeoutSecs := 2 * 60 // 2 mins timeout if err := util.ExecuteCmd("python", args, env, time.Duration(timeoutSecs)*time.Second, nil, nil); err != nil { glog.Errorf("Run benchmark command failed with: %s", err) } return nil }
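// NOTE: A minimal sketch, assuming extra-arg flag values may contain runs of spaces:
// strings.Fields avoids appending empty-string arguments, which the strings.Split
// call above would produce for input like "--a  --b". An illustration only, not a
// change to the flow above; the helper name is hypothetical.
func splitExtraArgs(extraArgs string) []string {
	if extraArgs == "" {
		return nil
	}
	// Fields splits on any run of whitespace and drops empty tokens.
	return strings.Fields(extraArgs)
}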