func main() {
	defer common.LogPanic()
	master_common.Init()

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&admin_tasks.RecreateWebpageArchivesUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Capture archives", util.GetMasterLogLink(*runID), ""))
	// Ensure webapp is updated and completion email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	if !*master_common.Local {
		// Cleanup tmp files after the run.
		defer util.CleanTmpDir()
	}
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Capture archives on Workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}

	cmd := append(master_common.WorkerSetupCmds(),
		// The main command that runs capture_archives on all workers.
		fmt.Sprintf(
			"DISPLAY=:0 capture_archives --worker_num=%s --log_dir=%s --log_id=%s --pageset_type=%s --chromium_build=%s --local=%t;",
			util.WORKER_NUM_KEYWORD, util.GLogDir, *runID, *pagesetType, *chromiumBuild, *master_common.Local))
	_, err := util.SSH(strings.Join(cmd, " "), util.Slaves, util.CAPTURE_ARCHIVES_TIMEOUT)
	if err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}
	*taskCompletedSuccessfully = true
}

func main() {
	defer common.LogPanic()
	master_common.Init()

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&chromium_perf.UpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Chromium perf", util.GetMasterLogLink(*runID), *description))
	// Ensure webapp is updated and email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup dirs after run completes.
	defer skutil.RemoveAll(filepath.Join(util.StorageDir, util.ChromiumPerfRunsDir))
	defer skutil.RemoveAll(filepath.Join(util.StorageDir, util.BenchmarkRunsDir))
	if !*master_common.Local {
		// Cleanup tmp files after the run.
		defer util.CleanTmpDir()
	}
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running chromium perf task on workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *benchmarkName == "" {
		glog.Error("Must specify --benchmark_name")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}

	// Instantiate GsUtil object.
	gs, err := util.NewGsUtil(nil)
	if err != nil {
		glog.Errorf("Could not instantiate gsutil object: %s", err)
		return
	}
	remoteOutputDir := filepath.Join(util.ChromiumPerfRunsDir, *runID)

	// Copy the patches to Google Storage.
	skiaPatchName := *runID + ".skia.patch"
	chromiumPatchName := *runID + ".chromium.patch"
	benchmarkPatchName := *runID + ".benchmark.patch"
	for _, patchName := range []string{skiaPatchName, chromiumPatchName, benchmarkPatchName} {
		if err := gs.UploadFile(patchName, os.TempDir(), remoteOutputDir); err != nil {
			glog.Errorf("Could not upload %s to %s: %s", patchName, remoteOutputDir, err)
			return
		}
	}
	skiaPatchLink = util.GS_HTTP_LINK + filepath.Join(util.GSBucketName, remoteOutputDir, skiaPatchName)
	chromiumPatchLink = util.GS_HTTP_LINK + filepath.Join(util.GSBucketName, remoteOutputDir, chromiumPatchName)
	benchmarkPatchLink = util.GS_HTTP_LINK + filepath.Join(util.GSBucketName, remoteOutputDir, benchmarkPatchName)

	// Create the two required chromium builds (with and without the patch).
	chromiumHash, skiaHash, err := util.CreateChromiumBuild(*runID, *targetPlatform, "", "", true)
	if err != nil {
		glog.Errorf("Could not create chromium build: %s", err)
		return
	}

	// Reboot all workers to start from a clean slate.
	if !*master_common.Local {
		util.RebootWorkers()
	}
	if *targetPlatform == util.PLATFORM_ANDROID {
		// Reboot all Android devices to start from a clean slate.
		util.RebootAndroidDevices()
	}

	// Run the run_chromium_perf script on all workers.
	runIDNoPatch := *runID + "-nopatch"
	runIDWithPatch := *runID + "-withpatch"
	chromiumBuildNoPatch := fmt.Sprintf("try-%s-%s-%s", chromiumHash, skiaHash, runIDNoPatch)
	chromiumBuildWithPatch := fmt.Sprintf("try-%s-%s-%s", chromiumHash, skiaHash, runIDWithPatch)
	runChromiumPerfCmdTemplate := "DISPLAY=:0 run_chromium_perf " +
		"--worker_num={{.WorkerNum}} --log_dir={{.LogDir}} --log_id={{.RunID}} --pageset_type={{.PagesetType}} " +
		"--chromium_build_nopatch={{.ChromiumBuildNoPatch}} --chromium_build_withpatch={{.ChromiumBuildWithPatch}} " +
		"--run_id={{.RunID}} --run_id_nopatch={{.RunIDNoPatch}} --run_id_withpatch={{.RunIDWithPatch}} " +
		"--benchmark_name={{.BenchmarkName}} --benchmark_extra_args=\"{{.BenchmarkExtraArgs}}\" " +
		"--browser_extra_args_nopatch=\"{{.BrowserExtraArgsNoPatch}}\" --browser_extra_args_withpatch=\"{{.BrowserExtraArgsWithPatch}}\" " +
		"--repeat_benchmark={{.RepeatBenchmark}} --run_in_parallel={{.RunInParallel}} --target_platform={{.TargetPlatform}} " +
		"--local={{.Local}};"
	runChromiumPerfTemplateParsed := template.Must(template.New("run_chromium_perf_cmd").Parse(runChromiumPerfCmdTemplate))
	runChromiumPerfCmdBytes := new(bytes.Buffer)
	if err := runChromiumPerfTemplateParsed.Execute(runChromiumPerfCmdBytes, struct {
		WorkerNum                 string
		LogDir                    string
		PagesetType               string
		ChromiumBuildNoPatch      string
		ChromiumBuildWithPatch    string
		RunID                     string
		RunIDNoPatch              string
		RunIDWithPatch            string
		BenchmarkName             string
		BenchmarkExtraArgs        string
		BrowserExtraArgsNoPatch   string
		BrowserExtraArgsWithPatch string
		RepeatBenchmark           int
		RunInParallel             bool
		TargetPlatform            string
		Local                     bool
	}{
		WorkerNum:                 util.WORKER_NUM_KEYWORD,
		LogDir:                    util.GLogDir,
		PagesetType:               *pagesetType,
		ChromiumBuildNoPatch:      chromiumBuildNoPatch,
		ChromiumBuildWithPatch:    chromiumBuildWithPatch,
		RunID:                     *runID,
		RunIDNoPatch:              runIDNoPatch,
		RunIDWithPatch:            runIDWithPatch,
		BenchmarkName:             *benchmarkName,
		BenchmarkExtraArgs:        *benchmarkExtraArgs,
		BrowserExtraArgsNoPatch:   *browserExtraArgsNoPatch,
		BrowserExtraArgsWithPatch: *browserExtraArgsWithPatch,
		RepeatBenchmark:           *repeatBenchmark,
		RunInParallel:             *runInParallel,
		TargetPlatform:            *targetPlatform,
		Local:                     *master_common.Local,
	}); err != nil {
		glog.Errorf("Failed to execute template: %s", err)
		return
	}
	cmd := append(master_common.WorkerSetupCmds(),
		// The main command that runs run_chromium_perf on all workers.
		runChromiumPerfCmdBytes.String())
	_, err = util.SSH(strings.Join(cmd, " "), util.Slaves, util.RUN_CHROMIUM_PERF_TIMEOUT)
	if err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}

	// If "--output-format=csv-pivot-table" was specified then merge all CSV files and upload.
	noOutputSlaves := []string{}
	if strings.Contains(*benchmarkExtraArgs, "--output-format=csv-pivot-table") {
		for _, runID := range []string{runIDNoPatch, runIDWithPatch} {
			if noOutputSlaves, err = mergeUploadCSVFiles(runID, gs); err != nil {
				glog.Errorf("Unable to merge and upload CSV files for %s: %s", runID, err)
			}
		}
	}

	// Compare the resultant CSV files using csv_comparer.py.
	noPatchCSVPath := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runIDNoPatch, runIDNoPatch+".output")
	withPatchCSVPath := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runIDWithPatch, runIDWithPatch+".output")
	htmlOutputDir := filepath.Join(util.StorageDir, util.ChromiumPerfRunsDir, *runID, "html")
	skutil.MkdirAll(htmlOutputDir, 0700)
	htmlRemoteDir := filepath.Join(remoteOutputDir, "html")
	htmlOutputLinkBase := util.GS_HTTP_LINK + filepath.Join(util.GSBucketName, htmlRemoteDir) + "/"
	htmlOutputLink = htmlOutputLinkBase + "index.html"
	noPatchOutputLink = util.GS_HTTP_LINK + filepath.Join(util.GSBucketName, util.BenchmarkRunsDir, runIDNoPatch, "consolidated_outputs", runIDNoPatch+".output")
	withPatchOutputLink = util.GS_HTTP_LINK + filepath.Join(util.GSBucketName, util.BenchmarkRunsDir, runIDWithPatch, "consolidated_outputs", runIDWithPatch+".output")
	// Construct path to the csv_comparer python script.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")
	pathToCsvComparer := filepath.Join(pathToPyFiles, "csv_comparer.py")
	args := []string{
		pathToCsvComparer,
		"--csv_file1=" + noPatchCSVPath,
		"--csv_file2=" + withPatchCSVPath,
		"--output_html=" + htmlOutputDir,
		"--variance_threshold=" + strconv.FormatFloat(*varianceThreshold, 'f', 2, 64),
		"--discard_outliers=" + strconv.FormatFloat(*discardOutliers, 'f', 2, 64),
		"--absolute_url=" + htmlOutputLinkBase,
		"--requester_email=" + *emails,
		"--skia_patch_link=" + skiaPatchLink,
		"--chromium_patch_link=" + chromiumPatchLink,
		"--benchmark_patch_link=" + benchmarkPatchLink,
		"--description=" + *description,
		"--raw_csv_nopatch=" + noPatchOutputLink,
		"--raw_csv_withpatch=" + withPatchOutputLink,
		"--num_repeated=" + strconv.Itoa(*repeatBenchmark),
		"--target_platform=" + *targetPlatform,
		"--browser_args_nopatch=" + *browserExtraArgsNoPatch,
		"--browser_args_withpatch=" + *browserExtraArgsWithPatch,
		"--pageset_type=" + *pagesetType,
		"--chromium_hash=" + chromiumHash,
		"--skia_hash=" + skiaHash,
		"--missing_output_slaves=" + strings.Join(noOutputSlaves, " "),
		"--logs_link_prefix=" + util.LOGS_LINK_PREFIX,
	}
	err = util.ExecuteCmd("python", args, []string{}, util.CSV_COMPARER_TIMEOUT, nil, nil)
	if err != nil {
		glog.Errorf("Error running csv_comparer.py: %s", err)
		return
	}

	// Copy the HTML files to Google Storage.
	if err := gs.UploadDir(htmlOutputDir, htmlRemoteDir, true); err != nil {
		glog.Errorf("Could not upload %s to %s: %s", htmlOutputDir, htmlRemoteDir, err)
		return
	}

	taskCompletedSuccessfully = true
}

func main() {
	defer common.LogPanic()
	master_common.Init()

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Fix archives", util.GetMasterLogLink(*runID), ""))
	// Ensure completion email is sent even if task fails.
	defer sendEmail(emailsArr)
	if !*master_common.Local {
		// Cleanup tmp files after the run.
		defer util.CleanTmpDir()
	}
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running fix archives task on workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}

	// Run the fix_archives script on all workers.
	fixArchivesCmdTemplate := "DISPLAY=:0 fix_archives --worker_num={{.WorkerNum}} --log_dir={{.LogDir}} " +
		"--pageset_type={{.PagesetType}} --chromium_build={{.ChromiumBuild}} --run_id={{.RunID}} " +
		"--repeat_benchmark={{.RepeatBenchmark}} --benchmark_name={{.BenchmarkName}} " +
		"--benchmark_header_to_check=\"{{.BenchmarkHeaderToCheck}}\" --delete_pageset={{.DeletePageset}} " +
		"--perc_change_threshold={{.PercentageChangeThreshold}} --res_missing_count_threshold={{.ResourceMissingCountThreshold}} " +
		"--local={{.Local}};"
	fixArchivesTemplateParsed := template.Must(template.New("fix_archives_cmd").Parse(fixArchivesCmdTemplate))
	fixArchivesCmdBytes := new(bytes.Buffer)
	if err := fixArchivesTemplateParsed.Execute(fixArchivesCmdBytes, struct {
		WorkerNum                     string
		LogDir                        string
		PagesetType                   string
		ChromiumBuild                 string
		RunID                         string
		RepeatBenchmark               int
		BenchmarkName                 string
		BenchmarkHeaderToCheck        string
		DeletePageset                 bool
		PercentageChangeThreshold     float64
		ResourceMissingCountThreshold int
		Local                         bool
	}{
		WorkerNum:                     util.WORKER_NUM_KEYWORD,
		LogDir:                        util.GLogDir,
		PagesetType:                   *pagesetType,
		ChromiumBuild:                 *chromiumBuild,
		RunID:                         *runID,
		RepeatBenchmark:               *repeatBenchmark,
		BenchmarkName:                 *benchmarkName,
		BenchmarkHeaderToCheck:        *benchmarkHeaderToCheck,
		DeletePageset:                 *deletePageset,
		PercentageChangeThreshold:     *percentageChangeThreshold,
		ResourceMissingCountThreshold: *resourceMissingCountThreshold,
		Local:                         *master_common.Local,
	}); err != nil {
		glog.Errorf("Failed to execute template: %s", err)
		return
	}
	cmd := append(master_common.WorkerSetupCmds(),
		// The main command that runs fix_archives on all workers.
		fixArchivesCmdBytes.String())
	if _, err := util.SSH(strings.Join(cmd, " "), util.Slaves, util.FIX_ARCHIVES_TIMEOUT); err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}

	taskCompletedSuccessfully = true
}

func main() {
	defer common.LogPanic()
	master_common.Init()

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&capture_skps.UpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Capture SKPs", util.GetMasterLogLink(*runID), *description))
	// Ensure webapp is updated and completion email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	if !*master_common.Local {
		// Cleanup tmp files after the run.
		defer util.CleanTmpDir()
	}
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running capture skps task on workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}

	// Run the capture_skps script on all workers.
	captureSKPsCmdTemplate := "DISPLAY=:0 capture_skps --worker_num={{.WorkerNum}} --log_dir={{.LogDir}} --log_id={{.RunID}} " +
		"--pageset_type={{.PagesetType}} --chromium_build={{.ChromiumBuild}} --run_id={{.RunID}} " +
		"--target_platform={{.TargetPlatform}} --local={{.Local}};"
	captureSKPsTemplateParsed := template.Must(template.New("capture_skps_cmd").Parse(captureSKPsCmdTemplate))
	captureSKPsCmdBytes := new(bytes.Buffer)
	if err := captureSKPsTemplateParsed.Execute(captureSKPsCmdBytes, struct {
		WorkerNum      string
		LogDir         string
		PagesetType    string
		ChromiumBuild  string
		RunID          string
		TargetPlatform string
		Local          bool
	}{
		WorkerNum:      util.WORKER_NUM_KEYWORD,
		LogDir:         util.GLogDir,
		PagesetType:    *pagesetType,
		ChromiumBuild:  *chromiumBuild,
		RunID:          *runID,
		TargetPlatform: *targetPlatform,
		Local:          *master_common.Local,
	}); err != nil {
		glog.Errorf("Failed to execute template: %s", err)
		return
	}
	cmd := append(master_common.WorkerSetupCmds(),
		// The main command that runs capture_skps on all workers.
		captureSKPsCmdBytes.String())
	_, err := util.SSH(strings.Join(cmd, " "), util.Slaves, util.CAPTURE_SKPS_TIMEOUT)
	if err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}

	taskCompletedSuccessfully = true
}

func main() {
	defer common.LogPanic()
	master_common.Init()

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&lua_scripts.UpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Lua script", util.GetMasterLogLink(*runID), *description))
	// Ensure webapp is updated and email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup tmp files after the run.
	if !*master_common.Local {
		defer util.CleanTmpDir()
	}
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running Lua script on workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}

	// Instantiate GsUtil object.
	gs, err := util.NewGsUtil(nil)
	if err != nil {
		glog.Error(err)
		return
	}

	// Upload the lua script for this run to Google storage.
	luaScriptName := *runID + ".lua"
	defer skutil.Remove(filepath.Join(os.TempDir(), luaScriptName))
	luaScriptRemoteDir := filepath.Join(util.LuaRunsDir, *runID, "scripts")
	if err := gs.UploadFile(luaScriptName, os.TempDir(), luaScriptRemoteDir); err != nil {
		glog.Errorf("Could not upload %s to %s: %s", luaScriptName, luaScriptRemoteDir, err)
		return
	}

	// Run the run_lua script on all workers.
	runLuaCmdTemplate := "DISPLAY=:0 run_lua --worker_num={{.WorkerNum}} --log_dir={{.LogDir}} --log_id={{.RunID}} " +
		"--pageset_type={{.PagesetType}} --chromium_build={{.ChromiumBuild}} --run_id={{.RunID}} --local={{.Local}};"
	runLuaTemplateParsed := template.Must(template.New("run_lua_cmd").Parse(runLuaCmdTemplate))
	luaCmdBytes := new(bytes.Buffer)
	if err := runLuaTemplateParsed.Execute(luaCmdBytes, struct {
		WorkerNum     string
		LogDir        string
		PagesetType   string
		ChromiumBuild string
		RunID         string
		Local         bool
	}{
		WorkerNum:     util.WORKER_NUM_KEYWORD,
		LogDir:        util.GLogDir,
		PagesetType:   *pagesetType,
		ChromiumBuild: *chromiumBuild,
		RunID:         *runID,
		Local:         *master_common.Local,
	}); err != nil {
		glog.Errorf("Failed to execute template: %s", err)
		return
	}
	cmd := append(master_common.WorkerSetupCmds(),
		// The main command that runs run_lua on all workers.
		luaCmdBytes.String())
	_, err = util.SSH(strings.Join(cmd, " "), util.Slaves, util.RUN_LUA_TIMEOUT)
	if err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}

	// Copy outputs from all slaves locally and combine them into one file.
	consolidatedFileName := "lua-output"
	consolidatedLuaOutput := filepath.Join(os.TempDir(), consolidatedFileName)
	defer skutil.Remove(consolidatedLuaOutput)
	if err := ioutil.WriteFile(consolidatedLuaOutput, []byte{}, 0660); err != nil {
		glog.Errorf("Could not create %s: %s", consolidatedLuaOutput, err)
		return
	}
	for i := 0; i < util.NumWorkers(); i++ {
		workerNum := i + 1
		workerRemoteOutputPath := filepath.Join(util.LuaRunsDir, *runID, fmt.Sprintf("slave%d", workerNum), "outputs", *runID+".output")
		respBody, err := gs.GetRemoteFileContents(workerRemoteOutputPath)
		if err != nil {
			glog.Errorf("Could not fetch %s: %s", workerRemoteOutputPath, err)
			continue
		}
		defer skutil.Close(respBody)
		out, err := os.OpenFile(consolidatedLuaOutput, os.O_RDWR|os.O_APPEND, 0660)
		if err != nil {
			glog.Errorf("Unable to open file %s: %s", consolidatedLuaOutput, err)
			return
		}
		defer skutil.Close(out)
		if _, err = io.Copy(out, respBody); err != nil {
			glog.Errorf("Unable to write out %s to %s: %s", workerRemoteOutputPath, consolidatedLuaOutput, err)
			return
		}
	}

	// Copy the consolidated file into Google Storage.
	consolidatedOutputRemoteDir := filepath.Join(util.LuaRunsDir, *runID, "consolidated_outputs")
	luaOutputRemoteLink = util.GS_HTTP_LINK + filepath.Join(util.GSBucketName, consolidatedOutputRemoteDir, consolidatedFileName)
	if err := gs.UploadFile(consolidatedFileName, os.TempDir(), consolidatedOutputRemoteDir); err != nil {
		glog.Errorf("Unable to upload %s to %s: %s", consolidatedLuaOutput, consolidatedOutputRemoteDir, err)
		return
	}

	// Upload the lua aggregator (if specified) for this run to Google storage.
	luaAggregatorName := *runID + ".aggregator"
	luaAggregatorPath := filepath.Join(os.TempDir(), luaAggregatorName)
	defer skutil.Remove(luaAggregatorPath)
	// Only run the aggregator if the file could be stat'ed and is non-trivial;
	// checking err == nil (rather than !os.IsNotExist(err)) also avoids a nil
	// dereference of luaAggregatorFileInfo when os.Stat fails for other reasons.
	luaAggregatorFileInfo, err := os.Stat(luaAggregatorPath)
	if err == nil && luaAggregatorFileInfo.Size() > 10 {
		if err := gs.UploadFile(luaAggregatorName, os.TempDir(), luaScriptRemoteDir); err != nil {
			glog.Errorf("Could not upload %s to %s: %s", luaAggregatorName, luaScriptRemoteDir, err)
			return
		}
		// Run the aggregator and save stdout.
		luaAggregatorOutputFileName := *runID + ".agg.output"
		luaAggregatorOutputFilePath := filepath.Join(os.TempDir(), luaAggregatorOutputFileName)
		luaAggregatorOutputFile, err := os.Create(luaAggregatorOutputFilePath)
		defer skutil.Close(luaAggregatorOutputFile)
		defer skutil.Remove(luaAggregatorOutputFilePath)
		if err != nil {
			glog.Errorf("Could not create %s: %s", luaAggregatorOutputFilePath, err)
			return
		}
		err = util.ExecuteCmd(util.BINARY_LUA, []string{luaAggregatorPath}, []string{},
			util.LUA_AGGREGATOR_TIMEOUT, luaAggregatorOutputFile, nil)
		if err != nil {
			glog.Errorf("Could not execute the lua aggregator %s: %s", luaAggregatorPath, err)
			return
		}
		// Copy the aggregator output into Google Storage.
		luaAggregatorOutputRemoteLink = util.GS_HTTP_LINK + filepath.Join(util.GSBucketName, consolidatedOutputRemoteDir, luaAggregatorOutputFileName)
		if err := gs.UploadFile(luaAggregatorOutputFileName, os.TempDir(), consolidatedOutputRemoteDir); err != nil {
			glog.Errorf("Unable to upload %s to %s: %s", luaAggregatorOutputFileName, consolidatedOutputRemoteDir, err)
			return
		}
	} else {
		glog.Info("A lua aggregator has not been specified.")
	}

	taskCompletedSuccessfully = true
}