func runBenchmark(fileInfoName, pathToPagesets, pathToPyFiles, localOutputDir, chromiumBuildName, chromiumBinary, runID, browserExtraArgs string) error {
	pagesetBaseName := filepath.Base(fileInfoName)
	if pagesetBaseName == util.TIMESTAMP_FILE_NAME || filepath.Ext(pagesetBaseName) == ".pyc" {
		// Ignore timestamp files and .pyc files.
		return nil
	}

	// Convert the filename into a format consumable by the run_benchmarks
	// binary.
	pagesetName := strings.TrimSuffix(pagesetBaseName, filepath.Ext(pagesetBaseName))
	pagesetPath := filepath.Join(pathToPagesets, fileInfoName)

	glog.Infof("===== Processing %s for %s =====", pagesetPath, runID)

	skutil.LogErr(os.Chdir(pathToPyFiles))
	args := []string{
		util.BINARY_RUN_BENCHMARK,
		fmt.Sprintf("%s.%s", *benchmarkName, util.BenchmarksToPagesetName[*benchmarkName]),
		"--page-set-name=" + pagesetName,
		"--page-set-base-dir=" + pathToPagesets,
		"--also-run-disabled-tests",
	}

	// Need to capture output for all benchmarks.
	outputDirArgValue := filepath.Join(localOutputDir, pagesetName)
	args = append(args, "--output-dir="+outputDirArgValue)
	// Figure out which browser should be used.
	if *targetPlatform == util.PLATFORM_ANDROID {
		if err := installChromeAPK(chromiumBuildName); err != nil {
			return fmt.Errorf("Error while installing APK: %s", err)
		}
		args = append(args, "--browser=android-chrome-shell")
	} else {
		args = append(args, "--browser=exact", "--browser-executable="+chromiumBinary)
	}
	// Split benchmark args if not empty and append to args.
	if *benchmarkExtraArgs != "" {
		args = append(args, strings.Split(*benchmarkExtraArgs, " ")...)
	}
	// Add the number of times to repeat.
	args = append(args, fmt.Sprintf("--page-repeat=%d", *repeatBenchmark))
	// Add browserArgs if not empty to args.
	if browserExtraArgs != "" {
		args = append(args, "--extra-browser-args="+browserExtraArgs)
	}
	// Set the PYTHONPATH to the pagesets and the telemetry dirs.
	env := []string{
		fmt.Sprintf("PYTHONPATH=%s:%s:%s:$PYTHONPATH", pathToPagesets, util.TelemetryBinariesDir, util.TelemetrySrcDir),
		"DISPLAY=:0",
	}
	timeoutSecs := util.PagesetTypeToInfo[*pagesetType].RunChromiumPerfTimeoutSecs
	if err := util.ExecuteCmd("python", args, env, time.Duration(timeoutSecs)*time.Second, nil, nil); err != nil {
		glog.Errorf("Run benchmark command failed with: %s", err)
		glog.Errorf("Killing all running chrome processes in case there is a non-recoverable error.")
		skutil.LogErr(util.ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, 5*time.Minute, nil, nil))
	}
	return nil
}
// writeWorkerResult runs the worker and writes the result to Redis.
func (r *RedisRTC) writeWorkerResult(priority int64, id string) {
	result, err := r.worker(priority, id)
	var writeKey string
	var writeData []byte
	if err != nil {
		writeKey = r.errorKey(id)
		writeData = []byte(err.Error())
	} else {
		if writeData, err = r.codec.Encode(result); err != nil {
			writeKey = r.errorKey(id)
			writeData = []byte(fmt.Sprintf("Error encoding worker result: %s", err))
		} else {
			writeKey = r.key(id)
		}
	}

	c := r.redisPool.Get()
	defer util.Close(c)

	util.LogErr(c.Send("MULTI"))
	util.LogErr(c.Send("SET", writeKey, writeData))
	util.LogErr(c.Send("SREM", r.inProgressKey, id))
	// Expire the error after 10 seconds to let the client decide whether we
	// need to retry.
	if err != nil {
		util.LogErr(c.Send("EXPIRE", writeKey, 10))
	}
	util.LogErr(c.Send("PUBLISH", r.finishedChannel, id))
	if _, err = c.Do("EXEC"); err != nil {
		glog.Errorf("Error writing result to redis: %s", err)
	}
}
// Add, see the diff.LRUCache interface for details.
func (c *RedisLRUCache) Add(key, value interface{}) bool {
	prefixedKey, rawKey, err := c.encodeKey(key)
	if err != nil {
		glog.Errorf("Unable to create redis key: %s", err)
		return false
	}

	byteVal, err := c.encodeVal(value)
	if err != nil {
		glog.Errorf("Unable to create redis value: %s", err)
		return false
	}

	conn := c.pool.Get()
	defer util.Close(conn)

	util.LogErr(conn.Send("MULTI"))
	util.LogErr(conn.Send("SET", prefixedKey, byteVal))
	util.LogErr(conn.Send("SADD", c.indexSetKey, rawKey))
	_, err = conn.Do("EXEC")
	if err != nil {
		glog.Errorf("Unable to add key: %s", err)
		return false
	}
	return true
}
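// The sketch below is illustrative only: it assumes a working *RedisLRUCache
// from this package (however it is constructed) and shows the intended
// Add/Remove round trip. exampleCacheUsage is a hypothetical helper, not part
// of the real API, and assumes string values round-trip through the
// configured codec.
func exampleCacheUsage(c *RedisLRUCache) {
	// Add returns false if encoding or the redis transaction failed.
	if ok := c.Add("alpha", "value-1"); !ok {
		glog.Error("Add failed; value was not cached")
	}
	// Remove deletes the key and its entry in the index set.
	c.Remove("alpha")
}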
// getResultErr returns the cached version of the item identified by 'id' or
// nil if it's not available.
func (r *RedisRTC) getResultErr(id string, returnBytes bool) (interface{}, error) {
	c := r.redisPool.Get()
	defer util.Close(c)

	util.LogErr(c.Send("MULTI"))
	util.LogErr(c.Send("GET", r.key(id)))
	util.LogErr(c.Send("GET", r.errorKey(id)))
	resArr, err := redis.Values(c.Do("EXEC"))
	if err != nil {
		return nil, err
	}

	retBytes, errBytes := resArr[0], resArr[1]
	if errBytes != nil {
		return nil, fmt.Errorf("For %s we received error: %s", id, string(errBytes.([]byte)))
	}

	if retBytes != nil {
		if returnBytes {
			return retBytes, nil
		}
		return r.codec.Decode(retBytes.([]byte))
	}

	// We have neither an error nor any data.
	return nil, nil
}
// ResetCheckout resets the specified Git checkout.
func ResetCheckout(dir string) error {
	if err := os.Chdir(dir); err != nil {
		return fmt.Errorf("Could not chdir to %s: %s", dir, err)
	}
	// Run "git reset --hard HEAD".
	resetArgs := []string{"reset", "--hard", "HEAD"}
	util.LogErr(ExecuteCmd(BINARY_GIT, resetArgs, []string{}, GIT_RESET_TIMEOUT, nil, nil))
	// Run "git clean -f -d".
	cleanArgs := []string{"clean", "-f", "-d"}
	util.LogErr(ExecuteCmd(BINARY_GIT, cleanArgs, []string{}, GIT_CLEAN_TIMEOUT, nil, nil))
	return nil
}
// isFinished returns true if the task has finished.
func (r *RedisRTC) isFinished(id string) bool {
	c := r.redisPool.Get()
	defer util.Close(c)

	util.LogErr(c.Send("MULTI"))
	util.LogErr(c.Send("EXISTS", r.key(id)))
	util.LogErr(c.Send("EXISTS", r.errorKey(id)))
	resArr, err := redis.Ints(c.Do("EXEC"))
	if err != nil {
		glog.Errorf("Unable to check if key exists: %s", err)
		return false
	}
	return (resArr[0] + resArr[1]) > 0
}
func updateWebappTask() {
	if frontend.CTFE_V2 {
		vars := frontend.LuaScriptUpdateVars{}
		vars.Id = *gaeTaskID
		vars.SetCompleted(taskCompletedSuccessfully)
		vars.ScriptOutput = sql.NullString{String: luaOutputRemoteLink, Valid: true}
		if luaAggregatorOutputRemoteLink != "" {
			vars.AggregatedOutput = sql.NullString{String: luaAggregatorOutputRemoteLink, Valid: true}
		}
		skutil.LogErr(frontend.UpdateWebappTaskV2(&vars))
		return
	}
	outputLink := luaOutputRemoteLink
	if luaAggregatorOutputRemoteLink != "" {
		// Use the aggregated output if it exists.
		outputLink = luaAggregatorOutputRemoteLink
	}
	extraData := map[string]string{
		"lua_script_link":     luaScriptRemoteLink,
		"lua_aggregator_link": luaAggregatorRemoteLink,
		"lua_output_link":     outputLink,
	}
	if err := frontend.UpdateWebappTask(*gaeTaskID, frontend.UpdateLuaTasksWebapp, extraData); err != nil {
		glog.Errorf("Error while updating webapp task: %s", err)
		return
	}
}
// ExecuteCmd executes the specified binary with the specified args and env.
// Stdout and Stderr are written to stdoutFile and stderrFile respectively if
// specified. If not specified then stdout and stderr will be written only to
// glog. Note: It is the responsibility of the caller to close stdoutFile and
// stderrFile.
func ExecuteCmd(binary string, args, env []string, timeout time.Duration, stdoutFile, stderrFile *os.File) error {
	// Add the current PATH to the env.
	env = append(env, "PATH="+os.Getenv("PATH"))

	// Create the cmd obj.
	cmd := exec.Command(binary, args...)
	cmd.Env = env

	// Attach WriteLog to the command.
	cmd.Stdout = WriteLog{LogFunc: glog.Infof, OutputFile: stdoutFile}
	cmd.Stderr = WriteLog{LogFunc: glog.Errorf, OutputFile: stderrFile}

	// Execute cmd. Return early if the process cannot be started; otherwise
	// cmd.Process would be nil when the timeout branch tries to kill it.
	glog.Infof("Executing %s %s", strings.Join(cmd.Env, " "), strings.Join(cmd.Args, " "))
	if err := cmd.Start(); err != nil {
		return fmt.Errorf("Failed to start %s: %s", binary, err)
	}
	done := make(chan error)
	go func() {
		done <- cmd.Wait()
	}()
	select {
	case <-time.After(timeout):
		if err := cmd.Process.Kill(); err != nil {
			return fmt.Errorf("Failed to kill timed out process: %s", err)
		}
		<-done // allow goroutine to exit
		glog.Errorf("Command killed since it took longer than %f secs", timeout.Seconds())
		return fmt.Errorf("Command killed since it took longer than %f secs", timeout.Seconds())
	case err := <-done:
		if err != nil {
			return fmt.Errorf("Process done with error: %s", err)
		}
	}
	return nil
}
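// A minimal usage sketch for ExecuteCmd above: with nil output files the
// command's stdout and stderr go only to glog. The binary, args, and
// one-minute timeout are illustrative values, and exampleExecuteCmd is a
// hypothetical helper.
func exampleExecuteCmd() {
	if err := ExecuteCmd("ls", []string{"-l"}, []string{}, time.Minute, nil, nil); err != nil {
		glog.Errorf("Command failed: %s", err)
	}
}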
func executeCmd(cmd, hostname string, config *ssh.ClientConfig, timeout time.Duration) (string, error) {
	// Dial up a TCP connection to the remote machine.
	conn, err := net.Dial("tcp", hostname+":22")
	if err != nil {
		return "", fmt.Errorf("Failed to ssh connect to %s. Make sure \"PubkeyAuthentication yes\" is in your sshd_config: %s", hostname, err)
	}
	defer util.Close(conn)
	util.LogErr(conn.SetDeadline(time.Now().Add(timeout)))

	// Create a new SSH client connection.
	sshConn, sshChan, req, err := ssh.NewClientConn(conn, hostname+":22", config)
	if err != nil {
		return "", fmt.Errorf("Failed to ssh connect to %s: %s", hostname, err)
	}
	// Use the client connection to create a new client.
	client := ssh.NewClient(sshConn, sshChan, req)

	// Client connections can support multiple interactive sessions.
	session, err := client.NewSession()
	if err != nil {
		return "", fmt.Errorf("Failed to ssh connect to %s: %s", hostname, err)
	}
	var stdoutBuf bytes.Buffer
	session.Stdout = &stdoutBuf
	if err := session.Run(cmd); err != nil {
		return "", fmt.Errorf("Errored or timed out while running \"%s\" on %s: %s", cmd, hostname, err)
	}
	return stdoutBuf.String(), nil
}
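// A hedged sketch of calling executeCmd above. It assumes a key-based
// ssh.ClientConfig built from an already-parsed ssh.Signer; depending on the
// x/crypto/ssh version, a HostKeyCallback may also be required. The user
// name, hostname, and command are placeholders.
func exampleSSHCmd(signer ssh.Signer) {
	config := &ssh.ClientConfig{
		User: "chrome-bot",
		Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
	}
	out, err := executeCmd("uptime", "build1-a3", config, 5*time.Minute)
	if err != nil {
		glog.Errorf("SSH command failed: %s", err)
		return
	}
	glog.Info(out)
}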
// This test requires a local, valid google_storage_token.data file with
// read/write access.
func Auth_TestUploadWorkerArtifacts(t *testing.T) {
	client, _ := GetOAuthClient()
	gs, err := NewGsUtil(client)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	testDir := "testupload"
	testPagesetType := "10ktest"
	StorageDir = "testdata"
	if err := gs.UploadWorkerArtifacts(testDir, testPagesetType, 1); err != nil {
		t.Errorf("Unexpected error: %s", err)
	}

	// Examine the contents of the remote directory and then clean it up.
	service, err := storage.New(gs.client)
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	gsDir := filepath.Join(testDir, testPagesetType, "slave1")
	resp, err := service.Objects.List(GS_BUCKET_NAME).Prefix(gsDir + "/").Do()
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	assert.Equal(t, 3, len(resp.Items))
	for index, fileName := range []string{"TIMESTAMP", "alexa1-1.py", "alexa2-2.py"} {
		filePath := fmt.Sprintf("%s/%s", gsDir, fileName)
		// Wrap the cleanup in a closure: a bare "defer util.LogErr(...Do())"
		// would execute the Delete call immediately, since deferred call
		// arguments are evaluated at the defer statement.
		defer func() { util.LogErr(service.Objects.Delete(GS_BUCKET_NAME, filePath).Do()) }()
		assert.Equal(t, filePath, resp.Items[index].Name)
	}
}
// NewDefaultRepoManager returns a RepoManager instance which operates in the
// given working directory and updates at the given frequency.
func NewDefaultRepoManager(workdir string, frequency time.Duration, depot_tools string) (RepoManager, error) {
	chromiumParentDir := path.Join(workdir, "chromium")
	skiaDir := path.Join(workdir, "skia")
	skiaRepo, err := gitinfo.CloneOrUpdate(REPO_SKIA, skiaDir, true)
	if err != nil {
		return nil, err
	}

	gclient := GCLIENT
	rollDep := ROLL_DEP
	if depot_tools != "" {
		gclient = path.Join(depot_tools, gclient)
		rollDep = path.Join(depot_tools, rollDep)
	}

	r := &repoManager{
		chromiumDir:       path.Join(chromiumParentDir, "src"),
		chromiumParentDir: chromiumParentDir,
		depot_tools:       depot_tools,
		gclient:           gclient,
		rollDep:           rollDep,
		skiaDir:           skiaDir,
		skiaRepo:          skiaRepo,
	}
	if err := r.update(); err != nil {
		return nil, err
	}
	go func() {
		for range time.Tick(frequency) {
			util.LogErr(r.update())
		}
	}()
	return r, nil
}
func buildChromium(chromiumDir, targetPlatform string) error {
	if err := os.Chdir(filepath.Join(chromiumDir, "src")); err != nil {
		return fmt.Errorf("Could not chdir to %s/src: %s", chromiumDir, err)
	}

	// Find the build target to use while building chromium.
	buildTarget := "chrome"
	if targetPlatform == "Android" {
		util.LogErr(
			ioutil.WriteFile(filepath.Join(chromiumDir, "chromium.gyp_env"), []byte("{ 'GYP_DEFINES': 'OS=android', }\n"), 0777))
		buildTarget = "chrome_shell_apk"
	}

	// Restart Goma's compiler proxy right before building the checkout.
	if err := ExecuteCmd("python", []string{filepath.Join(GomaDir, "goma_ctl.py"), "restart"}, os.Environ(), 10*time.Minute, nil, nil); err != nil {
		return fmt.Errorf("Error while restarting goma compiler proxy: %s", err)
	}

	// Run "GYP_DEFINES='gomadir=/b/build/goma' GYP_GENERATORS='ninja' build/gyp_chromium -Duse_goma=1".
	env := []string{fmt.Sprintf("GYP_DEFINES=gomadir=%s", GomaDir), "GYP_GENERATORS=ninja"}
	if err := ExecuteCmd(filepath.Join("build", "gyp_chromium"), []string{"-Duse_goma=1"}, env, 30*time.Minute, nil, nil); err != nil {
		return fmt.Errorf("Error while running gyp_chromium: %s", err)
	}

	// Run "ninja -C out/Release -j100 ${build_target}".
	// Use the full system env while building chromium.
	args := []string{"-C", "out/Release", "-j100", buildTarget}
	return ExecuteCmd("ninja", args, os.Environ(), 2*time.Hour, nil, nil)
}
func updateWebappTask() {
	if frontend.CTFE_V2 {
		vars := frontend.ChromiumPerfUpdateVars{}
		vars.Id = *gaeTaskID
		vars.SetCompleted(taskCompletedSuccessfully)
		vars.Results = sql.NullString{String: htmlOutputLink, Valid: true}
		vars.NoPatchRawOutput = sql.NullString{String: noPatchOutputLink, Valid: true}
		vars.WithPatchRawOutput = sql.NullString{String: withPatchOutputLink, Valid: true}
		skutil.LogErr(frontend.UpdateWebappTaskV2(&vars))
		return
	}
	// TODO(rmistry): Add ability to update the webapp without providing links.
	extraData := map[string]string{
		"skia_patch_link":              skiaPatchLink,
		"blink_patch_link":             blinkPatchLink,
		"chromium_patch_link":          chromiumPatchLink,
		"telemetry_nopatch_log_link":   noPatchOutputLink,
		"telemetry_withpatch_log_link": withPatchOutputLink,
		"html_output_link":             htmlOutputLink,
		"build_log_link":               util.MASTER_LOGSERVER_LINK,
	}
	if err := frontend.UpdateWebappTask(*gaeTaskID, frontend.UpdateChromiumPerfTasksWebapp, extraData); err != nil {
		glog.Errorf("Error while updating webapp task: %s", err)
		return
	}
}
func main() {
	defer common.LogPanic()
	common.Init()
	frontend.MustInit()

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&admin_tasks.RecreateWebpageArchivesUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Capture archives", util.GetMasterLogLink(*runID), ""))
	// Ensure webapp is updated and completion email is sent even if task
	// fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Capture archives on Workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}

	cmd := []string{
		fmt.Sprintf("cd %s;", util.CtTreeDir),
		"git pull;",
		"make all;",
		// The main command that runs capture_archives on all workers.
		fmt.Sprintf("DISPLAY=:0 capture_archives --worker_num=%s --log_dir=%s --log_id=%s --pageset_type=%s --chromium_build=%s;", util.WORKER_NUM_KEYWORD, util.GLogDir, *runID, *pagesetType, *chromiumBuild),
	}
	_, err := util.SSH(strings.Join(cmd, " "), util.Slaves, util.CAPTURE_ARCHIVES_TIMEOUT)
	if err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}
	*taskCompletedSuccessfully = true
}
// Remove, see the diff.LRUCache interface for details.
func (c *RedisLRUCache) Remove(key interface{}) {
	conn := c.pool.Get()
	defer util.Close(conn)

	prefixedKey, rawKey, err := c.encodeKey(key)
	if err != nil {
		// Return early: without a valid key there is nothing to delete.
		glog.Errorf("Unable to create redis key: %s", err)
		return
	}

	util.LogErr(conn.Send("MULTI"))
	util.LogErr(conn.Send("DEL", prefixedKey))
	util.LogErr(conn.Send("SREM", c.indexSetKey, rawKey))
	_, err = conn.Do("EXEC")
	if err != nil {
		glog.Errorf("Error deleting key: %s", err)
	}
}
// BuildSkiaTools builds "tools" in the Skia trunk directory.
func BuildSkiaTools() error {
	if err := os.Chdir(SkiaTreeDir); err != nil {
		return fmt.Errorf("Could not chdir to %s: %s", SkiaTreeDir, err)
	}
	// Run "make clean".
	util.LogErr(ExecuteCmd(BINARY_MAKE, []string{"clean"}, []string{}, 5*time.Minute, nil, nil))
	// Build tools.
	return ExecuteCmd(BINARY_MAKE, []string{"tools", "BUILDTYPE=Release"}, []string{"GYP_DEFINES=\"skia_warnings_as_errors=0\""}, 5*time.Minute, nil, nil)
}
func updateWebappTask() {
	vars := chromium_perf.UpdateVars{}
	vars.Id = *gaeTaskID
	vars.SetCompleted(taskCompletedSuccessfully)
	vars.Results = sql.NullString{String: htmlOutputLink, Valid: true}
	vars.NoPatchRawOutput = sql.NullString{String: noPatchOutputLink, Valid: true}
	vars.WithPatchRawOutput = sql.NullString{String: withPatchOutputLink, Valid: true}
	skutil.LogErr(frontend.UpdateWebappTaskV2(&vars))
}
// Running benchmarks in parallel brings up multiple chrome instances at the
// same time; when there are crashes, chrome processes stick around and can
// severely impact the machine's performance. To stop this from happening,
// zombie chrome processes are periodically killed.
func ChromeProcessesCleaner(locker sync.Locker, chromeCleanerTimer time.Duration) {
	for range time.Tick(chromeCleanerTimer) {
		glog.Info("The chromeProcessesCleaner goroutine has started")
		glog.Info("Waiting for all existing tasks to complete before killing zombie chrome processes")
		locker.Lock()
		util.LogErr(ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, PKILL_TIMEOUT, nil, nil))
		locker.Unlock()
	}
}
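// A hedged sketch of wiring ChromeProcessesCleaner into a worker: the cleaner
// runs in its own goroutine and shares a lock with the task-running code so
// it never kills chrome mid-task. exampleStartCleaner and the five-minute
// interval are illustrative only.
func exampleStartCleaner() {
	var m sync.Mutex
	go ChromeProcessesCleaner(&m, 5*time.Minute)
	// Task-running code would hold m (via m.Lock/m.Unlock) while a benchmark
	// is in flight.
}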
// UploadDir uploads the specified local dir into the specified Google Storage dir.
func (gs *GsUtil) UploadDir(localDir, gsDir string, cleanDir bool) error {
	if cleanDir {
		// Empty the remote dir before uploading to it.
		util.LogErr(gs.deleteRemoteDir(gsDir))
	}

	// Construct a dictionary of file paths to their file infos.
	pathsToFileInfos := map[string]os.FileInfo{}
	visit := func(path string, f os.FileInfo, err error) error {
		if f.IsDir() {
			return nil
		}
		pathsToFileInfos[path] = f
		return nil
	}
	if err := filepath.Walk(localDir, visit); err != nil {
		return fmt.Errorf("Unable to read the local dir %s: %s", localDir, err)
	}

	// The channel where the file paths to be uploaded will be sent to.
	chFilePaths := make(chan string, MAX_CHANNEL_SIZE)

	// Reconstruct each file's path relative to localDir and send it to the
	// above channel.
	for path, fileInfo := range pathsToFileInfos {
		fileName := fileInfo.Name()
		containingDir := strings.TrimSuffix(path, fileName)
		subDirs := strings.TrimPrefix(containingDir, localDir)
		if subDirs != "" {
			dirTokens := strings.Split(subDirs, "/")
			for i := range dirTokens {
				fileName = filepath.Join(dirTokens[len(dirTokens)-i-1], fileName)
			}
		}
		chFilePaths <- fileName
	}
	close(chFilePaths)

	// Kick off goroutines to upload the file paths.
	var wg sync.WaitGroup
	for i := 0; i < GOROUTINE_POOL_SIZE; i++ {
		wg.Add(1)
		go func(goroutineNum int) {
			defer wg.Done()
			for filePath := range chFilePaths {
				glog.Infof("Uploading %s to %s with goroutine#%d", filePath, gsDir, goroutineNum)
				if err := gs.UploadFile(filePath, localDir, gsDir); err != nil {
					glog.Errorf("Goroutine#%d could not upload %s to %s: %s", goroutineNum, filePath, gsDir, err)
				}
				// Sleep for a second after uploading the file to avoid
				// bombarding Cloud Storage.
				time.Sleep(time.Second)
			}
		}(i + 1)
	}
	wg.Wait()
	return nil
}
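// A minimal usage sketch for UploadDir above: mirror a local output directory
// into a bucket-relative Google Storage dir, emptying the destination first.
// exampleUploadDir is a hypothetical helper and both paths are placeholders.
func exampleUploadDir(gs *GsUtil) {
	if err := gs.UploadDir("/tmp/skp_output", "skps/10k/build-123", true); err != nil {
		glog.Errorf("Upload failed: %s", err)
	}
}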
func main() {
	common.Init()
	webhook.MustInitRequestSaltFromFile(util.WebhookRequestSaltPath)

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&frontend.RecreatePageSetsUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Creating pagesets"))
	// Ensure webapp is updated and completion email is sent even if task
	// fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Creating Pagesets on Workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}

	cmd := []string{
		fmt.Sprintf("cd %s;", util.CtTreeDir),
		"git pull;",
		"make all;",
		// The main command that runs create_pagesets on all workers.
		fmt.Sprintf("create_pagesets --worker_num=%s --log_dir=%s --pageset_type=%s;", util.WORKER_NUM_KEYWORD, util.GLogDir, *pagesetType),
	}

	// Setting a 4 hour timeout since it may take a while to upload page sets
	// to Google Storage when doing 10k page sets per worker.
	if _, err := util.SSH(strings.Join(cmd, " "), util.Slaves, 4*time.Hour); err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}
	*taskCompletedSuccessfully = true
}
// SKPs are captured in parallel, leading to multiple chrome instances coming
// up at the same time; when there are crashes, chrome processes stick around
// and can severely impact the machine's performance. To stop this from
// happening, zombie chrome processes are periodically killed.
func chromeProcessesCleaner(mutex *sync.RWMutex) {
	for range time.Tick(*chromeCleanerTimer) {
		glog.Info("The chromeProcessesCleaner goroutine has started")
		glog.Info("Waiting for all existing tasks to complete before killing zombie chrome processes")
		mutex.Lock()
		skutil.LogErr(util.ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, util.PKILL_TIMEOUT, nil, nil))
		mutex.Unlock()
	}
}
func main() {
	defer common.LogPanic()
	master_common.Init()

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&admin_tasks.RecreatePageSetsUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Creating pagesets", util.GetMasterLogLink(*runID), ""))
	// Ensure webapp is updated and completion email is sent even if task
	// fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	if !*master_common.Local {
		// Cleanup tmp files after the run.
		defer util.CleanTmpDir()
	}
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Creating Pagesets on Workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}

	cmd := append(master_common.WorkerSetupCmds(),
		// The main command that runs create_pagesets on all workers.
		fmt.Sprintf(
			"create_pagesets --worker_num=%s --log_dir=%s --log_id=%s --pageset_type=%s --local=%t;",
			util.WORKER_NUM_KEYWORD, util.GLogDir, *runID, *pagesetType, *master_common.Local))

	_, err := util.SSH(strings.Join(cmd, " "), util.Slaves, util.CREATE_PAGESETS_TIMEOUT)
	if err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}
	*taskCompletedSuccessfully = true
}
func updateWebappTask() {
	vars := lua_scripts.UpdateVars{}
	vars.Id = *gaeTaskID
	vars.SetCompleted(taskCompletedSuccessfully)
	if luaOutputRemoteLink != "" {
		vars.ScriptOutput = sql.NullString{String: luaOutputRemoteLink, Valid: true}
	}
	if luaAggregatorOutputRemoteLink != "" {
		vars.AggregatedOutput = sql.NullString{String: luaAggregatorOutputRemoteLink, Valid: true}
	}
	skutil.LogErr(frontend.UpdateWebappTaskV2(&vars))
}
func main() {
	defer common.LogPanic()
	master_common.Init()

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&chromium_builds.UpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Build chromium", util.GetMasterLogLink(*runID), ""))
	// Ensure webapp is updated and completion email is sent even if task
	// fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	if !*master_common.Local {
		// Cleanup tmp files after the run.
		defer util.CleanTmpDir()
	}
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Running build chromium")
	defer glog.Flush()

	if *chromiumHash == "" {
		glog.Error("Must specify --chromium_hash")
		return
	}
	if *skiaHash == "" {
		glog.Error("Must specify --skia_hash")
		return
	}

	if _, _, err := util.CreateChromiumBuild("", *targetPlatform, *chromiumHash, *skiaHash, *applyPatches); err != nil {
		glog.Errorf("Error while creating the Chromium build: %s", err)
		return
	}

	taskCompletedSuccessfully = true
}
// dequeue returns a task to be performed by the worker.
// It returns a triple: the worker task, the number of items left in the task
// queue, and an error.
func (r *RedisRTC) dequeue() (*workerTask, int, error) {
	c := r.redisPool.Get()
	defer util.Close(c)

	// TODO: this needs to be a transaction.
	util.LogErr(c.Send("MULTI"))
	util.LogErr(c.Send("ZRANGE", r.queueKey, 0, 0, "WITHSCORES"))
	util.LogErr(c.Send("ZREMRANGEBYRANK", r.queueKey, 0, 0))
	util.LogErr(c.Send("ZCARD", r.queueKey))
	combinedVals, err := redis.Values(c.Do("EXEC"))
	if err != nil {
		return nil, 0, err
	}

	// Get the number of found elements.
	count := combinedVals[1].(int64)

	// If there are no values, we are done.
	if count == 0 {
		return nil, 0, nil
	}

	result := combinedVals[0].([]interface{})
	id := string(result[0].([]byte))
	priority, err := redis.Int64(result[1], nil)
	if err != nil {
		return nil, 0, err
	}
	itemsLeft := int(combinedVals[2].(int64))

	ret := &workerTask{id, priority}
	args := append([]interface{}{r.inProgressKey}, id)
	if _, err := c.Do("SADD", args...); err != nil {
		return nil, 0, err
	}

	return ret, itemsLeft, nil
}
func updateWebappTask() {
	if frontend.CTFE_V2 {
		vars := frontend.RecreatePageSetsUpdateVars{}
		vars.Id = *gaeTaskID
		vars.SetCompleted(*taskCompletedSuccessfully)
		skutil.LogErr(frontend.UpdateWebappTaskV2(&vars))
		return
	}
	if err := frontend.UpdateWebappTask(*gaeTaskID, frontend.UpdateAdminTasksWebapp, map[string]string{}); err != nil {
		glog.Errorf("Error while updating webapp task: %s", err)
		return
	}
}
// enqueue adds the given task and priority to the task queue, updating the
// priority if necessary.
func (r *RedisRTC) enqueue(id string, priority int64) (bool, error) {
	c := r.redisPool.Get()
	defer util.Close(c)

	util.LogErr(c.Send("MULTI"))
	util.LogErr(c.Send("ZSCORE", r.queueKey, id))
	util.LogErr(c.Send("SISMEMBER", r.inProgressKey, id))
	util.LogErr(c.Send("EXISTS", r.key(id)))
	util.LogErr(c.Send("EXISTS", r.errorKey(id)))
	retVals, err := redis.Values(c.Do("EXEC"))
	if err != nil {
		return false, err
	}

	// See if the id is in the queue or in progress.
	inQueueScore, isInProgress := retVals[0], retVals[1].(int64) == 1
	foundResult, foundError := retVals[2].(int64), retVals[3].(int64)
	found := (foundResult + foundError) > 0

	// If the calculation is in progress we don't have to do anything.
	if !isInProgress && !found {
		saveId := true
		// Only update the queue if this has a lower score.
		if inQueueScore != nil {
			oldPriority, _ := redis.Int64(inQueueScore, nil)
			saveId = priority < oldPriority
		}
		if saveId {
			if _, err = c.Do("ZADD", r.queueKey, priority, id); err != nil {
				return false, err
			}
		}
	}
	return found, nil
}
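// An illustrative round trip through the queue methods above: enqueue a task
// id with a priority, then dequeue it on the worker side. The id and priority
// are placeholders and exampleQueueRoundTrip is a hypothetical helper, not
// part of the real API.
func exampleQueueRoundTrip(r *RedisRTC) {
	if _, err := r.enqueue("task-42", 100); err != nil {
		glog.Errorf("enqueue failed: %s", err)
		return
	}
	task, itemsLeft, err := r.dequeue()
	if err != nil || task == nil {
		glog.Errorf("dequeue failed (items left: %d): %v", itemsLeft, err)
		return
	}
	glog.Infof("Dequeued a task with %d items left in the queue", itemsLeft)
}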
// VerifyLocalDevice returns nil if an Android device is connected and online.
// An error is returned if either "adb" is not installed or if the Android
// device is offline or missing.
func VerifyLocalDevice() error {
	// Run "adb version".
	// Command should return without an error.
	err := util.ExecuteCmd(util.BINARY_ADB, []string{"version"}, []string{}, util.ADB_VERSION_TIMEOUT, nil, nil)
	if err != nil {
		return fmt.Errorf("adb not installed or not found: %s", err)
	}

	// Run "adb devices | grep offline".
	// Command should return with an error.
	devicesCmd := exec.Command(util.BINARY_ADB, "devices")
	offlineCmd := exec.Command("grep", "offline")
	offlineCmd.Stdin, _ = devicesCmd.StdoutPipe()
	offlineCmd.Stdout = skexec.WriteInfoLog
	skutil.LogErr(offlineCmd.Start())
	skutil.LogErr(devicesCmd.Run())
	if err := offlineCmd.Wait(); err == nil {
		// A nil error here means that an offline device was found.
		return fmt.Errorf("Android device is offline")
	}

	// Run "adb devices | grep device$".
	// Command should return without an error.
	devicesCmd = exec.Command(util.BINARY_ADB, "devices")
	missingCmd := exec.Command("grep", "device$")
	missingCmd.Stdin, _ = devicesCmd.StdoutPipe()
	missingCmd.Stdout = skexec.WriteInfoLog
	skutil.LogErr(missingCmd.Start())
	skutil.LogErr(devicesCmd.Run())
	if err := missingCmd.Wait(); err != nil {
		// An error here means that the device is missing.
		return fmt.Errorf("Android device is missing: %s", err)
	}
	return nil
}
// DownloadChromiumBuild downloads the specified Chromium build from Google
// Storage to a local dir.
func (gs *GsUtil) DownloadChromiumBuild(chromiumBuild string) error {
	localDir := filepath.Join(ChromiumBuildsDir, chromiumBuild)
	gsDir := filepath.Join(CHROMIUM_BUILDS_DIR_NAME, chromiumBuild)
	if equal, _ := gs.AreTimeStampsEqual(localDir, gsDir); equal {
		glog.Infof("Not downloading %s because TIMESTAMPS match", gsDir)
		return nil
	}
	glog.Infof("Timestamps between %s and %s are different. Downloading from Google Storage", localDir, gsDir)
	if err := gs.downloadRemoteDir(localDir, gsDir); err != nil {
		return fmt.Errorf("Error downloading %s into %s: %s", gsDir, localDir, err)
	}
	// The downloaded chrome binary needs to be marked executable.
	util.LogErr(os.Chmod(filepath.Join(localDir, "chrome"), 0777))
	return nil
}
func updateWebappTask() {
	if frontend.CTFE_V2 {
		vars := frontend.CaptureSkpsUpdateVars{}
		vars.Id = *gaeTaskID
		vars.SetCompleted(taskCompletedSuccessfully)
		skutil.LogErr(frontend.UpdateWebappTaskV2(&vars))
		return
	}
	extraData := map[string]string{
		"output_link": outputRemoteLink,
	}
	if err := frontend.UpdateWebappTask(*gaeTaskID, frontend.UpdateCaptureSKPsTasksWebapp, extraData); err != nil {
		glog.Errorf("Error while updating webapp task: %s", err)
		return
	}
}