// main runs the CT poller: it initializes logging and flags, optionally
// stubs out command execution for dry runs, connects to the frontend, and
// then loops forever interleaving worker health checks with task polling.
func main() {
	defer common.LogPanic()
	common.Init()
	// Mirror glog's --log_dir flag value into our own logDir variable.
	if logDirFlag := flag.Lookup("log_dir"); logDirFlag != nil {
		logDir = logDirFlag.Value.String()
	}
	if *dryRun {
		// In dry-run mode commands are logged instead of executed.
		exec.SetRunForTesting(func(command *exec.Command) error {
			glog.Infof("dry_run: %s", exec.DebugString(command))
			return nil
		})
	}
	if *local {
		frontend.InitForTesting("http://localhost:8000/")
	} else {
		frontend.MustInit()
	}
	workerHealthTick := time.Tick(*workerHealthCheckInterval)
	pollTick := time.Tick(*pollInterval)
	// Run immediately, since pollTick will not fire until after pollInterval.
	pollAndExecOnce()
	for {
		select {
		case <-workerHealthTick:
			doWorkerHealthCheck()
		case <-pollTick:
			pollAndExecOnce()
		}
	}
}
// main polls the Android build service for a single build, identified by
// the branch/target/buildID command-line arguments, and logs what it
// finds once a minute, forever.
func main() {
	defer common.LogPanic()
	common.Init()
	args := flag.Args()
	if len(args) != 3 {
		glog.Errorf("Expected arguments: branch target buildID")
		glog.Errorf("i.e.: git_master-skia razor-userdebug 1772442")
		os.Exit(1)
	}

	// Set the arguments necessary to lookup the git hash.
	branch := args[0]
	target := args[1]
	buildID := args[2]
	glog.Infof("Branch, target, buildID: %s, %s, %s", branch, target, buildID)

	// Set up the oauth client.
	var client *http.Client
	var err error

	// In this case we don't want a backoff transport since the Apiary backend
	// seems to fail a lot, so we basically want to fall back to polling if a
	// call fails.
	transport := &http.Transport{
		Dial: util.DialTimeout,
	}

	if *local {
		// Use a local client secret file to load data.
		client, err = auth.InstalledAppClient(OAUTH_CACHE_FILEPATH, CLIENT_SECRET_FILEPATH, transport, androidbuildinternal.AndroidbuildInternalScope, storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create installed app oauth client:%s", err)
		}
	} else {
		// Use compute engine service account.
		client = auth.GCEServiceAccountClient(transport)
	}

	f, err := androidbuild.New("/tmp/android-gold-ingest", client)
	if err != nil {
		glog.Fatalf("Failed to construct client: %s", err)
	}
	// Poll forever; on error, wait a minute and retry.
	for {
		r, err := f.Get(branch, target, buildID)
		if err != nil {
			glog.Errorf("Failed to get requested info: %s", err)
			time.Sleep(1 * time.Minute)
			continue
		}
		if r != nil {
			glog.Infof("Successfully found: %#v", *r)
		}
		time.Sleep(1 * time.Minute)
	}
}
func main() { defer common.LogPanic() common.Init() v, err := skiaversion.GetVersion() if err != nil { glog.Fatal(err) } glog.Infof("Version %s, built at %s", v.Commit, v.Date) if *nsqdAddress == "" { glog.Fatal("Missing address of nsqd server.") } globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress) if err != nil { glog.Fatalf("Unable to connect to NSQ server: %s", err) } eventBus := eventbus.New(globalEventBus) // Send events every so often. for _ = range time.Tick(2 * time.Second) { evData := &event.GoogleStorageEventData{ Bucket: "test-bucket", Name: "test-name", Updated: time.Now().String(), } eventBus.Publish(event.GLOBAL_GOOGLE_STORAGE, evData) glog.Infof("Sent Event: %#v ", evData) } }
// main starts the grandcentral server: it connects the package-level
// event bus to NSQ and then serves HTTP until killed.
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("grandcentral", graphiteServer)
	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)
	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}
	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}
	// Note: eventBus is a package-level variable (plain '='), not a local.
	eventBus = eventbus.New(globalEventBus)
	if *testing {
		*useMetadata = false
	}
	// Testing mode serves plain HTTP on host:port; production assumes
	// HTTPS termination in front of *host.
	serverURL := "https://" + *host
	if *testing {
		serverURL = "http://" + *host + *port
	}
	runServer(serverURL)
}
func main() { defer common.LogPanic() common.Init() if flag.NArg() != 2 { log.Fatal("Usage: imagediff [--out filename] imagepath1.png imagepath2.png\n") } a, err := diff.OpenImage(flag.Arg(0)) if err != nil { log.Fatal(err) } b, err := diff.OpenImage(flag.Arg(1)) if err != nil { log.Fatal(err) } metrics, d := diff.Diff(a, b) fmt.Printf("Dimensions are different: %v\n", metrics.DimDiffer) fmt.Printf("Number of pixels different: %v\n", metrics.NumDiffPixels) fmt.Printf("Pixel diff percent: %v\n", metrics.PixelDiffPercent) if *out == "" { return } else { fmt.Println("Writing image diff.") } f, err := os.Create(*out) if err != nil { log.Fatal(err) } if err := png.Encode(f, d); err != nil { log.Fatal(err) } }
// main starts the push server: it sets up OAuth login (against localhost
// in dev, push.skia.org in production), registers all HTTP handlers, and
// serves until killed.
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("push", graphiteServer)
	Init()
	// The OAuth2 redirect URL must match what is registered with the
	// provider: localhost for local development, the public URL otherwise.
	redirectURL := fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		redirectURL = "https://push.skia.org/oauth2callback/"
	}
	if err := login.InitFromMetadataOrJSON(redirectURL, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST); err != nil {
		glog.Fatalf("Failed to initialize the login system: %s", err)
	}
	r := mux.NewRouter()
	r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir))
	r.HandleFunc("/", mainHandler)
	r.HandleFunc("/_/change", changeHandler)
	r.HandleFunc("/_/state", stateHandler)
	r.HandleFunc("/_/status", statusHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	// All requests are logged and gzipped.
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Infoln("Ready to serve.")
	glog.Fatal(http.ListenAndServe(*port, nil))
}
func main() { defer common.LogPanic() common.InitWithMetrics("push", graphiteServer) Init() // By default use a set of credentials setup for localhost access. var cookieSalt = "notverysecret" var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com" var clientSecret = "cw0IosPu4yjaG2KWmppj2guj" var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port) if !*local { cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT)) clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID)) clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET)) redirectURL = "https://push.skia.org/oauth2callback/" } login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST, *local) r := mux.NewRouter() r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir)) r.HandleFunc("/", mainHandler) r.HandleFunc("/_/change", changeHandler) r.HandleFunc("/_/state", stateHandler) r.HandleFunc("/_/status", statusHandler) r.HandleFunc("/loginstatus/", login.StatusHandler) r.HandleFunc("/logout/", login.LogoutHandler) r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler) http.Handle("/", util.LoggingGzipRequestResponse(r)) glog.Infoln("Ready to serve.") glog.Fatal(http.ListenAndServe(*port, nil)) }
// main subscribes to Google Storage change notifications for the
// bucket/prefix given as the two positional arguments and logs each
// event it receives, forever.
func main() {
	defer common.LogPanic()
	common.Init()
	args := flag.Args()
	if len(args) != 2 {
		flag.Usage()
		os.Exit(1)
	}
	bucket, prefix := args[0], args[1]
	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)
	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}
	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}
	eventBus := eventbus.New(globalEventBus)
	eventBus.SubscribeAsync(event.StorageEvent(bucket, prefix), func(evData interface{}) {
		// The type assertion panics on unexpected payload types.
		data := evData.(*event.GoogleStorageEventData)
		glog.Infof("Google Storage notification from bucket\n %s: %s : %s", data.Updated, data.Bucket, data.Name)
	})
	// Block forever; events are handled on background goroutines.
	select {}
}
// main runs grandcentral with metrics: it wires the package-level event
// bus to NSQ, installs no-op keep-alive subscriptions, and serves HTTP.
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("grandcentral", graphiteServer)
	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)
	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}
	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}
	// Note: eventBus is a package-level variable (plain '='), not a local.
	eventBus = eventbus.New(globalEventBus)

	// Add a subscription for each event type. This prevents messages from
	// queueing up when no other clients are connected.
	eventBus.SubscribeAsync(event.GLOBAL_GOOGLE_STORAGE, func(evData interface{}) {})
	eventBus.SubscribeAsync(event.GLOBAL_BUILDBOT, func(evData interface{}) {})

	if *testing {
		*useMetadata = false
	}
	// Testing mode serves plain HTTP on host:port; production assumes
	// HTTPS termination in front of *host.
	serverURL := "https://" + *host
	if *testing {
		serverURL = "http://" + *host + *port
	}
	runServer(serverURL)
}
func main() { defer common.LogPanic() common.InitWithMetrics("skia-ingestion", graphiteServer) // Initialize oauth client and start the ingesters. client, err := auth.NewJWTServiceAccountClient("", *serviceAccountFile, nil, storage.CloudPlatformScope) if err != nil { glog.Fatalf("Failed to auth: %s", err) } // Start the ingesters. config, err := sharedconfig.ConfigFromTomlFile(*configFilename) if err != nil { glog.Fatalf("Unable to read config file %s. Got error: %s", *configFilename, err) } ingesters, err := ingestion.IngestersFromConfig(config, client) if err != nil { glog.Fatalf("Unable to instantiate ingesters: %s", err) } for _, oneIngester := range ingesters { oneIngester.Start() } // Run the ingesters forever. select {} }
// Run the BugChomper server. func main() { defer common.LogPanic() common.InitWithMetrics("bug_chomper", graphiteServer) v, err := skiaversion.GetVersion() if err != nil { glog.Fatal(err) } glog.Infof("Version %s, built at %s", v.Commit, v.Date) loadTemplates() if *testing { *useMetadata = false } serverURL := "https://" + *host if *testing { serverURL = "http://" + *host + *port } // By default use a set of credentials setup for localhost access. var cookieSalt = "notverysecret" var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com" var clientSecret = "cw0IosPu4yjaG2KWmppj2guj" var redirectURL = serverURL + "/oauth2callback/" if *useMetadata { cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT)) clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID)) clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET)) } login.Init(clientID, clientSecret, redirectURL, cookieSalt, strings.Join(issue_tracker.OAUTH_SCOPE, " "), login.DEFAULT_DOMAIN_WHITELIST, false) runServer(serverURL) }
// main listens on the global (NSQ-backed) event bus for Google Storage
// events and logs each one, forever.
func main() {
	defer common.LogPanic()
	common.Init()
	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)
	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}
	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}
	eventBus := eventbus.New(globalEventBus)
	eventBus.SubscribeAsync(event.GLOBAL_GOOGLE_STORAGE, func(evData interface{}) {
		// The type assertion panics on unexpected payload types.
		data := evData.(*event.GoogleStorageEventData)
		glog.Infof("Google Storage notification from bucket\n %s: %s : %s", data.Updated, data.Bucket, data.Name)
	})
	// Block forever; events are handled on background goroutines.
	select {}
}
func main() { defer common.LogPanic() common.InitWithMetrics("certpoller", graphiteServer) client := util.NewTimeoutClient() certs := []*cert{} // Populate certs based on cmd-line args. for _, metadata := range flag.Args() { c := &cert{ metadata: metadata, file: fileFromMetadata(metadata), etag: "", } err := get(client, c) if err != nil { glog.Fatalf("Failed to retrieve the cert %s: %s", c, err) } certs = append(certs, c) } for _ = range time.Tick(30 * time.Minute) { for _, c := range certs { if err := get(client, c); err != nil { glog.Errorf("Failed to update cert %s: %s", c.metadata, err) } } } }
// main runs the metrics-enabled CT poller: it initializes logging and
// metrics, optionally stubs command execution for dry runs, starts the
// heartbeat status metrics, and loops forever interleaving worker health
// checks with task polling.
func main() {
	defer common.LogPanic()
	master_common.InitWithMetrics("ct-poller", graphiteServer)
	// Mirror glog's --log_dir flag value into our own logDir variable.
	if logDirFlag := flag.Lookup("log_dir"); logDirFlag != nil {
		logDir = logDirFlag.Value.String()
	}
	if *dryRun {
		// In dry-run mode commands are logged instead of executed.
		exec.SetRunForTesting(func(command *exec.Command) error {
			glog.Infof("dry_run: %s", exec.DebugString(command))
			return nil
		})
	}
	// statusTracker is declared elsewhere; this type assertion assumes the
	// concrete type is *heartbeatStatusTracker and panics otherwise.
	statusTracker.(*heartbeatStatusTracker).StartMetrics()
	workerHealthTick := time.Tick(*workerHealthCheckInterval)
	pollTick := time.Tick(*pollInterval)
	// Run immediately, since pollTick will not fire until after pollInterval.
	pollAndExecOnce()
	for {
		select {
		case <-workerHealthTick:
			doWorkerHealthCheck()
		case <-pollTick:
			pollAndExecOnce()
		}
	}
}
func main() { defer common.LogPanic() common.Init() if *emailTokenPath == "" { glog.Error("Must specify --email_token_path") return } defer glog.Flush() for _, shiftType := range allShiftTypes { res, err := http.Get(shiftType.nextSheriffEndpoint) if err != nil { glog.Fatalf("Could not HTTP Get: %s", err) } defer util.Close(res.Body) var jsonType map[string]interface{} if err := json.NewDecoder(res.Body).Decode(&jsonType); err != nil { glog.Fatalf("Could not unmarshal JSON: %s", err) } sheriffEmail, _ := jsonType["username"].(string) if sheriffEmail == NO_SHERIFF { glog.Infof("Skipping emailing %s because %s was specified", shiftType.shiftName, NO_SHERIFF) continue } sheriffUsername := strings.Split(string(sheriffEmail), "@")[0] emailTemplateParsed := template.Must(template.New("sheriff_email").Parse(EMAIL_TEMPLATE)) emailBytes := new(bytes.Buffer) if err := emailTemplateParsed.Execute(emailBytes, struct { SheriffName string SheriffType string SheriffSchedules string SheriffDoc string ScheduleStart string ScheduleEnd string }{ SheriffName: sheriffUsername, SheriffType: shiftType.shiftName, SheriffSchedules: shiftType.schedulesLink, SheriffDoc: shiftType.documentationLink, ScheduleStart: jsonType["schedule_start"].(string), ScheduleEnd: jsonType["schedule_end"].(string), }); err != nil { glog.Errorf("Failed to execute template: %s", err) return } emailSubject := fmt.Sprintf("%s is the next %s", sheriffUsername, shiftType.shiftName) if err := sendEmail([]string{sheriffEmail, EXTRA_RECIPIENT}, emailSubject, emailBytes.String()); err != nil { glog.Fatalf("Error sending email to sheriff: %s", err) } } }
func main() { defer common.LogPanic() // Setup flags. dbConf := buildbot.DBConfigFromFlags() // Global init. common.InitWithMetrics(APP_NAME, graphiteServer) // Parse the time period. period, err := human.ParseDuration(*timePeriod) if err != nil { glog.Fatal(err) } // Initialize the buildbot database. if !*local { if err := dbConf.GetPasswordFromMetadata(); err != nil { glog.Fatal(err) } } if err := dbConf.InitDB(); err != nil { glog.Fatal(err) } // Initialize the BuildBucket client. c, err := auth.NewClient(*local, path.Join(*workdir, "oauth_token_cache"), buildbucket.DEFAULT_SCOPES...) if err != nil { glog.Fatal(err) } bb := buildbucket.NewClient(c) // Build the queue. repos := gitinfo.NewRepoMap(*workdir) for _, r := range REPOS { if _, err := repos.Repo(r); err != nil { glog.Fatal(err) } } q, err := build_queue.NewBuildQueue(period, repos, *scoreThreshold, *scoreDecay24Hr, BOT_BLACKLIST) if err != nil { glog.Fatal(err) } // Start scheduling builds in a loop. liveness := metrics.NewLiveness(APP_NAME) if err := scheduleBuilds(q, bb); err != nil { glog.Errorf("Failed to schedule builds: %v", err) } for _ = range time.Tick(time.Minute) { liveness.Update() if err := scheduleBuilds(q, bb); err != nil { glog.Errorf("Failed to schedule builds: %v", err) } } }
// main runs docserver: static resources under /res/ and everything else
// through the main documentation handler, all gzipped on the fly.
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("docserver", graphiteServer)
	Init()

	// Resources are served directly.
	http.HandleFunc("/res/", autogzip.HandleFunc(makeResourceHandler()))
	http.HandleFunc("/", autogzip.HandleFunc(mainHandler))
	glog.Infoln("Ready to serve.")
	glog.Fatal(http.ListenAndServe(*port, nil))
}
func main() { defer common.LogPanic() common.Init() out, err := util.SSH(*cmd, util.Slaves, *timeout) if err != nil { glog.Fatal(err) } if *printOutput { for k, v := range out { fmt.Printf("\n=====%s=====\n%s\n", k, v) } } }
func main() { defer common.LogPanic() // Set up flags. dbConf := db.DBConfigFromFlags() // Global init to initialize glog and parse arguments. common.Init() if *promptPassword { if err := dbConf.PromptForPassword(); err != nil { glog.Fatal(err) } } if !*local { if err := dbConf.GetPasswordFromMetadata(); err != nil { glog.Fatal(err) } } vdb, err := dbConf.NewVersionedDB() if err != nil { glog.Fatal(err) } if *targetVersion < 0 { // Get the current database version maxDBVersion := vdb.MaxDBVersion() glog.Infof("Latest database version: %d", maxDBVersion) dbVersion, err := vdb.DBVersion() if err != nil { glog.Fatalf("Unable to retrieve database version. Error: %s", err) } glog.Infof("Current database version: %d", dbVersion) if dbVersion < maxDBVersion { glog.Infof("Migrating to version: %d", maxDBVersion) err = vdb.Migrate(maxDBVersion) if err != nil { glog.Fatalf("Unable to retrieve database version. Error: %s", err) } } } else { glog.Infof("Migrating to version: %d", *targetVersion) err = vdb.Migrate(*targetVersion) if err != nil { glog.Fatalf("Unable to retrieve database version. Error: %s", err) } } glog.Infoln("Database migration finished.") }
// main runs logserver: it ensures the log directory exists, starts a
// background watcher over it, and serves the files over HTTP.
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("logserver", graphiteServer)
	if err := os.MkdirAll(*dir, 0777); err != nil {
		glog.Fatalf("Failed to create dir for log files: %s", err)
	}
	// Watch the log directory in the background; see dirWatcher for details.
	go dirWatcher(*dirWatchDuration, *dir)
	http.Handle("/file_server/", http.StripPrefix("/file_server/", FileServer(http.Dir(*dir))))
	http.HandleFunc("/", FileServerWrapperHandler)
	glog.Fatal(http.ListenAndServe(*port, nil))
}
// main starts the fuzzer frontend: it configures flags, OAuth, and
// storage, kicks off the heavy data loading/syncing in a background
// goroutine so the HTTP server can come up immediately, and serves.
func main() {
	defer common.LogPanic()
	// Calls flag.Parse()
	common.InitWithMetrics("fuzzer", graphiteServer)
	if err := writeFlagsToConfig(); err != nil {
		glog.Fatalf("Problem with configuration: %s", err)
	}
	Init()
	if err := setupOAuth(); err != nil {
		glog.Fatal(err)
	}
	// All of the expensive setup happens in the background; runServer
	// (below) starts serving right away.
	go func() {
		if err := fcommon.DownloadSkiaVersionForFuzzing(storageClient, config.FrontEnd.SkiaRoot, &config.FrontEnd); err != nil {
			glog.Fatalf("Problem downloading Skia: %s", err)
		}
		fuzzSyncer = syncer.New(storageClient)
		fuzzSyncer.Start()
		cache, err := fuzzcache.New(config.FrontEnd.BoltDBPath)
		if err != nil {
			glog.Fatalf("Could not create fuzz report cache at %s: %s", config.FrontEnd.BoltDBPath, err)
		}
		// NOTE(review): this defer only runs if the goroutine returns
		// normally; the glog.Fatal at the bottom exits the process without
		// running deferred calls.
		defer util.Close(cache)
		if err := gsloader.LoadFromBoltDB(cache); err != nil {
			glog.Errorf("Could not load from boltdb.  Loading from source of truth anyway. %s", err)
		}
		// The function name finder is only available off-localhost.
		var finder functionnamefinder.Finder
		if !*local {
			finder = functionnamefinder.NewAsync()
		}
		gsLoader := gsloader.New(storageClient, finder, cache)
		if err := gsLoader.LoadFreshFromGoogleStorage(); err != nil {
			glog.Fatalf("Error loading in data from GCS: %s", err)
		}
		fuzzSyncer.SetGSLoader(gsLoader)
		updater := frontend.NewVersionUpdater(gsLoader, fuzzSyncer)
		versionWatcher = fcommon.NewVersionWatcher(storageClient, config.FrontEnd.VersionCheckPeriod, updater.HandlePendingVersion, updater.HandleCurrentVersion)
		versionWatcher.Start()
		// Block until the watcher reports a status, then terminate.
		err = <-versionWatcher.Status
		glog.Fatal(err)
	}()
	runServer()
}
// main runs pulld: it initializes the puller, registers the UI and
// pull-trigger endpoints, and serves HTTP until killed.
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("pulld", graphiteServer)
	Init()
	pullInit()
	r := mux.NewRouter()
	r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir))
	r.HandleFunc("/", mainHandler).Methods("GET")
	r.HandleFunc("/_/list", listHandler).Methods("GET")
	r.HandleFunc("/_/change", changeHandler).Methods("POST")
	r.HandleFunc("/pullpullpull", pullHandler)
	// All requests are logged and gzipped.
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Infoln("Ready to serve.")
	glog.Fatal(http.ListenAndServe(*port, nil))
}
func main() { defer common.LogPanic() common.InitWithMetrics("probeserver", graphiteServer) client, err := auth.NewDefaultJWTServiceAccountClient("https://www.googleapis.com/auth/userinfo.email") if err != nil { glog.Fatalf("Failed to create client for talking to the issue tracker: %s", err) } go monitorIssueTracker(client) glog.Infoln("Looking for Graphite server.") addr, err := net.ResolveTCPAddr("tcp", *graphiteServer) if err != nil { glog.Fatalln("Failed to resolve the Graphite server: ", err) } glog.Infoln("Found Graphite server.") liveness := imetrics.NewLiveness("probes") // We have two sets of metrics, one for the probes and one for the probe // server itself. The server's metrics are handled by common.Init() probeRegistry := metrics.NewRegistry() go graphite.Graphite(probeRegistry, common.SAMPLE_PERIOD, *prefix, addr) // TODO(jcgregorio) Monitor config file and reload if it changes. cfg, err := readConfigFiles(*config) if err != nil { glog.Fatalln("Failed to read config file: ", err) } glog.Infoln("Successfully read config file.") // Register counters for each probe. for name, probe := range cfg { probe.failure = metrics.NewRegisteredGauge(name+".failure", probeRegistry) probe.latency = metrics.NewRegisteredGauge(name+".latency", probeRegistry) } // Create a client that uses our dialer with a timeout. c := &http.Client{ Transport: &http.Transport{ Dial: dialTimeout, }, } probeOneRound(cfg, c) for _ = range time.Tick(*runEvery) { probeOneRound(cfg, c) liveness.Update() } }
// main drives the "capture archives" master task: it notifies the webapp
// and emails that the task has started, runs capture_archives on all
// workers over SSH, and reports completion through the deferred
// webapp-update/email handlers.
func main() {
	defer common.LogPanic()
	common.Init()
	frontend.MustInit()

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&admin_tasks.RecreateWebpageArchivesUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Capture archives", util.GetMasterLogLink(*runID), ""))
	// Ensure webapp is updated and completion email is sent even if task fails.
	// Note: defers run LIFO, so glog.Flush/TimeTrack fire before the
	// email and webapp update.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	// Cleanup tmp files after the run.
	defer util.CleanTmpDir()
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Capture archives on Workers")
	defer glog.Flush()
	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}
	cmd := []string{
		fmt.Sprintf("cd %s;", util.CtTreeDir),
		"git pull;",
		"make all;",
		// The main command that runs capture_archives on all workers.
		fmt.Sprintf("DISPLAY=:0 capture_archives --worker_num=%s --log_dir=%s --log_id=%s --pageset_type=%s --chromium_build=%s;", util.WORKER_NUM_KEYWORD, util.GLogDir, *runID, *pagesetType, *chromiumBuild),
	}
	_, err := util.SSH(strings.Join(cmd, " "), util.Slaves, util.CAPTURE_ARCHIVES_TIMEOUT)
	if err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}
	// taskCompletedSuccessfully is a package-level flag — presumably read
	// by the deferred updateWebappTask/sendEmail; confirm in their bodies.
	*taskCompletedSuccessfully = true
}
func main() { defer common.LogPanic() common.Init() var err error var wg sync.WaitGroup // Make a channel to deliver work. targetCh := make(chan Target) // Record the latency measurements in millis for each request. latencySamples := make([]float64, 0) latencies := make(chan float64) go func() { for m := range latencies { latencySamples = append(latencySamples, m) } }() err = startWorkers(targetCh, latencies, &wg) if err != nil { log.Fatalf("Failure starting workers: %v\n", err) } b0 := time.Now() // Pump requests out to all the workers to do. for i := 0; i < *numFetches; i++ { t := targets[rand.Int()%len(targets)] targetCh <- t } close(targetCh) // Wait for all HTTP requests to complete. wg.Wait() b1 := time.Now() fmt.Print("\n") fmt.Printf("Total time of run: %v\n", b1.Sub(b0)) fmt.Printf("Average QPS: %.2f\n", float64(*numFetches)/b1.Sub(b0).Seconds()) fmt.Println(NewSimpleStats(latencySamples, "Latency").String()) }
func main() { defer common.LogPanic() common.InitWithMetrics("android_stats", graphiteServer) pollFreq, err := time.ParseDuration(*frequency) if err != nil { glog.Fatalf("Invalid value for frequency %q: %s", *frequency, err) } if err := generateStats(); err != nil { glog.Fatal(err) } for _ = range time.Tick(pollFreq) { if err := generateStats(); err != nil { glog.Error(err) } } }
func main() { defer common.LogPanic() // Set up flags. dbConf := database.ConfigFromFlags(db.PROD_DB_HOST, db.PROD_DB_PORT, database.USER_ROOT, db.PROD_DB_NAME, db.MigrationSteps()) // Global init to initialize glog and parse arguments. common.Init() v, err := skiaversion.GetVersion() if err != nil { glog.Fatalf("Unable to retrieve version: %s", err) } glog.Infof("Version %s, built at %s", v.Commit, v.Date) if *promptPassword { if err := dbConf.PromptForPassword(); err != nil { glog.Fatal(err) } } vdb, err := dbConf.NewVersionedDB() if err != nil { glog.Fatal(err) } // Get the current database version maxDBVersion := vdb.MaxDBVersion() glog.Infof("Latest database version: %d", maxDBVersion) dbVersion, err := vdb.DBVersion() if err != nil { glog.Fatalf("Unable to retrieve database version. Error: %s", err) } glog.Infof("Current database version: %d", dbVersion) if dbVersion < maxDBVersion { glog.Infof("Migrating to version: %d", maxDBVersion) err = vdb.Migrate(maxDBVersion) if err != nil { glog.Fatalf("Unable to retrieve database version. Error: %s", err) } } glog.Infoln("Database migration finished.") }
// main drives the "create pagesets" master task: it notifies the webapp
// and emails that the task has started, runs create_pagesets on all
// workers over SSH, and reports completion through the deferred
// webapp-update/email handlers.
func main() {
	defer common.LogPanic()
	master_common.Init()

	// Send start email.
	emailsArr := util.ParseEmails(*emails)
	emailsArr = append(emailsArr, util.CtAdmins...)
	if len(emailsArr) == 0 {
		glog.Error("At least one email address must be specified")
		return
	}
	skutil.LogErr(frontend.UpdateWebappTaskSetStarted(&admin_tasks.RecreatePageSetsUpdateVars{}, *gaeTaskID))
	skutil.LogErr(util.SendTaskStartEmail(emailsArr, "Creating pagesets", util.GetMasterLogLink(*runID), ""))
	// Ensure webapp is updated and completion email is sent even if task fails.
	defer updateWebappTask()
	defer sendEmail(emailsArr)
	if !*master_common.Local {
		// Cleanup tmp files after the run.
		defer util.CleanTmpDir()
	}
	// Finish with glog flush and how long the task took.
	defer util.TimeTrack(time.Now(), "Creating Pagesets on Workers")
	defer glog.Flush()

	if *pagesetType == "" {
		glog.Error("Must specify --pageset_type")
		return
	}
	cmd := append(master_common.WorkerSetupCmds(),
		// The main command that runs create_pagesets on all workers.
		fmt.Sprintf(
			"create_pagesets --worker_num=%s --log_dir=%s --log_id=%s --pageset_type=%s --local=%t;",
			util.WORKER_NUM_KEYWORD, util.GLogDir, *runID, *pagesetType, *master_common.Local))
	_, err := util.SSH(strings.Join(cmd, " "), util.Slaves, util.CREATE_PAGESETS_TIMEOUT)
	if err != nil {
		glog.Errorf("Error while running cmd %s: %s", cmd, err)
		return
	}
	// taskCompletedSuccessfully is a package-level flag — presumably read
	// by the deferred updateWebappTask/sendEmail; confirm in their bodies.
	*taskCompletedSuccessfully = true
}
// main runs mathserv: it sets up OAuth login (localhost credentials by
// default, project-metadata values in production), registers handlers,
// and serves either TLS (with a plain-HTTP redirector goroutine) or plain
// HTTP depending on whether a cert chain file was provided.
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("mathserv", graphiteServer)
	Init()

	// By default use a set of credentials setup for localhost access.
	var cookieSalt = "notverysecret"
	var clientID = "952643138919-5a692pfevie766aiog15io45kjpsh33v.apps.googleusercontent.com"
	var clientSecret = "QQfqRYU1ELkds90ku8xlIGl1"
	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		// Production: pull the real secrets from project metadata.
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		redirectURL = "https://mathinate.com/oauth2callback/"
	}
	login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, "", *local)
	r := mux.NewRouter()
	r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir))
	r.HandleFunc("/", mainHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	AttemptLoadCertFromMetadata()
	glog.Infoln("Ready to serve.")
	if *certChainFile != "" {
		glog.Infof("Serving TLS")
		// Redirect plain-HTTP traffic on *httpPort over to HTTPS.
		go func() {
			redir := mux.NewRouter()
			redir.HandleFunc("/", redirHandler)
			glog.Fatal(http.ListenAndServe(*httpPort, redir))
		}()
		glog.Fatal(http.ListenAndServeTLS(*port, *certChainFile, *keyFile, nil))
	} else {
		glog.Infof("Only serving HTTP")
		glog.Fatal(http.ListenAndServe(*port, nil))
	}
}
func main() { defer common.LogPanic() flag.Parse() common.InitWithMetrics("pdfxform", graphiteServer) client, err := getClient() if err != nil { glog.Fatal(err) } xformer := pdfXformer{ client: client, results: map[string]map[int]string{}, } err = xformer.uploadErrorImage(*failureImage) if err != nil { // If we can't upload this, we can't upload anything. glog.Fatalf("Filed to upload error image: %s", err) } for _, rasterizer := range []pdf.Rasterizer{pdf.Pdfium{}, pdf.Poppler{}} { if rasterizer.Enabled() { xformer.rasterizers = append(xformer.rasterizers, rasterizer) } else { glog.Infof("rasterizer %s is disabled", rasterizer.String()) } } if len(xformer.rasterizers) == 0 { glog.Fatalf("no rasterizers found") } end := time.Now() start := end.Add(-172 * time.Hour) xformer.processTimeRange(start, end) glog.Flush() // Flush before waiting for next tick; it may be a while. for _ = range time.Tick(time.Minute) { start, end = end, time.Now() xformer.processTimeRange(start, end) glog.Flush() } }