func main() {
	// Set up flags.
	dbConf := database.ConfigFromFlags(buildbot.PROD_DB_HOST, buildbot.PROD_DB_PORT, database.USER_ROOT, buildbot.PROD_DB_NAME, buildbot.MigrationSteps())

	// Global init to initialize glog and parse arguments.
	common.Init()

	if err := dbConf.PromptForPassword(); err != nil {
		glog.Fatal(err)
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}

	// Get the current database version.
	maxDBVersion := vdb.MaxDBVersion()
	glog.Infof("Latest database version: %d", maxDBVersion)

	dbVersion, err := vdb.DBVersion()
	if err != nil {
		glog.Fatalf("Unable to retrieve database version. Error: %s", err)
	}
	glog.Infof("Current database version: %d", dbVersion)

	if dbVersion < maxDBVersion {
		glog.Infof("Migrating to version: %d", maxDBVersion)
		err = vdb.Migrate(maxDBVersion)
		if err != nil {
			glog.Fatalf("Unable to migrate database. Error: %s", err)
		}
	}

	glog.Infoln("Database migration finished.")
}
func main() { common.Init() v, err := skiaversion.GetVersion() if err != nil { glog.Fatal(err) } glog.Infof("Version %s, built at %s", v.Commit, v.Date) if *nsqdAddress == "" { glog.Fatal("Missing address of nsqd server.") } globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress) if err != nil { glog.Fatalf("Unable to connect to NSQ server: %s", err) } eventBus := eventbus.New(globalEventBus) // Send events every so often. for _ = range time.Tick(2 * time.Second) { evData := &event.GoogleStorageEventData{ Bucket: "test-bucket", Name: "test-name", Updated: time.Now().String(), } eventBus.Publish(event.GLOBAL_GOOGLE_STORAGE, evData) glog.Infof("Sent Event: %#v ", evData) } }
func main() { defer common.LogPanic() common.InitWithMetrics("grandcentral", graphiteServer) v, err := skiaversion.GetVersion() if err != nil { glog.Fatal(err) } glog.Infof("Version %s, built at %s", v.Commit, v.Date) if *nsqdAddress == "" { glog.Fatal("Missing address of nsqd server.") } globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress) if err != nil { glog.Fatalf("Unable to connect to NSQ server: %s", err) } eventBus = eventbus.New(globalEventBus) if *testing { *useMetadata = false } serverURL := "https://" + *host if *testing { serverURL = "http://" + *host + *port } runServer(serverURL) }
func main() { common.Init() v, err := skiaversion.GetVersion() if err != nil { glog.Fatal(err) } glog.Infof("Version %s, built at %s", v.Commit, v.Date) if *nsqdAddress == "" { glog.Fatal("Missing address of nsqd server.") } globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress) if err != nil { glog.Fatalf("Unable to connect to NSQ server: %s", err) } eventBus := eventbus.New(globalEventBus) eventBus.SubscribeAsync(event.GLOBAL_GOOGLE_STORAGE, func(evData interface{}) { data := evData.(*event.GoogleStorageEventData) glog.Infof("Google Storage notification from bucket\n %s: %s : %s", data.Updated, data.Bucket, data.Name) }) select {} }
func main() {
	defer common.LogPanic()
	common.Init()

	args := flag.Args()
	if len(args) != 2 {
		flag.Usage()
		os.Exit(1)
	}
	bucket, prefix := args[0], args[1]

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}

	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}
	eventBus := eventbus.New(globalEventBus)

	eventBus.SubscribeAsync(event.StorageEvent(bucket, prefix), func(evData interface{}) {
		data := evData.(*event.GoogleStorageEventData)
		glog.Infof("Google Storage notification from bucket\n %s: %s : %s", data.Updated, data.Bucket, data.Name)
	})

	select {}
}
func main() { defer common.LogPanic() common.InitWithMetrics("grandcentral", graphiteServer) v, err := skiaversion.GetVersion() if err != nil { glog.Fatal(err) } glog.Infof("Version %s, built at %s", v.Commit, v.Date) if *nsqdAddress == "" { glog.Fatal("Missing address of nsqd server.") } globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress) if err != nil { glog.Fatalf("Unable to connect to NSQ server: %s", err) } eventBus = eventbus.New(globalEventBus) // Add a subscription for the each event type. This prevents the messages // to queue up if there are no other clients connected. eventBus.SubscribeAsync(event.GLOBAL_GOOGLE_STORAGE, func(evData interface{}) {}) eventBus.SubscribeAsync(event.GLOBAL_BUILDBOT, func(evData interface{}) {}) if *testing { *useMetadata = false } serverURL := "https://" + *host if *testing { serverURL = "http://" + *host + *port } runServer(serverURL) }
func loadBenchmarkImages() {
	var err error
	img1, err = OpenImage(filepath.Join(TESTDATA_DIR, "4029959456464745507.png"))
	if err != nil {
		glog.Fatal("Failed to open test file: ", err)
	}
	img2, err = OpenImage(filepath.Join(TESTDATA_DIR, "16465366847175223174.png"))
	if err != nil {
		glog.Fatal("Failed to open test file: ", err)
	}
}
func main() {
	defer common.LogPanic()
	// Calls flag.Parse()
	common.InitWithMetrics("fuzzer", graphiteServer)

	if err := writeFlagsToConfig(); err != nil {
		glog.Fatalf("Problem with configuration: %s", err)
	}

	Init()

	if err := setupOAuth(); err != nil {
		glog.Fatal(err)
	}

	go func() {
		if err := fcommon.DownloadSkiaVersionForFuzzing(storageClient, config.FrontEnd.SkiaRoot, &config.FrontEnd); err != nil {
			glog.Fatalf("Problem downloading Skia: %s", err)
		}

		fuzzSyncer = syncer.New(storageClient)
		fuzzSyncer.Start()

		cache, err := fuzzcache.New(config.FrontEnd.BoltDBPath)
		if err != nil {
			glog.Fatalf("Could not create fuzz report cache at %s: %s", config.FrontEnd.BoltDBPath, err)
		}
		defer util.Close(cache)

		if err := gsloader.LoadFromBoltDB(cache); err != nil {
			glog.Errorf("Could not load from boltdb. Loading from source of truth anyway. %s", err)
		}

		var finder functionnamefinder.Finder
		if !*local {
			finder = functionnamefinder.NewAsync()
		}

		gsLoader := gsloader.New(storageClient, finder, cache)
		if err := gsLoader.LoadFreshFromGoogleStorage(); err != nil {
			glog.Fatalf("Error loading in data from GCS: %s", err)
		}
		fuzzSyncer.SetGSLoader(gsLoader)

		updater := frontend.NewVersionUpdater(gsLoader, fuzzSyncer)
		versionWatcher = fcommon.NewVersionWatcher(storageClient, config.FrontEnd.VersionCheckPeriod, updater.HandlePendingVersion, updater.HandleCurrentVersion)
		versionWatcher.Start()

		err = <-versionWatcher.Status
		glog.Fatal(err)
	}()

	runServer()
}
func main() {
	defer common.LogPanic()
	// Set up flags.
	dbConf := db.DBConfigFromFlags()

	// Global init to initialize glog and parse arguments.
	common.Init()

	if *promptPassword {
		if err := dbConf.PromptForPassword(); err != nil {
			glog.Fatal(err)
		}
	}
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}

	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}

	if *targetVersion < 0 {
		// Get the current database version.
		maxDBVersion := vdb.MaxDBVersion()
		glog.Infof("Latest database version: %d", maxDBVersion)

		dbVersion, err := vdb.DBVersion()
		if err != nil {
			glog.Fatalf("Unable to retrieve database version. Error: %s", err)
		}
		glog.Infof("Current database version: %d", dbVersion)

		if dbVersion < maxDBVersion {
			glog.Infof("Migrating to version: %d", maxDBVersion)
			err = vdb.Migrate(maxDBVersion)
			if err != nil {
				glog.Fatalf("Unable to migrate database. Error: %s", err)
			}
		}
	} else {
		glog.Infof("Migrating to version: %d", *targetVersion)
		err = vdb.Migrate(*targetVersion)
		if err != nil {
			glog.Fatalf("Unable to migrate database. Error: %s", err)
		}
	}

	glog.Infoln("Database migration finished.")
}
func main() {
	defer common.LogPanic()

	// Global init.
	common.InitWithMetrics(APP_NAME, graphiteServer)

	// Parse the time period.
	period, err := human.ParseDuration(*timePeriod)
	if err != nil {
		glog.Fatal(err)
	}

	// Initialize the buildbot database.
	db, err := buildbot.NewRemoteDB(*buildbotDbHost)
	if err != nil {
		glog.Fatal(err)
	}

	// Initialize the BuildBucket client.
	c, err := auth.NewClient(*local, path.Join(*workdir, "oauth_token_cache"), buildbucket.DEFAULT_SCOPES...)
	if err != nil {
		glog.Fatal(err)
	}
	bb := buildbucket.NewClient(c)

	// Build the queue.
	repos := gitinfo.NewRepoMap(*workdir)
	for _, r := range REPOS {
		if _, err := repos.Repo(r); err != nil {
			glog.Fatal(err)
		}
	}
	q, err := build_queue.NewBuildQueue(period, repos, *scoreThreshold, *scoreDecay24Hr, BOT_BLACKLIST, db)
	if err != nil {
		glog.Fatal(err)
	}

	// Start scheduling builds in a loop.
	liveness := metrics.NewLiveness(APP_NAME)
	if err := scheduleBuilds(q, bb); err != nil {
		glog.Errorf("Failed to schedule builds: %v", err)
	}
	for _ = range time.Tick(time.Minute) {
		liveness.Update()
		if err := scheduleBuilds(q, bb); err != nil {
			glog.Errorf("Failed to schedule builds: %v", err)
		}
	}
}
// Run the BugChomper server.
func main() {
	common.InitWithMetrics("bug_chomper", graphiteServer)

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	loadTemplates()

	if *testing {
		*useMetadata = false
	}
	serverURL := "https://" + *host
	if *testing {
		serverURL = "http://" + *host + *port
	}

	// By default use a set of credentials set up for localhost access.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com"
	var clientSecret = "cw0IosPu4yjaG2KWmppj2guj"
	var redirectURL = serverURL + "/oauth2callback/"
	if *useMetadata {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
	}
	login.Init(clientID, clientSecret, redirectURL, cookieSalt, strings.Join(issue_tracker.OAUTH_SCOPE, " "), login.DEFAULT_DOMAIN_WHITELIST, false)

	runServer(serverURL)
}
func main() {
	common.Init()
	args := flag.Args()
	if err := RunApp(ADBFromFlags(), *app, args); err != nil {
		glog.Fatal(err)
	}
}
// MustReaddir returns a slice of os.FileInfo for every file in the given dir.
// This is equivalent to calling dir.Readdir(-1), except that this call will
// stop the program on an error.
func MustReaddir(dir *os.File) []os.FileInfo {
	fi, err := dir.Readdir(-1)
	if err != nil {
		glog.Fatal(err)
	}
	return fi
}
// MustOpen opens the file of the given name, returning an *os.File on success,
// or causing the program to stop with the error message.
func MustOpen(name string) *os.File {
	f, err := os.Open(name)
	if err != nil {
		glog.Fatal(err)
	}
	return f
}
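// listDirExample is a hypothetical usage sketch, not part of the original
// sources: it shows how MustOpen and MustReaddir above might be combined to
// list a directory, assuming the same glog fatal-on-error workflow. The
// directory path and the function name are illustrative only.
func listDirExample() {
	// Open the directory; the program exits via glog.Fatal on failure.
	dir := MustOpen("/tmp")
	defer util.Close(dir)

	// Read every entry and print its name and size.
	for _, fi := range MustReaddir(dir) {
		fmt.Printf("%s\t%d bytes\n", fi.Name(), fi.Size())
	}
}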
func main() { defer common.LogPanic() common.InitWithMetrics("push", graphiteServer) Init() redirectURL := fmt.Sprintf("http://localhost%s/oauth2callback/", *port) if !*local { redirectURL = "https://push.skia.org/oauth2callback/" } if err := login.InitFromMetadataOrJSON(redirectURL, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST); err != nil { glog.Fatalf("Failed to initialize the login system: %s", err) } r := mux.NewRouter() r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir)) r.HandleFunc("/", mainHandler) r.HandleFunc("/_/change", changeHandler) r.HandleFunc("/_/state", stateHandler) r.HandleFunc("/_/status", statusHandler) r.HandleFunc("/loginstatus/", login.StatusHandler) r.HandleFunc("/logout/", login.LogoutHandler) r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler) http.Handle("/", util.LoggingGzipRequestResponse(r)) glog.Infoln("Ready to serve.") glog.Fatal(http.ListenAndServe(*port, nil)) }
func runServer(serverURL string) { r := mux.NewRouter() r.HandleFunc("/", commitsHandler) r.HandleFunc("/buildbots", buildbotDashHandler) r.HandleFunc("/hosts", hostsHandler) r.HandleFunc("/infra", infraHandler) r.HandleFunc("/json/builds", buildsJsonHandler) r.HandleFunc("/json/goldStatus", goldJsonHandler) r.HandleFunc("/json/perfAlerts", perfJsonHandler) r.HandleFunc("/json/slaveHosts", slaveHostsJsonHandler) r.HandleFunc("/json/version", skiaversion.JsonHandler) r.HandleFunc("/json/{repo}/buildProgress", buildProgressHandler) r.HandleFunc("/logout/", login.LogoutHandler) r.HandleFunc("/loginstatus/", login.StatusHandler) r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler) r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir)) builds := r.PathPrefix("/json/{repo}/builds/{master}/{builder}/{number:[0-9]+}").Subrouter() builds.HandleFunc("/comments", addBuildCommentHandler).Methods("POST") builds.HandleFunc("/comments/{commentId:[0-9]+}", deleteBuildCommentHandler).Methods("DELETE") builders := r.PathPrefix("/json/{repo}/builders/{builder}").Subrouter() builders.HandleFunc("/comments", addBuilderCommentHandler).Methods("POST") builders.HandleFunc("/comments/{commentId:[0-9]+}", deleteBuilderCommentHandler).Methods("DELETE") commits := r.PathPrefix("/json/{repo}/commits").Subrouter() commits.HandleFunc("/", commitsJsonHandler) commits.HandleFunc("/{commit:[a-f0-9]+}/comments", addCommitCommentHandler).Methods("POST") commits.HandleFunc("/{commit:[a-f0-9]+}/comments/{commentId:[0-9]+}", deleteCommitCommentHandler).Methods("DELETE") http.Handle("/", util.LoggingGzipRequestResponse(r)) glog.Infof("Ready to serve on %s", serverURL) glog.Fatal(http.ListenAndServe(*port, nil)) }
func runServer() {
	serverURL := "https://" + *host
	if *local {
		serverURL = "http://" + *host + *port
	}

	r := mux.NewRouter()
	r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir))
	r.HandleFunc(OAUTH2_CALLBACK_PATH, login.OAuth2CallbackHandler)
	r.HandleFunc("/", indexHandler)
	r.HandleFunc("/details", detailsPageHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/json/version", skiaversion.JsonHandler)
	r.HandleFunc("/json/fuzz-list", fuzzListHandler)
	r.HandleFunc("/json/details", detailedReportsHandler)
	r.HandleFunc("/status", statusHandler)
	r.HandleFunc(`/fuzz/{kind:(binary|api)}/{name:[0-9a-f]+\.(skp)}`, fuzzHandler)
	r.HandleFunc(`/metadata/{kind:(binary|api)}/{type:(skp)}/{name:[0-9a-f]+_(debug|release)\.(err|dump)}`, metadataHandler)
	r.HandleFunc("/fuzz_count", fuzzCountHandler)
	r.HandleFunc("/newBug", newBugHandler)

	rootHandler := login.ForceAuth(util.LoggingGzipRequestResponse(r), OAUTH2_CALLBACK_PATH)
	http.Handle("/", rootHandler)
	glog.Infof("Ready to serve on %s", serverURL)
	glog.Fatal(http.ListenAndServe(*port, nil))
}
func Init() { rand.Seed(time.Now().UnixNano()) if *resourcesDir == "" { _, filename, _, _ := runtime.Caller(0) *resourcesDir = filepath.Join(filepath.Dir(filename), "../..") } loadTemplates() var err error git, err = gitinfo.CloneOrUpdate(*gitRepoURL, *gitRepoDir, false) if err != nil { glog.Fatal(err) } evt := eventbus.New(nil) tileStats = tilestats.New(evt) // Connect to traceDB and create the builders. db, err := tracedb.NewTraceServiceDBFromAddress(*traceservice, types.PerfTraceBuilder) if err != nil { glog.Fatalf("Failed to connect to tracedb: %s", err) } masterTileBuilder, err = tracedb.NewMasterTileBuilder(db, git, *tileSize, evt) if err != nil { glog.Fatalf("Failed to build trace/db.DB: %s", err) } branchTileBuilder = tracedb.NewBranchTileBuilder(db, git, rietveld.RIETVELD_SKIA_URL, evt) }
func main() { common.InitWithMetrics("push", graphiteServer) Init() // By default use a set of credentials setup for localhost access. var cookieSalt = "notverysecret" var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com" var clientSecret = "cw0IosPu4yjaG2KWmppj2guj" var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port) if !*local { cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT)) clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID)) clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET)) redirectURL = "https://push.skia.org/oauth2callback/" } login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST, *local) r := mux.NewRouter() r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir)) r.HandleFunc("/", mainHandler) r.HandleFunc("/_/change", changeHandler) r.HandleFunc("/_/state", stateHandler) r.HandleFunc("/_/status", statusHandler) r.HandleFunc("/loginstatus/", login.StatusHandler) r.HandleFunc("/logout/", login.LogoutHandler) r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler) http.Handle("/", util.LoggingGzipRequestResponse(r)) glog.Infoln("Ready to serve.") glog.Fatal(http.ListenAndServe(*port, nil)) }
func dumpCommits(store tiling.TileStore, n int) {
	tile, err := store.Get(0, -1)
	if err != nil {
		glog.Fatal("Could not read tile: " + err.Error())
	}

	tileLen := tile.LastCommitIndex() + 1
	commits := tile.Commits[:tileLen]
	if n <= 0 {
		n = tileLen
	}
	startIdx := tileLen - n

	// Keep track of which traces are non-empty.
	notEmpty := map[string]bool{}

	for i := startIdx; i < tileLen; i++ {
		count := 0
		for traceKey, v := range tile.Traces {
			if !v.IsMissing(i) {
				count++
				notEmpty[traceKey] = true
			}
		}
		commit := commits[i]

		// This works because a hash is always ASCII.
		outHash := commit.Hash[:20]
		fmt.Printf("%v: %5d/%5d : %s : %s \n", time.Unix(commit.CommitTime, 0), count, len(tile.Traces), outHash, commit.Author)
	}

	fmt.Printf("Total Commits : %d\n", tileLen)
	fmt.Printf("Non-empty traces: %d\n", len(notEmpty))
}
func main() {
	flag.Usage = printUsage
	common.Init()
	args := flag.Args()
	if len(args) == 0 {
		printUsage()
		os.Exit(1)
	}

	store := filetilestore.NewFileTileStore(*tileDir, *dataset, 0)
	switch args[0] {
	case VALIDATE:
		if !validator.ValidateDataset(store, *verbose, *echoHashes) {
			glog.Fatal("FAILED Validation.")
		}
	case DUMP_COMMITS:
		checkArgs(args, DUMP_COMMITS, 1)
		nCommits := parseInt(args[1])
		dumpCommits(store, nCommits)
	case MD5:
		checkArgs(args, MD5, 2)
		hash := args[1]
		nCommits := parseInt(args[2])
		md5Commits(store, hash, nCommits)
	case JSON:
		checkArgs(args, JSON, 3)
		nCommits := parseInt(args[1])
		nTraces := parseInt(args[2])
		fname := args[3]
		dumpTileToJSON(store, nCommits, nTraces, fname)
	default:
		glog.Fatalf("Unknown command: %s", args[0])
	}
}
func dumpTileToJSON(store tiling.TileStore, nCommits int, nTraces int, fname string) {
	tile, err := store.Get(0, -1)
	if err != nil {
		glog.Fatal("Could not read tile: " + err.Error())
	}

	newTile := tile
	if (nCommits > 0) || (nTraces > 0) {
		lastIdx := tile.LastCommitIndex()
		if nCommits <= 0 {
			nCommits = lastIdx + 1
		}
		if nTraces <= 0 {
			nTraces = len(tile.Traces)
		}

		commitLen := util.MinInt(nCommits, lastIdx+1)
		startCommit := lastIdx + 1 - commitLen
		newTraces := map[string]tiling.Trace{}
		for key, trace := range tile.Traces {
			for i := startCommit; i <= lastIdx; i++ {
				if !trace.IsMissing(i) {
					newTraces[key] = trace
					break
				}
			}
			if len(newTraces) >= nTraces {
				break
			}
		}

		newCommits := tile.Commits[startCommit:]
		newParamSet := map[string][]string{}
		tiling.GetParamSet(newTraces, newParamSet)

		newTile = &tiling.Tile{
			Traces:    newTraces,
			ParamSet:  newParamSet,
			Commits:   newCommits,
			Scale:     tile.Scale,
			TileIndex: tile.TileIndex,
		}
	}

	result, err := json.Marshal(newTile)
	if err != nil {
		glog.Fatalf("Could not marshal to JSON: %s", err)
	}

	err = ioutil.WriteFile(fname, result, 0644)
	if err != nil {
		glog.Fatalf("Could not write output file %s", err)
	}
	fmt.Printf("Commits included: %d\n", len(newTile.Commits))
	fmt.Printf("Traces included: %d\n", len(newTile.Traces))
}
func runServer(serverURL string) { r := mux.NewRouter() r.HandleFunc("/", mainHandler) r.HandleFunc("/googlestorage", googleStorageChangeHandler).Methods("POST") r.HandleFunc("/json/version", skiaversion.JsonHandler) http.Handle("/", util.LoggingGzipRequestResponse(r)) glog.Infof("Ready to serve on %s", serverURL) glog.Fatal(http.ListenAndServe(*port, nil)) }
func md5Commits(store tiling.TileStore, targetHash string, nCommits int) {
	tile, err := store.Get(0, -1)
	if err != nil {
		glog.Fatal("Could not read tile: " + err.Error())
	}

	tileLen := tile.LastCommitIndex() + 1
	commits := tile.Commits[:tileLen]

	// Find the target index.
	endIdx := -1
	for i, v := range commits {
		if strings.HasPrefix(v.Hash, targetHash) {
			endIdx = i
			break
		}
	}
	if endIdx == -1 {
		glog.Fatalf("Unable to find commit %s", targetHash)
	}

	endIdx++
	startIdx := endIdx - nCommits

	traceKeys := make([]string, 0, len(tile.Traces))
	for k, v := range tile.Traces {
		for i := startIdx; i < endIdx; i++ {
			if !v.IsMissing(i) {
				traceKeys = append(traceKeys, k)
				break
			}
		}
	}
	sort.Strings(traceKeys)

	result := make([][]string, len(traceKeys))
	for i, k := range traceKeys {
		switch trace := tile.Traces[k].(type) {
		case *gtypes.GoldenTrace:
			result[i] = trace.Values[startIdx:endIdx]
		case *types.PerfTrace:
			result[i] = asStringSlice(trace.Values[startIdx:endIdx])
		}
	}

	byteStr, err := getBytes(result)
	if err != nil {
		glog.Fatalf("Unable to serialize to bytes: %s", err.Error())
	}

	md5Hash := fmt.Sprintf("%x", md5.Sum(byteStr))
	fmt.Printf("Commit Range : %s - %s\n", commits[startIdx].Hash, commits[endIdx-1].Hash)
	fmt.Printf("Hash : %s\n", md5Hash)
	fmt.Printf("Total traces: %d\n", len(tile.Traces))
	fmt.Printf("Non-empty traces: %d\n", len(traceKeys))
}
func main() { common.InitWithMetrics("docserver", graphiteServer) Init() // Resources are served directly. http.HandleFunc("/res/", autogzip.HandleFunc(makeResourceHandler())) http.HandleFunc("/", autogzip.HandleFunc(mainHandler)) glog.Infoln("Ready to serve.") glog.Fatal(http.ListenAndServe(*port, nil)) }
func main() { defer common.LogPanic() common.InitWithMetrics("mathserv", graphiteServer) Init() // By default use a set of credentials setup for localhost access. var cookieSalt = "notverysecret" var clientID = "952643138919-5a692pfevie766aiog15io45kjpsh33v.apps.googleusercontent.com" var clientSecret = "QQfqRYU1ELkds90ku8xlIGl1" var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port) if !*local { cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT)) clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID)) clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET)) redirectURL = "https://mathinate.com/oauth2callback/" } login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, "", *local) r := mux.NewRouter() r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir)) r.HandleFunc("/", mainHandler) r.HandleFunc("/loginstatus/", login.StatusHandler) r.HandleFunc("/logout/", login.LogoutHandler) r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler) http.Handle("/", util.LoggingGzipRequestResponse(r)) AttemptLoadCertFromMetadata() glog.Infoln("Ready to serve.") if *certChainFile != "" { glog.Infof("Serving TLS") go func() { redir := mux.NewRouter() redir.HandleFunc("/", redirHandler) glog.Fatal(http.ListenAndServe(*httpPort, redir)) }() glog.Fatal(http.ListenAndServeTLS(*port, *certChainFile, *keyFile, nil)) } else { glog.Infof("Only serving HTTP") glog.Fatal(http.ListenAndServe(*port, nil)) } }
func main() {
	if len(os.Args) < 2 {
		glog.Fatalf("usage: %s SKIA_SRC_PATH [PATH_TO_DRAW.CPP]", os.Args[0])
	}
	skiaSrc := os.Args[1]
	if len(os.Args) < 3 {
		// execCommand(nil, skiaSrc, "git", "fetch")
		// execCommand(nil, skiaSrc, "git", "checkout", "origin/master")
		if err := fiddlerPrerequisites(skiaSrc); err != nil {
			glog.Fatal(err)
		}
	} else {
		if err := setResourceLimits(); err != nil {
			glog.Fatal(err)
		}
		tempDir, err := ioutil.TempDir("", "fiddle_")
		if err != nil {
			glog.Fatal(err)
		}
		defer func() {
			err = os.RemoveAll(tempDir)
			if err != nil {
				glog.Fatalf("os.RemoveAll(tempDir) failed: %v", err)
			}
		}()
		if os.Args[2] == "-" {
			if err := fiddler(skiaSrc, os.Stdin, os.Stdout, tempDir); err != nil {
				glog.Fatal(err)
			}
		} else {
			inputFile, err := os.Open(os.Args[2])
			if err != nil {
				glog.Fatalf("unable to open \"%s\": %v", os.Args[2], err)
			}
			defer util.Close(inputFile)
			if err = fiddler(skiaSrc, inputFile, os.Stdout, tempDir); err != nil {
				glog.Fatal(err)
			}
		}
	}
}
func main() {
	common.Init()
	out, err := util.SSH(*cmd, util.Slaves, *timeout)
	if err != nil {
		glog.Fatal(err)
	}
	if *printOutput {
		for k, v := range out {
			fmt.Printf("\n=====%s=====\n%s\n", k, v)
		}
	}
}
func runServer(serverURL string) {
	r := mux.NewRouter()
	r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir))
	r.HandleFunc("/", makeBugChomperPage).Methods("GET")
	r.HandleFunc("/", submitData).Methods("POST")
	r.HandleFunc("/json/version", skiaversion.JsonHandler)
	r.HandleFunc(OAUTH_CALLBACK_PATH, login.OAuth2CallbackHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))

	glog.Info("Server is running at " + serverURL)
	glog.Fatal(http.ListenAndServe(*port, nil))
}
func Init() { rand.Seed(time.Now().UnixNano()) if *resourcesDir == "" { _, filename, _, _ := runtime.Caller(0) *resourcesDir = filepath.Join(filepath.Dir(filename), "../..") } indexTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/index.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) clusterTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/clusters.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) alertsTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/alerting.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) clTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/cl.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) activityTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/activitylog.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) compareTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/compare.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) helpTemplate = template.Must(template.ParseFiles( filepath.Join(*resourcesDir, "templates/help.html"), filepath.Join(*resourcesDir, "templates/titlebar.html"), filepath.Join(*resourcesDir, "templates/header.html"), )) nanoTileStore = filetilestore.NewFileTileStore(*tileStoreDir, config.DATASET_NANO, 2*time.Minute) var err error git, err = gitinfo.CloneOrUpdate(*gitRepoURL, *gitRepoDir, false) if err != nil { glog.Fatal(err) } }