Example #1
// SSH connects to the specified workers and runs the specified command. If the
// command does not complete in the given duration then all remaining workers are
// considered timed out. SSH also automatically substitutes the sequential number
// of the worker for the WORKER_NUM_KEYWORD since it is a common use case.
func SSH(cmd string, workers []string, timeout time.Duration) (map[string]string, error) {
	glog.Infof("Running \"%s\" on %s with timeout of %s", cmd, workers, timeout)
	numWorkers := len(workers)

	// Ensure that the key file exists.
	key, err := getKeyFile()
	if err != nil {
		return nil, fmt.Errorf("Failed to get key file: %s", err)
	}

	// Initialize the structure with the configuration for ssh.
	config := &ssh.ClientConfig{
		User: CtUser,
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(key),
		},
	}

	var wg sync.WaitGroup
	// m protects workersWithOutputs and remainingWorkers
	var m sync.Mutex
	// Will be populated and returned by this function.
	workersWithOutputs := map[string]string{}
	// Keeps track of which workers are still pending.
	remainingWorkers := map[string]int{}

	// Kick off a goroutine on all workers.
	for i, hostname := range workers {
		wg.Add(1)
		m.Lock()
		remainingWorkers[hostname] = 1
		m.Unlock()
		go func(index int, hostname string) {
			defer wg.Done()
			updatedCmd := strings.Replace(cmd, WORKER_NUM_KEYWORD, strconv.Itoa(index+1), -1)
			output, err := executeCmd(updatedCmd, hostname, config, timeout)
			if err != nil {
				glog.Errorf("Could not execute ssh cmd: %s", err)
			}
			m.Lock()
			defer m.Unlock()
			workersWithOutputs[hostname] = output
			delete(remainingWorkers, hostname)
			glog.Infoln()
			glog.Infof("[%d/%d] Worker %s has completed execution", numWorkers-len(remainingWorkers), numWorkers, hostname)
			glog.Infof("Remaining workers: %v", remainingWorkers)
		}(i, hostname)
	}

	wg.Wait()
	glog.Infoln()
	glog.Infof("Finished running \"%s\" on all %d workers", cmd, numWorkers)
	glog.Info("========================================")

	m.Lock()
	defer m.Unlock()
	return workersWithOutputs, nil
}
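A minimal usage sketch, assuming WORKER_NUM_KEYWORD is the placeholder token that SSH substitutes and that the hostnames and command below are illustrative only:

	// Hypothetical call site; hostnames and the command are illustrative.
	workers := []string{"worker-1", "worker-2", "worker-3"}
	cmd := "echo Running on worker " + WORKER_NUM_KEYWORD
	outputs, err := SSH(cmd, workers, 10*time.Minute)
	if err != nil {
		glog.Fatalf("SSH run failed: %s", err)
	}
	for hostname, out := range outputs {
		glog.Infof("%s: %s", hostname, out)
	}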
Example #2
// NewVersionedDB must be called once to create a VersionedDB before the
// database is used.
//
// Since it uses glog, make sure it is called after flag.Parse.
func (c *DatabaseConfig) NewVersionedDB() (*VersionedDB, error) {
	if err := c.validate(); err != nil {
		return nil, err
	}

	// Connect to the MySQL server. In production the relevant connection
	// information comes from the metadata server; a locally supplied connection
	// string is for testing only.
	glog.Infoln("Opening SQL database.")
	DB, err := sql.Open(DEFAULT_DRIVER, c.MySQLString())
	if err != nil {
		return nil, fmt.Errorf("Failed to open connection to SQL server: %v", err)
	}

	glog.Infoln("Sending Ping.")
	if err := DB.Ping(); err != nil {
		return nil, fmt.Errorf("Failed to ping SQL server: %v", err)
	}

	// As outlined in this comment:
	// https://github.com/go-sql-driver/mysql/issues/257#issuecomment-48985975
	// We can remove this once we have determined it's not necessary.
	DB.SetMaxIdleConns(0)
	DB.SetMaxOpenConns(200)

	result := &VersionedDB{
		DB:             DB,
		migrationSteps: c.MigrationSteps,
	}

	// Make sure the migration table exists.
	if err := result.checkVersionTable(); err != nil {
		return nil, fmt.Errorf("Attempt to create version table returned: %v", err)
	}
	glog.Infoln("Version table OK.")

	// Ping the database occasionally to keep the connection fresh.
	go func() {
		for range time.Tick(time.Minute) {
			if err := result.DB.Ping(); err != nil {
				glog.Warningln("Database failed to respond:", err)
				continue
			}
			glog.Infof("db: Successful ping")
		}
	}()

	return result, nil
}
Example #3
func main() {
	// Set up flags.
	dbConf := database.ConfigFromFlags(buildbot.PROD_DB_HOST, buildbot.PROD_DB_PORT, database.USER_ROOT, buildbot.PROD_DB_NAME, buildbot.MigrationSteps())

	// Global init to initialize glog and parse arguments.
	common.Init()

	if err := dbConf.PromptForPassword(); err != nil {
		glog.Fatal(err)
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}

	// Get the latest and current database versions.
	maxDBVersion := vdb.MaxDBVersion()
	glog.Infof("Latest database version: %d", maxDBVersion)

	dbVersion, err := vdb.DBVersion()
	if err != nil {
		glog.Fatalf("Unable to retrieve database version. Error: %s", err)
	}
	glog.Infof("Current database version: %d", dbVersion)

	if dbVersion < maxDBVersion {
		glog.Infof("Migrating to version: %d", maxDBVersion)
		err = vdb.Migrate(maxDBVersion)
		if err != nil {
			glog.Fatalf("Unable to retrieve database version. Error: %s", err)
		}
	}

	glog.Infoln("Database migration finished.")
}
Example #4
// GetRecent returns the most recent n activity records in types.Activity struct format.
func GetRecent(n int) ([]*types.Activity, error) {
	ret := []*types.Activity{}
	rows, err := db.DB.Query("SELECT id, timestamp, userid, action, url FROM activitylog ORDER BY id DESC LIMIT ?", n)
	if err != nil {
		return nil, fmt.Errorf("Failed to read from database: %s", err)
	}
	defer util.Close(rows)
	glog.Infoln("Processing activity rows.")
	for rows.Next() {
		var id int
		var timestamp int64
		var userid string
		var action string
		var url string
		if err := rows.Scan(&id, &timestamp, &userid, &action, &url); err != nil {
			return nil, fmt.Errorf("Failed to read row from database: %s", err)
		}
		r := &types.Activity{
			ID:     id,
			TS:     timestamp,
			UserID: userid,
			Action: action,
			URL:    url,
		}
		ret = append(ret, r)
	}

	return ret, nil
}
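A short sketch of how GetRecent might back a JSON endpoint; the handler below is illustrative and not part of the original package:

	func recentActivityHandler(w http.ResponseWriter, r *http.Request) {
		// Fetch the 20 most recent activity records.
		activities, err := GetRecent(20)
		if err != nil {
			util.ReportError(w, r, err, "Failed to load recent activity.")
			return
		}
		w.Header().Set("Content-Type", "application/json")
		if err := json.NewEncoder(w).Encode(activities); err != nil {
			glog.Errorf("Failed to encode activity log: %s", err)
		}
	}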
Example #5
func main() {
	common.InitWithMetrics("push", graphiteServer)
	Init()

	// By default, use a set of credentials set up for localhost access.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com"
	var clientSecret = "cw0IosPu4yjaG2KWmppj2guj"
	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		redirectURL = "https://push.skia.org/oauth2callback/"
	}
	login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST, *local)

	r := mux.NewRouter()
	r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir))
	r.HandleFunc("/", mainHandler)
	r.HandleFunc("/_/change", changeHandler)
	r.HandleFunc("/_/state", stateHandler)
	r.HandleFunc("/_/status", statusHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Infoln("Ready to serve.")
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #6
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("push", graphiteServer)
	Init()

	redirectURL := fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		redirectURL = "https://push.skia.org/oauth2callback/"
	}
	if err := login.InitFromMetadataOrJSON(redirectURL, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST); err != nil {
		glog.Fatalf("Failed to initialize the login system: %s", err)
	}

	r := mux.NewRouter()
	r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir))
	r.HandleFunc("/", mainHandler)
	r.HandleFunc("/_/change", changeHandler)
	r.HandleFunc("/_/state", stateHandler)
	r.HandleFunc("/_/status", statusHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Infoln("Ready to serve.")
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #7
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("probeserver", graphiteServer)

	client, err := auth.NewDefaultJWTServiceAccountClient("https://www.googleapis.com/auth/userinfo.email")
	if err != nil {
		glog.Fatalf("Failed to create client for talking to the issue tracker: %s", err)
	}
	go monitorIssueTracker(client)
	glog.Infoln("Looking for Graphite server.")
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	glog.Infoln("Found Graphite server.")

	liveness := imetrics.NewLiveness("probes")

	// We have two sets of metrics, one for the probes and one for the probe
	// server itself. The server's metrics are handled by common.InitWithMetrics().
	probeRegistry := metrics.NewRegistry()
	go graphite.Graphite(probeRegistry, common.SAMPLE_PERIOD, *prefix, addr)

	// TODO(jcgregorio) Monitor config file and reload if it changes.
	cfg, err := readConfigFiles(*config)
	if err != nil {
		glog.Fatalln("Failed to read config file: ", err)
	}
	glog.Infoln("Successfully read config file.")
	// Register counters for each probe.
	for name, probe := range cfg {
		probe.failure = metrics.NewRegisteredGauge(name+".failure", probeRegistry)
		probe.latency = metrics.NewRegisteredGauge(name+".latency", probeRegistry)
	}

	// Create a client that uses our dialer with a timeout.
	c := &http.Client{
		Transport: &http.Transport{
			Dial: dialTimeout,
		},
	}
	probeOneRound(cfg, c)
	for range time.Tick(*runEvery) {
		probeOneRound(cfg, c)
		liveness.Update()
	}
}
Example #8
// tileHandler accepts URIs like /tiles/0/1
// where the URI format is /tiles/<tile-scale>/<tile-number>
//
// It returns JSON of the form:
//
//  {
//    tiles: [20],
//    scale: 0,
//    paramset: {
//      "os": ["Android", "ChromeOS", ..],
//      "arch": ["Arm7", "x86", ...],
//    },
//    commits: [
//      {
//        "commit_time": 140329432,
//        "hash": "0e03478100ea",
//        "author": "*****@*****.**",
//        "commit_msg": "The subject line of the commit.",
//      },
//      ...
//    ],
//    ticks: [
//      [1.5, "Mon"],
//      [3.5, "Tue"]
//    ],
//    skps: [
//      5, 13, 24
//    ]
//  }
//
//  Where skps are the commit indices where the SKPs were updated.
//
func tileHandler(w http.ResponseWriter, r *http.Request) {
	glog.Infof("Tile Handler: %q\n", r.URL.Path)
	handlerStart := time.Now()
	match := tileHandlerPath.FindStringSubmatch(r.URL.Path)
	if r.Method != "GET" || match == nil || len(match) != 3 {
		http.NotFound(w, r)
		return
	}
	tileScale, err := strconv.ParseInt(match[1], 10, 0)
	if err != nil {
		util.ReportError(w, r, err, "Failed parsing tile scale.")
		return
	}
	tileNumber, err := strconv.ParseInt(match[2], 10, 0)
	if err != nil {
		util.ReportError(w, r, err, "Failed parsing tile number.")
		return
	}
	glog.Infof("tile: %d %d", tileScale, tileNumber)
	tile, err := getTile(int(tileScale), int(tileNumber))
	if err != nil {
		util.ReportError(w, r, err, "Failed retrieving tile.")
		return
	}

	guiTile := tiling.NewTileGUI(tile.Scale, tile.TileIndex)
	guiTile.Commits = tile.Commits
	guiTile.ParamSet = tile.ParamSet
	// SkpCommits goes out to the git repo, add caching if this turns out to be
	// slow.
	if skps, err := git.SkpCommits(tile); err != nil {
		guiTile.Skps = []int{}
		glog.Errorf("Failed to calculate skps: %s", err)
	} else {
		guiTile.Skps = skps
	}

	ts := []int64{}
	for _, c := range tile.Commits {
		if c.CommitTime != 0 {
			ts = append(ts, c.CommitTime)
		}
	}
	glog.Infof("%#v", ts)
	guiTile.Ticks = human.FlotTickMarks(ts)

	// Marshal and send
	marshaledResult, err := json.Marshal(guiTile)
	if err != nil {
		util.ReportError(w, r, err, "Failed to marshal JSON.")
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_, err = w.Write(marshaledResult)
	if err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
	}
	glog.Infoln("Total handler time: ", time.Since(handlerStart).Nanoseconds())
}
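For illustration, a client can mirror the documented response with its own struct and decode it; the struct, its field subset, and the URL below are assumptions, not the server's own types:

	// tileResponse mirrors part of the JSON documented above (commits and ticks omitted).
	type tileResponse struct {
		Tiles    []int               `json:"tiles"`
		Scale    int                 `json:"scale"`
		ParamSet map[string][]string `json:"paramset"`
		Skps     []int               `json:"skps"`
	}

	func fetchTile() {
		resp, err := http.Get("http://localhost:8000/tiles/0/1") // illustrative host
		if err != nil {
			glog.Fatalf("Request failed: %s", err)
		}
		defer util.Close(resp.Body)
		var tr tileResponse
		if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil {
			glog.Fatalf("Failed to decode tile JSON: %s", err)
		}
		glog.Infof("Scale %d, %d SKP updates, %d paramset keys", tr.Scale, len(tr.Skps), len(tr.ParamSet))
	}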
Example #9
// GetLatestGSDirs returns the directory names under which data would be stored
// for the given timestamp range.
//
// The returned directories cover the range from startTS to endTS, with coarser
// (year/month) granularity in the middle of the range and finer (day/hour)
// granularity near its endpoints.
func GetLatestGSDirs(startTS int64, endTS int64, bsSubdir string) []string {
	startTime := time.Unix(startTS, 0).UTC()
	startYear, startMonth, startDay := startTime.Date()
	glog.Infoln("GS dir start time: ", startTime)
	endTime := time.Unix(endTS, 0).UTC()
	lastAddedTime := startTime
	results := make([]string, 0)
	newYear, newMonth, newDay := endTime.Date()
	newHour := endTime.Hour()
	lastYear, lastMonth, _ := lastAddedTime.Date()
	if lastYear != newYear {
		for i := lastYear; i < newYear; i++ {
			if i != startYear {
				results = append(results, fmt.Sprintf("%04d", i))
			} else {
				for j := startMonth; j <= time.December; j++ {
					if j == startMonth && startDay > 1 {
						for k := startDay; k <= lastDate(i, j); k++ {
							results = append(results, fmt.Sprintf("%04d/%02d/%02d", i, j, k))
						}
					} else {
						results = append(results, fmt.Sprintf("%04d/%02d", i, j))
					}
				}
			}
		}
		lastAddedTime = time.Date(newYear, time.January, 1, 0, 0, 0, 0, time.UTC)
	}
	lastYear, lastMonth, _ = lastAddedTime.Date()
	if lastMonth != newMonth {
		for i := lastMonth; i < newMonth; i++ {
			if i != startMonth {
				results = append(results, fmt.Sprintf("%04d/%02d", lastYear, i))
			} else {
				for j := startDay; j <= lastDate(lastYear, i); j++ {
					results = append(results, fmt.Sprintf("%04d/%02d/%02d", lastYear, i, j))
				}
			}
		}
		lastAddedTime = time.Date(newYear, newMonth, 1, 0, 0, 0, 0, time.UTC)
	}
	lastYear, lastMonth, lastDay := lastAddedTime.Date()
	if lastDay != newDay {
		for i := lastDay; i < newDay; i++ {
			results = append(results, fmt.Sprintf("%04d/%02d/%02d", lastYear, lastMonth, i))
		}
		lastAddedTime = time.Date(newYear, newMonth, newDay, 0, 0, 0, 0, time.UTC)
	}
	lastYear, lastMonth, lastDay = lastAddedTime.Date()
	lastHour := lastAddedTime.Hour()
	for i := lastHour; i < newHour+1; i++ {
		results = append(results, fmt.Sprintf("%04d/%02d/%02d/%02d", lastYear, lastMonth, lastDay, i))
	}
	for i := range results {
		results[i] = fmt.Sprintf("%s/%s", bsSubdir, results[i])
	}
	return results
}
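A small usage sketch covering roughly the last day of data; the "nano-json-v1" subdirectory name is illustrative:

	endTS := time.Now().UTC().Unix()
	startTS := endTS - 24*60*60
	// Each entry is a prefix like "nano-json-v1/2015/06/01/13" to poll in Google Storage.
	for _, dir := range GetLatestGSDirs(startTS, endTS, "nano-json-v1") {
		glog.Infof("Will poll gs://<bucket>/%s", dir)
	}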
Example #10
// getTile retrieves a tile from the disk
func getTile(tileScale, tileNumber int) (*tiling.Tile, error) {
	start := time.Now()
	tile, err := nanoTileStore.Get(tileScale, tileNumber)
	glog.Infoln("Time for tile load: ", time.Since(start).Nanoseconds())
	if err != nil {
		return nil, fmt.Errorf("Unable to get tile from tilestore: %s", err)
	}
	if tile == nil {
		return nil, fmt.Errorf("Unable to get tile %d/%d from tilestore: tile is nil", tileScale, tileNumber)
	}
	return tile, nil
}
Example #11
// singleHandler is similar to /query/0/-1/traces?<param filters>, but takes an
// optional commit hash and returns a single value for each trace at that commit,
// or the latest value if a hash is not given or found. The resulting JSON is in
// SingleResponse format that looks like:
//
//  {
//    "traces": [
//      {
//        val: 1.1,
//        params: {"os: "Android", ...}
//      },
//      ...
//    ],
//    "hash": "abc123",
//  }
//
func singleHandler(w http.ResponseWriter, r *http.Request) {
	glog.Infof("Single Handler: %q\n", r.URL.Path)
	handlerStart := time.Now()
	match := singleHandlerPath.FindStringSubmatch(r.URL.Path)
	if r.Method != "GET" || match == nil || len(match) != 2 {
		http.NotFound(w, r)
		return
	}
	if err := r.ParseForm(); err != nil {
		util.ReportError(w, r, err, "Failed to parse query params.")
		return
	}
	hash := match[1]

	tileNum, idx, err := git.TileAddressFromHash(hash, time.Time(ingester.BEGINNING_OF_TIME))
	if err != nil {
		glog.Infof("Did not find hash '%s', use latest: %q.\n", hash, err)
		tileNum = -1
		idx = -1
	}
	glog.Infof("Hash: %s tileNum: %d, idx: %d\n", hash, tileNum, idx)
	tile, err := getTile(0, tileNum)
	if err != nil {
		util.ReportError(w, r, err, "Failed retrieving tile.")
		return
	}

	if idx < 0 {
		idx = len(tile.Commits) - 1 // Defaults to the last slice element.
	}
	glog.Infof("Tile: %d; Idx: %d\n", tileNum, idx)

	ret := SingleResponse{
		Traces: []*SingleTrace{},
		Hash:   tile.Commits[idx].Hash,
	}
	for _, tr := range tile.Traces {
		if tiling.Matches(tr, r.Form) {
			v, err := vec.FillAt(tr.(*types.PerfTrace).Values, idx)
			if err != nil {
				util.ReportError(w, r, err, "Error while getting value at slice index.")
				return
			}
			t := &SingleTrace{
				Val:    v,
				Params: tr.Params(),
			}
			ret.Traces = append(ret.Traces, t)
		}
	}
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	if err := enc.Encode(ret); err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
	}
	glog.Infoln("Total handler time: ", time.Since(handlerStart).Nanoseconds())
}
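A client-side sketch of calling this endpoint with parameter filters and decoding the documented shape; the struct names and the URL are assumptions:

	type singleTrace struct {
		Val    float64           `json:"val"`
		Params map[string]string `json:"params"`
	}

	type singleResponse struct {
		Traces []singleTrace `json:"traces"`
		Hash   string        `json:"hash"`
	}

	func fetchSingle() {
		// Query values at commit abc123 for traces matching os=Android.
		resp, err := http.Get("http://localhost:8000/single/abc123?os=Android") // illustrative host
		if err != nil {
			glog.Fatalf("Request failed: %s", err)
		}
		defer util.Close(resp.Body)
		var sr singleResponse
		if err := json.NewDecoder(resp.Body).Decode(&sr); err != nil {
			glog.Fatalf("Failed to decode response: %s", err)
		}
		glog.Infof("%d traces at commit %s", len(sr.Traces), sr.Hash)
	}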
Example #12
func main() {
	common.InitWithMetrics("docserver", graphiteServer)
	Init()

	// Resources are served directly.
	http.HandleFunc("/res/", autogzip.HandleFunc(makeResourceHandler()))
	http.HandleFunc("/", autogzip.HandleFunc(mainHandler))

	glog.Infoln("Ready to serve.")
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #13
func main() {
	defer common.LogPanic()
	// Set up flags.
	dbConf := db.DBConfigFromFlags()

	// Global init to initialize glog and parse arguments.
	common.Init()

	if *promptPassword {
		if err := dbConf.PromptForPassword(); err != nil {
			glog.Fatal(err)
		}
	}
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}

	if *targetVersion < 0 {
		// Get the latest and current database versions.
		maxDBVersion := vdb.MaxDBVersion()
		glog.Infof("Latest database version: %d", maxDBVersion)

		dbVersion, err := vdb.DBVersion()
		if err != nil {
			glog.Fatalf("Unable to retrieve database version. Error: %s", err)
		}
		glog.Infof("Current database version: %d", dbVersion)

		if dbVersion < maxDBVersion {
			glog.Infof("Migrating to version: %d", maxDBVersion)
			err = vdb.Migrate(maxDBVersion)
			if err != nil {
				glog.Fatalf("Unable to retrieve database version. Error: %s", err)
			}
		}
	} else {
		glog.Infof("Migrating to version: %d", *targetVersion)
		err = vdb.Migrate(*targetVersion)
		if err != nil {
			glog.Fatalf("Unable to retrieve database version. Error: %s", err)
		}
	}
	glog.Infoln("Database migration finished.")
}
Example #14
func main() {
	common.InitWithMetrics("pulld", graphiteServer)
	Init()
	pullInit()

	r := mux.NewRouter()
	r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir))
	r.HandleFunc("/", mainHandler).Methods("GET")
	r.HandleFunc("/_/list", listHandler).Methods("GET")
	r.HandleFunc("/_/change", changeHandler).Methods("POST")
	r.HandleFunc("/pullpullpull", pullHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Infoln("Ready to serve.")
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #15
// Migrate migrates the database to the specified target version. Use DBVersion()
// to retrieve the current version of the database.
func (vdb *VersionedDB) Migrate(targetVersion int) (rv error) {
	if (targetVersion < 0) || (targetVersion > vdb.MaxDBVersion()) {
		glog.Fatalf("Target db version must be in range: [0 .. %d]", vdb.MaxDBVersion())
	}

	currentVersion, err := vdb.DBVersion()
	if err != nil {
		return err
	}

	if currentVersion > vdb.MaxDBVersion() {
		glog.Fatalf("Version table is out of date with current DB version.")
	}

	if targetVersion == currentVersion {
		return nil
	}

	// start a transaction
	txn, err := vdb.DB.Begin()
	if err != nil {
		return err
	}
	defer func() { rv = CommitOrRollback(txn, rv) }()

	// run through the migration steps
	runSteps := vdb.getMigrations(currentVersion, targetVersion)
	if len(runSteps) == 0 {
		glog.Fatalln("Unable to find migration steps.")
	}

	for _, step := range runSteps {
		for _, stmt := range step {
			glog.Infoln("EXECUTING: \n", stmt)
			if _, err = txn.Exec(stmt); err != nil {
				return err
			}
		}
	}

	// update the dbversion table
	if err = vdb.setDBVersion(txn, targetVersion); err != nil {
		return err
	}

	return nil
}
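The deferred CommitOrRollback call is what keeps a failed migration from being half-applied: the transaction is committed only if every step succeeded. A minimal sketch of such a helper, assuming the real implementation (not shown here) may differ:

	// CommitOrRollback commits txn when err is nil and rolls it back otherwise,
	// returning whichever error occurred first.
	func CommitOrRollback(txn *sql.Tx, err error) error {
		if err != nil {
			if rbErr := txn.Rollback(); rbErr != nil {
				glog.Errorf("Rollback failed: %s", rbErr)
			}
			return err
		}
		return txn.Commit()
	}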
Example #16
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("mathserv", graphiteServer)
	Init()

	// By default, use a set of credentials set up for localhost access.
	var cookieSalt = "notverysecret"
	var clientID = "952643138919-5a692pfevie766aiog15io45kjpsh33v.apps.googleusercontent.com"
	var clientSecret = "QQfqRYU1ELkds90ku8xlIGl1"
	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		redirectURL = "https://mathinate.com/oauth2callback/"
	}
	login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, "", *local)

	r := mux.NewRouter()
	r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir))
	r.HandleFunc("/", mainHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	AttemptLoadCertFromMetadata()
	glog.Infoln("Ready to serve.")

	if *certChainFile != "" {
		glog.Infof("Serving TLS")
		go func() {
			redir := mux.NewRouter()
			redir.HandleFunc("/", redirHandler)
			glog.Fatal(http.ListenAndServe(*httpPort, redir))
		}()
		glog.Fatal(http.ListenAndServeTLS(*port, *certChainFile, *keyFile, nil))
	} else {
		glog.Infof("Only serving HTTP")
		glog.Fatal(http.ListenAndServe(*port, nil))
	}
}
Example #17
func (g *logger) Println(args ...interface{}) {
	glog.Infoln(args...)
}
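This tiny adapter lets glog satisfy any dependency that only needs a Println-style logger. A hedged sketch of how it might be wired up, assuming logger is an empty struct (as its single method suggests) and the Logger interface below is hypothetical:

	// Logger is a hypothetical interface expected by some third-party package.
	type Logger interface {
		Println(args ...interface{})
	}

	func configureLogging() {
		// The glog-backed adapter can be passed wherever a Logger is required.
		var l Logger = &logger{}
		l.Println("logging is wired up through glog")
	}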
Example #18
func main() {
	defer common.LogPanic()
	var err error

	mainTimer := timer.New("main init")
	// Setup DB flags.
	dbConf := database.ConfigFromFlags(db.PROD_DB_HOST, db.PROD_DB_PORT, database.USER_RW, db.PROD_DB_NAME, db.MigrationSteps())

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("skiacorrectness", graphiteServer)

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatalf("Unable to retrieve version: %s", err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	// Enable the memory profiler if memProfile was set.
	// TODO(stephana): This should be moved to a HTTP endpoint that
	// only responds to internal IP addresses/ports.
	if *memProfile > 0 {
		time.AfterFunc(*memProfile, func() {
			glog.Infof("Writing Memory Profile")
			f, err := ioutil.TempFile("./", "memory-profile")
			if err != nil {
				glog.Fatalf("Unable to create memory profile file: %s", err)
			}
			if err := pprof.WriteHeapProfile(f); err != nil {
				glog.Fatalf("Unable to write memory profile file: %v", err)
			}
			util.Close(f)
			glog.Infof("Memory profile written to %s", f.Name())

			os.Exit(0)
		})
	}

	if *cpuProfile > 0 {
		glog.Infof("Writing CPU Profile")
		f, err := ioutil.TempFile("./", "cpu-profile")
		if err != nil {
			glog.Fatalf("Unable to create cpu profile file: %s", err)
		}

		if err := pprof.StartCPUProfile(f); err != nil {
			glog.Fatalf("Unable to write cpu profile file: %v", err)
		}
		time.AfterFunc(*cpuProfile, func() {
			pprof.StopCPUProfile()
			util.Close(f)
			glog.Infof("CPU profile written to %s", f.Name())
			os.Exit(0)
		})
	}

	// Init this module.
	Init()

	// Initialize submodules.
	filediffstore.Init()

	// Set up login
	// TODO (stephana): Factor out to go/login/login.go and remove hard-coded
	// values.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-ubjke2f3staq6ouas64r31h8f8tcbiqp.apps.googleusercontent.com"
	var clientSecret = "rK-kRY71CXmcg0v9I9KIgWci"
	var useRedirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		useRedirectURL = *redirectURL
	}
	login.Init(clientID, clientSecret, useRedirectURL, cookieSalt, login.DEFAULT_SCOPE, *authWhiteList, *local)

	// Get the OAuth client if necessary.
	client := getOAuthClient(*oauthCacheFile)

	// Set up the cache implementation to use.
	cacheFactory := filediffstore.MemCacheFactory
	if *redisHost != "" {
		cacheFactory = func(uniqueId string, codec util.LRUCodec) util.LRUCache {
			return redisutil.NewRedisLRUCache(*redisHost, *redisDB, uniqueId, codec)
		}
	}

	// Get the expectations storage, the filediff storage and the tilestore.
	diffStore, err := filediffstore.NewFileDiffStore(client, *imageDir, *gsBucketName, filediffstore.DEFAULT_GS_IMG_DIR_NAME, cacheFactory, filediffstore.RECOMMENDED_WORKER_POOL_SIZE)
	if err != nil {
		glog.Fatalf("Allocating DiffStore failed: %s", err)
	}

	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}

	if !vdb.IsLatestVersion() {
		glog.Fatal("Wrong DB version. Please updated to latest version.")
	}

	digestStore, err := digeststore.New(*storageDir)
	if err != nil {
		glog.Fatal(err)
	}

	eventBus := eventbus.New(nil)
	storages = &storage.Storage{
		DiffStore:         diffStore,
		ExpectationsStore: expstorage.NewCachingExpectationStore(expstorage.NewSQLExpectationStore(vdb), eventBus),
		IgnoreStore:       ignore.NewSQLIgnoreStore(vdb),
		TileStore:         filetilestore.NewFileTileStore(*tileStoreDir, config.DATASET_GOLD, 2*time.Minute),
		DigestStore:       digestStore,
		NCommits:          *nCommits,
		EventBus:          eventBus,
		TrybotResults:     trybot.NewTrybotResultStorage(vdb),
		RietveldAPI:       rietveld.New(*rietveldURL, nil),
	}

	if err := history.Init(storages, *nTilesToBackfill); err != nil {
		glog.Fatalf("Unable to initialize history package: %s", err)
	}

	if blamer, err = blame.New(storages); err != nil {
		glog.Fatalf("Unable to create blamer: %s", err)
	}

	if err := ignore.Init(storages.IgnoreStore); err != nil {
		glog.Fatalf("Failed to start monitoring for expired ignore rules: %s", err)
	}

	// Enable the experimental features.
	tallies, err = tally.New(storages)
	if err != nil {
		glog.Fatalf("Failed to build tallies: %s", err)
	}

	paramsetSum = paramsets.New(tallies, storages)

	if !*local {
		*issueTrackerKey = metadata.Must(metadata.ProjectGet(metadata.APIKEY))
	}

	issueTracker = issues.NewIssueTracker(*issueTrackerKey)

	summaries, err = summary.New(storages, tallies, blamer)
	if err != nil {
		glog.Fatalf("Failed to build summary: %s", err)
	}

	statusWatcher, err = status.New(storages)
	if err != nil {
		glog.Fatalf("Failed to initialize status watcher: %s", err)
	}

	imgFS := NewURLAwareFileServer(*imageDir, IMAGE_URL_PREFIX)
	pathToURLConverter = imgFS.GetURL

	if err := warmer.Init(storages, summaries, tallies); err != nil {
		glog.Fatalf("Failed to initialize the warmer: %s", err)
	}
	mainTimer.Stop()

	// Everything is wired up at this point. Run the self tests if requested.
	if *selfTest {
		search.SelfTest(storages, tallies, blamer, paramsetSum)
	}

	router := mux.NewRouter()

	// Set up the resource to serve the image files.
	router.PathPrefix(IMAGE_URL_PREFIX).Handler(imgFS.Handler)

	// New Polymer based UI endpoints.
	router.PathPrefix("/res/").HandlerFunc(makeResourceHandler())

	// TODO(stephana): Remove the 'poly' prefix from all the handlers and clean
	// up main2.go by either merging it into main.go or making it clearer that
	// it contains all the handlers. Make it clearer what variables are shared
	// between the different files.

	// All the handlers are prefixed with poly to differentiate them from the
	// angular code until the angular code is removed.
	router.HandleFunc(OAUTH2_CALLBACK_PATH, login.OAuth2CallbackHandler)
	router.HandleFunc("/", byBlameHandler).Methods("GET")
	router.HandleFunc("/list", templateHandler("list.html")).Methods("GET")
	router.HandleFunc("/_/details", polyDetailsHandler).Methods("GET")
	router.HandleFunc("/_/diff", polyDiffJSONDigestHandler).Methods("GET")
	router.HandleFunc("/_/hashes", polyAllHashesHandler).Methods("GET")
	router.HandleFunc("/_/ignores", polyIgnoresJSONHandler).Methods("GET")
	router.HandleFunc("/_/ignores/add/", polyIgnoresAddHandler).Methods("POST")
	router.HandleFunc("/_/ignores/del/{id}", polyIgnoresDeleteHandler).Methods("POST")
	router.HandleFunc("/_/ignores/save/{id}", polyIgnoresUpdateHandler).Methods("POST")
	router.HandleFunc("/_/list", polyListTestsHandler).Methods("GET")
	router.HandleFunc("/_/paramset", polyParamsHandler).Methods("GET")
	router.HandleFunc("/_/nxn", nxnJSONHandler).Methods("GET")

	// TODO(stephana): Once /_/search3 is stable it will replace /_/search and the
	// /search*.html pages will be consolidated into one.
	router.HandleFunc("/_/search", polySearchJSONHandler).Methods("GET")
	router.HandleFunc("/_/search3", search3JSONHandler).Methods("GET")

	router.HandleFunc("/_/status/{test}", polyTestStatusHandler).Methods("GET")
	router.HandleFunc("/_/test", polyTestHandler).Methods("POST")
	router.HandleFunc("/_/triage", polyTriageHandler).Methods("POST")
	router.HandleFunc("/_/triagelog", polyTriageLogHandler).Methods("GET")
	router.HandleFunc("/_/triagelog/undo", triageUndoHandler).Methods("POST")
	router.HandleFunc("/_/failure", failureListJSONHandler).Methods("GET")
	router.HandleFunc("/_/failure/clear", failureClearJSONHandler).Methods("POST")
	router.HandleFunc("/_/trybot", listTrybotsJSONHandler).Methods("GET")

	router.HandleFunc("/byblame", byBlameHandler).Methods("GET")
	router.HandleFunc("/cluster", templateHandler("cluster.html")).Methods("GET")
	router.HandleFunc("/search2", search2Handler).Methods("GET")
	router.HandleFunc("/cmp/{test}", templateHandler("compare.html")).Methods("GET")
	router.HandleFunc("/detail", templateHandler("single.html")).Methods("GET")
	router.HandleFunc("/diff", templateHandler("diff.html")).Methods("GET")
	router.HandleFunc("/help", templateHandler("help.html")).Methods("GET")
	router.HandleFunc("/ignores", templateHandler("ignores.html")).Methods("GET")
	router.HandleFunc("/loginstatus/", login.StatusHandler)
	router.HandleFunc("/logout/", login.LogoutHandler)
	router.HandleFunc("/search", templateHandler("search.html")).Methods("GET")
	router.HandleFunc("/triagelog", templateHandler("triagelog.html")).Methods("GET")
	router.HandleFunc("/trybot", templateHandler("trybot.html")).Methods("GET")
	router.HandleFunc("/failures", templateHandler("failures.html")).Methods("GET")

	// Add the necessary middleware and have the router handle all requests.
	// By structuring the middleware this way we only log requests that are
	// authenticated.
	rootHandler := util.LoggingGzipRequestResponse(router)
	if *forceLogin {
		rootHandler = login.ForceAuth(rootHandler, OAUTH2_CALLBACK_PATH)
	}

	// The polyStatusHandler is being polled, so we exclude it from logging.
	http.HandleFunc("/_/trstatus", polyStatusHandler)
	http.Handle("/", rootHandler)

	// Start the server
	glog.Infoln("Serving on http://127.0.0.1" + *port)
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #19
func main() {
	defer common.LogPanic()
	// Setup DB flags.
	dbConf := idb.DBConfigFromFlags()

	common.InitWithMetrics("skiaperf", graphiteServer)
	Init()
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	if err := dbConf.InitDB(); err != nil {
		glog.Fatal(err)
	}

	stats.Start(masterTileBuilder, git)
	alerting.Start(masterTileBuilder)

	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		redirectURL = "https://perf.skia.org/oauth2callback/"
	}
	if err := login.InitFromMetadataOrJSON(redirectURL, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST); err != nil {
		glog.Fatalf("Failed to initialize the login system: %s", err)
	}

	// Resources are served directly.
	router := mux.NewRouter()

	router.PathPrefix("/res/").HandlerFunc(makeResourceHandler())

	router.HandleFunc("/", templateHandler("index.html"))
	router.HandleFunc("/frame/", templateHandler("frame.html"))
	router.HandleFunc("/shortcuts/", shortcutHandler)
	router.PathPrefix("/tiles/").HandlerFunc(tileHandler)
	router.PathPrefix("/single/").HandlerFunc(singleHandler)
	router.PathPrefix("/query/").HandlerFunc(queryHandler)
	router.HandleFunc("/commits/", commitsHandler)
	router.HandleFunc("/_/commits/", commitsJSONHandler)
	router.HandleFunc("/shortcommits/", shortCommitsHandler)
	router.HandleFunc("/trybots/", trybotHandler)
	router.HandleFunc("/clusters/", templateHandler("clusters.html"))
	router.HandleFunc("/clustering/", clusteringHandler)
	router.PathPrefix("/cl/").HandlerFunc(clHandler)
	router.PathPrefix("/activitylog/").HandlerFunc(activityHandler)
	router.HandleFunc("/alerts/", templateHandler("alerting.html"))
	router.HandleFunc("/alerting/", alertingHandler)
	router.HandleFunc("/alert_reset/", alertResetHandler)
	router.HandleFunc("/annotate/", annotate.Handler)
	router.HandleFunc("/compare/", templateHandler("compare.html"))
	router.HandleFunc("/kernel/", templateHandler("kernel.html"))
	router.HandleFunc("/_/kernel/", kernelJSONHandler)
	router.HandleFunc("/calc/", calcHandler)
	router.HandleFunc("/help/", helpHandler)
	router.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	router.HandleFunc("/logout/", login.LogoutHandler)
	router.HandleFunc("/loginstatus/", login.StatusHandler)

	http.Handle("/", util.LoggingGzipRequestResponse(router))

	glog.Infoln("Ready to serve.")
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #20
func main() {
	defer common.LogPanic()
	worker_common.Init()
	if !*worker_common.Local {
		defer util.CleanTmpDir()
	}
	defer util.TimeTrack(time.Now(), "Running Chromium Perf")
	defer glog.Flush()

	// Validate required arguments.
	if *chromiumBuildNoPatch == "" {
		glog.Error("Must specify --chromium_build_nopatch")
		return
	}
	if *chromiumBuildWithPatch == "" {
		glog.Error("Must specify --chromium_build_withpatch")
		return
	}
	if *runID == "" {
		glog.Error("Must specify --run_id")
		return
	}
	if *runIDNoPatch == "" {
		glog.Error("Must specify --run_id_nopatch")
		return
	}
	if *runIDWithPatch == "" {
		glog.Error("Must specify --run_id_withpatch")
		return
	}
	if *benchmarkName == "" {
		glog.Error("Must specify --benchmark_name")
		return
	}

	// Reset the local chromium checkout.
	if err := util.ResetCheckout(util.ChromiumSrcDir); err != nil {
		glog.Errorf("Could not reset %s: %s", util.ChromiumSrcDir, err)
		return
	}
	// Sync the local chromium checkout.
	if err := util.SyncDir(util.ChromiumSrcDir); err != nil {
		glog.Errorf("Could not gclient sync %s: %s", util.ChromiumSrcDir, err)
		return
	}

	// Create the task file so that the master knows this worker is still busy.
	skutil.LogErr(util.CreateTaskFile(util.ACTIVITY_RUNNING_CHROMIUM_PERF))
	defer util.DeleteTaskFile(util.ACTIVITY_RUNNING_CHROMIUM_PERF)

	if *targetPlatform == util.PLATFORM_ANDROID {
		if err := adb.VerifyLocalDevice(); err != nil {
			// Android device missing or offline.
			glog.Errorf("Could not find Android device: %s", err)
			return
		}
		// Kill adb server to make sure we start from a clean slate.
		skutil.LogErr(util.ExecuteCmd(util.BINARY_ADB, []string{"kill-server"}, []string{},
			util.ADB_ROOT_TIMEOUT, nil, nil))
		// Make sure adb shell is running as root.
		skutil.LogErr(util.ExecuteCmd(util.BINARY_ADB, []string{"root"}, []string{},
			util.ADB_ROOT_TIMEOUT, nil, nil))
	}

	// Instantiate GsUtil object.
	gs, err := util.NewGsUtil(nil)
	if err != nil {
		glog.Error(err)
		return
	}

	// Download the benchmark patch for this run from Google storage.
	benchmarkPatchName := *runID + ".benchmark.patch"
	benchmarkPatchLocalPath := filepath.Join(os.TempDir(), benchmarkPatchName)
	remoteDir := filepath.Join(util.ChromiumPerfRunsDir, *runID)
	benchmarkPatchRemotePath := filepath.Join(remoteDir, benchmarkPatchName)
	respBody, err := gs.GetRemoteFileContents(benchmarkPatchRemotePath)
	if err != nil {
		glog.Errorf("Could not fetch %s: %s", benchmarkPatchRemotePath, err)
		return
	}
	defer skutil.Close(respBody)
	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(respBody); err != nil {
		glog.Errorf("Could not read from %s: %s", benchmarkPatchRemotePath, err)
		return
	}
	if err := ioutil.WriteFile(benchmarkPatchLocalPath, buf.Bytes(), 0666); err != nil {
		glog.Errorf("Unable to create file %s: %s", benchmarkPatchLocalPath, err)
		return
	}
	defer skutil.Remove(benchmarkPatchLocalPath)
	// Apply benchmark patch to the local chromium checkout.
	if buf.Len() > 10 {
		if err := util.ApplyPatch(benchmarkPatchLocalPath, util.ChromiumSrcDir); err != nil {
			glog.Errorf("Could not apply Telemetry's patch in %s: %s", util.ChromiumSrcDir, err)
			return
		}
	}

	// Download the specified chromium builds.
	for _, chromiumBuild := range []string{*chromiumBuildNoPatch, *chromiumBuildWithPatch} {
		if err := gs.DownloadChromiumBuild(chromiumBuild); err != nil {
			glog.Error(err)
			return
		}
		// Delete the chromium build to save space when we are done.
		defer skutil.RemoveAll(filepath.Join(util.ChromiumBuildsDir, chromiumBuild))
	}

	chromiumBinaryNoPatch := filepath.Join(util.ChromiumBuildsDir, *chromiumBuildNoPatch, util.BINARY_CHROME)
	chromiumBinaryWithPatch := filepath.Join(util.ChromiumBuildsDir, *chromiumBuildWithPatch, util.BINARY_CHROME)

	// Download pagesets if they do not exist locally.
	if err := gs.DownloadWorkerArtifacts(util.PAGESETS_DIR_NAME, *pagesetType, *workerNum); err != nil {
		glog.Error(err)
		return
	}
	pathToPagesets := filepath.Join(util.PagesetsDir, *pagesetType)

	// Download archives if they do not exist locally.
	if err := gs.DownloadWorkerArtifacts(util.WEB_ARCHIVES_DIR_NAME, *pagesetType, *workerNum); err != nil {
		glog.Error(err)
		return
	}

	// Establish nopatch output paths.
	localOutputDirNoPatch := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, *runIDNoPatch)
	skutil.RemoveAll(localOutputDirNoPatch)
	skutil.MkdirAll(localOutputDirNoPatch, 0700)
	defer skutil.RemoveAll(localOutputDirNoPatch)
	remoteDirNoPatch := filepath.Join(util.BenchmarkRunsDir, *runIDNoPatch)

	// Establish withpatch output paths.
	localOutputDirWithPatch := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, *runIDWithPatch)
	skutil.RemoveAll(localOutputDirWithPatch)
	skutil.MkdirAll(localOutputDirWithPatch, 0700)
	defer skutil.RemoveAll(localOutputDirWithPatch)
	remoteDirWithPatch := filepath.Join(util.BenchmarkRunsDir, *runIDWithPatch)

	// Construct path to the ct_run_benchmark python script.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir((filepath.Dir(filepath.Dir(filepath.Dir(currentFile))))),
		"py")

	fileInfos, err := ioutil.ReadDir(pathToPagesets)
	if err != nil {
		glog.Errorf("Unable to read the pagesets dir %s: %s", pathToPagesets, err)
		return
	}

	numWorkers := WORKER_POOL_SIZE
	if *targetPlatform == util.PLATFORM_ANDROID || !*runInParallel {
		// Do not run page sets in parallel if the target platform is Android.
		// This is because the nopatch/withpatch APK needs to be installed prior to
		// each run and this will interfere with the parallel runs. Instead of trying
		// to find a complicated solution to this, it makes sense for Android to
		// continue to be serial because it will help guard against
		// crashes/flakiness/inconsistencies which are more prevalent in mobile runs.
		numWorkers = 1
		glog.Infoln("===== Going to run the task serially =====")
	} else {
		glog.Infoln("===== Going to run the task with parallel chrome processes =====")
	}

	// Create channel that contains all pageset file names. This channel will
	// be consumed by the worker pool.
	pagesetRequests := util.GetClosedChannelOfPagesets(fileInfos)

	var wg sync.WaitGroup
	// Use a RWMutex for the chromeProcessesCleaner goroutine to communicate to
	// the workers (acting as "readers") when it wants to be the "writer" and
	// kill all zombie chrome processes.
	var mutex sync.RWMutex

	// Loop through workers in the worker pool.
	for i := 0; i < numWorkers; i++ {
		// Increment the WaitGroup counter.
		wg.Add(1)

		// Create and run a goroutine closure that runs the benchmarks.
		go func() {
			// Decrement the WaitGroup counter when the goroutine completes.
			defer wg.Done()

			for pagesetName := range pagesetRequests {

				mutex.RLock()

				if err := runBenchmark(pagesetName, pathToPagesets, pathToPyFiles, localOutputDirNoPatch, *chromiumBuildNoPatch, chromiumBinaryNoPatch, *runIDNoPatch, *browserExtraArgsNoPatch); err != nil {
					glog.Errorf("Error while running nopatch benchmark: %s", err)
					// Release the read lock so the cleaner goroutine is not blocked forever.
					mutex.RUnlock()
					return
				}
				if err := runBenchmark(pagesetName, pathToPagesets, pathToPyFiles, localOutputDirWithPatch, *chromiumBuildWithPatch, chromiumBinaryWithPatch, *runIDWithPatch, *browserExtraArgsWithPatch); err != nil {
					glog.Errorf("Error while running withpatch benchmark: %s", err)
					// Release the read lock so the cleaner goroutine is not blocked forever.
					mutex.RUnlock()
					return
				}
				mutex.RUnlock()
			}
		}()
	}

	if !*worker_common.Local {
		// Start the cleaner.
		go util.ChromeProcessesCleaner(&mutex, *chromeCleanerTimer)
	}

	// Wait for all spawned goroutines to complete.
	wg.Wait()

	// If "--output-format=csv-pivot-table" was specified then merge all CSV files and upload.
	if strings.Contains(*benchmarkExtraArgs, "--output-format=csv-pivot-table") {
		if err := mergeUploadCSVFiles(localOutputDirNoPatch, pathToPyFiles, *runIDNoPatch, remoteDirNoPatch, gs); err != nil {
			glog.Errorf("Error while processing nopatch CSV files: %s", err)
			return
		}
		if err := mergeUploadCSVFiles(localOutputDirWithPatch, pathToPyFiles, *runIDWithPatch, remoteDirWithPatch, gs); err != nil {
			glog.Errorf("Error while processing withpatch CSV files: %s", err)
			return
		}
	}
}
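The RWMutex above is the coordination point with the chrome-process cleaner: each worker holds the read lock while a benchmark runs, and the cleaner takes the write lock before killing stray chrome processes, so no benchmark is interrupted mid-run. A hedged sketch of what util.ChromeProcessesCleaner might do (the real helper's body may differ):

	func chromeProcessesCleaner(mutex *sync.RWMutex, cleanerTimer time.Duration) {
		for range time.Tick(cleanerTimer) {
			// Acquiring the write lock blocks until every worker has released
			// its read lock, i.e. no benchmark is currently running.
			mutex.Lock()
			skutil.LogErr(util.ExecuteCmd("pkill", []string{"-9", "-f", "chrome"}, []string{},
				5*time.Minute, nil, nil))
			mutex.Unlock()
		}
	}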
Example #21
func main() {
	// Setup DB flags.
	dbConf := db.DBConfigFromFlags()

	common.InitWithMetrics("skiaperf", graphiteServer)
	Init()
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	if err := dbConf.InitDB(); err != nil {
		glog.Fatal(err)
	}

	stats.Start(nanoTileStore, git)
	alerting.Start(nanoTileStore, *apikey)

	// By default, use a set of credentials set up for localhost access.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com"
	var clientSecret = "cw0IosPu4yjaG2KWmppj2guj"
	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		redirectURL = "https://perf.skia.org/oauth2callback/"
	}
	login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST, *local)
	glog.Infoln("Begin loading data.")

	// Resources are served directly.
	router := mux.NewRouter()

	router.PathPrefix("/res/").HandlerFunc(makeResourceHandler())

	router.HandleFunc("/", mainHandler)
	router.HandleFunc("/shortcuts/", shortcutHandler)
	router.PathPrefix("/tiles/").HandlerFunc(tileHandler)
	router.PathPrefix("/single/").HandlerFunc(singleHandler)
	router.PathPrefix("/query/").HandlerFunc(queryHandler)
	router.HandleFunc("/commits/", commitsHandler)
	router.HandleFunc("/shortcommits/", shortCommitsHandler)
	router.HandleFunc("/trybots/", trybotHandler)
	router.HandleFunc("/clusters/", clustersHandler)
	router.HandleFunc("/clustering/", clusteringHandler)
	router.PathPrefix("/cl/").HandlerFunc(clHandler)
	router.PathPrefix("/activitylog/").HandlerFunc(activityHandler)
	router.HandleFunc("/alerts/", alertsHandler)
	router.HandleFunc("/alerting/", alertingHandler)
	router.HandleFunc("/alert_reset/", alertResetHandler)
	router.HandleFunc("/annotate/", annotate.Handler)
	router.HandleFunc("/compare/", compareHandler)
	router.HandleFunc("/calc/", calcHandler)
	router.HandleFunc("/help/", helpHandler)
	router.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	router.HandleFunc("/logout/", login.LogoutHandler)
	router.HandleFunc("/loginstatus/", login.StatusHandler)

	http.Handle("/", util.LoggingGzipRequestResponse(router))

	glog.Infoln("Ready to serve.")
	glog.Fatal(http.ListenAndServe(*port, nil))
}