Example No. 1
// DownloadSkia uses git to clone Skia from googlesource.com and check it out to the specified version.
// Upon success, the SkiaVersion in config is set to be the current version and any dependencies
// needed to compile Skia have been installed (e.g. the latest version of gyp).
// It returns an error on failure.
func DownloadSkia(version, path string, v config.VersionSetter) error {
	glog.Infof("Cloning Skia version %s to %s", version, path)

	repo, err := gitinfo.CloneOrUpdate("https://skia.googlesource.com/skia", path, false)
	if err != nil {
		return fmt.Errorf("Failed cloning Skia: %s", err)
	}

	if err = repo.SetToCommit(version); err != nil {
		return fmt.Errorf("Problem setting Skia to version %s: %s", version, err)
	}

	syncCmd := &exec.Command{
		Name: "bin/sync-and-gyp",
		Dir:  path,
	}

	if err := exec.Run(syncCmd); err != nil {
		return fmt.Errorf("Failed syncing and setting up gyp: %s", err)
	}

	if v != nil {
		if lc, err := repo.Details(version, false); err != nil {
			glog.Errorf("Could not get git details for skia version %s: %s", version, err)
		} else {
			v.SetSkiaVersion(lc)
		}
	}
	return nil
}
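As a quick illustration of the call shape, here is a minimal hypothetical caller; the commit hash and checkout path are placeholders, and passing nil for the config.VersionSetter simply skips the config update, which the nil check in the function permits.

// Hypothetical caller of DownloadSkia; hash and path are placeholders.
func updateSkiaCheckout() error {
	version := "0123456789abcdef0123456789abcdef01234567" // assumed commit hash
	checkout := "/tmp/skia-checkout"                      // assumed working directory
	// A nil VersionSetter means SetSkiaVersion is never called.
	return DownloadSkia(version, checkout, nil)
}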
Example No. 2
func Init() {
	rand.Seed(time.Now().UnixNano())
	if *resourcesDir == "" {
		_, filename, _, _ := runtime.Caller(0)
		*resourcesDir = filepath.Join(filepath.Dir(filename), "../..")
	}

	loadTemplates()

	var err error
	git, err = gitinfo.CloneOrUpdate(*gitRepoURL, *gitRepoDir, false)
	if err != nil {
		glog.Fatal(err)
	}
	evt := eventbus.New(nil)
	tileStats = tilestats.New(evt)

	// Connect to traceDB and create the builders.
	db, err := tracedb.NewTraceServiceDBFromAddress(*traceservice, types.PerfTraceBuilder)
	if err != nil {
		glog.Fatalf("Failed to connect to tracedb: %s", err)
	}

	masterTileBuilder, err = tracedb.NewMasterTileBuilder(db, git, *tileSize, evt)
	if err != nil {
		glog.Fatalf("Failed to build trace/db.DB: %s", err)
	}
	branchTileBuilder = tracedb.NewBranchTileBuilder(db, git, rietveld.RIETVELD_SKIA_URL, evt)
}
Example No. 3
// NewDefaultRepoManager returns a RepoManager instance which operates in the given
// working directory and updates at the given frequency.
func NewDefaultRepoManager(workdir string, frequency time.Duration, depot_tools string) (RepoManager, error) {
	chromiumParentDir := path.Join(workdir, "chromium")
	skiaDir := path.Join(workdir, "skia")
	skiaRepo, err := gitinfo.CloneOrUpdate(REPO_SKIA, skiaDir, true)
	if err != nil {
		return nil, err
	}
	gclient := GCLIENT
	rollDep := ROLL_DEP
	if depot_tools != "" {
		gclient = path.Join(depot_tools, gclient)
		rollDep = path.Join(depot_tools, rollDep)
	}

	r := &repoManager{
		chromiumDir:       path.Join(chromiumParentDir, "src"),
		chromiumParentDir: chromiumParentDir,
		depot_tools:       depot_tools,
		gclient:           gclient,
		rollDep:           rollDep,
		skiaDir:           skiaDir,
		skiaRepo:          skiaRepo,
	}
	if err := r.update(); err != nil {
		return nil, err
	}
	go func() {
		for range time.Tick(frequency) {
			util.LogErr(r.update())
		}
	}()
	return r, nil
}
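A hedged usage sketch for the constructor above; the working directory and update frequency are placeholders. Note that an empty depot_tools argument makes the function fall back to the bare GCLIENT and ROLL_DEP command names, per the check in the function body.

// Hypothetical caller of NewDefaultRepoManager; arguments are placeholders.
func startRepoManager() (RepoManager, error) {
	// An empty depot_tools path resolves gclient and roll-dep from PATH
	// instead of a pinned depot_tools checkout.
	return NewDefaultRepoManager("/tmp/autoroller", time.Hour, "")
}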
Example No. 4
func Init() {
	rand.Seed(time.Now().UnixNano())
	if *resourcesDir == "" {
		_, filename, _, _ := runtime.Caller(0)
		*resourcesDir = filepath.Join(filepath.Dir(filename), "../..")
	}

	indexTemplate = template.Must(template.ParseFiles(
		filepath.Join(*resourcesDir, "templates/index.html"),
		filepath.Join(*resourcesDir, "templates/titlebar.html"),
		filepath.Join(*resourcesDir, "templates/header.html"),
	))
	clusterTemplate = template.Must(template.ParseFiles(
		filepath.Join(*resourcesDir, "templates/clusters.html"),
		filepath.Join(*resourcesDir, "templates/titlebar.html"),
		filepath.Join(*resourcesDir, "templates/header.html"),
	))
	alertsTemplate = template.Must(template.ParseFiles(
		filepath.Join(*resourcesDir, "templates/alerting.html"),
		filepath.Join(*resourcesDir, "templates/titlebar.html"),
		filepath.Join(*resourcesDir, "templates/header.html"),
	))
	clTemplate = template.Must(template.ParseFiles(
		filepath.Join(*resourcesDir, "templates/cl.html"),
		filepath.Join(*resourcesDir, "templates/titlebar.html"),
		filepath.Join(*resourcesDir, "templates/header.html"),
	))
	activityTemplate = template.Must(template.ParseFiles(
		filepath.Join(*resourcesDir, "templates/activitylog.html"),
		filepath.Join(*resourcesDir, "templates/titlebar.html"),
		filepath.Join(*resourcesDir, "templates/header.html"),
	))
	compareTemplate = template.Must(template.ParseFiles(
		filepath.Join(*resourcesDir, "templates/compare.html"),
		filepath.Join(*resourcesDir, "templates/titlebar.html"),
		filepath.Join(*resourcesDir, "templates/header.html"),
	))
	helpTemplate = template.Must(template.ParseFiles(
		filepath.Join(*resourcesDir, "templates/help.html"),
		filepath.Join(*resourcesDir, "templates/titlebar.html"),
		filepath.Join(*resourcesDir, "templates/header.html"),
	))

	nanoTileStore = filetilestore.NewFileTileStore(*tileStoreDir, config.DATASET_NANO, 2*time.Minute)

	var err error
	git, err = gitinfo.CloneOrUpdate(*gitRepoURL, *gitRepoDir, false)
	if err != nil {
		glog.Fatal(err)
	}
}
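The seven near-identical template.Must blocks above could be collapsed with a small helper; this is a purely illustrative sketch, not part of the original code, assuming every page shares the same titlebar and header partials.

// loadTemplate is a hypothetical helper that pairs a page template with
// the shared titlebar and header partials.
func loadTemplate(name string) *template.Template {
	return template.Must(template.ParseFiles(
		filepath.Join(*resourcesDir, "templates", name),
		filepath.Join(*resourcesDir, "templates/titlebar.html"),
		filepath.Join(*resourcesDir, "templates/header.html"),
	))
}

With that helper, each assignment reduces to a single line such as indexTemplate = loadTemplate("index.html").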
Example No. 5
// IngestersFromConfig creates a list of ingesters from a config struct.
// Usually the struct is created from parsing a config file.
func IngestersFromConfig(config *sharedconfig.Config, client *http.Client) ([]*Ingester, error) {
	ret := []*Ingester{}

	// Set up the gitinfo object.
	var vcs vcsinfo.VCS
	var err error
	if vcs, err = gitinfo.CloneOrUpdate(config.GitRepoURL, config.GitRepoDir, true); err != nil {
		return nil, err
	}

	// Set up the Google storage client.
	if client == nil {
		client = &http.Client{
			Transport: util.NewBackOffTransport(),
		}
	}

	// For each defined ingester, create an instance.
	for id, ingesterConf := range config.Ingesters {
		processorConstructor, ok := constructors[id]
		if !ok {
			return nil, fmt.Errorf("Unknow ingester: '%s'", id)
		}

		// Instantiate the sources
		sources := make([]Source, 0, len(ingesterConf.Sources))
		for _, dataSource := range ingesterConf.Sources {
			oneSource, err := getSource(id, dataSource, client)
			if err != nil {
				return nil, fmt.Errorf("Error instantiating sources for ingester '%s': %s", id, err)
			}
			sources = append(sources, oneSource)
		}

		// Instantiate the processor.
		processor, err := processorConstructor(vcs, ingesterConf)
		if err != nil {
			return nil, err
		}

		// Create the ingester and add it to the result.
		ingester, err := NewIngester(id, ingesterConf, vcs, sources, processor)
		if err != nil {
			return nil, err
		}
		ret = append(ret, ingester)
	}

	return ret, nil
}
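A minimal sketch of a caller, using only what the signature above shows; conf would normally come from a parsed config file, and passing nil for the client lets IngestersFromConfig construct its own http.Client with a backoff transport.

// Hypothetical caller of IngestersFromConfig; conf is assumed to be
// parsed elsewhere from a config file.
func buildIngesters(conf *sharedconfig.Config) error {
	ingesters, err := IngestersFromConfig(conf, nil)
	if err != nil {
		return fmt.Errorf("Failed to create ingesters: %s", err)
	}
	glog.Infof("Created %d ingesters", len(ingesters))
	return nil
}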
Example No. 6
// newDocSet does the core of the work for both NewDocSet and NewDocSetForIssue.
//
// The repo is checked out into repoDir.
// If a valid issue and patchset are supplied then the repo will be patched with that CL.
// If refresh is true then the git repo will be periodically refreshed (git pull).
func newDocSet(repoDir, repo string, issue, patchset int64, refresh bool) (*DocSet, error) {
	if issue > 0 {
		issueInfo, err := rc.Issue(issue)
		if err != nil {
			err := fmt.Errorf("Failed to retrieve issue status %d: %s", issue, err)
			glog.Error(err)
			return nil, err
		}
		if issueInfo.Closed {
			return nil, fmt.Errorf("Issue %d is closed.", issue)
		}
	}
	git, err := gitinfo.CloneOrUpdate(repo, repoDir, false)
	if err != nil {
		glog.Fatalf("Failed to CloneOrUpdate repo %q: %s", repo, err)
	}
	if issue > 0 {
		cmd := exec.Command("patch", "-p1")
		cmd.Dir = repoDir
		diff, err := rc.Patchset(issue, patchset)
		if err != nil {
			return nil, fmt.Errorf("Failed to retrieve patchset: %s", err)
		}
		cmd.Stdin = diff
		defer util.Close(diff)
		b, err := cmd.CombinedOutput()
		if err != nil {
			return nil, fmt.Errorf("Error while patching %#v - %s: %s", *cmd, string(b), err)
		}
	}
	d := &DocSet{
		repoDir: repoDir,
	}
	d.BuildNavigation()
	if refresh {
		go func() {
			for range time.Tick(config.REFRESH) {
				util.LogErr(git.Update(true, false))
				d.BuildNavigation()
			}
		}()
	}
	return d, nil
}
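A hedged sketch of a plain (non-issue) call to the function above; the repo URL and directory are placeholders.

// Hypothetical caller of newDocSet; arguments are placeholders.
func openDocSet() (*DocSet, error) {
	// issue and patchset of 0 skip the Rietveld patching branch,
	// since the function only patches when issue > 0.
	return newDocSet("/tmp/docrepo", "https://skia.googlesource.com/skia", 0, 0, true)
}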
Example No. 7
func Init() {
	rand.Seed(time.Now().UnixNano())
	if *resourcesDir == "" {
		_, filename, _, _ := runtime.Caller(0)
		*resourcesDir = filepath.Join(filepath.Dir(filename), "../..")
	}

	loadTemplates()

	var err error
	git, err = gitinfo.CloneOrUpdate(*gitRepoURL, *gitRepoDir, false)
	if err != nil {
		glog.Fatal(err)
	}
	nanoTileStore, err = perftracedb.NewBuilder(git, *traceservice, config.INITIAL_TILE_SIZE, types.PerfTraceBuilder)
	if err != nil {
		glog.Fatalf("Failed to build trace/db.DB: %s", err)
	}
}
Example No. 8
// GetTileBuilderFromEnv looks at the TEST_TRACEDB_ADDRESS and TEST_GIT_URL
// environment variables for the traceDB address and the Git repository URL.
// If they are defined it returns a MasterTileBuilder instance; if not, the
// calling test will fail.
func GetTileBuilderFromEnv(t assert.TestingT) tracedb.MasterTileBuilder {
	traceDBAddress := os.Getenv("TEST_TRACEDB_ADDRESS")
	assert.NotEqual(t, "", traceDBAddress, "Please define the TEST_TRACEDB_ADDRESS environment variable to point to the traceDB.")

	gitURL := os.Getenv("TEST_GIT_URL")
	assert.NotEqual(t, "", traceDBAddress, "Please define the TEST_TRACEDB_ADDRESS environment variable to point to the Git URL.")

	gitRepoDir, err := ioutil.TempDir("", "gitrepo")
	assert.Nil(t, err)

	git, err := gitinfo.CloneOrUpdate(gitURL, gitRepoDir, false)
	if err != nil {
		glog.Fatal(err)
	}

	eventBus := eventbus.New(nil)
	db, err := tracedb.NewTraceServiceDBFromAddress(traceDBAddress, types.GoldenTraceBuilder)
	assert.Nil(t, err)

	tileBuilder, err := tracedb.NewMasterTileBuilder(db, git, 50, eventBus)
	assert.Nil(t, err)
	return tileBuilder
}
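A sketch of how a test might use this helper; it assumes TEST_TRACEDB_ADDRESS and TEST_GIT_URL are exported in the test environment, and that MasterTileBuilder exposes a GetTile accessor (an assumption, based on how tile builders are used elsewhere in these examples).

// Hypothetical test; the env vars must be set or the helper fails the test.
func TestTileBuilderSmoke(t *testing.T) {
	builder := GetTileBuilderFromEnv(t)
	// GetTile is assumed to return the most recently built tile.
	tile := builder.GetTile()
	assert.NotNil(t, tile)
}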
Example No. 9
func main() {
	defer common.LogPanic()
	// Set up flags.
	influxdb.SetupFlags()

	common.InitWithMetrics("status", graphiteServer)
	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	Init()
	if *testing {
		*useMetadata = false
	}
	serverURL := "https://" + *host
	if *testing {
		serverURL = "http://" + *host + *port
	}

	// Create buildbot remote DB.
	db, err = buildbot.NewRemoteDB(*buildbotDbHost)
	if err != nil {
		glog.Fatal(err)
	}

	// Set up the InfluxDB client.
	dbClient, err = influxdb.NewClientFromFlagsAndMetadata(*testing)
	if err != nil {
		glog.Fatal(err)
	}

	// By default use a set of credentials set up for localhost access.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com"
	var clientSecret = "cw0IosPu4yjaG2KWmppj2guj"
	var redirectURL = serverURL + "/oauth2callback/"
	if *useMetadata {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
	}
	login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST, false)

	// Check out source code.
	skiaRepo, err := gitinfo.CloneOrUpdate("https://skia.googlesource.com/skia.git", path.Join(*workdir, "skia"), true)
	if err != nil {
		glog.Fatalf("Failed to check out Skia: %v", err)
	}

	infraRepoPath := path.Join(*workdir, "infra")
	infraRepo, err := gitinfo.CloneOrUpdate("https://skia.googlesource.com/buildbot.git", infraRepoPath, true)
	if err != nil {
		glog.Fatalf("Failed to checkout Infra: %v", err)
	}

	glog.Info("CloneOrUpdate complete")

	// Create the commit caches.
	commitCaches = map[string]*commit_cache.CommitCache{}
	skiaCache, err := commit_cache.New(skiaRepo, path.Join(*workdir, "commit_cache.gob"), DEFAULT_COMMITS_TO_LOAD, db)
	if err != nil {
		glog.Fatalf("Failed to create commit cache: %v", err)
	}
	commitCaches[SKIA_REPO] = skiaCache

	infraCache, err := commit_cache.New(infraRepo, path.Join(*workdir, "commit_cache_infra.gob"), DEFAULT_COMMITS_TO_LOAD, db)
	if err != nil {
		glog.Fatalf("Failed to create commit cache: %v", err)
	}
	commitCaches[INFRA_REPO] = infraCache
	glog.Info("commit_cache complete")

	// Load Perf and Gold data in a loop.
	perfStatus, err = dbClient.Int64PollingStatus("select value from \"alerting.new.value\" where app='skiaperf' and host='skia-perf' order by time desc limit 1", time.Minute)
	if err != nil {
		glog.Fatalf("Failed to create polling Perf status: %v", err)
	}
	goldGMStatus, err = dbClient.Int64PollingStatus(fmt.Sprintf(GOLD_STATUS_QUERY_TMPL, "gm"), time.Minute)
	if err != nil {
		glog.Fatalf("Failed to create polling Gold status: %v", err)
	}
	goldImageStatus, err = dbClient.Int64PollingStatus(fmt.Sprintf(GOLD_STATUS_QUERY_TMPL, "image"), time.Minute)
	if err != nil {
		glog.Fatalf("Failed to create polling Gold status: %v", err)
	}

	// Load slave_hosts_cfg and device cfgs in a loop.
	slaveHosts, err = buildbot.SlaveHostsCfgPoller(infraRepoPath)
	if err != nil {
		glog.Fatal(err)
	}
	androidDevices, err = device_cfg.AndroidDeviceCfgPoller(*workdir)
	if err != nil {
		glog.Fatal(err)
	}
	sshDevices, err = device_cfg.SSHDeviceCfgPoller(*workdir)
	if err != nil {
		glog.Fatal(err)
	}

	runServer(serverURL)
}
Example No. 10
func main() {
	defer common.LogPanic()
	// Set up flags.
	dbConf := buildbot.DBConfigFromFlags()
	influxdb.SetupFlags()

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("datahopper", graphiteServer)

	// Initialize the buildbot database.
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	if err := dbConf.InitDB(); err != nil {
		glog.Fatal(err)
	}

	// Shared repo objects.
	skiaRepo, err := gitinfo.CloneOrUpdate(SKIA_REPO, path.Join(*workdir, "datahopper_skia"), true)
	if err != nil {
		glog.Fatal(err)
	}
	infraRepo, err := gitinfo.CloneOrUpdate(INFRA_REPO, path.Join(*workdir, "datahopper_infra"), true)
	if err != nil {
		glog.Fatal(err)
	}
	go func() {
		for range time.Tick(5 * time.Minute) {
			if err := skiaRepo.Update(true, true); err != nil {
				glog.Errorf("Failed to sync Skia repo: %v", err)
			}
			if err := infraRepo.Update(true, true); err != nil {
				glog.Errorf("Failed to sync Infra repo: %v", err)
			}
		}
	}()

	// Data generation goroutines.

	// Buildbot data ingestion.
	go buildbot.IngestNewBuildsLoop(*workdir)

	// Measure buildbot data ingestion progress.
	totalGauge := metrics.GetOrRegisterGauge("buildbot.builds.total", metrics.DefaultRegistry)
	ingestGauge := metrics.GetOrRegisterGauge("buildbot.builds.ingested", metrics.DefaultRegistry)
	go func() {
		for range time.Tick(common.SAMPLE_PERIOD) {
			totalBuilds, err := buildbot.NumTotalBuilds()
			if err != nil {
				glog.Error(err)
				continue
			}
			ingestedBuilds, err := buildbot.NumIngestedBuilds()
			if err != nil {
				glog.Error(err)
				continue
			}
			totalGauge.Update(int64(totalBuilds))
			ingestGauge.Update(int64(ingestedBuilds))
		}
	}()

	// Average duration of buildsteps over a time period.
	go func() {
		period := 24 * time.Hour
		type stepData struct {
			Name     string  `db:"name"`
			Duration float64 `db:"duration"`
		}
		stmt, err := buildbot.DB.Preparex(fmt.Sprintf("SELECT name, AVG(finished-started) AS duration FROM %s WHERE started > ? AND finished > started GROUP BY name ORDER BY duration;", buildbot.TABLE_BUILD_STEPS))
		if err != nil {
			glog.Fatalf("Failed to prepare buildbot database query: %v", err)
		}
		defer util.Close(stmt)
		for range time.Tick(common.SAMPLE_PERIOD) {
			glog.Info("Loading buildstep duration data.")
			t := time.Now().UTC().Add(-period).Unix()
			steps := []stepData{}
			if err := stmt.Select(&steps, t); err != nil {
				glog.Error(err)
				continue
			}
			for _, s := range steps {
				v := int64(s.Duration * float64(time.Millisecond))
				metric := fmt.Sprintf("buildbot.buildsteps.%s.duration", fixName(s.Name))
				metrics.GetOrRegisterGauge(metric, metrics.DefaultRegistry).Update(v)
			}
		}
	}()

	// Average duration of builds over a time period.
	go func() {
		period := 24 * time.Hour
		type buildData struct {
			Builder  string  `db:"builder"`
			Duration float64 `db:"duration"`
		}
		stmt, err := buildbot.DB.Preparex(fmt.Sprintf("SELECT builder, AVG(finished-started) AS duration FROM %s WHERE started > ? AND finished > started GROUP BY builder ORDER BY duration;", buildbot.TABLE_BUILDS))
		if err != nil {
			glog.Fatalf("Failed to prepare buildbot database query: %v", err)
		}
		defer util.Close(stmt)
		for range time.Tick(common.SAMPLE_PERIOD) {
			glog.Info("Loading build duration data.")
			t := time.Now().UTC().Add(-period).Unix()
			builds := []buildData{}
			if err := stmt.Select(&builds, t); err != nil {
				glog.Error(err)
				continue
			}
			for _, s := range builds {
				v := int64(s.Duration * float64(time.Millisecond))
				metric := fmt.Sprintf("buildbot.builds.%s.duration", fixName(s.Builder))
				metrics.GetOrRegisterGauge(metric, metrics.DefaultRegistry).Update(v)
			}
		}
	}()

	// Average build step time broken down by builder.
	go func() {
		period := 24 * time.Hour
		type stepData struct {
			Builder  string  `db:"builder"`
			StepName string  `db:"stepName"`
			Duration float64 `db:"duration"`
		}
		stmt, err := buildbot.DB.Preparex(fmt.Sprintf("SELECT b.builder as builder, s.name as stepName, AVG(s.finished-s.started) AS duration FROM %s s INNER JOIN %s b ON (s.buildId = b.id) WHERE s.started > ? AND s.finished > s.started GROUP BY b.builder, s.name ORDER BY b.builder, duration;", buildbot.TABLE_BUILD_STEPS, buildbot.TABLE_BUILDS))
		if err != nil {
			glog.Fatalf("Failed to prepare buildbot database query: %v", err)
		}
		defer util.Close(stmt)
		for range time.Tick(common.SAMPLE_PERIOD) {
			glog.Info("Loading per-builder buildstep duration data.")
			t := time.Now().UTC().Add(-period).Unix()
			steps := []stepData{}
			if err := stmt.Select(&steps, t); err != nil {
				glog.Error(err)
				continue
			}
			for _, s := range steps {
				v := int64(s.Duration * float64(time.Millisecond))
				metric := fmt.Sprintf("buildbot.buildstepsbybuilder.%s.%s.duration", fixName(s.Builder), fixName(s.StepName))
				metrics.GetOrRegisterGauge(metric, metrics.DefaultRegistry).Update(v)
			}
		}
	}()

	// Number of commits in the repo.
	go func() {
		skiaGauge := metrics.GetOrRegisterGauge("repo.skia.commits", metrics.DefaultRegistry)
		infraGauge := metrics.GetOrRegisterGauge("repo.infra.commits", metrics.DefaultRegistry)
		for range time.Tick(5 * time.Minute) {
			skiaGauge.Update(int64(skiaRepo.NumCommits()))
			infraGauge.Update(int64(infraRepo.NumCommits()))
		}
	}()

	// Wait while the above goroutines generate data.
	select {}
}
Example No. 11
func main() {
	defer common.LogPanic()

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("datahopper", graphiteServer)

	// Shared repo objects.
	skiaRepo, err := gitinfo.CloneOrUpdate(SKIA_REPO, path.Join(*workdir, "datahopper_skia"), true)
	if err != nil {
		glog.Fatal(err)
	}
	infraRepo, err := gitinfo.CloneOrUpdate(INFRA_REPO, path.Join(*workdir, "datahopper_infra"), true)
	if err != nil {
		glog.Fatal(err)
	}
	go func() {
		for range time.Tick(5 * time.Minute) {
			if err := skiaRepo.Update(true, true); err != nil {
				glog.Errorf("Failed to sync Skia repo: %v", err)
			}
			if err := infraRepo.Update(true, true); err != nil {
				glog.Errorf("Failed to sync Infra repo: %v", err)
			}
		}
	}()

	// Data generation goroutines.
	db, err := buildbot.NewLocalDB(path.Join(*workdir, "buildbot.db"))
	if err != nil {
		glog.Fatal(err)
	}

	// Buildbot data ingestion.
	if err := buildbot.IngestNewBuildsLoop(db, *workdir); err != nil {
		glog.Fatal(err)
	}

	// Run a server for the buildbot data.
	if err := buildbot.RunBuildServer(*grpcPort, db); err != nil {
		glog.Fatal(err)
	}

	// Measure buildbot data ingestion progress.
	totalGauge := go_metrics.GetOrRegisterGauge("buildbot.builds.total", go_metrics.DefaultRegistry)
	ingestGauge := go_metrics.GetOrRegisterGauge("buildbot.builds.ingested", go_metrics.DefaultRegistry)
	go func() {
		for range time.Tick(common.SAMPLE_PERIOD) {
			totalBuilds, err := buildbot.NumTotalBuilds()
			if err != nil {
				glog.Error(err)
				continue
			}
			ingestedBuilds, err := db.NumIngestedBuilds()
			if err != nil {
				glog.Error(err)
				continue
			}
			totalGauge.Update(int64(totalBuilds))
			ingestGauge.Update(int64(ingestedBuilds))
		}
	}()

	// Average build and step time.
	go func() {
		period := 24 * time.Hour
		for range time.Tick(10 * time.Minute) {
			glog.Info("Loading build and buildstep duration data.")
			end := time.Now().UTC()
			start := end.Add(-period)
			builds, err := db.GetBuildsFromDateRange(start, end)
			if err != nil {
				glog.Errorf("Failed to obtain build and buildstep duration data: %s", err)
				continue
			}
			for _, b := range builds {
				if !b.IsFinished() {
					continue
				}
				// Report build time.
				// app.host.measurement.measurement.builder.measurement*
				d := b.Finished.Sub(b.Started)
				metric := fmt.Sprintf("buildbot.builds.%s.duration", fixName(b.Builder))
				metrics.GetOrRegisterSlidingWindow(metric, metrics.DEFAULT_WINDOW).Update(int64(d))
				for _, s := range b.Steps {
					if !s.IsFinished() {
						continue
					}
					// app.host.measurement.measurement.builder.step.measurement*
					d := s.Finished.Sub(s.Started)
					metric := fmt.Sprintf("buildbot.buildstepsbybuilder.%s.%s.duration", fixName(b.Builder), fixName(s.Name))
					metrics.GetOrRegisterSlidingWindow(metric, metrics.DEFAULT_WINDOW).Update(int64(d))
				}
			}
		}
	}()

	// Number of commits in the repo.
	go func() {
		skiaGauge := go_metrics.GetOrRegisterGauge("repo.skia.commits", go_metrics.DefaultRegistry)
		infraGauge := go_metrics.GetOrRegisterGauge("repo.infra.commits", go_metrics.DefaultRegistry)
		for range time.Tick(5 * time.Minute) {
			skiaGauge.Update(int64(skiaRepo.NumCommits()))
			infraGauge.Update(int64(infraRepo.NumCommits()))
		}
	}()

	// Run a backup server.
	go func() {
		glog.Fatal(buildbot.RunBackupServer(db, *httpPort))
	}()

	// Wait while the above goroutines generate data.
	select {}
}
Example No. 12
func main() {
	defer common.LogPanic()
	var err error

	mainTimer := timer.New("main init")
	// Set up DB flags.
	dbConf := database.ConfigFromFlags(db.PROD_DB_HOST, db.PROD_DB_PORT, database.USER_RW, db.PROD_DB_NAME, db.MigrationSteps())

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("skiacorrectness", graphiteServer)

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatalf("Unable to retrieve version: %s", err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	// Enable the memory profiler if memProfile was set.
	// TODO(stephana): This should be moved to a HTTP endpoint that
	// only responds to internal IP addresses/ports.
	if *memProfile > 0 {
		time.AfterFunc(*memProfile, func() {
			glog.Infof("Writing Memory Profile")
			f, err := ioutil.TempFile("./", "memory-profile")
			if err != nil {
				glog.Fatalf("Unable to create memory profile file: %s", err)
			}
			if err := pprof.WriteHeapProfile(f); err != nil {
				glog.Fatalf("Unable to write memory profile file: %v", err)
			}
			util.Close(f)
			glog.Infof("Memory profile written to %s", f.Name())

			os.Exit(0)
		})
	}

	if *cpuProfile > 0 {
		glog.Infof("Writing CPU Profile")
		f, err := ioutil.TempFile("./", "cpu-profile")
		if err != nil {
			glog.Fatalf("Unable to create cpu profile file: %s", err)
		}

		if err := pprof.StartCPUProfile(f); err != nil {
			glog.Fatalf("Unable to write cpu profile file: %v", err)
		}
		time.AfterFunc(*cpuProfile, func() {
			pprof.StopCPUProfile()
			util.Close(f)
			glog.Infof("CPU profile written to %s", f.Name())
			os.Exit(0)
		})
	}

	// Init this module.
	Init()

	// Initialize submodules.
	filediffstore.Init()

	// Set up login
	// TODO (stephana): Factor out to go/login/login.go and remove the
	// hard-coded values.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-ubjke2f3staq6ouas64r31h8f8tcbiqp.apps.googleusercontent.com"
	var clientSecret = "rK-kRY71CXmcg0v9I9KIgWci"
	var useRedirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		useRedirectURL = *redirectURL
	}
	login.Init(clientID, clientSecret, useRedirectURL, cookieSalt, login.DEFAULT_SCOPE, *authWhiteList, *local)

	// Get the client to be used to access GS and the Monorail issue tracker.
	client, err := auth.NewJWTServiceAccountClient("", *serviceAccountFile, nil, gstorage.CloudPlatformScope, "https://www.googleapis.com/auth/userinfo.email")
	if err != nil {
		glog.Fatalf("Failed to authenticate service account: %s", err)
	}

	// Set up the cache implementation to use.
	cacheFactory := filediffstore.MemCacheFactory
	if *redisHost != "" {
		cacheFactory = func(uniqueId string, codec util.LRUCodec) util.LRUCache {
			return redisutil.NewRedisLRUCache(*redisHost, *redisDB, uniqueId, codec)
		}
	}

	// Get the expectations storage, the filediff storage and the tilestore.
	diffStore, err := filediffstore.NewFileDiffStore(client, *imageDir, *gsBucketName, filediffstore.DEFAULT_GS_IMG_DIR_NAME, cacheFactory, filediffstore.RECOMMENDED_WORKER_POOL_SIZE)
	if err != nil {
		glog.Fatalf("Allocating DiffStore failed: %s", err)
	}

	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}

	if !vdb.IsLatestVersion() {
		glog.Fatal("Wrong DB version. Please updated to latest version.")
	}

	digestStore, err := digeststore.New(*storageDir)
	if err != nil {
		glog.Fatal(err)
	}

	git, err := gitinfo.CloneOrUpdate(*gitRepoURL, *gitRepoDir, false)
	if err != nil {
		glog.Fatal(err)
	}

	evt := eventbus.New(nil)

	// Connect to traceDB and create the builders.
	db, err := tracedb.NewTraceServiceDBFromAddress(*traceservice, types.GoldenTraceBuilder)
	if err != nil {
		glog.Fatalf("Failed to connect to tracedb: %s", err)
	}

	masterTileBuilder, err := tracedb.NewMasterTileBuilder(db, git, *nCommits, evt)
	if err != nil {
		glog.Fatalf("Failed to build trace/db.DB: %s", err)
	}
	branchTileBuilder := tracedb.NewBranchTileBuilder(db, git, rietveld.RIETVELD_SKIA_URL, evt)

	storages = &storage.Storage{
		DiffStore:         diffStore,
		ExpectationsStore: expstorage.NewCachingExpectationStore(expstorage.NewSQLExpectationStore(vdb), evt),
		IgnoreStore:       ignore.NewSQLIgnoreStore(vdb),
		MasterTileBuilder: masterTileBuilder,
		BranchTileBuilder: branchTileBuilder,
		DigestStore:       digestStore,
		NCommits:          *nCommits,
		EventBus:          evt,
		TrybotResults:     trybot.NewTrybotResultStorage(vdb),
		RietveldAPI:       rietveld.New(*rietveldURL, nil),
	}

	if err := history.Init(storages, *nTilesToBackfill); err != nil {
		glog.Fatalf("Unable to initialize history package: %s", err)
	}

	if blamer, err = blame.New(storages); err != nil {
		glog.Fatalf("Unable to create blamer: %s", err)
	}

	if err := ignore.Init(storages.IgnoreStore); err != nil {
		glog.Fatalf("Failed to start monitoring for expired ignore rules: %s", err)
	}

	// Enable the experimental features.
	tallies, err = tally.New(storages)
	if err != nil {
		glog.Fatalf("Failed to build tallies: %s", err)
	}

	paramsetSum = paramsets.New(tallies, storages)

	if !*local {
		*issueTrackerKey = metadata.Must(metadata.ProjectGet(metadata.APIKEY))
	}

	issueTracker = issues.NewMonorailIssueTracker(client)

	summaries, err = summary.New(storages, tallies, blamer)
	if err != nil {
		glog.Fatalf("Failed to build summary: %s", err)
	}

	statusWatcher, err = status.New(storages)
	if err != nil {
		glog.Fatalf("Failed to initialize status watcher: %s", err)
	}

	imgFS := NewURLAwareFileServer(*imageDir, IMAGE_URL_PREFIX)
	pathToURLConverter = imgFS.GetURL

	if err := warmer.Init(storages, summaries, tallies); err != nil {
		glog.Fatalf("Failed to initialize the warmer: %s", err)
	}
	mainTimer.Stop()

	// Everything is wired up at this point. Run the self tests if requested.
	if *selfTest {
		search.SelfTest(storages, tallies, blamer, paramsetSum)
	}

	router := mux.NewRouter()

	// Set up the resource to serve the image files.
	router.PathPrefix(IMAGE_URL_PREFIX).Handler(imgFS.Handler)

	// New Polymer based UI endpoints.
	router.PathPrefix("/res/").HandlerFunc(makeResourceHandler())

	// TODO(stephana): Remove the 'poly' prefix from all the handlers and clean
	// up main2.go by either merging it into main.go or making it clearer that
	// it contains all the handlers. Make it clearer what variables are shared
	// between the different files.

	// All the handlers will be prefixed with poly to differentiate them from the
	// angular code until the angular code is removed.
	router.HandleFunc(OAUTH2_CALLBACK_PATH, login.OAuth2CallbackHandler)
	router.HandleFunc("/", byBlameHandler).Methods("GET")
	router.HandleFunc("/list", templateHandler("list.html")).Methods("GET")
	router.HandleFunc("/_/details", polyDetailsHandler).Methods("GET")
	router.HandleFunc("/_/diff", polyDiffJSONDigestHandler).Methods("GET")
	router.HandleFunc("/_/hashes", polyAllHashesHandler).Methods("GET")
	router.HandleFunc("/_/ignores", polyIgnoresJSONHandler).Methods("GET")
	router.HandleFunc("/_/ignores/add/", polyIgnoresAddHandler).Methods("POST")
	router.HandleFunc("/_/ignores/del/{id}", polyIgnoresDeleteHandler).Methods("POST")
	router.HandleFunc("/_/ignores/save/{id}", polyIgnoresUpdateHandler).Methods("POST")
	router.HandleFunc("/_/list", polyListTestsHandler).Methods("GET")
	router.HandleFunc("/_/paramset", polyParamsHandler).Methods("GET")
	router.HandleFunc("/_/nxn", nxnJSONHandler).Methods("GET")

	// TODO(stephana): Once /_/search3 is stable it will replace /_/search and the
	// /search*.html pages will be consolidated into one.
	router.HandleFunc("/_/search", polySearchJSONHandler).Methods("GET")
	router.HandleFunc("/_/search3", search3JSONHandler).Methods("GET")

	router.HandleFunc("/_/status/{test}", polyTestStatusHandler).Methods("GET")
	router.HandleFunc("/_/test", polyTestHandler).Methods("POST")
	router.HandleFunc("/_/triage", polyTriageHandler).Methods("POST")
	router.HandleFunc("/_/triagelog", polyTriageLogHandler).Methods("GET")
	router.HandleFunc("/_/triagelog/undo", triageUndoHandler).Methods("POST")
	router.HandleFunc("/_/failure", failureListJSONHandler).Methods("GET")
	router.HandleFunc("/_/failure/clear", failureClearJSONHandler).Methods("POST")
	router.HandleFunc("/_/trybot", listTrybotsJSONHandler).Methods("GET")

	router.HandleFunc("/byblame", byBlameHandler).Methods("GET")
	router.HandleFunc("/cluster", templateHandler("cluster.html")).Methods("GET")
	router.HandleFunc("/search2", search2Handler).Methods("GET")
	router.HandleFunc("/cmp/{test}", templateHandler("compare.html")).Methods("GET")
	router.HandleFunc("/detail", templateHandler("single.html")).Methods("GET")
	router.HandleFunc("/diff", templateHandler("diff.html")).Methods("GET")
	router.HandleFunc("/help", templateHandler("help.html")).Methods("GET")
	router.HandleFunc("/ignores", templateHandler("ignores.html")).Methods("GET")
	router.HandleFunc("/loginstatus/", login.StatusHandler)
	router.HandleFunc("/logout/", login.LogoutHandler)
	router.HandleFunc("/search", templateHandler("search.html")).Methods("GET")
	router.HandleFunc("/triagelog", templateHandler("triagelog.html")).Methods("GET")
	router.HandleFunc("/trybot", templateHandler("trybot.html")).Methods("GET")
	router.HandleFunc("/failures", templateHandler("failures.html")).Methods("GET")

	// Add the necessary middleware and have the router handle all requests.
	// By structuring the middleware this way we only log requests that are
	// authenticated.
	rootHandler := util.LoggingGzipRequestResponse(router)
	if *forceLogin {
		rootHandler = login.ForceAuth(rootHandler, OAUTH2_CALLBACK_PATH)
	}

	// The polyStatusHandler is being polled, so we exclude it from logging.
	http.HandleFunc("/_/trstatus", polyStatusHandler)
	http.Handle("/", rootHandler)

	// Start the server
	glog.Infoln("Serving on http://127.0.0.1" + *port)
	glog.Fatal(http.ListenAndServe(*port, nil))
}