Example #1
func Init() {
	rand.Seed(time.Now().UnixNano())
	if *resourcesDir == "" {
		_, filename, _, _ := runtime.Caller(0)
		*resourcesDir = filepath.Join(filepath.Dir(filename), "../..")
	}

	loadTemplates()

	var err error
	git, err = gitinfo.CloneOrUpdate(*gitRepoURL, *gitRepoDir, false)
	if err != nil {
		glog.Fatal(err)
	}
	evt := eventbus.New(nil)
	tileStats = tilestats.New(evt)

	// Connect to traceDB and create the builders.
	db, err := tracedb.NewTraceServiceDBFromAddress(*traceservice, types.PerfTraceBuilder)
	if err != nil {
		glog.Fatalf("Failed to connect to tracedb: %s", err)
	}

	masterTileBuilder, err = tracedb.NewMasterTileBuilder(db, git, *tileSize, evt)
	if err != nil {
		glog.Fatalf("Failed to build trace/db.DB: %s", err)
	}
	branchTileBuilder = tracedb.NewBranchTileBuilder(db, git, rietveld.RIETVELD_SKIA_URL, evt)
}
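For context, an Init like the one above is presumably invoked from the application's main once flags are parsed, much like Example #7 further down. The minimal sketch below is modeled on that example; the service name "skiaperf" and the graphiteServer flag are illustrative assumptions, not taken from the source.

func main() {
	// Hypothetical wiring, modeled on Example #7; "skiaperf" and graphiteServer
	// are assumptions made for illustration only.
	defer common.LogPanic()
	common.InitWithMetrics("skiaperf", graphiteServer)

	// Build the shared state: git checkout, tile builders and templates.
	Init()

	// ... register HTTP handlers and call http.ListenAndServe ...
}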
Example #2
// newPerfProcessor implements the ingestion.Constructor signature.
func newPerfProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig, client *http.Client) (ingestion.Processor, error) {
	traceDB, err := tracedb.NewTraceServiceDBFromAddress(config.ExtraParams[CONFIG_TRACESERVICE], types.PerfTraceBuilder)
	if err != nil {
		return nil, err
	}

	return &perfProcessor{
		traceDB: traceDB,
		vcs:     vcs,
	}, nil
}
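A constructor with this signature is typically handed to the ingestion framework so it can build the processor from a config file. The snippet below is only a hedged sketch: the ingestion.Register function and the "perf" id are assumptions for illustration and are not shown in the source.

// Hypothetical registration sketch; ingestion.Register and the "perf" id are
// assumptions, and the real ingestion.Constructor type may differ.
func init() {
	ingestion.Register("perf", newPerfProcessor)
}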
Example #3
// newPerfTrybotProcessor implements the ingestion.Constructor signature.
func newPerfTrybotProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig) (ingestion.Processor, error) {
	traceDB, err := tracedb.NewTraceServiceDBFromAddress(config.ExtraParams[CONFIG_TRACESERVICE], types.PerfTraceBuilder)
	if err != nil {
		return nil, err
	}

	return &perfTrybotProcessor{
		traceDB: traceDB,
		review:  rietveld.New(CODE_REVIEW_URL, nil),
		cache:   map[string]time.Time{},
	}, nil
}
Example #4
// newGoldProcessor implements the ingestion.Constructor signature.
func newGoldProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig) (ingestion.Processor, error) {
	traceDB, err := tracedb.NewTraceServiceDBFromAddress(config.ExtraParams[CONFIG_TRACESERVICE], types.GoldenTraceBuilder)
	if err != nil {
		return nil, err
	}

	ret := &goldProcessor{
		traceDB: traceDB,
		vcs:     vcs,
	}
	ret.extractID = ret.getCommitID
	return ret, nil
}
Example #5
// GetTileBuilderFromEnv looks at the TEST_TRACEDB_ADDRESS environment variable for the
// address of the traceDB service and at TEST_GIT_URL for the Git repository to use. If
// they are defined it returns a MasterTileBuilder instance; if not, the calling test fails.
func GetTileBuilderFromEnv(t assert.TestingT) tracedb.MasterTileBuilder {
	traceDBAddress := os.Getenv("TEST_TRACEDB_ADDRESS")
	assert.NotEqual(t, "", traceDBAddress, "Please define the TEST_TRACEDB_ADDRESS environment variable to point to the traceDB.")

	gitURL := os.Getenv("TEST_GIT_URL")
	assert.NotEqual(t, "", traceDBAddress, "Please define the TEST_TRACEDB_ADDRESS environment variable to point to the Git URL.")

	gitRepoDir, err := ioutil.TempDir("", "gitrepo")
	assert.Nil(t, err)

	git, err := gitinfo.CloneOrUpdate(gitURL, gitRepoDir, false)
	if err != nil {
		glog.Fatal(err)
	}

	eventBus := eventbus.New(nil)
	db, err := tracedb.NewTraceServiceDBFromAddress(traceDBAddress, types.GoldenTraceBuilder)
	assert.Nil(t, err)

	tileBuilder, err := tracedb.NewMasterTileBuilder(db, git, 50, eventBus)
	assert.Nil(t, err)
	return tileBuilder
}
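A hedged usage sketch for the helper above: a test obtains the builder and inspects the current tile. The test body is an assumption; GetTile is part of the MasterTileBuilder interface exercised in the next example.

// Hypothetical test using GetTileBuilderFromEnv; it requires the
// TEST_TRACEDB_ADDRESS and TEST_GIT_URL environment variables to be set.
func TestTileFromEnv(t *testing.T) {
	builder := GetTileBuilderFromEnv(t)
	tile := builder.GetTile()
	assert.NotNil(t, tile)
	assert.NotEqual(t, 0, len(tile.Commits))
}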
Example #6
func TestNewTraceDBBuilder(t *testing.T) {
	defer cleanup()

	// First spin up a traceservice server that we will talk to.
	server, err := traceservice.NewTraceServiceServer(FILENAME)
	if err != nil {
		t.Fatalf("Failed to initialize the tracestore server: %s", err)
	}

	// Start the server on an open port.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("failed to listen: %v", err)
	}
	port := lis.Addr().String()
	s := grpc.NewServer()
	traceservice.RegisterTraceServiceServer(s, server)
	go func() {
		if err := s.Serve(lis); err != nil {
			t.Errorf("Failed while serving: %s", err)
		}
	}()

	// Get a Git repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	// Set up a connection to the server.
	conn, err := grpc.Dial(port, grpc.WithInsecure())
	if err != nil {
		t.Fatalf("did not connect: %v", err)
	}
	ts, err := db.NewTraceServiceDB(conn, types.PerfTraceBuilder)
	if err != nil {
		t.Fatalf("Failed to create tracedb.DB: %s", err)
	}
	defer util.Close(ts)

	git, err := gitinfo.NewGitInfo(filepath.Join(tr.Dir, "testrepo"), false, false)
	if err != nil {
		t.Fatal(err)
	}
	hashes := git.LastN(50)
	assert.Equal(t, 50, len(hashes))

	// Populate the tracedb with some data.
	commitID := &db.CommitID{
		Timestamp: git.Timestamp(hashes[1]).Unix(),
		ID:        hashes[1],
		Source:    "master",
	}

	entries := map[string]*db.Entry{
		"key:8888:android": &db.Entry{
			Params: map[string]string{
				"config":   "8888",
				"platform": "android",
				"type":     "skp",
			},
			Value: types.BytesFromFloat64(0.01),
		},
		"key:gpu:win8": &db.Entry{
			Params: map[string]string{
				"config":   "gpu",
				"platform": "win8",
				"type":     "skp",
			},
			Value: types.BytesFromFloat64(1.234),
		},
	}

	err = ts.Add(commitID, entries)
	if err != nil {
		t.Fatalf("Failed to add data to traceservice: %s", err)
	}

	evt := eventbus.New(nil)
	traceDB, err := db.NewTraceServiceDBFromAddress(port, types.PerfTraceBuilder)
	assert.Nil(t, err)

	builder, err := db.NewMasterTileBuilder(traceDB, git, 50, evt)
	if err != nil {
		t.Fatalf("Failed to construct TraceStore: %s", err)
	}
	tile := builder.GetTile()
	assert.Equal(t, 50, len(tile.Commits))
	assert.Equal(t, 2, len(tile.Traces))
	assert.Equal(t, commitID.ID, tile.Commits[1].Hash)
	assert.Equal(t, "Joe Gregorio ([email protected])", tile.Commits[1].Author)

	ptrace := tile.Traces["key:8888:android"].(*types.PerfTrace)
	assert.Equal(t, 0.01, ptrace.Values[1])
	assert.Equal(t, config.MISSING_DATA_SENTINEL, ptrace.Values[0])

	ptrace = tile.Traces["key:gpu:win8"].(*types.PerfTrace)
	assert.Equal(t, 1.234, ptrace.Values[1])
	assert.Equal(t, config.MISSING_DATA_SENTINEL, ptrace.Values[0])

	// Now add more data to the tracestore, trigger LoadTile() and check the results.
	commitID = &db.CommitID{
		Timestamp: git.Timestamp(hashes[2]).Unix(),
		ID:        hashes[2],
		Source:    "master",
	}

	entries = map[string]*db.Entry{
		"key:8888:android": &db.Entry{
			Params: map[string]string{
				"config":   "8888",
				"platform": "android",
				"type":     "skp",
			},
			Value: types.BytesFromFloat64(0.02),
		},
		"key:565:ubuntu": &db.Entry{
			Params: map[string]string{
				"config":   "565",
				"platform": "ubuntu",
				"type":     "skp",
			},
			Value: types.BytesFromFloat64(2.345),
		},
	}

	err = ts.Add(commitID, entries)
	if err != nil {
		t.Fatalf("Failed to add data to traceservice: %s", err)
	}

	// Access the LoadTile method without exposing it in the MasterBuilder interface.
	type hasLoadTile interface {
		LoadTile() error
	}
	masterBuilder := builder.(hasLoadTile)
	if err := masterBuilder.LoadTile(); err != nil {
		t.Fatalf("Failed to force load Tile: %s", err)
	}

	tile = builder.GetTile()
	assert.Equal(t, 50, len(tile.Commits))
	assert.Equal(t, 3, len(tile.Traces))
	assert.Equal(t, commitID.ID, tile.Commits[2].Hash)

	ptrace = tile.Traces["key:8888:android"].(*types.PerfTrace)
	assert.Equal(t, 0.01, ptrace.Values[1])
	assert.Equal(t, 0.02, ptrace.Values[2])
	assert.Equal(t, config.MISSING_DATA_SENTINEL, ptrace.Values[0])
	assert.Equal(t, config.MISSING_DATA_SENTINEL, ptrace.Values[3])

	ptrace = tile.Traces["key:gpu:win8"].(*types.PerfTrace)
	assert.Equal(t, 1.234, ptrace.Values[1])
	assert.Equal(t, config.MISSING_DATA_SENTINEL, ptrace.Values[0])

	ptrace = tile.Traces["key:565:ubuntu"].(*types.PerfTrace)
	assert.Equal(t, 2.345, ptrace.Values[2])
	assert.Equal(t, config.MISSING_DATA_SENTINEL, ptrace.Values[3])
}
Example #7
func main() {
	defer common.LogPanic()
	var err error

	mainTimer := timer.New("main init")
	// Setup DB flags.
	dbConf := database.ConfigFromFlags(db.PROD_DB_HOST, db.PROD_DB_PORT, database.USER_RW, db.PROD_DB_NAME, db.MigrationSteps())

	// Global init to initialize logging and metrics.
	common.InitWithMetrics("skiacorrectness", graphiteServer)

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatalf("Unable to retrieve version: %s", err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	// Enable the memory profiler if memProfile was set.
	// TODO(stephana): This should be moved to a HTTP endpoint that
	// only responds to internal IP addresses/ports.
	if *memProfile > 0 {
		time.AfterFunc(*memProfile, func() {
			glog.Infof("Writing Memory Profile")
			f, err := ioutil.TempFile("./", "memory-profile")
			if err != nil {
				glog.Fatalf("Unable to create memory profile file: %s", err)
			}
			if err := pprof.WriteHeapProfile(f); err != nil {
				glog.Fatalf("Unable to write memory profile file: %v", err)
			}
			util.Close(f)
			glog.Infof("Memory profile written to %s", f.Name())

			os.Exit(0)
		})
	}

	if *cpuProfile > 0 {
		glog.Infof("Writing CPU Profile")
		f, err := ioutil.TempFile("./", "cpu-profile")
		if err != nil {
			glog.Fatalf("Unable to create cpu profile file: %s", err)
		}

		if err := pprof.StartCPUProfile(f); err != nil {
			glog.Fatalf("Unable to write cpu profile file: %v", err)
		}
		time.AfterFunc(*cpuProfile, func() {
			pprof.StopCPUProfile()
			util.Close(f)
			glog.Infof("CPU profile written to %s", f.Name())
			os.Exit(0)
		})
	}

	// Init this module.
	Init()

	// Initialize submodules.
	filediffstore.Init()

	// Set up login
	// TODO (stephana): Factor out to go/login/login.go and remove hard-coded
	// values.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-ubjke2f3staq6ouas64r31h8f8tcbiqp.apps.googleusercontent.com"
	var clientSecret = "rK-kRY71CXmcg0v9I9KIgWci"
	var useRedirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		useRedirectURL = *redirectURL
	}
	login.Init(clientID, clientSecret, useRedirectURL, cookieSalt, login.DEFAULT_SCOPE, *authWhiteList, *local)

	// Get the client to be used to access GS and the Monorail issue tracker.
	client, err := auth.NewJWTServiceAccountClient("", *serviceAccountFile, nil, gstorage.CloudPlatformScope, "https://www.googleapis.com/auth/userinfo.email")
	if err != nil {
		glog.Fatalf("Failed to authenticate service account: %s", err)
	}

	// Set up the cache implementation to use.
	cacheFactory := filediffstore.MemCacheFactory
	if *redisHost != "" {
		cacheFactory = func(uniqueId string, codec util.LRUCodec) util.LRUCache {
			return redisutil.NewRedisLRUCache(*redisHost, *redisDB, uniqueId, codec)
		}
	}

	// Get the expectations storage, the filediff storage and the tilestore.
	diffStore, err := filediffstore.NewFileDiffStore(client, *imageDir, *gsBucketName, filediffstore.DEFAULT_GS_IMG_DIR_NAME, cacheFactory, filediffstore.RECOMMENDED_WORKER_POOL_SIZE)
	if err != nil {
		glog.Fatalf("Allocating DiffStore failed: %s", err)
	}

	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}

	if !vdb.IsLatestVersion() {
		glog.Fatal("Wrong DB version. Please updated to latest version.")
	}

	digestStore, err := digeststore.New(*storageDir)
	if err != nil {
		glog.Fatal(err)
	}

	git, err := gitinfo.CloneOrUpdate(*gitRepoURL, *gitRepoDir, false)
	if err != nil {
		glog.Fatal(err)
	}

	evt := eventbus.New(nil)

	// Connect to traceDB and create the builders.
	db, err := tracedb.NewTraceServiceDBFromAddress(*traceservice, types.GoldenTraceBuilder)
	if err != nil {
		glog.Fatalf("Failed to connect to tracedb: %s", err)
	}

	masterTileBuilder, err := tracedb.NewMasterTileBuilder(db, git, *nCommits, evt)
	if err != nil {
		glog.Fatalf("Failed to build trace/db.DB: %s", err)
	}
	branchTileBuilder := tracedb.NewBranchTileBuilder(db, git, rietveld.RIETVELD_SKIA_URL, evt)

	storages = &storage.Storage{
		DiffStore:         diffStore,
		ExpectationsStore: expstorage.NewCachingExpectationStore(expstorage.NewSQLExpectationStore(vdb), evt),
		IgnoreStore:       ignore.NewSQLIgnoreStore(vdb),
		MasterTileBuilder: masterTileBuilder,
		BranchTileBuilder: branchTileBuilder,
		DigestStore:       digestStore,
		NCommits:          *nCommits,
		EventBus:          evt,
		TrybotResults:     trybot.NewTrybotResultStorage(vdb),
		RietveldAPI:       rietveld.New(*rietveldURL, nil),
	}

	if err := history.Init(storages, *nTilesToBackfill); err != nil {
		glog.Fatalf("Unable to initialize history package: %s", err)
	}

	if blamer, err = blame.New(storages); err != nil {
		glog.Fatalf("Unable to create blamer: %s", err)
	}

	if err := ignore.Init(storages.IgnoreStore); err != nil {
		glog.Fatalf("Failed to start monitoring for expired ignore rules: %s", err)
	}

	// Enable the experimental features.
	tallies, err = tally.New(storages)
	if err != nil {
		glog.Fatalf("Failed to build tallies: %s", err)
	}

	paramsetSum = paramsets.New(tallies, storages)

	if !*local {
		*issueTrackerKey = metadata.Must(metadata.ProjectGet(metadata.APIKEY))
	}

	issueTracker = issues.NewMonorailIssueTracker(client)

	summaries, err = summary.New(storages, tallies, blamer)
	if err != nil {
		glog.Fatalf("Failed to build summary: %s", err)
	}

	statusWatcher, err = status.New(storages)
	if err != nil {
		glog.Fatalf("Failed to initialize status watcher: %s", err)
	}

	imgFS := NewURLAwareFileServer(*imageDir, IMAGE_URL_PREFIX)
	pathToURLConverter = imgFS.GetURL

	if err := warmer.Init(storages, summaries, tallies); err != nil {
		glog.Fatalf("Failed to initialize the warmer: %s", err)
	}
	mainTimer.Stop()

	// Everything is wired up at this point. Run the self tests if requested.
	if *selfTest {
		search.SelfTest(storages, tallies, blamer, paramsetSum)
	}

	router := mux.NewRouter()

	// Set up the resource to serve the image files.
	router.PathPrefix(IMAGE_URL_PREFIX).Handler(imgFS.Handler)

	// New Polymer based UI endpoints.
	router.PathPrefix("/res/").HandlerFunc(makeResourceHandler())

	// TODO(stephana): Remove the 'poly' prefix from all the handlers and clean
	// up main2.go by either merging it into main.go or making it clearer that
	// it contains all the handlers. Make it clearer which variables are shared
	// between the different files.

	// All the handlers will be prefixed with poly to differentiate them from the
	// angular code until the angular code is removed.
	router.HandleFunc(OAUTH2_CALLBACK_PATH, login.OAuth2CallbackHandler)
	router.HandleFunc("/", byBlameHandler).Methods("GET")
	router.HandleFunc("/list", templateHandler("list.html")).Methods("GET")
	router.HandleFunc("/_/details", polyDetailsHandler).Methods("GET")
	router.HandleFunc("/_/diff", polyDiffJSONDigestHandler).Methods("GET")
	router.HandleFunc("/_/hashes", polyAllHashesHandler).Methods("GET")
	router.HandleFunc("/_/ignores", polyIgnoresJSONHandler).Methods("GET")
	router.HandleFunc("/_/ignores/add/", polyIgnoresAddHandler).Methods("POST")
	router.HandleFunc("/_/ignores/del/{id}", polyIgnoresDeleteHandler).Methods("POST")
	router.HandleFunc("/_/ignores/save/{id}", polyIgnoresUpdateHandler).Methods("POST")
	router.HandleFunc("/_/list", polyListTestsHandler).Methods("GET")
	router.HandleFunc("/_/paramset", polyParamsHandler).Methods("GET")
	router.HandleFunc("/_/nxn", nxnJSONHandler).Methods("GET")

	// TODO(stephana): Once /_/search3 is stable it will replace /_/search and the
	// /search*.html pages will be consolidated into one.
	router.HandleFunc("/_/search", polySearchJSONHandler).Methods("GET")
	router.HandleFunc("/_/search3", search3JSONHandler).Methods("GET")

	router.HandleFunc("/_/status/{test}", polyTestStatusHandler).Methods("GET")
	router.HandleFunc("/_/test", polyTestHandler).Methods("POST")
	router.HandleFunc("/_/triage", polyTriageHandler).Methods("POST")
	router.HandleFunc("/_/triagelog", polyTriageLogHandler).Methods("GET")
	router.HandleFunc("/_/triagelog/undo", triageUndoHandler).Methods("POST")
	router.HandleFunc("/_/failure", failureListJSONHandler).Methods("GET")
	router.HandleFunc("/_/failure/clear", failureClearJSONHandler).Methods("POST")
	router.HandleFunc("/_/trybot", listTrybotsJSONHandler).Methods("GET")

	router.HandleFunc("/byblame", byBlameHandler).Methods("GET")
	router.HandleFunc("/cluster", templateHandler("cluster.html")).Methods("GET")
	router.HandleFunc("/search2", search2Handler).Methods("GET")
	router.HandleFunc("/cmp/{test}", templateHandler("compare.html")).Methods("GET")
	router.HandleFunc("/detail", templateHandler("single.html")).Methods("GET")
	router.HandleFunc("/diff", templateHandler("diff.html")).Methods("GET")
	router.HandleFunc("/help", templateHandler("help.html")).Methods("GET")
	router.HandleFunc("/ignores", templateHandler("ignores.html")).Methods("GET")
	router.HandleFunc("/loginstatus/", login.StatusHandler)
	router.HandleFunc("/logout/", login.LogoutHandler)
	router.HandleFunc("/search", templateHandler("search.html")).Methods("GET")
	router.HandleFunc("/triagelog", templateHandler("triagelog.html")).Methods("GET")
	router.HandleFunc("/trybot", templateHandler("trybot.html")).Methods("GET")
	router.HandleFunc("/failures", templateHandler("failures.html")).Methods("GET")

	// Add the necessary middleware and have the router handle all requests.
	// By structuring the middleware this way we only log requests that are
	// authenticated.
	rootHandler := util.LoggingGzipRequestResponse(router)
	if *forceLogin {
		rootHandler = login.ForceAuth(rootHandler, OAUTH2_CALLBACK_PATH)
	}

	// The polyStatusHandler is being polled, so we exclude it from logging.
	http.HandleFunc("/_/trstatus", polyStatusHandler)
	http.Handle("/", rootHandler)

	// Start the server
	glog.Infoln("Serving on http://127.0.0.1" + *port)
	glog.Fatal(http.ListenAndServe(*port, nil))
}