Example 1
// NewBranchTileBuilder returns an instance of BranchTileBuilder that allows
// creating tiles for the given VCS or code review system by querying db.
//
// TODO(stephana): Use the EventBus to update the internal cache as commits are updated.
func NewBranchTileBuilder(db DB, git *gitinfo.GitInfo, reviewURL string, evt *eventbus.EventBus) BranchTileBuilder {
	return &tileBuilder{
		db:        db,
		vcs:       git,
		review:    rietveld.New(reviewURL, util.NewTimeoutClient()),
		reviewURL: reviewURL,
		cache:     lru.New(MAX_ISSUE_CACHE_SIZE),
		tcache:    lru.New(MAX_TILE_CACHE_SIZE),
	}
}
Example 2
// newPerfTrybotProcessor implements the ingestion.Constructor signature.
func newPerfTrybotProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig) (ingestion.Processor, error) {
	traceDB, err := tracedb.NewTraceServiceDBFromAddress(config.ExtraParams[CONFIG_TRACESERVICE], types.PerfTraceBuilder)
	if err != nil {
		return nil, err
	}

	return &perfTrybotProcessor{
		traceDB: traceDB,
		review:  rietveld.New(CODE_REVIEW_URL, nil),
		cache:   map[string]time.Time{},
	}, nil
}
Example 3
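// NewBuilder returns a Builder that wraps the Builder created by db.NewBuilder
// and adds a Rietveld client and an in-memory cache of Rietveld issues.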
func NewBuilder(git *gitinfo.GitInfo, address string, tileSize int, traceBuilder tiling.TraceBuilder) (*Builder, error) {
	review := rietveld.New(rietveld.RIETVELD_SKIA_URL, util.NewTimeoutClient())
	builder, err := db.NewBuilder(git, address, tileSize, traceBuilder)
	if err != nil {
		return nil, fmt.Errorf("Failed to construct Builder: %s", err)
	}

	return &Builder{
		Builder: builder,
		vcs:     git,
		review:  review,
		cache:   map[string]*rietveld.Issue{},
	}, nil
}
Example 4
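// TestTrybotPerfIngestion runs the perf trybot processor against a local trace
// DB test server, using a mocked Rietveld response, and verifies the ingested
// commit IDs and the resulting tile.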
func TestTrybotPerfIngestion(t *testing.T) {
	b, err := ioutil.ReadFile(filepath.Join("testdata", "rietveld_response.txt"))
	assert.NoError(t, err)
	m := mockhttpclient.NewURLMock()
	m.Mock("https://codereview.chromium.org/api/1467533002/1", b)

	server, serverAddr := ingestion.StartTraceDBTestServer(t, "./trybot_test_trace.db")
	defer server.Stop()
	defer testutils.Remove(t, "./trybot_test_trace.db")

	ingesterConf := &sharedconfig.IngesterConfig{
		ExtraParams: map[string]string{
			CONFIG_TRACESERVICE: serverAddr,
		},
	}
	processor, err := newPerfTrybotProcessor(nil, ingesterConf, nil)
	assert.Nil(t, err)

	processor.(*perfTrybotProcessor).review = rietveld.New("https://codereview.chromium.org", m.Client())

	fsResult, err := ingestion.FileSystemResult(filepath.Join(TEST_DATA_DIR, "trybot.json"), TEST_DATA_DIR)
	assert.Nil(t, err)
	err = processor.Process(fsResult)
	assert.Nil(t, err)
	assert.Equal(t, 1, len(processor.(*perfTrybotProcessor).cache))

	// Steal the traceDB used by the processor to verify the results.
	traceDB := processor.(*perfTrybotProcessor).traceDB

	startTime := time.Time{}
	commitIDs, err := traceDB.List(startTime, time.Now())
	assert.Nil(t, err)

	assert.Equal(t, 1, len(commitIDs))
	assert.Equal(t, &tracedb.CommitID{
		Timestamp: 1448036640,
		ID:        "1",
		Source:    "https://codereview.chromium.org/1467533002",
	}, commitIDs[0])

	// Get a tile and make sure we have the right number of traces.
	tile, _, err := traceDB.TileFromCommits(commitIDs)
	assert.Nil(t, err)

	traces := tile.Traces
	assert.Equal(t, 2, len(traces))

	assert.Nil(t, traceDB.Close())
}
Example 5
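// newGoldTrybotProcessor wraps the processor returned by newGoldProcessor with
// trybot support: it adds a Rietveld client and an issue cache and overrides
// the function used to extract the commit ID.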
func newGoldTrybotProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig) (ingestion.Processor, error) {
	processor, err := newGoldProcessor(vcs, config)
	if err != nil {
		return nil, err
	}

	// Get the underlying goldProcessor.
	gProcessor := processor.(*goldProcessor)
	ret := &goldTrybotProcessor{
		goldProcessor: gProcessor,
		review:        rietveld.New(config.ExtraParams[CONFIG_CODE_REVIEW_URL], nil),
		cache:         map[string]time.Time{},
	}

	// Override the function used to extract the commitID.
	gProcessor.extractID = ret.getCommitID
	return ret, nil
}
Example 6
// setup initializes a fake AutoRoller for testing. It returns the working
// directory, the AutoRoller instance, the mock repo manager, the mock Rietveld
// client used for faking HTTP requests, and a rietveld.Issue representing the
// first CL that was uploaded by the AutoRoller.
func setup(t *testing.T) (string, *AutoRoller, *mockRepoManager, *mockRietveld, *rietveld.Issue) {
	testutils.SkipIfShort(t)

	// Setup mocks.
	urlMock := mockhttpclient.NewURLMock()
	urlMock.Mock(fmt.Sprintf("%s/xsrf_token", autoroll.RIETVELD_URL), []byte("abc123"))
	rv := &mockRietveld{
		fakeIssueNum: 10001,
		r:            rietveld.New(autoroll.RIETVELD_URL, urlMock.Client()),
		t:            t,
		urlMock:      urlMock,
	}

	rm := &mockRepoManager{t: t}
	repo_manager.NewRepoManager = func(workdir string, frequency time.Duration, depot_tools string) (repo_manager.RepoManager, error) {
		return rm, nil
	}

	workdir, err := ioutil.TempDir("", "test_autoroll_mode_")
	assert.Nil(t, err)

	// Set up more test data.
	initialCommit := "abc1231010101010101010101010101010101010"
	rm.mockSkiaCommit(initialCommit)
	rm.mockSkiaCommit("def4561010101010101010101010101010101010")
	rm.mockLastRollRev(initialCommit)
	rm.mockRolledPast(initialCommit, true)
	roll1 := rm.rollerWillUpload(rv, rm.LastRollRev(), rm.SkiaHead(), noTrybots, false)

	// Create the roller.
	roller, err := NewAutoRoller(workdir, []string{}, []string{}, rv.r, time.Hour, time.Hour, "depot_tools")
	assert.Nil(t, err)

	// Verify that the bot ran successfully.
	checkStatus(t, roller, rv, rm, STATUS_IN_PROGRESS, roll1, noTrybots, false, nil, nil, false)

	return workdir, roller, rm, rv, roll1
}
Example 7
func main() {
	defer common.LogPanic()
	var err error

	mainTimer := timer.New("main init")
	// Setup DB flags.
	dbConf := database.ConfigFromFlags(db.PROD_DB_HOST, db.PROD_DB_PORT, database.USER_RW, db.PROD_DB_NAME, db.MigrationSteps())

	// Global init to initialize logging and metrics.
	common.InitWithMetrics("skiacorrectness", graphiteServer)

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatalf("Unable to retrieve version: %s", err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	// Enable the memory profiler if memProfile was set.
	// TODO(stephana): This should be moved to a HTTP endpoint that
	// only responds to internal IP addresses/ports.
	if *memProfile > 0 {
		time.AfterFunc(*memProfile, func() {
			glog.Infof("Writing Memory Profile")
			f, err := ioutil.TempFile("./", "memory-profile")
			if err != nil {
				glog.Fatalf("Unable to create memory profile file: %s", err)
			}
			if err := pprof.WriteHeapProfile(f); err != nil {
				glog.Fatalf("Unable to write memory profile file: %v", err)
			}
			util.Close(f)
			glog.Infof("Memory profile written to %s", f.Name())

			os.Exit(0)
		})
	}

	if *cpuProfile > 0 {
		glog.Infof("Writing CPU Profile")
		f, err := ioutil.TempFile("./", "cpu-profile")
		if err != nil {
			glog.Fatalf("Unable to create cpu profile file: %s", err)
		}

		if err := pprof.StartCPUProfile(f); err != nil {
			glog.Fatalf("Unable to write cpu profile file: %v", err)
		}
		time.AfterFunc(*cpuProfile, func() {
			pprof.StopCPUProfile()
			util.Close(f)
			glog.Infof("CPU profile written to %s", f.Name())
			os.Exit(0)
		})
	}

	// Init this module.
	Init()

	// Initialize submodules.
	filediffstore.Init()

	// Set up login
	// TODO(stephana): Factor out to go/login/login.go and remove hard-coded
	// values.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-ubjke2f3staq6ouas64r31h8f8tcbiqp.apps.googleusercontent.com"
	var clientSecret = "rK-kRY71CXmcg0v9I9KIgWci"
	var useRedirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		useRedirectURL = *redirectURL
	}
	login.Init(clientID, clientSecret, useRedirectURL, cookieSalt, login.DEFAULT_SCOPE, *authWhiteList, *local)

	// Get the OAuth client if necessary.
	client := getOAuthClient(*oauthCacheFile)

	// Set up the cache implementation to use.
	cacheFactory := filediffstore.MemCacheFactory
	if *redisHost != "" {
		cacheFactory = func(uniqueId string, codec util.LRUCodec) util.LRUCache {
			return redisutil.NewRedisLRUCache(*redisHost, *redisDB, uniqueId, codec)
		}
	}

	// Get the expectations storage, the filediff storage, and the tilestore.
	diffStore, err := filediffstore.NewFileDiffStore(client, *imageDir, *gsBucketName, filediffstore.DEFAULT_GS_IMG_DIR_NAME, cacheFactory, filediffstore.RECOMMENDED_WORKER_POOL_SIZE)
	if err != nil {
		glog.Fatalf("Allocating DiffStore failed: %s", err)
	}

	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}

	if !vdb.IsLatestVersion() {
		glog.Fatal("Wrong DB version. Please updated to latest version.")
	}

	digestStore, err := digeststore.New(*storageDir)
	if err != nil {
		glog.Fatal(err)
	}

	eventBus := eventbus.New(nil)
	storages = &storage.Storage{
		DiffStore:         diffStore,
		ExpectationsStore: expstorage.NewCachingExpectationStore(expstorage.NewSQLExpectationStore(vdb), eventBus),
		IgnoreStore:       ignore.NewSQLIgnoreStore(vdb),
		TileStore:         filetilestore.NewFileTileStore(*tileStoreDir, config.DATASET_GOLD, 2*time.Minute),
		DigestStore:       digestStore,
		NCommits:          *nCommits,
		EventBus:          eventBus,
		TrybotResults:     trybot.NewTrybotResultStorage(vdb),
		RietveldAPI:       rietveld.New(*rietveldURL, nil),
	}

	if err := history.Init(storages, *nTilesToBackfill); err != nil {
		glog.Fatalf("Unable to initialize history package: %s", err)
	}

	if blamer, err = blame.New(storages); err != nil {
		glog.Fatalf("Unable to create blamer: %s", err)
	}

	if err := ignore.Init(storages.IgnoreStore); err != nil {
		glog.Fatalf("Failed to start monitoring for expired ignore rules: %s", err)
	}

	// Enable the experimental features.
	tallies, err = tally.New(storages)
	if err != nil {
		glog.Fatalf("Failed to build tallies: %s", err)
	}

	paramsetSum = paramsets.New(tallies, storages)

	if !*local {
		*issueTrackerKey = metadata.Must(metadata.ProjectGet(metadata.APIKEY))
	}

	issueTracker = issues.NewIssueTracker(*issueTrackerKey)

	summaries, err = summary.New(storages, tallies, blamer)
	if err != nil {
		glog.Fatalf("Failed to build summary: %s", err)
	}

	statusWatcher, err = status.New(storages)
	if err != nil {
		glog.Fatalf("Failed to initialize status watcher: %s", err)
	}

	imgFS := NewURLAwareFileServer(*imageDir, IMAGE_URL_PREFIX)
	pathToURLConverter = imgFS.GetURL

	if err := warmer.Init(storages, summaries, tallies); err != nil {
		glog.Fatalf("Failed to initialize the warmer: %s", err)
	}
	mainTimer.Stop()

	// Everything is wired up at this point. Run the self tests if requested.
	if *selfTest {
		search.SelfTest(storages, tallies, blamer, paramsetSum)
	}

	router := mux.NewRouter()

	// Set up the resource to serve the image files.
	router.PathPrefix(IMAGE_URL_PREFIX).Handler(imgFS.Handler)

	// New Polymer based UI endpoints.
	router.PathPrefix("/res/").HandlerFunc(makeResourceHandler())

	// TODO(stephana): Remove the 'poly' prefix from all the handlers and clean
	// up main2.go by either merging it into main.go or making it clearer that
	// it contains all the handlers. Make it clearer what variables are shared
	// between the different files.

	// All the handlers will be prefixed with poly to differentiate them from the
	// Angular code until the Angular code is removed.
	router.HandleFunc(OAUTH2_CALLBACK_PATH, login.OAuth2CallbackHandler)
	router.HandleFunc("/", byBlameHandler).Methods("GET")
	router.HandleFunc("/list", templateHandler("list.html")).Methods("GET")
	router.HandleFunc("/_/details", polyDetailsHandler).Methods("GET")
	router.HandleFunc("/_/diff", polyDiffJSONDigestHandler).Methods("GET")
	router.HandleFunc("/_/hashes", polyAllHashesHandler).Methods("GET")
	router.HandleFunc("/_/ignores", polyIgnoresJSONHandler).Methods("GET")
	router.HandleFunc("/_/ignores/add/", polyIgnoresAddHandler).Methods("POST")
	router.HandleFunc("/_/ignores/del/{id}", polyIgnoresDeleteHandler).Methods("POST")
	router.HandleFunc("/_/ignores/save/{id}", polyIgnoresUpdateHandler).Methods("POST")
	router.HandleFunc("/_/list", polyListTestsHandler).Methods("GET")
	router.HandleFunc("/_/paramset", polyParamsHandler).Methods("GET")
	router.HandleFunc("/_/nxn", nxnJSONHandler).Methods("GET")

	// TODO(stephana): Once /_/search3 is stable it will replace /_/search and the
	// /search*.html pages will be consolidated into one.
	router.HandleFunc("/_/search", polySearchJSONHandler).Methods("GET")
	router.HandleFunc("/_/search3", search3JSONHandler).Methods("GET")

	router.HandleFunc("/_/status/{test}", polyTestStatusHandler).Methods("GET")
	router.HandleFunc("/_/test", polyTestHandler).Methods("POST")
	router.HandleFunc("/_/triage", polyTriageHandler).Methods("POST")
	router.HandleFunc("/_/triagelog", polyTriageLogHandler).Methods("GET")
	router.HandleFunc("/_/triagelog/undo", triageUndoHandler).Methods("POST")
	router.HandleFunc("/_/failure", failureListJSONHandler).Methods("GET")
	router.HandleFunc("/_/failure/clear", failureClearJSONHandler).Methods("POST")
	router.HandleFunc("/_/trybot", listTrybotsJSONHandler).Methods("GET")

	router.HandleFunc("/byblame", byBlameHandler).Methods("GET")
	router.HandleFunc("/cluster", templateHandler("cluster.html")).Methods("GET")
	router.HandleFunc("/search2", search2Handler).Methods("GET")
	router.HandleFunc("/cmp/{test}", templateHandler("compare.html")).Methods("GET")
	router.HandleFunc("/detail", templateHandler("single.html")).Methods("GET")
	router.HandleFunc("/diff", templateHandler("diff.html")).Methods("GET")
	router.HandleFunc("/help", templateHandler("help.html")).Methods("GET")
	router.HandleFunc("/ignores", templateHandler("ignores.html")).Methods("GET")
	router.HandleFunc("/loginstatus/", login.StatusHandler)
	router.HandleFunc("/logout/", login.LogoutHandler)
	router.HandleFunc("/search", templateHandler("search.html")).Methods("GET")
	router.HandleFunc("/triagelog", templateHandler("triagelog.html")).Methods("GET")
	router.HandleFunc("/trybot", templateHandler("trybot.html")).Methods("GET")
	router.HandleFunc("/failures", templateHandler("failures.html")).Methods("GET")

	// Add the necessary middleware and have the router handle all requests.
	// By structuring the middleware this way we only log requests that are
	// authenticated.
	rootHandler := util.LoggingGzipRequestResponse(router)
	if *forceLogin {
		rootHandler = login.ForceAuth(rootHandler, OAUTH2_CALLBACK_PATH)
	}

	// The polyStatusHandler is being polled, so we exclude it from logging.
	http.HandleFunc("/_/trstatus", polyStatusHandler)
	http.Handle("/", rootHandler)

	// Start the server
	glog.Infoln("Serving on http://127.0.0.1" + *port)
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example 8
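// TestPerfTrace verifies that convertToLongCommits filters commits by source
// and fills in trybot commit details from Rietveld, leaving the fields empty
// for issues that cannot be retrieved.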
func TestPerfTrace(t *testing.T) {
	b, err := ioutil.ReadFile(filepath.Join("testdata", "rietveld_response.txt"))
	assert.Nil(t, err)
	m := mockhttpclient.NewURLMock()
	// Mock this only once to confirm that caching works.
	m.MockOnce("https://codereview.chromium.org/api/1490543002", b)

	review := rietveld.New(rietveld.RIETVELD_SKIA_URL, util.NewTimeoutClient())

	vcsCommits := []*vcsinfo.LongCommit{
		&vcsinfo.LongCommit{
			ShortCommit: &vcsinfo.ShortCommit{
				Hash:    "foofoofoo",
				Author:  "*****@*****.**",
				Subject: "some commit",
			},
		},
	}
	vcs := ingestion.MockVCS(vcsCommits)

	builder := &Builder{
		Builder: nil,
		vcs:     vcs,
		review:  review,
		cache:   map[string]*rietveld.Issue{},
	}

	commits := []*db.CommitID{
		&db.CommitID{
			Timestamp: time.Now(),
			ID:        "1",
			Source:    "https://codereview.chromium.org/1490543002",
		},
		&db.CommitID{
			Timestamp: time.Now(),
			ID:        "foofoofoo",
			Source:    "master",
		},
	}

	long := builder.convertToLongCommits(commits, "master")
	assert.Equal(t, 1, len(long), "Only one commit should match master.")
	assert.Equal(t, "foofoofoo", long[0].ID)
	assert.Equal(t, "some commit", long[0].Desc)
	assert.Equal(t, "*****@*****.**", long[0].Author)

	long = builder.convertToLongCommits(commits, "https://codereview.chromium.org/1490543002")
	assert.Equal(t, 1, len(long), "Only one commit should match the trybot.")
	assert.Equal(t, "1", long[0].ID)
	assert.Equal(t, "no merge conflicts here.", long[0].Desc)
	assert.Equal(t, "jcgregorio", long[0].Author)

	long = builder.convertToLongCommits(commits, "")
	assert.Equal(t, 2, len(long), "Both commits should now appear.")
	assert.Equal(t, "1", long[0].ID)
	assert.Equal(t, "no merge conflicts here.", long[0].Desc)
	assert.Equal(t, "jcgregorio", long[0].Author)
	assert.Equal(t, "foofoofoo", long[1].ID)
	assert.Equal(t, "some commit", long[1].Desc)
	assert.Equal(t, "*****@*****.**", long[1].Author)

	badCommits := []*db.CommitID{
		&db.CommitID{
			Timestamp: time.Now(),
			ID:        "2",
			Source:    "https://codereview.chromium.org/99999999",
		},
		&db.CommitID{
			Timestamp: time.Now(),
			ID:        "barbarbar",
			Source:    "master",
		},
	}
	long = builder.convertToLongCommits(badCommits, "")
	assert.Equal(t, 2, len(long), "Both commits should now appear.")
	assert.Equal(t, "2", long[0].ID)
	assert.Equal(t, "", long[0].Desc)
	assert.Equal(t, "", long[0].Author)
	assert.Equal(t, "barbarbar", long[1].ID)
	assert.Equal(t, "", long[1].Desc)
	assert.Equal(t, "", long[1].Author)

}
Example 9
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("autoroll", graphiteServer)
	Init()

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	if *testing {
		*useMetadata = false
	}

	// Create the Rietveld client.
	client, err := auth.NewClientFromIdAndSecret(rietveld.CLIENT_ID, rietveld.CLIENT_SECRET, path.Join(*workdir, "oauth_cache"), rietveld.OAUTH_SCOPES...)
	if err != nil {
		glog.Fatal(err)
	}
	r := rietveld.New(RIETVELD_URL, client)

	// Retrieve the list of extra CQ trybots.
	// TODO(borenet): Make this editable on the web front-end.
	cqExtraTrybots, err := getCQExtraTrybots()
	if err != nil {
		glog.Fatal(err)
	}

	// Retrieve the initial email list.
	emails, err := getSheriff()
	if err != nil {
		glog.Fatal(err)
	}

	// Start the autoroller.
	arb, err = autoroller.NewAutoRoller(*workdir, cqExtraTrybots, emails, r, time.Minute, 15*time.Minute, *depot_tools)
	if err != nil {
		glog.Fatal(err)
	}

	// Update the current sheriff in a loop.
	go func() {
		for range time.Tick(30 * time.Minute) {
			emails, err := getSheriff()
			if err != nil {
				glog.Errorf("Failed to retrieve current sheriff: %s", err)
			} else {
				arb.SetEmails(emails)
			}
		}
	}()

	serverURL := "https://" + *host
	if *testing {
		serverURL = "http://" + *host + *port
	}

	// By default use a set of credentials set up for localhost access.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com"
	var clientSecret = "cw0IosPu4yjaG2KWmppj2guj"
	var redirectURL = serverURL + "/oauth2callback/"
	if *useMetadata {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
	}
	login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST, false)

	runServer(serverURL)
}