Example #1
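Entry point of an ingestion service: it creates an authenticated client from a service-account key file, builds ingesters from a TOML config, starts them, and then blocks forever.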
func main() {
	defer common.LogPanic()
	common.InitWithMetrics("skia-ingestion", graphiteServer)

	// Initialize the oauth client.
	client, err := auth.NewJWTServiceAccountClient("", *serviceAccountFile, nil, storage.CloudPlatformScope)
	if err != nil {
		glog.Fatalf("Failed to auth: %s", err)
	}

	// Start the ingesters.
	config, err := sharedconfig.ConfigFromTomlFile(*configFilename)
	if err != nil {
		glog.Fatalf("Unable to read config file %s. Got error: %s", *configFilename, err)
	}

	ingesters, err := ingestion.IngestersFromConfig(config, client)
	if err != nil {
		glog.Fatalf("Unable to instantiate ingesters: %s", err)
	}
	for _, oneIngester := range ingesters {
		oneIngester.Start()
	}

	// Run the ingesters forever.
	select {}
}
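Each example on this page builds its authenticated client the same way: call auth.NewJWTServiceAccountClient and hand the resulting *http.Client to whatever code needs Cloud Storage (or other scoped) access. Below is a minimal, self-contained sketch of just that step; the flag name and key-file path are hypothetical, and the import paths are assumptions based on the go.skia.org/infra layout of this era, not taken from the examples themselves.

// Minimal sketch: create an authenticated *http.Client from a JWT
// service-account key file and fail fast if authentication is not possible.
// The flag name and default key path are hypothetical; only the
// auth.NewJWTServiceAccountClient call mirrors the examples on this page.
package main

import (
	"flag"

	"github.com/skia-dev/glog"
	"go.skia.org/infra/go/auth"
	storage "google.golang.org/api/storage/v1"
)

var serviceAccountFile = flag.String("service_account_file", "service-account.json", "JWT service account key file (hypothetical flag).")

func main() {
	flag.Parse()

	// The first argument is the metadata key name; the examples on this page
	// pass "" and rely on the key file instead. A nil transport uses the
	// default HTTP transport.
	client, err := auth.NewJWTServiceAccountClient("", *serviceAccountFile, nil, storage.CloudPlatformScope)
	if err != nil {
		glog.Fatalf("Failed to create authenticated client: %s", err)
	}
	_ = client // Hand the client to ingesters, storage wrappers, etc.
}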
Example #2
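Entry point of the datahopper_internal service: it opens the codename leveldb and the buildbot database, initializes login and the webhook salt, launches a goroutine that uses a JWT service-account client to ingest Android framework builds on a timer, and then serves the HTTP endpoints.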
func main() {
	defer common.LogPanic()

	var err error

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("internal", graphiteServer)

	if !*local {
		*targetList = metadata.Must(metadata.ProjectGet("datahopper_internal_targets"))
	}
	targets := strings.Split(*targetList, " ")
	glog.Infof("Targets: %#v", targets)

	codenameDB, err = leveldb.OpenFile(*codenameDbDir, nil)
	if err != nil && errors.IsCorrupted(err) {
		codenameDB, err = leveldb.RecoverFile(*codenameDbDir, nil)
	}
	if err != nil {
		glog.Fatalf("Failed to open codename leveldb at %s: %s", *codenameDbDir, err)
	}
	// Initialize the buildbot database.
	db, err = buildbot.NewRemoteDB(*buildbotDbHost)
	if err != nil {
		glog.Fatal(err)
	}

	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		redirectURL = "https://internal.skia.org/oauth2callback/"
	}
	if err := login.InitFromMetadataOrJSON(redirectURL, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST); err != nil {
		glog.Fatalf("Failed to initialize login system: %s", err)
	}

	if *local {
		webhook.InitRequestSaltForTesting()
	} else {
		webhook.MustInitRequestSaltFromMetadata()
	}

	repos = gitinfo.NewRepoMap(*workdir)

	// Ingest Android framework builds.
	go func() {
		glog.Infof("Starting.")

		// In this case we don't want a backoff transport since the Apiary backend
		// seems to fail a lot, so we basically want to fall back to polling if a
		// call fails.
		client, err := auth.NewJWTServiceAccountClient("", "", &http.Transport{Dial: util.DialTimeout}, androidbuildinternal.AndroidbuildInternalScope, storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create authenticated client: %s", err)
		}

		buildService, err := androidbuildinternal.New(client)
		if err != nil {
			glog.Fatalf("Failed to obtain Android build service: %v", err)
		}
		glog.Infof("Ready to start loop.")
		step(targets, buildService)
		for range time.Tick(*period) {
			step(targets, buildService)
		}
	}()

	r := mux.NewRouter()
	r.HandleFunc("/", indexHandler)
	r.HandleFunc("/builders/{codename}/builds/{buildNumber}", redirectHandler)
	r.HandleFunc("/builders/{codename}", builderRedirectHandler)
	r.HandleFunc("/ingestBuild", ingestBuildHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #3
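Integration test for the PDF processor: it creates a Cloud Storage client from the default JWT key file, processes a sample ingestion file, downloads the generated JSON from the test bucket, and compares it against the expected DM results.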
func TestPDFProcessor(t *testing.T) {
	testutils.SkipIfShort(t)

	// Get the service account client from metadata or a local config file.
	client, err := auth.NewJWTServiceAccountClient("", auth.DEFAULT_JWT_FILENAME, nil, storage.ScopeFullControl)
	assert.Nil(t, err)

	cacheDir, err := fileutil.EnsureDirExists(CACHE_DIR)
	assert.Nil(t, err)

	// Clean up after the test.
	defer func() {
		defer util.RemoveAll(cacheDir)
		deleteFolderContent(t, TEST_BUCKET, IMAGES_OUT_DIR, client)
		deleteFolderContent(t, TEST_BUCKET, JSON_OUT_DIR, client)
	}()

	// Configure the processor.
	ingesterConf := &sharedconfig.IngesterConfig{
		ExtraParams: map[string]string{
			CONFIG_INPUT_IMAGES_BUCKET:  TEST_BUCKET,
			CONFIG_INPUT_IMAGES_DIR:     IMAGES_IN_DIR,
			CONFIG_OUTPUT_JSON_BUCKET:   TEST_BUCKET,
			CONFIG_OUTPUT_JSON_DIR:      JSON_OUT_DIR,
			CONFIG_OUTPUT_IMAGES_BUCKET: TEST_BUCKET,
			CONFIG_OUTPUT_IMAGES_DIR:    IMAGES_OUT_DIR,
			CONFIG_PDF_CACHEDIR:         cacheDir,
		},
	}
	processor, err := newPDFProcessor(nil, ingesterConf, client)
	assert.Nil(t, err)

	// Load the example file and process it.
	fsResult, err := ingestion.FileSystemResult(TEST_INGESTION_FILE, "./")
	assert.Nil(t, err)

	err = processor.Process(fsResult)
	assert.Nil(t, err)

	// Fetch the json output and parse it.
	pProcessor := processor.(*pdfProcessor)

	// Download the result.
	resultFileName := filepath.Join(CACHE_DIR, "result-file.json")
	assert.Nil(t, pProcessor.download(TEST_BUCKET, JSON_OUT_DIR, fsResult.Name(), resultFileName))

	// Make sure we get the expected result.
	fsResult, err = ingestion.FileSystemResult(TEST_INGESTION_FILE, "./")
	assert.Nil(t, err)
	r, err := fsResult.Open()
	assert.Nil(t, err)
	fsDMResults, err := goldingestion.ParseDMResultsFromReader(r)
	assert.Nil(t, err)

	foundResult, err := ingestion.FileSystemResult(resultFileName, "./")
	assert.Nil(t, err)
	r, err = foundResult.Open()
	assert.Nil(t, err)
	foundDMResults, err := goldingestion.ParseDMResultsFromReader(r)
	assert.Nil(t, err)

	dmResult1 := *fsDMResults
	dmResult2 := *foundDMResults
	dmResult1.Results = nil
	dmResult2.Results = nil
	assert.Equal(t, dmResult1, dmResult2)

	foundIdx := 0
	srcResults := fsDMResults.Results
	tgtResults := foundDMResults.Results
	for _, result := range srcResults {
		assert.True(t, foundIdx < len(tgtResults))
		if result.Options["ext"] == "pdf" {
			for ; (foundIdx < len(tgtResults)) && (result.Key["name"] == tgtResults[foundIdx].Key["name"]); foundIdx++ {
				assert.True(t, tgtResults[foundIdx].Key["rasterizer"] != "")
				delete(tgtResults[foundIdx].Key, "rasterizer")
				assert.Equal(t, result.Key, tgtResults[foundIdx].Key)
				assert.Equal(t, "png", tgtResults[foundIdx].Options["ext"])
			}
		}
	}
	assert.Equal(t, len(foundDMResults.Results), foundIdx)
}
Example #4
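Entry point of the skiacorrectness server: it sets up profiling, login, the diff store, the SQL and trace databases, the tile builders, and the summary/status modules, creates a service-account client for Cloud Storage and the Monorail issue tracker, and then registers the HTTP routes and starts serving.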
func main() {
	defer common.LogPanic()
	var err error

	mainTimer := timer.New("main init")
	// Setup DB flags.
	dbConf := database.ConfigFromFlags(db.PROD_DB_HOST, db.PROD_DB_PORT, database.USER_RW, db.PROD_DB_NAME, db.MigrationSteps())

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("skiacorrectness", graphiteServer)

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatalf("Unable to retrieve version: %s", err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	// Enable the memory profiler if memProfile was set.
	// TODO(stephana): This should be moved to a HTTP endpoint that
	// only responds to internal IP addresses/ports.
	if *memProfile > 0 {
		time.AfterFunc(*memProfile, func() {
			glog.Infof("Writing Memory Profile")
			f, err := ioutil.TempFile("./", "memory-profile")
			if err != nil {
				glog.Fatalf("Unable to create memory profile file: %s", err)
			}
			if err := pprof.WriteHeapProfile(f); err != nil {
				glog.Fatalf("Unable to write memory profile file: %v", err)
			}
			util.Close(f)
			glog.Infof("Memory profile written to %s", f.Name())

			os.Exit(0)
		})
	}

	if *cpuProfile > 0 {
		glog.Infof("Writing CPU Profile")
		f, err := ioutil.TempFile("./", "cpu-profile")
		if err != nil {
			glog.Fatalf("Unable to create cpu profile file: %s", err)
		}

		if err := pprof.StartCPUProfile(f); err != nil {
			glog.Fatalf("Unable to write cpu profile file: %v", err)
		}
		time.AfterFunc(*cpuProfile, func() {
			pprof.StopCPUProfile()
			util.Close(f)
			glog.Infof("CPU profile written to %s", f.Name())
			os.Exit(0)
		})
	}

	// Init this module.
	Init()

	// Initialize submodules.
	filediffstore.Init()

	// Set up login
	// TODO (stephana): Factor out to go/login/login.go and remove hard-coded
	// values.
	var cookieSalt = "notverysecret"
	var clientID = "31977622648-ubjke2f3staq6ouas64r31h8f8tcbiqp.apps.googleusercontent.com"
	var clientSecret = "rK-kRY71CXmcg0v9I9KIgWci"
	var useRedirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		useRedirectURL = *redirectURL
	}
	login.Init(clientID, clientSecret, useRedirectURL, cookieSalt, login.DEFAULT_SCOPE, *authWhiteList, *local)

	// Get the client to be used to access GS and the Monorail issue tracker.
	client, err := auth.NewJWTServiceAccountClient("", *serviceAccountFile, nil, gstorage.CloudPlatformScope, "https://www.googleapis.com/auth/userinfo.email")
	if err != nil {
		glog.Fatalf("Failed to authenticate service account: %s", err)
	}

	// Set up the cache implementation to use.
	cacheFactory := filediffstore.MemCacheFactory
	if *redisHost != "" {
		cacheFactory = func(uniqueId string, codec util.LRUCodec) util.LRUCache {
			return redisutil.NewRedisLRUCache(*redisHost, *redisDB, uniqueId, codec)
		}
	}

	// Get the expectations storage, the filediff storage and the tilestore.
	diffStore, err := filediffstore.NewFileDiffStore(client, *imageDir, *gsBucketName, filediffstore.DEFAULT_GS_IMG_DIR_NAME, cacheFactory, filediffstore.RECOMMENDED_WORKER_POOL_SIZE)
	if err != nil {
		glog.Fatalf("Allocating DiffStore failed: %s", err)
	}

	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}

	if !vdb.IsLatestVersion() {
		glog.Fatal("Wrong DB version. Please updated to latest version.")
	}

	digestStore, err := digeststore.New(*storageDir)
	if err != nil {
		glog.Fatal(err)
	}

	git, err := gitinfo.CloneOrUpdate(*gitRepoURL, *gitRepoDir, false)
	if err != nil {
		glog.Fatal(err)
	}

	evt := eventbus.New(nil)

	// Connect to traceDB and create the builders.
	db, err := tracedb.NewTraceServiceDBFromAddress(*traceservice, types.GoldenTraceBuilder)
	if err != nil {
		glog.Fatalf("Failed to connect to tracedb: %s", err)
	}

	masterTileBuilder, err := tracedb.NewMasterTileBuilder(db, git, *nCommits, evt)
	if err != nil {
		glog.Fatalf("Failed to build trace/db.DB: %s", err)
	}
	branchTileBuilder := tracedb.NewBranchTileBuilder(db, git, rietveld.RIETVELD_SKIA_URL, evt)

	storages = &storage.Storage{
		DiffStore:         diffStore,
		ExpectationsStore: expstorage.NewCachingExpectationStore(expstorage.NewSQLExpectationStore(vdb), evt),
		IgnoreStore:       ignore.NewSQLIgnoreStore(vdb),
		MasterTileBuilder: masterTileBuilder,
		BranchTileBuilder: branchTileBuilder,
		DigestStore:       digestStore,
		NCommits:          *nCommits,
		EventBus:          evt,
		TrybotResults:     trybot.NewTrybotResultStorage(vdb),
		RietveldAPI:       rietveld.New(*rietveldURL, nil),
	}

	if err := history.Init(storages, *nTilesToBackfill); err != nil {
		glog.Fatalf("Unable to initialize history package: %s", err)
	}

	if blamer, err = blame.New(storages); err != nil {
		glog.Fatalf("Unable to create blamer: %s", err)
	}

	if err := ignore.Init(storages.IgnoreStore); err != nil {
		glog.Fatalf("Failed to start monitoring for expired ignore rules: %s", err)
	}

	// Enable the experimental features.
	tallies, err = tally.New(storages)
	if err != nil {
		glog.Fatalf("Failed to build tallies: %s", err)
	}

	paramsetSum = paramsets.New(tallies, storages)

	if !*local {
		*issueTrackerKey = metadata.Must(metadata.ProjectGet(metadata.APIKEY))
	}

	issueTracker = issues.NewMonorailIssueTracker(client)

	summaries, err = summary.New(storages, tallies, blamer)
	if err != nil {
		glog.Fatalf("Failed to build summary: %s", err)
	}

	statusWatcher, err = status.New(storages)
	if err != nil {
		glog.Fatalf("Failed to initialize status watcher: %s", err)
	}

	imgFS := NewURLAwareFileServer(*imageDir, IMAGE_URL_PREFIX)
	pathToURLConverter = imgFS.GetURL

	if err := warmer.Init(storages, summaries, tallies); err != nil {
		glog.Fatalf("Failed to initialize the warmer: %s", err)
	}
	mainTimer.Stop()

	// Everything is wired up at this point. Run the self tests if requested.
	if *selfTest {
		search.SelfTest(storages, tallies, blamer, paramsetSum)
	}

	router := mux.NewRouter()

	// Set up the resource to serve the image files.
	router.PathPrefix(IMAGE_URL_PREFIX).Handler(imgFS.Handler)

	// New Polymer based UI endpoints.
	router.PathPrefix("/res/").HandlerFunc(makeResourceHandler())

	// TODO(stephana): Remove the 'poly' prefix from all the handlers and clean
	// up main2.go by either merging it into main.go or making it clearer that
	// it contains all the handlers. Make it clearer which variables are shared
	// between the different files.

	// All the handlers will be prefixed with poly to differentiate them from
	// the angular code until the angular code is removed.
	router.HandleFunc(OAUTH2_CALLBACK_PATH, login.OAuth2CallbackHandler)
	router.HandleFunc("/", byBlameHandler).Methods("GET")
	router.HandleFunc("/list", templateHandler("list.html")).Methods("GET")
	router.HandleFunc("/_/details", polyDetailsHandler).Methods("GET")
	router.HandleFunc("/_/diff", polyDiffJSONDigestHandler).Methods("GET")
	router.HandleFunc("/_/hashes", polyAllHashesHandler).Methods("GET")
	router.HandleFunc("/_/ignores", polyIgnoresJSONHandler).Methods("GET")
	router.HandleFunc("/_/ignores/add/", polyIgnoresAddHandler).Methods("POST")
	router.HandleFunc("/_/ignores/del/{id}", polyIgnoresDeleteHandler).Methods("POST")
	router.HandleFunc("/_/ignores/save/{id}", polyIgnoresUpdateHandler).Methods("POST")
	router.HandleFunc("/_/list", polyListTestsHandler).Methods("GET")
	router.HandleFunc("/_/paramset", polyParamsHandler).Methods("GET")
	router.HandleFunc("/_/nxn", nxnJSONHandler).Methods("GET")

	// TODO(stephana): Once /_/search3 is stable it will replace /_/search and the
	// /search*.html pages will be consolidated into one.
	router.HandleFunc("/_/search", polySearchJSONHandler).Methods("GET")
	router.HandleFunc("/_/search3", search3JSONHandler).Methods("GET")

	router.HandleFunc("/_/status/{test}", polyTestStatusHandler).Methods("GET")
	router.HandleFunc("/_/test", polyTestHandler).Methods("POST")
	router.HandleFunc("/_/triage", polyTriageHandler).Methods("POST")
	router.HandleFunc("/_/triagelog", polyTriageLogHandler).Methods("GET")
	router.HandleFunc("/_/triagelog/undo", triageUndoHandler).Methods("POST")
	router.HandleFunc("/_/failure", failureListJSONHandler).Methods("GET")
	router.HandleFunc("/_/failure/clear", failureClearJSONHandler).Methods("POST")
	router.HandleFunc("/_/trybot", listTrybotsJSONHandler).Methods("GET")

	router.HandleFunc("/byblame", byBlameHandler).Methods("GET")
	router.HandleFunc("/cluster", templateHandler("cluster.html")).Methods("GET")
	router.HandleFunc("/search2", search2Handler).Methods("GET")
	router.HandleFunc("/cmp/{test}", templateHandler("compare.html")).Methods("GET")
	router.HandleFunc("/detail", templateHandler("single.html")).Methods("GET")
	router.HandleFunc("/diff", templateHandler("diff.html")).Methods("GET")
	router.HandleFunc("/help", templateHandler("help.html")).Methods("GET")
	router.HandleFunc("/ignores", templateHandler("ignores.html")).Methods("GET")
	router.HandleFunc("/loginstatus/", login.StatusHandler)
	router.HandleFunc("/logout/", login.LogoutHandler)
	router.HandleFunc("/search", templateHandler("search.html")).Methods("GET")
	router.HandleFunc("/triagelog", templateHandler("triagelog.html")).Methods("GET")
	router.HandleFunc("/trybot", templateHandler("trybot.html")).Methods("GET")
	router.HandleFunc("/failures", templateHandler("failures.html")).Methods("GET")

	// Add the necessary middleware and have the router handle all requests.
	// By structuring the middleware this way we only log requests that are
	// authenticated.
	rootHandler := util.LoggingGzipRequestResponse(router)
	if *forceLogin {
		rootHandler = login.ForceAuth(rootHandler, OAUTH2_CALLBACK_PATH)
	}

	// The polyStatusHandler is being polled, so we exclude it from logging.
	http.HandleFunc("/_/trstatus", polyStatusHandler)
	http.Handle("/", rootHandler)

	// Start the server
	glog.Infoln("Serving on http://127.0.0.1" + *port)
	glog.Fatal(http.ListenAndServe(*port, nil))
}