Example 1
// TickMarks produces human readable tickmarks to span the given timestamps.
//
// The slice of timestamps is presumed to be in ascending order.
// If 'in' is nil then the "Local" time zone is used.
//
// The choice of how to label the tick marks is based on the full time
// range of the timestamps.
func TickMarks(timestamps []int64, in *time.Location) []*Tick {
	loc := in
	if loc == nil {
		var err error
		loc, err = time.LoadLocation("Local")
		if err != nil {
			loc = time.UTC
		}
	}
	ret := []*Tick{}
	if len(timestamps) < 2 {
		glog.Warningf("Insufficient number of commits: %d", len(timestamps))
		return ret
	}

	begin := time.Unix(timestamps[0], 0).In(loc)
	end := time.Unix(timestamps[len(timestamps)-1], 0).In(loc)
	duration := end.Sub(begin)
	op, err := opFromHours(duration)
	if err != nil {
		glog.Errorf("Failed to calculate tickmarks for: %s %s: %s", begin, end, err)
		return ret
	}
	last := op(begin)
	ret = append(ret, &Tick{X: 0, Value: last})
	for i, t := range timestamps {
		if tickValue := op(time.Unix(t, 0).In(loc)); last != tickValue {
			last = tickValue
			ret = append(ret, &Tick{X: float64(i) - 0.5, Value: tickValue})
		}
	}

	return ret
}
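A minimal usage sketch, assuming Tick carries an X offset and a printable Value as the labeling code above suggests; the timestamps here are illustrative (three instants one day apart):

func ExampleTickMarks() {
	// Three commit timestamps, one day apart.
	timestamps := []int64{1424649600, 1424736000, 1424822400}
	for _, tick := range TickMarks(timestamps, time.UTC) {
		fmt.Printf("x=%.1f label=%v\n", tick.X, tick.Value)
	}
}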
Example 2
// Start calculating and reporting statistics on the repo and tiles.
//
// We presume that git.Update(true) is called elsewhere, usually in
// trace/db.Builder, so the repo is always at least as current as the loaded tiles.
func Start(nanoTileStore *db.Builder, git *gitinfo.GitInfo) {
	coverage := metrics.NewRegisteredGaugeFloat64("stats.tests.bench_runs_per_changelist", metrics.DefaultRegistry)
	skpLatency := metrics.NewRegisteredTimer("stats.skp.update_latency", metrics.DefaultRegistry)
	commits := metrics.NewRegisteredGauge("stats.commits.total", metrics.DefaultRegistry)

	go func() {
		for range time.Tick(2 * time.Minute) {
			tile := nanoTileStore.GetTile()
			numCommits := tile.LastCommitIndex() + 1
			numTraces := len(tile.Traces)
			total := 0
			for _, tr := range tile.Traces {
				for i := 0; i < numCommits; i++ {
					if !tr.IsMissing(i) {
						total += 1
					}
				}
			}
			denom := float64(numCommits * numTraces)
			cov := 0.0
			if denom > 0 {
				cov = float64(total) / denom
			}
			glog.Info("Coverage: ", cov)
			coverage.Update(cov)

			last, err := git.LastSkpCommit()
			if err != nil {
				glog.Warning("Failed to read last SKP commit: %s", err)
				continue
			}
			skpLatency.Update(time.Since(last))
			commits.Update(int64(git.NumCommits()))
		}
	}()
}
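The coverage number above is the fraction of (trace, commit) cells that actually hold data. A standalone sketch of that calculation, with a hypothetical Trace interface standing in for the tile trace type and a guard for the empty case:

// Trace is a hypothetical stand-in for the tile trace type used above.
type Trace interface {
	IsMissing(i int) bool
}

// coverage returns the fraction of (trace, commit) cells that hold data.
func coverage(traces []Trace, numCommits int) float64 {
	total := 0
	for _, tr := range traces {
		for i := 0; i < numCommits; i++ {
			if !tr.IsMissing(i) {
				total++
			}
		}
	}
	denom := numCommits * len(traces)
	if denom == 0 {
		return 0
	}
	return float64(total) / float64(denom)
}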
Example 3
// NewTraceServiceDB creates a new DB that stores the data in the BoltDB backed
// gRPC accessible traceservice.
func NewTraceServiceDB(conn *grpc.ClientConn, traceBuilder tiling.TraceBuilder) (*TsDB, error) {
	ret := &TsDB{
		conn:         conn,
		traceService: traceservice.NewTraceServiceClient(conn),
		traceBuilder: traceBuilder,
		cache:        lru.New(MAX_ID_CACHED),
		paramsCache:  map[string]map[string]string{},
		id64Cache:    map[uint64]string{},
		ctx:          context.Background(),
	}

	// This ping causes the client to try to reach the backend. If the backend
	// is down, it will keep trying until it's up.
	if err := ret.ping(); err != nil {
		return nil, err
	}

	// Liveness metric.
	go func() {
		liveness := metrics.NewLiveness("tracedb-ping")
		for range time.Tick(time.Minute) {
			if ret.ping() == nil {
				liveness.Update()
			}
		}
	}()

	// Keep the cache sizes in check.
	go func() {
		for range time.Tick(15 * time.Minute) {
			ret.clearMutex.Lock()
			if len(ret.paramsCache) > MAX_ID_CACHED {
				ret.paramsCache = map[string]map[string]string{}
				glog.Warning("Had to clear paramsCache, this is unexpected. MAX_ID_CACHED too small?")
			}
			if len(ret.id64Cache) > MAX_ID_CACHED {
				ret.id64Cache = map[uint64]string{}
				glog.Warning("Had to clear id64Cache, this is unexpected. MAX_ID_CACHED too small?")
			}
			ret.clearMutex.Unlock()
		}
	}()
	return ret, nil
}
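The periodic cache reset is worth isolating: rather than evicting entry by entry, the whole map is guarded by a mutex and re-allocated once it grows past a limit. A distilled sketch with hypothetical names (boundedCache, maxEntries):

// boundedCache is a hypothetical distillation of the id64Cache pattern above.
type boundedCache struct {
	mu         sync.Mutex
	entries    map[uint64]string
	maxEntries int
}

// trim drops the whole map once it exceeds the limit; re-allocating is
// simpler than per-entry eviction when overflow is rare and unexpected.
func (c *boundedCache) trim() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.entries) > c.maxEntries {
		c.entries = map[uint64]string{}
	}
}

// trimForever runs trim on the same cadence as the goroutine above.
func (c *boundedCache) trimForever() {
	for range time.Tick(15 * time.Minute) {
		c.trim()
	}
}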
Example 4
// Init runs commonly used initialization routines.
func Init() {
	flag.Parse()
	defer glog.Flush()
	flag.VisitAll(func(f *flag.Flag) {
		glog.Infof("Flags: --%s=%v", f.Name, f.Value)
	})

	// See skbug.com/4386 for details on why the below section exists.
	glog.Info("Initializing logserver for log level INFO.")
	glog.Warning("Initializing logserver for log level WARNING.")
	glog.Error("Initializing logserver for log level ERROR.")

	// Use all cores.
	runtime.GOMAXPROCS(runtime.NumCPU())
}
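A typical call-site sketch, assuming Init lives in a package imported as common; the flag shown here is illustrative and defined by the caller:

var port = flag.String("port", ":8000", "HTTP service address.")

func main() {
	common.Init() // Parses flags and logs their values before anything reads them.
	glog.Infof("Serving on %s", *port)
}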
Example 5
// GetNRGBA converts the image to an *image.NRGBA in an efficient manner.
func GetNRGBA(img image.Image) *image.NRGBA {
	switch t := img.(type) {
	case *image.NRGBA:
		return t
	case *image.RGBA:
		for i := 0; i < len(t.Pix); i += 4 {
			if t.Pix[i+3] != 0xff {
				glog.Warning("Unexpected premultiplied image!")
				return recode(img)
			}
		}
		// If every alpha is 0xff then t.Pix is already in NRGBA format, simply
		// share Pix between the RGBA and NRGBA structs.
		return &image.NRGBA{
			Pix:    t.Pix,
			Stride: t.Stride,
			Rect:   t.Rect,
		}
	default:
		// TODO(mtklein): Does it make sense that we're getting other types, or is this a DM bug?
		return recode(img)
	}
}
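The alpha check above works because image.RGBA stores premultiplied pixels while image.NRGBA stores non-premultiplied ones, and the two encodings agree exactly when alpha is 0xff. A small sketch of that fact (the pixel values are illustrative):

func opaquePixelsMatch() {
	rgba := image.NewRGBA(image.Rect(0, 0, 1, 1))
	rgba.SetRGBA(0, 0, color.RGBA{R: 10, G: 20, B: 30, A: 0xff})

	nrgba := GetNRGBA(rgba)
	// With full alpha there is nothing to un-premultiply, so the pixel
	// bytes can be shared as-is between the two structs.
	fmt.Println(nrgba.Pix[:4]) // [10 20 30 255]
}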
Example 6
// getDiffMetricsFromFileCache looks for and returns the DiffMetrics of the
// specified digests from the local diffmetrics dir. It is thread safe because
// it locks the diff store's mutex before accessing the digest cache.
func (fs *FileDiffStore) getDiffMetricsFromFileCache(baseName string) (*diff.DiffMetrics, error) {
	diffMetricsFilePath := fs.getDiffMetricPath(baseName)

	// Lock the mutex before reading from the local diff directory.
	fs.diffDirLock.Lock()
	defer fs.diffDirLock.Unlock()
	if _, err := os.Stat(diffMetricsFilePath); err != nil {
		if os.IsNotExist(err) {
			// File does not exist.
			return nil, nil
		} else {
			// There was some other error.
			glog.Warningf("Some other error: %s: %s", baseName, err)
			return nil, err
		}
	}

	diffMetrics, err := openDiffMetrics(diffMetricsFilePath)
	if err != nil {
		glog.Warning("Some error opening: %s: %s", baseName, err)
		return nil, err
	}
	return diffMetrics, nil
}
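One subtlety worth calling out: a (nil, nil) return means a cache miss, not a failure. A caller-side sketch of that convention; computeDiffMetrics is hypothetical:

func diffMetricsFor(fs *FileDiffStore, baseName string) (*diff.DiffMetrics, error) {
	dm, err := fs.getDiffMetricsFromFileCache(baseName)
	if err != nil {
		return nil, err // A real failure, e.g. an unreadable file.
	}
	if dm != nil {
		return dm, nil // Cache hit.
	}
	// Cache miss: the file simply wasn't there, so compute the metrics afresh.
	return computeDiffMetrics(baseName)
}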
Example 7
// runID is the unique id of the current run (typically requester + timestamp).
// targetPlatform is the platform the benchmark will run on (Android / Linux).
// chromiumHash is the hash the checkout should be synced to. If not specified then
// Chromium's ToT (tip-of-tree) hash is used.
// skiaHash is the hash the checkout should be synced to. If not specified then
// Skia's LKGR hash is used (the hash in Chromium's DEPS file).
// applyPatches if true looks for Chromium/Blink/Skia patches in the temp dir and
// runs once with the patch applied and once without the patch applied.
func CreateChromiumBuild(runID, targetPlatform, chromiumHash, skiaHash string, applyPatches bool) (string, string, error) {
	// Determine which build dir and fetch target to use.
	var chromiumBuildDir, fetchTarget string
	if targetPlatform == "Android" {
		chromiumBuildDir = filepath.Join(ChromiumBuildsDir, "android_base")
		fetchTarget = "android"
	} else if targetPlatform == "Linux" {
		chromiumBuildDir = filepath.Join(ChromiumBuildsDir, "linux_base")
		fetchTarget = "chromium"
	} else {
		return "", "", fmt.Errorf("Unrecognized target_platform %s", targetPlatform)
	}
	util.MkdirAll(chromiumBuildDir, 0700)

	// Find which Chromium commit hash should be used.
	var err error
	if chromiumHash == "" {
		chromiumHash, err = getChromiumHash()
		if err != nil {
			return "", "", fmt.Errorf("Error while finding Chromium's Hash: %s", err)
		}
	}

	// Find which Skia commit hash should be used.
	if skiaHash == "" {
		skiaHash, err = getSkiaHash()
		if err != nil {
			return "", "", fmt.Errorf("Error while finding Skia's Hash: %s", err)
		}
	}

	// Run chromium sync command using the above commit hashes.
	// Construct path to the sync_skia_in_chrome python script.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(currentFile))),
		"py")
	syncArgs := []string{
		filepath.Join(pathToPyFiles, "sync_skia_in_chrome.py"),
		"--destination=" + chromiumBuildDir,
		"--fetch_target=" + fetchTarget,
		"--chrome_revision=" + chromiumHash,
		"--skia_revision=" + skiaHash,
	}
	if err := ExecuteCmd("python", syncArgs, []string{}, 2*time.Hour, nil, nil); err != nil {
		glog.Warning("There was an error. Deleting base directory and trying again.")
		util.RemoveAll(chromiumBuildDir)
		util.MkdirAll(chromiumBuildDir, 0700)
		if err := ExecuteCmd("python", syncArgs, []string{}, 2*time.Hour, nil, nil); err != nil {
			return "", "", fmt.Errorf("There was an error checking out chromium %s + skia %s: %s", chromiumHash, skiaHash, err)
		}
	}

	// Make sure we are starting from a clean slate.
	if err := resetChromiumCheckout(filepath.Join(chromiumBuildDir, "src")); err != nil {
		return "", "", fmt.Errorf("Could not reset the chromium checkout in %s: %s", chromiumBuildDir, err)
	}
	googleStorageDirName := fmt.Sprintf("%s-%s-%s", getTruncatedHash(chromiumHash), getTruncatedHash(skiaHash), runID)
	if runID == "" {
		// Do not include the runID in the dir name if it is not specified.
		googleStorageDirName = fmt.Sprintf("%s-%s", getTruncatedHash(chromiumHash), getTruncatedHash(skiaHash))
	}
	if applyPatches {
		if err := applyRepoPatches(filepath.Join(chromiumBuildDir, "src"), runID); err != nil {
			return "", "", fmt.Errorf("Could not apply patches in the chromium checkout in %s: %s", chromiumBuildDir, err)
		}
		// Add "try" prefix and "withpatch" suffix.
		googleStorageDirName = fmt.Sprintf("try-%s-withpatch", googleStorageDirName)
	}
	// Build chromium.
	if err := buildChromium(chromiumBuildDir, targetPlatform); err != nil {
		return "", "", fmt.Errorf("There was an error building chromium %s + skia %s: %s", chromiumHash, skiaHash, err)
	}

	// Upload to Google Storage.
	gs, err := NewGsUtil(nil)
	if err != nil {
		return "", "", fmt.Errorf("Could not create GS object: %s", err)
	}
	if err := uploadChromiumBuild(filepath.Join(chromiumBuildDir, "src", "out", "Release"), filepath.Join(CHROMIUM_BUILDS_DIR_NAME, googleStorageDirName), targetPlatform, gs); err != nil {
		return "", "", fmt.Errorf("There was an error uploaded the chromium build dir %s: %s", filepath.Join(chromiumBuildDir, "src", "out", "Release"), err)
	}

	// If patches were applied, reset the checkout, build again without the
	// patches, and upload that second build to Google Storage.
	if applyPatches {
		// Now build chromium without the patches and upload it to Google Storage.

		// Make sure we are starting from a clean slate.
		if err := resetChromiumCheckout(filepath.Join(chromiumBuildDir, "src")); err != nil {
			return "", "", fmt.Errorf("Could not reset the chromium checkout in %s: %s", chromiumBuildDir, err)
		}
		// Build chromium.
		if err := buildChromium(chromiumBuildDir, targetPlatform); err != nil {
			return "", "", fmt.Errorf("There was an error building chromium %s + skia %s: %s", chromiumHash, skiaHash, err)
		}
		// Upload to Google Storage.
		googleStorageDirName = fmt.Sprintf("try-%s-%s-%s-nopatch", getTruncatedHash(chromiumHash), getTruncatedHash(skiaHash), runID)
		if err := uploadChromiumBuild(filepath.Join(chromiumBuildDir, "src", "out", "Release"), filepath.Join(CHROMIUM_BUILDS_DIR_NAME, googleStorageDirName), targetPlatform, gs); err != nil {
			return "", "", fmt.Errorf("There was an error uploaded the chromium build dir %s: %s", filepath.Join(chromiumBuildDir, "src", "out", "Release"), err)
		}
	}
	return getTruncatedHash(chromiumHash), getTruncatedHash(skiaHash), nil
}
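A call-site sketch under assumed inputs: empty hashes select the defaults described in the doc comment (Chromium ToT and Skia LKGR), and the runID follows the requester + timestamp convention. All concrete values here are illustrative:

func buildLinuxToT() {
	chromiumHash, skiaHash, err := CreateChromiumBuild(
		"user@example.com-1445000000", // runID: requester + timestamp.
		"Linux",                       // targetPlatform: "Android" or "Linux".
		"",                            // chromiumHash: "" means use ToT.
		"",                            // skiaHash: "" means use LKGR.
		false,                         // applyPatches.
	)
	if err != nil {
		glog.Errorf("Failed to create the Chromium build: %s", err)
		return
	}
	glog.Infof("Built chromium %s + skia %s", chromiumHash, skiaHash)
}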
Example 8
// OAuth2CallbackHandler must be attached at a handler that matches
// the callback URL registered in the APIs Console. In this case
// "/oauth2callback".
func OAuth2CallbackHandler(w http.ResponseWriter, r *http.Request) {
	glog.Infof("OAuth2CallbackHandler\n")
	cookie, err := r.Cookie(SESSION_COOKIE_NAME)
	if err != nil || cookie.Value == "" {
		http.Error(w, "Invalid session state.", 500)
		return
	}

	state := r.FormValue("state")
	stateParts := strings.SplitN(state, ":", 3)
	redirect := "/"
	// If the state contains a redirect URL.
	if len(stateParts) == 3 {
		// state has this form:   <sessionid>:<hash(salt + original url)>:<original url>
		// See LoginURL for more details.
		state = stateParts[0]
		hash := stateParts[1]
		url := stateParts[2]
		expectedHash := fmt.Sprintf("%x", sha256.Sum256([]byte(cookieSalt+url)))
		if hash == expectedHash {
			redirect = url
		} else {
			glog.Warning("Got an invalid redirect: %s != %s", hash, expectedHash)
		}
	}
	if state != cookie.Value {
		http.Error(w, "Session state doesn't match callback state.", 500)
		return
	}

	code := r.FormValue("code")
	if len(code) >= 5 {
		glog.Infof("Code: %s ", code[:5])
	}
	transport := &oauth.Transport{
		Config: oauthConfig,
		Transport: &http.Transport{
			Dial: util.DialTimeout,
		},
	}
	token, err := transport.Exchange(code)
	if err != nil {
		glog.Errorf("Failed to authenticate: %s", err)
		http.Error(w, "Failed to authenticate.", 500)
		return
	}
	// idToken is a JSON Web Token. We only need to decode the token, we do not
	// need to validate the token because it came to us over HTTPS directly from
	// Google's servers.
	idToken := token.Extra["id_token"]
	// The id token is actually three base64 encoded parts that are "." separated.
	segments := strings.Split(idToken, ".")
	if len(segments) != 3 {
		http.Error(w, "Invalid id_token.", 500)
		return
	}
	// Now base64 decode the middle segment, which decodes to JSON.
	padding := 4 - (len(segments[1]) % 4)
	if padding == 4 {
		padding = 0
	}
	middle := segments[1] + strings.Repeat("=", padding)
	b, err := base64.URLEncoding.DecodeString(middle)
	if err != nil {
		glog.Errorf("Failed to base64 decode middle part of token: %s From: %#v", middle, segments)
		http.Error(w, "Failed to base64 decode id_token.", 500)
		return
	}
	// Finally decode the JSON.
	decoded := &decodedIDToken{}
	if err := json.Unmarshal(b, decoded); err != nil {
		glog.Errorf("Failed to JSON decode token: %s", string(b))
		http.Error(w, "Failed to JSON decode id_token.", 500)
		return
	}

	email := strings.ToLower(decoded.Email)
	parts := strings.Split(email, "@")
	if len(parts) != 2 {
		http.Error(w, "Invalid email address received.", 500)
		return
	}

	if !activeDomainWhiteList[parts[1]] && !activeEmailWhiteList[email] {
		http.Error(w, "Accounts from your domain are not allowed or your email address is not white listed.", 500)
		return
	}
	s := Session{
		Email:     email,
		AuthScope: oauthConfig.Scope,
		Token:     token,
	}
	setSkIDCookieValue(w, &s)
	http.Redirect(w, r, redirect, 302)
}
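The padding step above deserves a standalone sketch: JWT segments are base64url without "=" padding, which base64.URLEncoding refuses, so the padding has to be restored before decoding. (On newer Go, base64.RawURLEncoding accepts the unpadded form directly.)

// decodeSegment restores the "=" padding that JWT segments omit and then
// base64url-decodes the result; it mirrors the inline logic above.
func decodeSegment(seg string) ([]byte, error) {
	if rem := len(seg) % 4; rem != 0 {
		seg += strings.Repeat("=", 4-rem)
	}
	return base64.URLEncoding.DecodeString(seg)
}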