Example #1
func dumpCommits(store tiling.TileStore, n int) {
	tile, err := store.Get(0, -1)
	if err != nil {
		glog.Fatal("Could not read tile: " + err.Error())
	}

	tileLen := tile.LastCommitIndex() + 1
	commits := tile.Commits[:tileLen]

	if n <= 0 || n > tileLen {
		n = tileLen
	}
	startIdx := tileLen - n

	// Keep track of which traces have at least one non-missing sample.
	notEmpty := map[string]bool{}

	for i := startIdx; i < tileLen; i++ {
		count := 0
		for traceKey, v := range tile.Traces {
			if !v.IsMissing(i) {
				count++
				notEmpty[traceKey] = true
			}
		}
		commit := commits[i]

		// Slicing by byte offset is safe here because a git hash is always ASCII.
		outHash := commit.Hash[:20]
		fmt.Printf("%v: %5d/%5d : %s : %s\n", time.Unix(commit.CommitTime, 0), count, len(tile.Traces), outHash, commit.Author)
	}

	fmt.Printf("Total Commits   : %d\n", tileLen)
	fmt.Printf("Non-empty traces: %d\n", len(notEmpty))
}
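
A hedged usage sketch: dumpCommits only needs a TileStore, so a caller might wire it up as below. The filetilestore constructor and its signature are assumptions, not confirmed by these examples.

// Hypothetical wiring; filetilestore.NewFileTileStore is an assumption.
store := filetilestore.NewFileTileStore("/var/tiles", "nano", time.Hour)
// Print per-commit trace counts for the 10 most recent commits.
dumpCommits(store, 10)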
Example #2
func dumpTileToJSON(store tiling.TileStore, nCommits int, nTraces int, fname string) {
	tile, err := store.Get(0, -1)
	if err != nil {
		glog.Fatal("Could not read tile: " + err.Error())
	}

	newTile := tile
	if (nCommits > 0) || (nTraces > 0) {
		lastIdx := tile.LastCommitIndex()
		if nCommits <= 0 {
			nCommits = lastIdx + 1
		}

		if nTraces <= 0 {
			nTraces = len(tile.Traces)
		}

		commitLen := util.MinInt(nCommits, lastIdx+1)
		startCommit := lastIdx + 1 - commitLen
		newTraces := map[string]tiling.Trace{}
		for key, trace := range tile.Traces {
			for i := startCommit; i <= lastIdx; i++ {
				if !trace.IsMissing(i) {
					newTraces[key] = trace
					break
				}
			}
			if len(newTraces) >= nTraces {
				break
			}
		}

		// Slice up to lastIdx+1 so trailing, not-yet-filled commits are excluded.
		newCommits := tile.Commits[startCommit : lastIdx+1]
		newParamSet := map[string][]string{}
		tiling.GetParamSet(newTraces, newParamSet)

		newTile = &tiling.Tile{
			Traces:    newTraces,
			ParamSet:  newParamSet,
			Commits:   newCommits,
			Scale:     tile.Scale,
			TileIndex: tile.TileIndex,
		}
	}

	result, err := json.Marshal(newTile)
	if err != nil {
		glog.Fatalf("Could not marshal to JSON: %s", err)
	}

	err = ioutil.WriteFile(fname, result, 0644)
	if err != nil {
		glog.Fatalf("Could not write output file %s", err)
	}

	fmt.Printf("Commits included: %d\n", len(newTile.Commits))
	fmt.Printf("Traces included:  %d\n", len(newTile.Traces))
}
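
One caveat worth noting: tile.Traces is a Go map, so when nTraces limits the output, which traces survive depends on Go's randomized map iteration order. A minimal sketch of a deterministic variant that sorts the keys before selecting:

// Deterministic trace selection (sketch): walk keys in sorted order so
// repeated runs dump the same traces.
keys := make([]string, 0, len(tile.Traces))
for k := range tile.Traces {
	keys = append(keys, k)
}
sort.Strings(keys)
for _, key := range keys {
	trace := tile.Traces[key]
	for i := startCommit; i <= lastIdx; i++ {
		if !trace.IsMissing(i) {
			newTraces[key] = trace
			break
		}
	}
	if len(newTraces) >= nTraces {
		break
	}
}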
Example #3
func md5Commits(store tiling.TileStore, targetHash string, nCommits int) {
	tile, err := store.Get(0, -1)
	if err != nil {
		glog.Fatal("Could not read tile: " + err.Error())
	}

	tileLen := tile.LastCommitIndex() + 1
	commits := tile.Commits[:tileLen]

	// Find the target index.
	endIdx := -1
	for i, v := range commits {
		if strings.HasPrefix(v.Hash, targetHash) {
			endIdx = i
			break
		}
	}
	if endIdx == -1 {
		glog.Fatalf("Unable to find commit %s", targetHash)
	}

	endIdx++
	startIdx := endIdx - nCommits
	if startIdx < 0 {
		startIdx = 0
	}

	traceKeys := make([]string, 0, len(tile.Traces))
	for k, v := range tile.Traces {
		for i := startIdx; i < endIdx; i++ {
			if !v.IsMissing(i) {
				traceKeys = append(traceKeys, k)
				break
			}
		}
	}
	sort.Strings(traceKeys)

	result := make([][]string, len(traceKeys))
	for i, k := range traceKeys {
		switch trace := tile.Traces[k].(type) {
		case *gtypes.GoldenTrace:
			result[i] = trace.Values[startIdx:endIdx]
		case *types.PerfTrace:
			result[i] = asStringSlice(trace.Values[startIdx:endIdx])
		}
	}

	byteStr, err := getBytes(result)
	if err != nil {
		glog.Fatalf("Unable to serialize to bytes: %s", err.Error())
	}

	md5Hash := fmt.Sprintf("%x", md5.Sum(byteStr))

	fmt.Printf("Commit Range    : %s - %s\n", commits[startIdx].Hash, commits[endIdx-1].Hash)
	fmt.Printf("Hash            : %s\n", md5Hash)
	fmt.Printf("Total     traces: %d\n", len(tile.Traces))
	fmt.Printf("Non-empty traces: %d\n", len(traceKeys))
}
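
The helpers asStringSlice and getBytes are used above but not shown. A minimal sketch of plausible implementations, assuming PerfTrace.Values is a []float64 and that any deterministic serialization (JSON here) is acceptable input for the md5:

// asStringSlice renders float samples as strings so Perf and Golden
// traces hash through the same [][]string shape. (Sketch; assumes
// types.PerfTrace stores its samples as []float64.)
func asStringSlice(vals []float64) []string {
	out := make([]string, len(vals))
	for i, v := range vals {
		out[i] = strconv.FormatFloat(v, 'g', -1, 64)
	}
	return out
}

// getBytes serializes the collected values into a stable byte stream
// for hashing. JSON is one deterministic choice; the real helper may differ.
func getBytes(result [][]string) ([]byte, error) {
	return json.Marshal(result)
}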
Example #4
// ValidateDataset validates all the tiles stored in a TileStore.
func ValidateDataset(store tiling.TileStore, verbose, echoHashes bool) bool {
	index := -1
	isValid := true
	// Seed with the current time: tile building isn't instantaneous, so no
	// tile can be newer than now and this can't cause a false negative.
	oldestTS := time.Now().Unix()

	for {
		tile, err := store.Get(0, index)
		if err != nil {
			fmt.Printf("Failed to Get(0, %d): %s\n", index, err)
			isValid = false
			break
		}
		if verbose {
			fmt.Println("TileIndex:", tile.TileIndex)
			fmt.Println("Tile range:", tile.Commits[0].CommitTime, tile.Commits[len(tile.Commits)-1].CommitTime)
			fmt.Println("Tile range:", time.Unix(tile.Commits[0].CommitTime, 0), time.Unix(tile.Commits[len(tile.Commits)-1].CommitTime, 0))
		}
		// Validate the git hashes in the tile.
		err = validateTile(tile, oldestTS, verbose, echoHashes)
		oldestTS = tile.Commits[0].CommitTime
		if err != nil {
			fmt.Printf("Failed to validate tile %d scale 0: %s\n", index, err)
			isValid = false
			break
		}
		if index > 0 && index != tile.TileIndex {
			fmt.Printf("Tile index inconsistent: index %d != tile.TileIndex %d\n", index, tile.TileIndex)
			isValid = false
			break
		}
		if tile.Scale != 0 {
			fmt.Printf("Tile scale isn't 0: tile.Scale %d\n", tile.Scale)
			isValid = false
			break
		}
		if tile.TileIndex > 0 {
			index = tile.TileIndex - 1
		} else {
			break
		}
	}

	return isValid
}
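
validateTile is referenced above but not included here. A hedged sketch of the checks it plausibly performs, given how oldestTS is threaded through the loop (tiles are walked newest to oldest, so every commit should be no newer than the oldest commit of the previously validated tile):

// Sketch only; the real validateTile is not shown in these examples.
func validateTile(tile *tiling.Tile, oldestTS int64, verbose, echoHashes bool) error {
	prev := int64(0)
	for i := 0; i <= tile.LastCommitIndex(); i++ {
		c := tile.Commits[i]
		if c.Hash == "" {
			return fmt.Errorf("commit at index %d has an empty hash", i)
		}
		if echoHashes {
			fmt.Println(c.Hash)
		}
		if c.CommitTime < prev {
			return fmt.Errorf("commit times are not ascending at index %d", i)
		}
		if c.CommitTime > oldestTS {
			return fmt.Errorf("commit at index %d is newer than the following tile", i)
		}
		prev = c.CommitTime
	}
	return nil
}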
Example #5
// Start calculating and reporting statistics on the repo and tiles.
//
// We presume the git.Update(true) is called somewhere else, usually this is done
// in the ingester, so the repo is always as good as the ingested tiles.
func Start(tileStore tiling.TileStore, git *gitinfo.GitInfo) {
	coverage := metrics.NewRegisteredGaugeFloat64("stats.tests.bench_runs_per_changelist", metrics.DefaultRegistry)
	skpLatency := metrics.NewRegisteredTimer("stats.skp.update_latency", metrics.DefaultRegistry)
	commits := metrics.NewRegisteredGauge("stats.commits.total", metrics.DefaultRegistry)

	go func() {
		for range time.Tick(2 * time.Minute) {
			tile, err := tileStore.Get(0, -1)
			if err != nil {
				glog.Warning("Failed to get tile: %s", err)
				continue
			}
			numCommits := tile.LastCommitIndex() + 1
			numTraces := len(tile.Traces)
			total := 0
			for _, tr := range tile.Traces {
				for i := 0; i < numCommits; i++ {
					if !tr.IsMissing(i) {
						total++
					}
				}
			}
			// Guard against an empty tile to avoid dividing by zero.
			if numCommits == 0 || numTraces == 0 {
				continue
			}
			cov := float64(total) / float64(numCommits*numTraces)
			glog.Info("Coverage: ", cov)
			coverage.Update(cov)

			last, err := git.LastSkpCommit()
			if err != nil {
				glog.Warning("Failed to read last SKP commit: %s", err)
				continue
			}
			skpLatency.Update(time.Since(last))
			commits.Update(int64(git.NumCommits()))
		}
	}()
}
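
Start returns immediately and does its work on a two-minute ticker, so a caller invokes it once during startup. A hedged sketch, where both constructors, their signatures, and the stats package name are assumptions:

// Hypothetical startup wiring; the constructors below are assumptions,
// not confirmed by these examples.
store := filetilestore.NewFileTileStore("/var/tiles", "nano", time.Hour)
git, err := gitinfo.NewGitInfo("/var/skia", false, false)
if err != nil {
	glog.Fatalf("Failed to open repo: %s", err)
}
stats.Start(store, git)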
Example #6
// singleStep does a single round of alerting.
func singleStep(tileStore tiling.TileStore, issueTracker issues.IssueTracker) {
	latencyBegin := time.Now()
	tile, err := tileStore.Get(0, -1)
	if err != nil {
		glog.Errorf("Alerting: Failed to get tile: %s", err)
		return
	}

	tile, err = trimTile(tile)
	if err != nil {
		glog.Errorf("Alerting: Failed to Trim tile down to size: %s", err)
		return
	}

	summary, err := clustering.CalculateClusterSummaries(tile, CLUSTER_SIZE, CLUSTER_STDDEV, skpOnly)
	if err != nil {
		glog.Errorf("Alerting: Failed to calculate clusters: %s", err)
		return
	}
	fresh := []*types.ClusterSummary{}
	for _, c := range summary.Clusters {
		if math.Abs(c.StepFit.Regression) > clustering.INTERESTING_THRESHHOLD {
			fresh = append(fresh, c)
		}
	}
	old, err := ListFrom(tile.Commits[0].CommitTime)
	if err != nil {
		glog.Errorf("Alerting: Failed to get existing clusters: %s", err)
		return
	}
	glog.Infof("Found %d old", len(old))
	glog.Infof("Found %d fresh", len(fresh))
	updated := CombineClusters(fresh, old)
	for _, c := range updated {
		if c.Status == "" {
			c.Status = "New"
		}
		if err := Write(c); err != nil {
			glog.Errorf("Alerting: Failed to write updated cluster: %s", err)
		}
	}

	current, err := ListFrom(tile.Commits[0].CommitTime)
	if err != nil {
		glog.Errorf("Alerting: Failed to get existing clusters: %s", err)
		return
	}
	count := 0
	// Check the tracker once, outside the loop, so a missing apiKey is
	// logged a single time and the metrics below still update.
	if issueTracker == nil {
		glog.Infof("Skipping ClusterSummary.Bugs update because apiKey is missing.")
	}
	for _, c := range current {
		if c.Status == "New" {
			count++
		}
		if issueTracker != nil {
			if err := updateBugs(c, issueTracker); err != nil {
				glog.Errorf("Error retrieving bugs: %s", err)
				return
			}
		}
	}
	newClustersGauge.Update(int64(count))
	runsCounter.Inc(1)
	alertingLatency.UpdateSince(latencyBegin)
}
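
trimTile is used above but not shown. A minimal sketch of what it might do, assuming a tile.Trim(begin, end) helper that copies the commit range and a hypothetical config.MAX_CLUSTER_COMMITS bound on how many commits clustering should see:

// Sketch only: keep just the most recent commits so clustering stays
// bounded. Trim and MAX_CLUSTER_COMMITS are assumptions.
func trimTile(tile *tiling.Tile) (*tiling.Tile, error) {
	end := tile.LastCommitIndex() + 1
	begin := end - config.MAX_CLUSTER_COMMITS
	if begin < 0 {
		begin = 0
	}
	return tile.Trim(begin, end)
}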