Example #1
// NewDocSetForIssue creates a new DocSet patched to the latest patch level of
// the given issue.
//
// The returned DocSet is not periodically refreshed.
func NewDocSetForIssue(workDir, repo string, issue int64) (*DocSet, error) {
	// Only do pull and patch if directory doesn't exist.
	issueInfo, err := rc.Issue(issue)
	if err != nil {
		return nil, fmt.Errorf("Failed to load issue info: %s", err)
	}
	patchset := issueInfo.Patchsets[len(issueInfo.Patchsets)-1]
	addr, err := mail.ParseAddress(issueInfo.OwnerEmail)
	if err != nil {
		return nil, fmt.Errorf("CL contains invalid author email: %s", err)
	}
	domain := strings.Split(addr.Address, "@")[1]
	if !util.In(domain, config.WHITELIST) {
		return nil, fmt.Errorf("User is not authorized to test docset CLs.")
	}
	var d *DocSet
	repoDir := filepath.Join(workDir, "patches", fmt.Sprintf("%d-%d", issue, patchset))
	if _, err := os.Stat(repoDir); os.IsNotExist(err) {
		d, err = newDocSet(repoDir, repo, issue, patchset, false)
		if err != nil {
			if err := os.RemoveAll(repoDir); err != nil {
				glog.Errorf("Failed to remove %q: %s", repoDir, err)
			}
			return nil, fmt.Errorf("Failed to create new doc set: %s", err)
		}
	} else {
		d = &DocSet{
			repoDir: repoDir,
		}
		d.BuildNavigation()
	}
	return d, nil
}
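Example #2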
// addResultToTile adds the Digests from the DMResults to the tile at the given offset.
func addResultToTile(res *DMResults, tile *tiling.Tile, offset int, counter metrics.Counter) {
	res.ForEach(func(traceID, digest string, params map[string]string) {
		if ext, ok := params["ext"]; !ok || ext != "png" {
			return // Skip non-PNG results; they will be converted to PNG by a separate process.
		}

		var trace *types.GoldenTrace
		var ok bool
		needsUpdate := false
		if tr, ok := tile.Traces[traceID]; !ok {
			trace = types.NewGoldenTrace()
			tile.Traces[traceID] = trace
			needsUpdate = true
		} else {
			trace = tr.(*types.GoldenTrace)
			if !util.MapsEqual(params, tile.Traces[traceID].Params()) {
				needsUpdate = true
			}
		}
		trace.Params_ = params

		if needsUpdate {
			// Update the Tile's ParamSet with any new keys or values we see.
			for k, v := range params {
				if _, ok = tile.ParamSet[k]; !ok {
					tile.ParamSet[k] = []string{v}
				} else if !util.In(v, tile.ParamSet[k]) {
					tile.ParamSet[k] = append(tile.ParamSet[k], v)
				}
			}
		}
		trace.Values[offset] = digest
		counter.Inc(1)
	})
}
Example #3
// Merge adds in new info from the passed in ClusterSummary.
func (c *ClusterSummary) Merge(from *ClusterSummary) {
	for _, k := range from.Keys {
		if !util.In(k, c.Keys) {
			c.Keys = append(c.Keys, k)
		}
	}
}
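Every example in this file hinges on util.In, a membership test over a slice of strings. The following is a minimal, self-contained sketch of the assumed behavior, for illustration only; it is not the actual Skia infra implementation.

package main

import "fmt"

// In reports whether s is present in a. This mirrors how util.In is used
// throughout these examples; the real helper lives in the Skia infra util package.
func In(s string, a []string) bool {
	for _, x := range a {
		if x == s {
			return true
		}
	}
	return false
}

func main() {
	hashes := []string{"abc123", "xyz789"}
	fmt.Println(In("abc123", hashes)) // true
	fmt.Println(In("def456", hashes)) // false
}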
Example #4
// Get returns the ShortCommit info for the given branch, target, and buildID.
func (i *info) Get(branch, target, buildID string) (*gitinfo.ShortCommit, error) {
	// Get the list of targets and confirm that this target is in it, otherwise add it to the list of targets.
	branchtargets := i.branchtargets()
	branchtarget := fmt.Sprintf("%s:%s", branch, target)
	if !util.In(branchtarget, branchtargets) {
		// If we aren't currently scanning results for this (branch, target) pair
		// then add it to the list.
		branchtargets = append(branchtargets, branchtarget)
		err := i.db.Put([]byte(TARGETS_KEY), []byte(strings.Join(branchtargets, " ")), nil)
		if err != nil {
			glog.Errorf("Failed to add new target %s: %s", branchtarget, err)
		}
		// Always try to fetch the information from the Android Build API directly if
		// we don't have it yet.
		return i.single_get(branch, target, buildID)
	} else {
		key, err := toKey(branch, target, buildID)
		if err != nil {
			return nil, fmt.Errorf("Can't Get with an invalid build ID %q: %s", buildID, err)
		}
		// Scan backwards through the build info until we find a buildID that is equal to or
		// comes before the buildID we are looking for.
		iter := i.db.NewIterator(lutil.BytesPrefix([]byte(toPrefix(branch, target))), nil)
		defer iter.Release()
		if found := iter.Seek([]byte(key)); found {
			value := &gitinfo.ShortCommit{}
			if err := json.Unmarshal(iter.Value(), value); err != nil {
				return nil, fmt.Errorf("Unable to deserialize value: %s", err)
			}
			return value, nil
		} else {
			return i.single_get(branch, target, buildID)
		}
	}
}
Example #5
// CombineClusters combines freshly found clusters with existing clusters.
//
//  Algorithm:
//    Run clustering and pick out the "Interesting" clusters.
//    Compare all the Interesting clusters to all the existing relevant clusters,
//      where "relevant" clusters are ones whose step's Hash/timestamp exists in
//      the current tile.
//    Start with an empty "list".
//    For each cluster:
//      For each relevant existing cluster:
//        Take the top 20 keys from the existing cluster and count how many appear
//        in the cluster.
//      If there are no matches then this is a new cluster, add it to the "list".
//      If there are matches, possibly to multiple existing clusters, find the
//      existing cluster with the most matches.
//        Take the cluster (old/new) with the most members, or the best fit if
//        they have the same number of matches.
//    Return all the updated clusters.
func CombineClusters(freshSummaries, oldSummaries []*types.ClusterSummary) []*types.ClusterSummary {
	ret := []*types.ClusterSummary{}

	stillFresh := []*types.ClusterSummary{}
	// If two cluster summaries have the same hash and same Regression direction
	// then they are the same, merge them together.
	for _, fresh := range freshSummaries {
		found := false
		for _, old := range oldSummaries {
			if fresh.Hash == old.Hash && math.Signbit(fresh.StepFit.Regression) == math.Signbit(old.StepFit.Regression) {
				old.Merge(fresh)
				found = true
				ret = append(ret, old)
				break
			}
		}
		// Only summaries that were not merged into an existing cluster remain "fresh".
		if !found {
			stillFresh = append(stillFresh, fresh)
		}
	}

	// Even if a summary has a different hash it might still be the same event if
	// there is an overlap in the traces each summary contains.
	for _, fresh := range stillFresh {
		var bestMatch *types.ClusterSummary = nil
		bestMatchHits := 0
		for _, old := range oldSummaries {
			hits := 0
			for _, key := range util.AtMost(old.Keys, 20) {
				if util.In(key, fresh.Keys) {
					hits += 1
				}
			}
			if hits > bestMatchHits {
				bestMatchHits = hits
				bestMatch = old
			}
		}
		if bestMatch != nil {
			keysLengthEqual := len(fresh.Keys) == len(bestMatch.Keys)
			regressionInSameDirection := math.Signbit(fresh.StepFit.Regression) == math.Signbit(bestMatch.StepFit.Regression)
			freshHasBetterFit := math.Abs(fresh.StepFit.Regression) > math.Abs(bestMatch.StepFit.Regression)
			freshHasMoreKeys := len(fresh.Keys) > len(bestMatch.Keys)
			if freshHasMoreKeys || (keysLengthEqual && regressionInSameDirection && freshHasBetterFit) {
				fresh.Status = bestMatch.Status
				fresh.Message = bestMatch.Message
				fresh.ID = bestMatch.ID
				fresh.Bugs = bestMatch.Bugs
				ret = append(ret, fresh)
				// Find the bestMatch in oldSummaries and replace it with fresh.
				for i, oldBest := range oldSummaries {
					if oldBest == bestMatch {
						oldSummaries[i] = fresh
						break
					}
				}
			}
		} else {
			ret = append(ret, fresh)
		}
	}
	return ret
}
Example #6
// Matches returns true if the given Trace matches the given query.
func Matches(tr Trace, query url.Values) bool {
	for k, values := range query {
		if _, ok := tr.Params()[k]; !ok || !util.In(tr.Params()[k], values) {
			return false
		}
	}
	return true
}
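The matching semantics above are: every key in the query must be present in the trace's params (AND across keys), and the param value must be one of the values listed for that key (OR across values). The sketch below illustrates the same logic over a plain map, since the Trace interface is not shown in this file; the map simply stands in for it.

package main

import (
	"fmt"
	"net/url"
)

// matchesSketch mirrors the logic of Matches, using a bare params map in place
// of a Trace: all query keys must match, and any listed value may match.
func matchesSketch(params map[string]string, query url.Values) bool {
	for k, values := range query {
		v, ok := params[k]
		if !ok {
			return false
		}
		found := false
		for _, want := range values {
			if v == want {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

func main() {
	params := map[string]string{"config": "8888", "platform": "android"}
	q1, _ := url.ParseQuery("config=8888&config=gpu")
	fmt.Println(matchesSketch(params, q1)) // true: "8888" is one of the requested configs
	q2, _ := url.ParseQuery("config=gpu")
	fmt.Println(matchesSketch(params, q2)) // false: config is "8888", not "gpu"
}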
Example #7
// makeRollResult determines what the result of a roll should be, given that
// it is going to be closed.
func (r *AutoRoller) makeRollResult(roll *autoroll.AutoRollIssue) string {
	if util.In(roll.Result, autoroll.DRY_RUN_RESULTS) {
		if roll.Result == autoroll.ROLL_RESULT_DRY_RUN_IN_PROGRESS {
			return autoroll.ROLL_RESULT_DRY_RUN_FAILURE
		} else {
			return roll.Result
		}
	}
	return autoroll.ROLL_RESULT_FAILURE
}
Example #8
// scoreBuild returns the current score for the given commit/builder pair. The
// details on how scoring works are described in the doc for NewBuildQueue.
func scoreBuild(commit *vcsinfo.LongCommit, build *buildbot.Build, now time.Time, timeLambda float64) float64 {
	s := -1.0
	if build != nil {
		if build.GotRevision == commit.Hash {
			s = 1.0
		} else if util.In(commit.Hash, build.Commits) {
			s = 1.0 / float64(len(build.Commits))
		}
	}
	return s * timeFactor(now, commit.Timestamp, timeLambda)
}
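As a rough illustration of the scoring rule above: a build landed exactly at this commit scores 1.0, a build whose blamelist of N commits merely covers it scores 1/N, and a commit not covered by any build scores -1.0, with every score then scaled by timeFactor. The sketch below makes the arithmetic concrete; decay is a hypothetical stand-in for timeFactor, whose real definition lives alongside NewBuildQueue and is not shown in this file.

package main

import (
	"fmt"
	"math"
	"time"
)

// decay is a stand-in for the real timeFactor (assumed shape only): recent
// commits keep a factor near 1.0, older commits decay toward 0.
func decay(now, commitTime time.Time, lambda float64) float64 {
	return math.Exp(-lambda * now.Sub(commitTime).Hours())
}

func main() {
	now := time.Now()
	commitTime := now.Add(-2 * time.Hour)
	lambda := 0.1

	// Build landed exactly at this commit.
	fmt.Println(1.0 * decay(now, commitTime, lambda))
	// Commit is only covered as one of four commits in an existing build.
	fmt.Println(1.0 / 4.0 * decay(now, commitTime, lambda))
	// No build covers this commit yet.
	fmt.Println(-1.0 * decay(now, commitTime, lambda))
}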
Example #9
// GetParamSet updates paramSet with the params from each trace in the given map of traces.
func GetParamSet(traces map[string]Trace, paramSet map[string][]string) {
	for _, trace := range traces {
		for k, v := range trace.Params() {
			if _, ok := paramSet[k]; !ok {
				paramSet[k] = []string{v}
			} else if !util.In(v, paramSet[k]) {
				paramSet[k] = append(paramSet[k], v)
			}
		}
	}
}
Example #10
// differences returns the strings that appear in server but not in local (new packages), and the strings that appear in both (already-installed packages).
func differences(server, local []string) ([]string, []string) {
	newPackages := []string{}
	installedPackages := []string{}
	for _, s := range server {
		if util.In(s, local) {
			installedPackages = append(installedPackages, s)
		} else {
			newPackages = append(newPackages, s)
		}
	}
	return newPackages, installedPackages
}
Example #11
// setStatus sets the current reporting status of the bot.
func (r *AutoRoller) setStatus(s string, lastError error) error {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	if !util.In(string(s), VALID_STATUSES) {
		return fmt.Errorf("Invalid status: %s", s)
	}
	if s == STATUS_ERROR {
		if lastError == nil {
			return fmt.Errorf("Cannot set error status without an error!")
		}
	} else if lastError != nil {
		return fmt.Errorf("Cannot be in any status other than error when an error occurred.")
	}
	r.status = s
	r.lastError = lastError
	return nil
}
Example #12
// polyListTestsHandler returns a JSON list with high level information about
// each test.
//
// Takes the following query parameters:
//  include - True if ignored digests should be included. (true, false)
//  query   - A query to restrict the responses to, encoded as a URL encoded paramset.
//  head    - True if only digests that appear at head should be included.
//
// The return format looks like:
//
//  [
//    {
//      "name": "01-original",
//      "diameter": 123242,
//      "untriaged": 2,
//      "num": 2
//    },
//    ...
//  ]
//
func polyListTestsHandler(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		util.ReportError(w, r, err, "Failed to parse form data.")
		return
	}
	// If the query only includes source_type parameters, and include==false, then we can just
	// filter the response from summaries.Get(). If the query is broader than that, or
	// include==true, then we need to call summaries.CalcSummaries().
	q, err := url.ParseQuery(r.FormValue("query"))
	if err != nil {
		util.ReportError(w, r, err, "Invalid query in request.")
		return
	}
	_, hasSourceType := q["source_type"]
	sumSlice := []*summary.Summary{}
	if r.FormValue("include") == "false" && r.FormValue("head") == "true" && len(q) == 1 && hasSourceType {
		sumMap := summaries.Get()
		corpus := q["source_type"]
		for _, s := range sumMap {
			if util.In(s.Corpus, corpus) {
				sumSlice = append(sumSlice, s)
			}
		}
	} else {
		glog.Infof("%q %q %q", r.FormValue("query"), r.FormValue("include"), r.FormValue("head"))
		sumMap, err := summaries.CalcSummaries(nil, r.FormValue("query"), r.FormValue("include") == "true", r.FormValue("head") == "true")
		if err != nil {
			util.ReportError(w, r, err, "Failed to calculate summaries.")
			return
		}
		for _, s := range sumMap {
			sumSlice = append(sumSlice, s)
		}
	}
	sort.Sort(SummarySlice(sumSlice))
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	if err := enc.Encode(sumSlice); err != nil {
		glog.Errorf("Failed to write or encode result: %s", err)
	}
}
Example #13
// Validate returns an error iff there is some problem with the issue.
func (i *AutoRollIssue) Validate() error {
	if i.Closed {
		if i.Result == ROLL_RESULT_IN_PROGRESS {
			return fmt.Errorf("AutoRollIssue cannot have a Result of %q if it is Closed.", ROLL_RESULT_IN_PROGRESS)
		}
		if i.CommitQueue {
			return errors.New("AutoRollIssue cannot be marked CommitQueue if it is Closed.")
		}
	} else {
		if i.Committed {
			return errors.New("AutoRollIssue cannot be Committed without being Closed.")
		}
		if !util.In(i.Result, OPEN_ROLL_VALID_RESULTS) {
			return fmt.Errorf("AutoRollIssue which is not Closed must have as a Result one of: %v", OPEN_ROLL_VALID_RESULTS)
		}
	}
	return nil
}
Example #14
// changeHandler changes the status of a service.
//
// Takes the following query parameters:
//
//   name - The name of the service.
//   action - The action to perform. One of ["start", "stop", "restart"].
//
// The response is of the form:
//
//   {
//     "result": "started"
//   }
//
func changeHandler(w http.ResponseWriter, r *http.Request) {
	var err error

	if err := r.ParseForm(); err != nil {
		util.ReportError(w, r, err, "Failed to parse form.")
		return
	}
	action := r.Form.Get("action")
	if !util.In(action, ACTIONS) {
		util.ReportError(w, r, fmt.Errorf("Not a valid action: %s", action), "Invalid action.")
		return
	}
	name := r.Form.Get("name")
	if name == "" {
		util.ReportError(w, r, fmt.Errorf("Not a valid service name: %s", name), "Invalid service name.")
		return
	}
	if *local {
		w.Header().Set("Content-Type", "application/json")
		if err := json.NewEncoder(w).Encode(ChangeResult{"started"}); err != nil {
			glog.Errorf("Failed to write or encode output: %s", err)
		}
		return
	}
	ch := make(chan string)
	switch action {
	case "start":
		_, err = dbc.StartUnit(name, "replace", ch)
	case "stop":
		_, err = dbc.StopUnit(name, "replace", ch)
	case "restart":
		_, err = dbc.RestartUnit(name, "replace", ch)
	}
	if err != nil {
		util.ReportError(w, r, err, "Action failed.")
		return
	}
	res := ChangeResult{}
	res.Result = <-ch
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(res); err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
	}
}
Example #15
// Add inserts a new ModeChange.
func (mh *ModeHistory) Add(m, user, message string) error {
	if !util.In(m, VALID_MODES) {
		return fmt.Errorf("Invalid mode: %s", m)
	}

	modeChange := &ModeChange{
		Message: message,
		Mode:    m,
		Time:    time.Now(),
		User:    user,
	}

	mh.mtx.Lock()
	defer mh.mtx.Unlock()
	if err := mh.db.SetMode(modeChange); err != nil {
		return err
	}
	return mh.refreshHistory()
}
Example #16
// SkpCommits returns the indices for all the commits that contain SKP updates.
func (g *GitInfo) SkpCommits(tile *tiling.Tile) ([]int, error) {
	// Executes a git log command that looks like:
	//
	//   git log --format=format:%H  32956400b4d8f33394e2cdef9b66e8369ba2a0f3..e7416bfc9858bde8fc6eb5f3bfc942bc3350953a SKP_VERSION
	//
	// The output should be a \n separated list of hashes that match.
	first, last := tile.CommitRange()
	output, err := exec.RunCwd(g.dir, "git", "log", "--format=format:%H", first+".."+last, "SKP_VERSION")
	if err != nil {
		return nil, fmt.Errorf("SkpCommits: Failed to find git log of SKP_VERSION: %s", err)
	}
	hashes := strings.Split(output, "\n")

	ret := []int{}
	for i, c := range tile.Commits {
		if c.CommitTime != 0 && util.In(c.Hash, hashes) {
			ret = append(ret, i)
		}
	}
	return ret, nil
}
Example #17
// updateCurrentRoll retrieves updated information about the current DEPS roll.
func (r *AutoRoller) updateCurrentRoll() error {
	r.mtx.Lock()
	defer r.mtx.Unlock()

	currentRoll := r.recent.CurrentRoll()
	if currentRoll == nil {
		return nil
	}
	currentResult := currentRoll.Result

	updated, err := r.retrieveRoll(currentRoll.Issue)
	if err != nil {
		return err
	}

	// We have to rely on data we store for the dry run case.
	if !updated.Closed && util.In(currentResult, autoroll.DRY_RUN_RESULTS) {
		updated.Result = currentResult
	}

	// If the current roll succeeded, we need to make sure we update the
	// repo so that we see the roll commit. This can take some time, so
	// we have to repeatedly update until we see the commit.
	if updated.Committed {
		glog.Infof("Roll succeeded (%d); syncing the repo until it lands.", currentRoll.Issue)
		for {
			glog.Info("Syncing...")
			if err := r.rm.ForceUpdate(); err != nil {
				return err
			}
			if r.rm.RolledPast(currentRoll.RollingTo) {
				break
			}
			time.Sleep(10 * time.Second)
		}
	}
	return r.recent.Update(updated)
}
Example #18
// addBenchDataToTile adds BenchData to a Tile.
//
// See the description at the top of this file for how the mapping works.
func addBenchDataToTile(benchData *BenchData, tile *tiling.Tile, offset int, counter metrics.Counter) {

	// cb is the anonymous closure we'll pass over all the trace values found in benchData.
	cb := func(key string, value float64, params map[string]string) {
		needsUpdate := false
		var trace *types.PerfTrace
		if tr, ok := tile.Traces[key]; !ok {
			trace = types.NewPerfTrace()
			tile.Traces[key] = trace
			needsUpdate = true
		} else {
			trace = tr.(*types.PerfTrace)
			if !util.MapsEqual(params, tile.Traces[key].Params()) {
				needsUpdate = true
			}
		}
		trace.Params_ = params
		trace.Values[offset] = value
		counter.Inc(1)

		if needsUpdate {
			// Update the Tile's ParamSet with any new keys or values we see.
			//
			// TODO(jcgregorio) Maybe defer this until we are about to Put the Tile
			// back to disk and rebuild ParamSet from scratch over all the Traces.
			for k, v := range params {
				if _, ok := tile.ParamSet[k]; !ok {
					tile.ParamSet[k] = []string{v}
				} else if !util.In(v, tile.ParamSet[k]) {
					tile.ParamSet[k] = append(tile.ParamSet[k], v)
				}
			}
		}
	}

	benchData.ForEach(cb)
}
Example #19
// updateRepo syncs the given repo and returns a set of BuildCandidates for it.
func (q *BuildQueue) updateRepo(repoUrl string, repo *gitinfo.GitInfo, now time.Time) (map[string][]*BuildCandidate, error) {
	from := now.Add(-q.period)
	if q.period == PERIOD_FOREVER {
		from = time.Unix(0, 0)
	}
	recentCommits := repo.From(from)
	commitDetails := map[string]*gitinfo.LongCommit{}
	for _, c := range recentCommits {
		details, err := repo.Details(c)
		if err != nil {
			return nil, err
		}
		commitDetails[c] = details
	}

	// Get all builds associated with the recent commits.
	buildsByCommit, err := buildbot.GetBuildsForCommits(recentCommits, map[int]bool{})
	if err != nil {
		return nil, err
	}

	// Find the sets of all bots and masters, organize builds by
	// commit/builder and builder/number.
	masters := map[string]string{}
	builds := map[string]map[string]*buildbot.Build{}
	buildsByBuilderAndNum := map[string]map[int]*buildbot.Build{}
	for commit, buildsForCommit := range buildsByCommit {
		builds[commit] = map[string]*buildbot.Build{}
		for _, build := range buildsForCommit {
			if !util.In(build.Builder, q.botWhitelist) {
				continue
			}
			masters[build.Builder] = build.Master
			builds[commit][build.Builder] = build
			if _, ok := buildsByBuilderAndNum[build.Builder]; !ok {
				buildsByBuilderAndNum[build.Builder] = map[int]*buildbot.Build{}
			}
			buildsByBuilderAndNum[build.Builder][build.Number] = build
		}
	}
	allBots := make([]string, 0, len(masters))
	for builder := range masters {
		allBots = append(allBots, builder)
	}

	// Find the current scores for each commit/builder pair.
	currentScores := map[string]map[string]float64{}
	for _, commit := range recentCommits {
		myBuilds, ok := builds[commit]
		if !ok {
			myBuilds = map[string]*buildbot.Build{}
		}
		currentScores[commit] = map[string]float64{}
		for _, builder := range allBots {
			currentScores[commit][builder] = scoreBuild(commitDetails[commit], myBuilds[builder], now, q.timeLambda)
		}
	}

	// For each commit/builder pair, determine the score increase obtained
	// by running a build at that commit.
	scoreIncrease := map[string]map[string]float64{}
	for _, commit := range recentCommits {
		scoreIncrease[commit] = map[string]float64{}
		for _, builder := range allBots {
			// Shortcut: Don't bisect builds with a huge number
			// of commits.  This saves lots of time and only affects
			// the first successful build for a bot.
			if _, ok := builds[commit][builder]; ok {
				if len(builds[commit][builder].Commits) > NO_BISECT_COMMIT_LIMIT {
					glog.Warningf("Skipping %s on %s; previous build has too many commits.", commit[0:7], builder)
					scoreIncrease[commit][builder] = 0.0
					continue
				}
			}

			newScores := map[string]float64{}
			// Pretend to create a new Build at the given commit.
			newBuild := buildbot.Build{
				Builder:     builder,
				Master:      masters[builder],
				Number:      math.MaxInt32,
				GotRevision: commit,
				Repository:  repoUrl,
			}
			commits, stealFrom, stolen, err := buildbot.FindCommitsForBuild(&newBuild, q.repos)
			if err != nil {
				return nil, err
			}
			// Re-score all commits in the new build.
			newBuild.Commits = commits
			for _, c := range commits {
				if _, ok := currentScores[c]; !ok {
					// If this build has commits which are outside of our window,
					// insert them into currentScores to account for them.
					score := scoreBuild(commitDetails[commit], builds[commit][builder], now, q.timeLambda)
					currentScores[c] = map[string]float64{
						builder: score,
					}
				}
				if _, ok := commitDetails[c]; !ok {
					d, err := repo.Details(c)
					if err != nil {
						return nil, err
					}
					commitDetails[c] = d
				}
				newScores[c] = scoreBuild(commitDetails[c], &newBuild, now, q.timeLambda)
			}
			// If the new build includes commits previously included in
			// another build, update scores for commits in the build we stole
			// them from.
			if stealFrom != -1 {
				stoleFromOrig, ok := buildsByBuilderAndNum[builder][stealFrom]
				if !ok {
					// The build may not be cached. Fall back on getting it from the DB.
					stoleFromOrig, err = buildbot.GetBuildFromDB(builder, masters[builder], stealFrom)
					if err != nil {
						return nil, err
					}
					buildsByBuilderAndNum[builder][stealFrom] = stoleFromOrig
				}
				// "copy" the build so that we can assign new commits to it
				// without modifying the cached build.
				stoleFromBuild := *stoleFromOrig
				newCommits := []string{}
				for _, c := range stoleFromBuild.Commits {
					if !util.In(c, stolen) {
						newCommits = append(newCommits, c)
					}
				}
				stoleFromBuild.Commits = newCommits
				for _, c := range stoleFromBuild.Commits {
					newScores[c] = scoreBuild(commitDetails[c], &stoleFromBuild, now, q.timeLambda)
				}
			}
			// Sum the old and new scores.
			// First, sort the old and new scores to help with numerical stability.
			oldScoresList := make([]float64, 0, len(newScores))
			newScoresList := make([]float64, 0, len(newScores))
			for c, score := range newScores {
				oldScoresList = append(oldScoresList, currentScores[c][builder])
				newScoresList = append(newScoresList, score)
			}
			sort.Sort(sort.Float64Slice(oldScoresList))
			sort.Sort(sort.Float64Slice(newScoresList))
			oldTotal := 0.0
			newTotal := 0.0
			for i := range oldScoresList {
				oldTotal += oldScoresList[i]
				newTotal += newScoresList[i]
			}
			scoreIncrease[commit][builder] = newTotal - oldTotal
		}
	}

	// Arrange the score increases by builder.
	candidates := map[string][]*BuildCandidate{}
	for commit, builders := range scoreIncrease {
		for builder, scoreIncrease := range builders {
			if _, ok := candidates[builder]; !ok {
				candidates[builder] = []*BuildCandidate{}
			}
			// Don't schedule builds below the given threshold.
			if scoreIncrease > q.scoreThreshold {
				candidates[builder] = append(candidates[builder], &BuildCandidate{
					Author:  commitDetails[commit].Author,
					Builder: builder,
					Commit:  commit,
					Repo:    repoUrl,
					Score:   scoreIncrease,
				})
			}
		}
	}

	return candidates, nil
}
Example #20
// isFinished returns true if the Build has finished running.
func isFinished(b *androidbuildinternal.Build) bool {
	return util.In(b.BuildAttemptStatus, terminal_build_status)
}
Example #21
func TestAdd(t *testing.T) {
	ts, cleanup := setupClientServerForTesting(t.Fatalf)
	defer cleanup()

	now := time.Unix(100, 0)

	commitIDs := []*CommitID{
		&CommitID{
			Timestamp: now.Unix(),
			ID:        "abc123",
			Source:    "master",
		},
		&CommitID{
			Timestamp: now.Add(time.Hour).Unix(),
			ID:        "xyz789",
			Source:    "master",
		},
	}

	entries := map[string]*Entry{
		"key:8888:android": &Entry{
			Params: map[string]string{
				"config":   "8888",
				"platform": "android",
				"type":     "skp",
			},
			Value: types.BytesFromFloat64(0.01),
		},
		"key:gpu:win8": &Entry{
			Params: map[string]string{
				"config":   "gpu",
				"platform": "win8",
				"type":     "skp",
			},
			Value: types.BytesFromFloat64(1.234),
		},
	}

	err := ts.Add(commitIDs[0], entries)

	assert.NoError(t, err)
	tile, hashes, err := ts.TileFromCommits(commitIDs)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(tile.Traces))
	assert.Equal(t, 2, len(tile.Commits))
	assert.Equal(t, 2, len(hashes))
	assert.True(t, util.In("d41d8cd98f00b204e9800998ecf8427e", hashes))
	assert.NotEqual(t, hashes[0], hashes[1])

	hashes, err = ts.ListMD5(commitIDs)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(hashes))
	assert.True(t, util.In("d41d8cd98f00b204e9800998ecf8427e", hashes))
	assert.NotEqual(t, hashes[0], hashes[1])

	tr := tile.Traces["key:8888:android"].(*types.PerfTrace)
	assert.Equal(t, 0.01, tr.Values[0])
	assert.True(t, tr.IsMissing(1))
	assert.Equal(t, "8888", tr.Params()["config"])

	tr = tile.Traces["key:gpu:win8"].(*types.PerfTrace)
	assert.Equal(t, 1.234, tr.Values[0])
	assert.True(t, tr.IsMissing(1))

	assert.Equal(t, "abc123", tile.Commits[0].Hash)
	assert.Equal(t, "xyz789", tile.Commits[1].Hash)

	foundCommits, err := ts.List(now, now.Add(time.Hour))
	assert.NoError(t, err)
	assert.Equal(t, 1, len(foundCommits))
}
Example #22
// CalcSummaries returns a Summary for each test that matches the given input filters.
//
// testNames
//   If not nil or empty then restrict the results to only tests that appear in this slice.
// query
//   URL encoded paramset to use for filtering.
// includeIgnores
//   Boolean, if true then include all digests in the results, including ones normally hidden
//   by the ignores list.
// head
//   Only consider digests at head if true.
//
func (s *Summaries) CalcSummaries(testNames []string, query string, includeIgnores bool, head bool) (map[string]*Summary, error) {
	defer timer.New("CalcSummaries").Stop()
	glog.Infof("CalcSummaries: includeIgnores %v head %v", includeIgnores, head)

	t := timer.New("CalcSummaries:GetLastTileTrimmed")
	tile, err := s.storages.GetLastTileTrimmed(includeIgnores)
	t.Stop()
	if err != nil {
		return nil, fmt.Errorf("Couldn't retrieve tile: %s", err)
	}
	q, err := url.ParseQuery(query)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse Query in CalcSummaries: %s", err)
	}

	ret := map[string]*Summary{}

	t = timer.New("CalcSummaries:Expectations")
	e, err := s.storages.ExpectationsStore.Get()
	t.Stop()
	if err != nil {
		return nil, fmt.Errorf("Couldn't get expectations: %s", err)
	}

	// Filter down to just the traces we are interested in, based on query.
	filtered := map[string][]*TraceID{}
	t = timer.New("Filter Traces")
	for id, tr := range tile.Traces {
		name := tr.Params()[types.PRIMARY_KEY_FIELD]
		if len(testNames) > 0 && !util.In(name, testNames) {
			continue
		}
		if tiling.Matches(tr, q) {
			if slice, ok := filtered[name]; ok {
				filtered[name] = append(slice, &TraceID{tr: tr, id: id})
			} else {
				filtered[name] = []*TraceID{&TraceID{tr: tr, id: id}}
			}
		}
	}
	t.Stop()

	traceTally := s.tallies.ByTrace()

	// Now create summaries for each test using the filtered set of traces.
	t = timer.New("Tally up the filtered traces")
	lastCommitIndex := tile.LastCommitIndex()
	for name, traces := range filtered {
		digests := map[string]bool{}
		corpus := ""
		for _, trid := range traces {
			corpus = trid.tr.Params()["source_type"]
			if head {
				// Find the last non-missing value in the trace.
				for i := lastCommitIndex; i >= 0; i-- {
					if trid.tr.IsMissing(i) {
						continue
					} else {
						digests[trid.tr.(*types.GoldenTrace).Values[i]] = true
						break
					}
				}
			} else {
				// Use the traceTally if available, otherwise just inspect the trace.
				if t, ok := traceTally[trid.id]; ok {
					for k := range t {
						digests[k] = true
					}
				} else {
					for i := lastCommitIndex; i >= 0; i-- {
						if !trid.tr.IsMissing(i) {
							digests[trid.tr.(*types.GoldenTrace).Values[i]] = true
						}
					}
				}
			}
		}
		ret[name] = s.makeSummary(name, e, s.storages.DiffStore, corpus, util.KeysOfStringSet(digests))
	}
	t.Stop()

	return ret, nil
}
Example #23
// stateHandler handles the GET of the JSON.
func stateHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	allAvailable := packageInfo.AllAvailable()
	allInstalled := packageInfo.AllInstalled()

	// Update allInstalled to add in missing applications.
	//
	// Loop over 'config' and make sure each server and application is
	// represented, adding in "appName/" placeholders as package names where
	// appropriate. This is to bootstrap the case where an app is configured to
	// be available for a server, but no package for that application has been
	// installed yet.
	serversSeen := map[string]bool{}
	for name, installed := range allInstalled {
		installedNames := appNames(installed.Names)
		for _, expected := range config.Servers[name].AppNames {
			if !util.In(expected, installedNames) {
				installed.Names = append(installed.Names, expected+"/")
			}
		}
		allInstalled[name] = installed
		serversSeen[name] = true
	}

	// Now loop over config.Servers and find servers that don't have
	// any installed applications. Add them to allInstalled.
	for name, expected := range config.Servers {
		if _, ok := serversSeen[name]; ok {
			continue
		}
		installed := []string{}
		for _, appName := range expected.AppNames {
			installed = append(installed, appName+"/")
		}
		allInstalled[name].Names = installed
	}

	if r.Method == "POST" {
		if login.LoggedInAs(r) == "" {
			util.ReportError(w, r, fmt.Errorf("You must be logged on to push."), "")
			return
		}
		push := PushNewPackage{}
		dec := json.NewDecoder(r.Body)
		defer util.Close(r.Body)
		if err := dec.Decode(&push); err != nil {
			util.ReportError(w, r, fmt.Errorf("Failed to decode push request"), "Failed to decode push request")
			return
		}
		if installedPackages, ok := allInstalled[push.Server]; !ok {
			util.ReportError(w, r, fmt.Errorf("Unknown server name"), "Unknown server name")
			return
		} else {
			// Find a string starting with the same appname, replace it with
			// push.Name. Leave all other package names unchanged.
			appName := strings.Split(push.Name, "/")[0]
			newInstalled := []string{}
			for _, oldName := range installedPackages.Names {
				goodName := oldName
				if strings.Split(oldName, "/")[0] == appName {
					goodName = push.Name
				}
				newInstalled = append(newInstalled, goodName)
			}
			glog.Infof("Updating %s with %#v giving %#v", push.Server, push.Name, newInstalled)
			if err := packageInfo.PutInstalled(push.Server, newInstalled, installedPackages.Generation); err != nil {
				util.ReportError(w, r, err, "Failed to update server.")
				return
			}
			resp, err := client.Get(fmt.Sprintf("http://%s:10114/pullpullpull", push.Server))
			if err != nil || resp == nil {
				glog.Infof("Failed to trigger an instant pull for server %s: %v %v", push.Server, err, resp)
			} else {
				util.Close(resp.Body)
			}
			allInstalled[push.Server].Names = newInstalled
		}
	}

	// The response to either a GET or a POST is an up to date ServersUI.
	servers := serversFromAllInstalled(allInstalled)
	enc := json.NewEncoder(w)
	err := enc.Encode(AllUI{
		Servers:  servers,
		Packages: allAvailable,
		IP:       ip.Get(),
		Status:   serviceStatus(servers),
	})
	if err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
		return
	}
}
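Example #24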
func TestImpl(t *testing.T) {
	ts, err := NewTraceServiceServer(FILENAME)
	assert.NoError(t, err)
	defer util.Close(ts)
	defer cleanup()

	now := time.Unix(100, 0)

	first := now.Unix()
	second := now.Add(time.Minute).Unix()

	commitIDs := []*CommitID{
		&CommitID{
			Timestamp: first,
			Id:        "abc123",
			Source:    "master",
		},
		&CommitID{
			Timestamp: second,
			Id:        "xyz789",
			Source:    "master",
		},
	}

	params := &AddParamsRequest{
		Params: []*ParamsPair{
			&ParamsPair{
				Key: "key:8888:android",
				Params: map[string]string{
					"config":   "8888",
					"platform": "android",
					"type":     "skp",
				},
			},
			&ParamsPair{
				Key: "key:gpu:win8",
				Params: map[string]string{
					"config":   "gpu",
					"platform": "win8",
					"type":     "skp",
				},
			},
		},
	}

	ctx := context.Background()

	// First confirm that Ping() works.
	_, err = ts.Ping(ctx, &Empty{})
	assert.Nil(t, err)

	// Confirm that these traceids don't have Params stored in the db yet.
	missingRequest := &MissingParamsRequest{
		Traceids: []string{"key:8888:android", "key:gpu:win8"},
	}
	missingResp, err := ts.MissingParams(ctx, missingRequest)
	assert.NoError(t, err)
	assert.Equal(t, missingResp.Traceids, missingRequest.Traceids)

	// Now add the Params for them.
	_, err = ts.AddParams(ctx, params)
	assert.NoError(t, err)

	// Confirm the missing list is now empty.
	nowMissing, err := ts.MissingParams(ctx, missingRequest)
	assert.NoError(t, err)
	assert.Equal(t, []string{}, nowMissing.Traceids)

	addReq := &AddRequest{
		Commitid: commitIDs[0],
		Values: []*ValuePair{
			&ValuePair{
				Key:   "key:gpu:win8",
				Value: types.BytesFromFloat64(1.234),
			},
			&ValuePair{
				Key:   "key:8888:android",
				Value: types.BytesFromFloat64(0.01),
			},
		},
	}

	// Add a commit.
	_, err = ts.Add(ctx, addReq)
	assert.NoError(t, err)

	// List, GetValues, and GetParams for the added commit.
	listReq := &ListRequest{
		Begin: first,
		End:   second,
	}
	listResp, err := ts.List(ctx, listReq)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(listResp.Commitids))
	assert.Equal(t, "abc123", listResp.Commitids[0].Id)

	valuesReq := &GetValuesRequest{
		Commitid: commitIDs[0],
	}
	valuesResp, err := ts.GetValues(ctx, valuesReq)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(valuesResp.Values))
	expected := map[string]float64{
		"key:gpu:win8":     1.234,
		"key:8888:android": 0.01,
	}
	for _, v := range valuesResp.Values {
		assert.Equal(t, expected[v.Key], math.Float64frombits(binary.LittleEndian.Uint64(v.Value)))
	}
	assert.NotEqual(t, "", valuesResp.Md5)

	paramsReq := &GetParamsRequest{
		Traceids: []string{"key:8888:android", "key:gpu:win8"},
	}
	paramsResp, err := ts.GetParams(ctx, paramsReq)
	assert.NoError(t, err)
	assert.Equal(t, "8888", paramsResp.Params[0].Params["config"])
	assert.Equal(t, "win8", paramsResp.Params[1].Params["platform"])

	// Request the raw data for the commit.
	rawRequest := &GetValuesRequest{
		Commitid: commitIDs[0],
	}
	rawResp, err := ts.GetValuesRaw(ctx, rawRequest)
	assert.NoError(t, err)
	assert.Equal(t, 34, len(rawResp.Value))
	assert.Equal(t, valuesResp.Md5, rawResp.Md5, "Should get the same md5 regardless of how you request the data.")
	// Confirm that we can decode the info on the client side.
	ci, err := NewCommitInfo(rawResp.Value)
	assert.NoError(t, err)

	// The keys are trace64ids, so test that we can convert them to traceids,
	// i.e. from uint64's to strings.
	keys64 := []uint64{}
	for k := range ci.Values {
		keys64 = append(keys64, k)
	}
	assert.Equal(t, 2, len(keys64))
	traceidsRequest := &GetTraceIDsRequest{
		Id: keys64,
	}
	traceids, err := ts.GetTraceIDs(ctx, traceidsRequest)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(traceids.Ids))
	assert.True(t, util.In(traceids.Ids[0].Id, paramsReq.Traceids))
	assert.True(t, util.In(traceids.Ids[1].Id, paramsReq.Traceids))

	// List the md5s.
	listMD5Request := &ListMD5Request{
		Commitid: commitIDs,
	}
	listMD5Response, err := ts.ListMD5(ctx, listMD5Request)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(listMD5Response.Commitmd5))
	hashes := []string{listMD5Response.Commitmd5[0].Md5, listMD5Response.Commitmd5[1].Md5}
	assert.True(t, util.In("d41d8cd98f00b204e9800998ecf8427e", hashes))
	assert.NotEqual(t, hashes[0], hashes[1])

	// Remove the commit.
	removeRequest := &RemoveRequest{
		Commitid: commitIDs[0],
	}
	_, err = ts.Remove(ctx, removeRequest)
	assert.NoError(t, err)

	listResp, err = ts.List(ctx, listReq)
	assert.NoError(t, err)
	assert.Equal(t, 0, len(listResp.Commitids))
}
Example #25
// TestFindCommitsForBuild verifies that findCommitsForBuild correctly obtains
// the list of commits which were newly built in a given build.
func TestFindCommitsForBuild(t *testing.T) {
	testutils.SkipIfShort(t)
	httpClient = testHttpClient
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// The test repo is laid out like this:
	//
	// *   06eb2a58139d3ff764f10232d5c8f9362d55e20f I (HEAD, master, Build #4)
	// *   ecb424466a4f3b040586a062c15ed58356f6590e F (Build #3)
	// |\
	// | * d30286d2254716d396073c177a754f9e152bbb52 H
	// | * 8d2d1247ef5d2b8a8d3394543df6c12a85881296 G (Build #2)
	// * | 67635e7015d74b06c00154f7061987f426349d9f E
	// * | 6d4811eddfa637fac0852c3a0801b773be1f260d D (Build #1)
	// * | d74dfd42a48325ab2f3d4a97278fc283036e0ea4 C (Build #6)
	// |/
	// *   4b822ebb7cedd90acbac6a45b897438746973a87 B (Build #0)
	// *   051955c355eb742550ddde4eccc3e90b6dc5b887 A
	//
	hashes := map[rune]string{
		'A': "051955c355eb742550ddde4eccc3e90b6dc5b887",
		'B': "4b822ebb7cedd90acbac6a45b897438746973a87",
		'C': "d74dfd42a48325ab2f3d4a97278fc283036e0ea4",
		'D': "6d4811eddfa637fac0852c3a0801b773be1f260d",
		'E': "67635e7015d74b06c00154f7061987f426349d9f",
		'F': "ecb424466a4f3b040586a062c15ed58356f6590e",
		'G': "8d2d1247ef5d2b8a8d3394543df6c12a85881296",
		'H': "d30286d2254716d396073c177a754f9e152bbb52",
		'I': "06eb2a58139d3ff764f10232d5c8f9362d55e20f",
	}

	// Test cases. Each test case builds on the previous cases.
	testCases := []struct {
		GotRevision string
		Expected    []string
		StoleFrom   int
		Stolen      []string
	}{
		// 0. The first build.
		{
			GotRevision: hashes['B'],
			Expected:    []string{hashes['B'], hashes['A']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 1. On a linear set of commits, with at least one previous build.
		{
			GotRevision: hashes['D'],
			Expected:    []string{hashes['D'], hashes['C']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 2. The first build on a new branch.
		{
			GotRevision: hashes['G'],
			Expected:    []string{hashes['G']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 3. After a merge.
		{
			GotRevision: hashes['F'],
			Expected:    []string{hashes['F'], hashes['E'], hashes['H']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 4. One last "normal" build.
		{
			GotRevision: hashes['I'],
			Expected:    []string{hashes['I']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 5. No GotRevision.
		{
			GotRevision: "",
			Expected:    []string{},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 6. Steal commits from a previously-ingested build.
		{
			GotRevision: hashes['C'],
			Expected:    []string{hashes['C']},
			StoleFrom:   1,
			Stolen:      []string{hashes['C']},
		},
	}
	master := "client.skia"
	builder := "Test-Ubuntu12-ShuttleA-GTX660-x86-Release"
	for buildNum, tc := range testCases {
		build, err := getBuildFromMaster(master, builder, buildNum, repos)
		assert.Nil(t, err)
		assert.Nil(t, IngestBuild(build, repos))
		ingested, err := GetBuildFromDB(builder, master, buildNum)
		assert.Nil(t, err)
		assert.True(t, util.SSliceEqual(ingested.Commits, tc.Expected), fmt.Sprintf("Commits for build do not match expectation.\nGot:  %v\nWant: %v", ingested.Commits, tc.Expected))
	}

	// Extra: ensure that build #6 really stole the commit from #1.
	b, err := GetBuildFromDB(builder, master, 1)
	assert.Nil(t, err)
	assert.False(t, util.In(hashes['C'], b.Commits), fmt.Sprintf("Expected not to find %s in %v", hashes['C'], b.Commits))
}
Example #26
// getBestCandidate finds the best BuildCandidate for the given builder.
func (q *BuildQueue) getBestCandidate(bc *buildCache, recentCommits []string, now time.Time) (float64, *buildbot.Build, *buildbot.Build, error) {
	errMsg := fmt.Sprintf("Failed to get best candidate for %s: %%v", bc.Builder)
	repo, err := q.repos.Repo(bc.Repo)
	if err != nil {
		return 0.0, nil, nil, fmt.Errorf(errMsg, err)
	}
	// Find the current scores for each commit.
	currentScores := map[string]float64{}
	for _, commit := range recentCommits {
		currentBuild, err := bc.getBuildForCommit(commit)
		if err != nil {
			return 0.0, nil, nil, fmt.Errorf(errMsg, err)
		}
		d, err := repo.Details(commit)
		if err != nil {
			return 0.0, nil, nil, err
		}
		currentScores[commit] = scoreBuild(d, currentBuild, now, q.timeLambda)
	}

	// For each commit/builder pair, determine the score increase obtained
	// by running a build at that commit.
	scoreIncrease := map[string]float64{}
	newBuildsByCommit := map[string]*buildbot.Build{}
	stoleFromByCommit := map[string]*buildbot.Build{}
	for _, commit := range recentCommits {
		// Shortcut: Don't bisect builds with a huge number
		// of commits.  This saves lots of time and only affects
		// the first successful build for a bot.
		b, err := bc.getBuildForCommit(commit)
		if err != nil {
			return 0.0, nil, nil, fmt.Errorf(errMsg, err)
		}
		if b != nil {
			if len(b.Commits) > NO_BISECT_COMMIT_LIMIT {
				glog.Warningf("Skipping %s on %s; previous build has too many commits (#%d)", commit[0:7], b.Builder, b.Number)
				scoreIncrease[commit] = 0.0
				break // Don't bother looking at previous commits either, since these will be out of range.
			}
		}

		newScores := map[string]float64{}
		// Pretend to create a new Build at the given commit.
		newBuild := buildbot.Build{
			Builder:     bc.Builder,
			Master:      bc.Master,
			Number:      bc.MaxBuildNum + 1,
			GotRevision: commit,
			Repository:  bc.Repo,
		}
		commits, stealFrom, stolen, err := buildbot.FindCommitsForBuild(bc, &newBuild, q.repos)
		if err != nil {
			return 0.0, nil, nil, fmt.Errorf(errMsg, err)
		}
		// Re-score all commits in the new build.
		newBuild.Commits = commits
		for _, c := range commits {
			d, err := repo.Details(c)
			if err != nil {
				return 0.0, nil, nil, fmt.Errorf(errMsg, err)
			}
			if _, ok := currentScores[c]; !ok {
				// If this build has commits which are outside of our window,
				// insert them into currentScores to account for them.
				b, err := bc.getBuildForCommit(c)
				if err != nil {
					return 0.0, nil, nil, fmt.Errorf(errMsg, err)
				}
				score := scoreBuild(d, b, now, q.timeLambda)
				currentScores[c] = score
			}
			newScores[c] = scoreBuild(d, &newBuild, now, q.timeLambda)
		}
		newBuildsByCommit[commit] = &newBuild
		// If the new build includes commits previously included in
		// another build, update scores for commits in the build we stole
		// them from.
		if stealFrom != -1 {
			stoleFromOrig, err := bc.getByNumber(stealFrom)
			if err != nil {
				return 0.0, nil, nil, fmt.Errorf(errMsg, err)
			}
			if stoleFromOrig == nil {
				// The build may not be cached. Fall back on getting it from the DB.
				stoleFromOrig, err = buildbot.GetBuildFromDB(bc.Builder, bc.Master, stealFrom)
				if err != nil {
					return 0.0, nil, nil, fmt.Errorf(errMsg, err)
				}
				if err := bc.Put(stoleFromOrig); err != nil {
					return 0.0, nil, nil, err
				}
			}
			// "copy" the build so that we can assign new commits to it
			// without modifying the cached build.
			stoleFromBuild := *stoleFromOrig
			newCommits := []string{}
			for _, c := range stoleFromBuild.Commits {
				if !util.In(c, stolen) {
					newCommits = append(newCommits, c)
				}
			}
			stoleFromBuild.Commits = newCommits
			for _, c := range stoleFromBuild.Commits {
				d, err := repo.Details(c)
				if err != nil {
					return 0.0, nil, nil, err
				}
				newScores[c] = scoreBuild(d, &stoleFromBuild, now, q.timeLambda)
			}
			stoleFromByCommit[commit] = &stoleFromBuild
		}
		// Sum the old and new scores.
		oldScoresList := make([]float64, 0, len(newScores))
		newScoresList := make([]float64, 0, len(newScores))
		for c, score := range newScores {
			oldScoresList = append(oldScoresList, currentScores[c])
			newScoresList = append(newScoresList, score)
		}
		oldTotal := util.Float64StableSum(oldScoresList)
		newTotal := util.Float64StableSum(newScoresList)
		scoreIncrease[commit] = newTotal - oldTotal
	}

	// Arrange the score increases by builder.
	candidates := []*BuildCandidate{}
	for commit, increase := range scoreIncrease {
		candidates = append(candidates, &BuildCandidate{
			Commit: commit,
			Score:  increase,
		})
	}
	sort.Sort(BuildCandidateSlice(candidates))
	best := candidates[len(candidates)-1]

	return best.Score, newBuildsByCommit[best.Commit], stoleFromByCommit[best.Commit], nil
}
Example #27
// Handler serves the /annotate/ endpoint for changing the status of an
// alert cluster. It also writes a new types.Activity log record to the database.
//
// Expects a POST of JSON of the following form:
//
//   {
//     Id: 20                - The id of the alerting cluster.
//     Status: "Ignore"      - The new Status value.
//     Message: "SKP Update" - The new Message value.
//   }
//
// Returns JSON of the form:
//
//  {
//    "Bug": "http://"
//  }
//
// Where Bug, if set, is the URL the user should be directed to in order to log a bug report.
func Handler(w http.ResponseWriter, r *http.Request) {
	glog.Infof("Annotate Handler: %q\n", r.URL.Path)

	if login.LoggedInAs(r) == "" {
		util.ReportError(w, r, fmt.Errorf("Not logged in."), "You must be logged in to change an alert status.")
		return
	}
	if r.Method != "POST" {
		http.NotFound(w, r)
		return
	}
	if r.Body == nil {
		util.ReportError(w, r, fmt.Errorf("Missing POST Body."), "POST with no request body.")
		return
	}

	req := struct {
		Id      int64
		Status  string
		Message string
	}{}
	dec := json.NewDecoder(r.Body)
	if err := dec.Decode(&req); err != nil {
		util.ReportError(w, r, err, "Unable to decode posted JSON.")
		return
	}

	if !util.In(req.Status, types.ValidStatusValues) {
		util.ReportError(w, r, fmt.Errorf("Invalid status value: %s", req.Status), "Unknown value.")
		return
	}

	// Store the updated values in the ClusterSummary.
	c, err := alerting.Get(req.Id)
	if err != nil {
		util.ReportError(w, r, err, "Failed to load cluster summary.")
		return
	}
	c.Status = req.Status
	c.Message = req.Message
	if err := alerting.Write(c); err != nil {
		util.ReportError(w, r, err, "Failed to save cluster summary.")
		return
	}

	// Write a new Activity record.
	// TODO(jcgregorio) Move into alerting.Write().
	a := &types.Activity{
		UserID: login.LoggedInAs(r),
		Action: "Perf Alert: " + req.Status,
		URL:    fmt.Sprintf("https://perf.skia.org/cl/%d", req.Id),
	}
	if err := activitylog.Write(a); err != nil {
		util.ReportError(w, r, err, "Failed to save activity.")
		return
	}

	retval := map[string]string{}

	if req.Status == "Bug" {
		q := url.Values{
			"labels":  []string{"FromSkiaPerf,Type-Defect,Priority-Medium"},
			"comment": []string{fmt.Sprintf(ISSUE_COMMENT_TEMPLATE, req.Id)},
		}
		retval["Bug"] = "https://bugs.chromium.org/p/skia/issues/entry?" + q.Encode()
	}
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	if err := enc.Encode(retval); err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
	}
}
Example #28
func StartAlertRoutines(am *alerting.AlertManager, tickInterval time.Duration, c *influxdb.Client) {
	emailAction, err := alerting.ParseAction("Email([email protected])")
	if err != nil {
		glog.Fatal(err)
	}
	actions := []alerting.Action{emailAction}

	// Disconnected buildslaves.
	go func() {
		seriesTmpl := "buildbot.buildslaves.%s.connected"
		re := regexp.MustCompile("[^A-Za-z0-9]+")
		for range time.Tick(tickInterval) {
			glog.Info("Loading buildslave data.")
			slaves, err := buildbot.GetBuildSlaves()
			if err != nil {
				glog.Error(err)
				continue
			}
			for masterName, m := range slaves {
				for _, s := range m {
					if util.In(s.Name, BUILDSLAVE_OFFLINE_BLACKLIST) {
						continue
					}
					v := int64(0)
					if s.Connected {
						v = int64(1)
					}
					metric := fmt.Sprintf(seriesTmpl, re.ReplaceAllString(s.Name, "_"))
					metrics.GetOrRegisterGauge(metric, metrics.DefaultRegistry).Update(v)
					if !s.Connected {
						// This buildslave is offline. Figure out which one it is.
						if err := am.AddAlert(&alerting.Alert{
							Name:        fmt.Sprintf("Buildslave %s offline", s.Name),
							Category:    alerting.INFRA_ALERT,
							Message:     fmt.Sprintf(BUILDSLAVE_OFFLINE, s.Name, masterName, s.Name, s.Name, s.Name),
							Nag:         int64(time.Hour),
							AutoDismiss: int64(2 * tickInterval),
							Actions:     actions,
						}); err != nil {
							glog.Error(err)
						}
					}
				}
			}
		}
	}()

	// AutoRoll failure.
	go func() {
		getDepsRollStatus := func() (*autoroller.AutoRollStatus, error) {
			resp, err := http.Get(autoroll.AUTOROLL_STATUS_URL)
			if err != nil {
				return nil, err
			}

			defer util.Close(resp.Body)
			var status autoroller.AutoRollStatus
			if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
				return nil, err
			}
			return &status, nil
		}

		for range time.Tick(time.Minute) {
			glog.Infof("Searching for DEPS rolls.")
			status, err := getDepsRollStatus()
			if err != nil {
				util.LogErr(fmt.Errorf("Failed to search for DEPS rolls: %v", err))
				continue
			}

			activeAlert := am.ActiveAlert(AUTOROLL_ALERT_NAME)
			if status.LastRoll != nil {
				if status.LastRoll.Closed {
					if status.LastRoll.Succeeded() {
						if activeAlert != 0 {
							msg := fmt.Sprintf("Subsequent roll succeeded: %s/%d", autoroll.RIETVELD_URL, status.LastRoll.Issue)
							if err := am.Dismiss(activeAlert, alerting.USER_ALERTSERVER, msg); err != nil {
								util.LogErr(err)
							}
						}
					} else if status.LastRoll.Failed() {
						if err := am.AddAlert(&alerting.Alert{
							Name:    AUTOROLL_ALERT_NAME,
							Message: fmt.Sprintf("DEPS roll failed: %s/%d", autoroll.RIETVELD_URL, status.LastRoll.Issue),
							Nag:     int64(3 * time.Hour),
							Actions: actions,
						}); err != nil {
							util.LogErr(err)
						}
					}
				}
			}
		}
	}()

	// Android device disconnects, hung buildslaves.
	go func() {
		// These builders are frequently slow. Ignore them when looking for hung buildslaves.
		hungSlavesIgnore := []string{
			"Housekeeper-Nightly-RecreateSKPs_Canary",
			"Housekeeper-Weekly-RecreateSKPs",
			"Linux Builder",
			"Mac Builder",
			"Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-Valgrind",
			"Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind",
			"Win Builder",
		}
		hangTimePeriod := 3 * time.Hour
		for range time.Tick(tickInterval) {
			glog.Infof("Searching for hung buildslaves and disconnected Android devices.")
			builds, err := buildbot.GetUnfinishedBuilds()
			if err != nil {
				glog.Error(err)
				continue
			}
			for _, b := range builds {
				// Disconnected Android device?
				disconnectedAndroid := false
				if strings.Contains(b.Builder, "Android") && !strings.Contains(b.Builder, "Build") {
					for _, s := range b.Steps {
						if strings.Contains(s.Name, "wait for device") {
							// If "wait for device" has been running for 10 minutes, the device is probably offline.
							if s.Finished == 0 && time.Since(time.Unix(int64(s.Started), 0)) > 10*time.Minute {
								if err := am.AddAlert(&alerting.Alert{
									Name:     fmt.Sprintf("Android device disconnected (%s)", b.BuildSlave),
									Category: alerting.INFRA_ALERT,
									Message:  fmt.Sprintf(ANDROID_DISCONNECT, b.BuildSlave, b.Master, b.Builder, b.Number, b.BuildSlave, b.BuildSlave),
									Nag:      int64(3 * time.Hour),
									Actions:  actions,
								}); err != nil {
									glog.Error(err)
								}
								disconnectedAndroid = true
							}
						}
					}
				}
				if !disconnectedAndroid && !util.ContainsAny(b.Builder, hungSlavesIgnore) {
					// Hung buildslave?
					for _, s := range b.Steps {
						if s.Name == "steps" {
							continue
						}
						// If the step has been running for over an hour, it's probably hung.
						if s.Finished == 0 && time.Since(time.Unix(int64(s.Started), 0)) > hangTimePeriod {
							if err := am.AddAlert(&alerting.Alert{
								Name:        fmt.Sprintf("Possibly hung buildslave (%s)", b.BuildSlave),
								Category:    alerting.INFRA_ALERT,
								Message:     fmt.Sprintf(HUNG_BUILDSLAVE, b.BuildSlave, hangTimePeriod.String(), b.Master, b.Builder, b.Number, b.BuildSlave, b.BuildSlave),
								Nag:         int64(time.Hour),
								Actions:     actions,
								AutoDismiss: int64(10 * tickInterval),
							}); err != nil {
								glog.Error(err)
							}
						}
					}
				}
			}
		}
	}()

	// Failed update_scripts.
	go func() {
		lastSearch := time.Now()
		for range time.Tick(tickInterval) {
			glog.Infof("Searching for builds which failed update_scripts.")
			currentSearch := time.Now()
			builds, err := buildbot.GetBuildsFromDateRange(lastSearch, currentSearch)
			lastSearch = currentSearch
			if err != nil {
				glog.Error(err)
				continue
			}
			for _, b := range builds {
				for _, s := range b.Steps {
					if s.Name == "update_scripts" {
						if s.Results != 0 {
							if err := am.AddAlert(&alerting.Alert{
								Name:     "update_scripts failed",
								Category: alerting.INFRA_ALERT,
								Message:  fmt.Sprintf(UPDATE_SCRIPTS, b.Builder, b.Master, b.Builder, b.Number, b.Builder, b.BuildSlave),
								Actions:  actions,
							}); err != nil {
								glog.Error(err)
							}
						}
						break
					}
				}
			}
		}
	}()
}
Example #29
// testFindCommitsForBuild verifies that findCommitsForBuild correctly obtains
// the list of commits which were newly built in a given build.
func testFindCommitsForBuild(t *testing.T, local bool) {
	testutils.SkipIfShort(t)
	httpClient = testHttpClient
	d := clearDB(t, local)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// The test repo is laid out like this:
	//
	// *   06eb2a58139d3ff764f10232d5c8f9362d55e20f I (HEAD, master, Build #4)
	// *   ecb424466a4f3b040586a062c15ed58356f6590e F (Build #3)
	// |\
	// | * d30286d2254716d396073c177a754f9e152bbb52 H
	// | * 8d2d1247ef5d2b8a8d3394543df6c12a85881296 G (Build #2)
	// * | 67635e7015d74b06c00154f7061987f426349d9f E
	// * | 6d4811eddfa637fac0852c3a0801b773be1f260d D (Build #1)
	// * | d74dfd42a48325ab2f3d4a97278fc283036e0ea4 C (Build #6)
	// |/
	// *   4b822ebb7cedd90acbac6a45b897438746973a87 B (Build #0)
	// *   051955c355eb742550ddde4eccc3e90b6dc5b887 A
	//
	hashes := map[rune]string{
		'A': "051955c355eb742550ddde4eccc3e90b6dc5b887",
		'B': "4b822ebb7cedd90acbac6a45b897438746973a87",
		'C': "d74dfd42a48325ab2f3d4a97278fc283036e0ea4",
		'D': "6d4811eddfa637fac0852c3a0801b773be1f260d",
		'E': "67635e7015d74b06c00154f7061987f426349d9f",
		'F': "ecb424466a4f3b040586a062c15ed58356f6590e",
		'G': "8d2d1247ef5d2b8a8d3394543df6c12a85881296",
		'H': "d30286d2254716d396073c177a754f9e152bbb52",
		'I': "06eb2a58139d3ff764f10232d5c8f9362d55e20f",
	}

	// Test cases. Each test case builds on the previous cases.
	testCases := []struct {
		GotRevision string
		Expected    []string
		StoleFrom   int
		Stolen      []string
	}{
		// 0. The first build.
		{
			GotRevision: hashes['B'],
			Expected:    []string{hashes['A'], hashes['B']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 1. On a linear set of commits, with at least one previous build.
		{
			GotRevision: hashes['D'],
			Expected:    []string{hashes['D'], hashes['C']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 2. The first build on a new branch.
		{
			GotRevision: hashes['G'],
			Expected:    []string{hashes['G']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 3. After a merge.
		{
			GotRevision: hashes['F'],
			Expected:    []string{hashes['E'], hashes['H'], hashes['F']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 4. One last "normal" build.
		{
			GotRevision: hashes['I'],
			Expected:    []string{hashes['I']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 5. No GotRevision.
		{
			GotRevision: "",
			Expected:    []string{},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 6. Steal commits from a previously-ingested build.
		{
			GotRevision: hashes['C'],
			Expected:    []string{hashes['C']},
			StoleFrom:   1,
			Stolen:      []string{hashes['C']},
		},
	}
	master := "client.skia"
	builder := "Test-Ubuntu12-ShuttleA-GTX660-x86-Release"
	for buildNum, tc := range testCases {
		build, err := getBuildFromMaster(master, builder, buildNum, repos)
		assert.Nil(t, err)
		// Sanity check. Make sure we have the GotRevision we expect.
		assert.Equal(t, tc.GotRevision, build.GotRevision)
		gotRevProp, err := build.GetStringProperty("got_revision")
		assert.Nil(t, err)
		assert.Equal(t, tc.GotRevision, gotRevProp)

		assert.Nil(t, IngestBuild(d.DB(), build, repos))

		ingested, err := d.DB().GetBuildFromDB(master, builder, buildNum)
		assert.Nil(t, err)
		assert.NotNil(t, ingested)

		// Double-check the inserted build's GotRevision.
		assert.Equal(t, tc.GotRevision, ingested.GotRevision)
		gotRevProp, err = ingested.GetStringProperty("got_revision")
		assert.Nil(t, err)
		assert.Equal(t, tc.GotRevision, gotRevProp)

		// Verify that we got the build (and commits list) we expect.
		build.Commits = tc.Expected
		testutils.AssertDeepEqual(t, build, ingested)

		// Ensure that we can search by commit to find the build we inserted.
		for _, c := range hashes {
			expectBuild := util.In(c, tc.Expected)
			builds, err := d.DB().GetBuildsForCommits([]string{c}, nil)
			assert.Nil(t, err)
			if expectBuild {
				// Assert that we get the build we inserted.
				assert.Equal(t, 1, len(builds))
				assert.Equal(t, 1, len(builds[c]))
				testutils.AssertDeepEqual(t, ingested, builds[c][0])
			} else {
				// Assert that we didn't get the build we inserted.
				for _, gotBuild := range builds[c] {
					assert.NotEqual(t, ingested.Id(), gotBuild.Id())
				}
			}

			n, err := d.DB().GetBuildNumberForCommit(build.Master, build.Builder, c)
			assert.Nil(t, err)
			if expectBuild {
				assert.Equal(t, buildNum, n)
			} else {
				assert.NotEqual(t, buildNum, n)
			}
		}
	}

	// Extra: ensure that build #6 really stole the commit from #1.
	b, err := d.DB().GetBuildFromDB(master, builder, 1)
	assert.Nil(t, err)
	assert.NotNil(t, b)
	assert.False(t, util.In(hashes['C'], b.Commits), fmt.Sprintf("Expected not to find %s in %v", hashes['C'], b.Commits))
}
Example #30
// Succeeded returns true iff the roll succeeded (including dry run success).
func (a *AutoRollIssue) Succeeded() bool {
	return util.In(a.Result, SUCCESS_RESULTS)
}