// update syncs the source code repository and loads any new commits. func (c *CommitCache) update() (rv error) { defer timer.New("CommitCache.update()").Stop() glog.Info("Reloading commits.") if err := c.repo.Update(true, true); err != nil { return fmt.Errorf("Failed to update the repo: %v", err) } from := time.Time{} n := c.NumCommits() if n > 0 { last, err := c.Get(n - 1) if err != nil { return fmt.Errorf("Failed to get last commit: %v", err) } from = last.Timestamp } newCommitHashes := c.repo.From(from) glog.Infof("Processing %d new commits.", len(newCommitHashes)) newCommits := make([]*gitinfo.LongCommit, len(newCommitHashes)) if len(newCommitHashes) > 0 { for i, h := range newCommitHashes { d, err := c.repo.Details(h) if err != nil { return fmt.Errorf("Failed to obtain commit details for %s: %v", h, err) } newCommits[i] = d } } branchHeads, err := c.repo.GetBranches() if err != nil { return fmt.Errorf("Failed to read branch information from the repo: %v", err) } // Load new builds for the BuildCache. allCommits := append(c.Commits, newCommits...) buildCacheHashes := make([]string, 0, c.requestSize) for _, commit := range allCommits[len(allCommits)-c.requestSize:] { buildCacheHashes = append(buildCacheHashes, commit.Hash) } byId, byCommit, builderStatuses, err := build_cache.LoadData(buildCacheHashes) if err != nil { return fmt.Errorf("Failed to update BuildCache: %v", err) } // Update the cached values all at once at at the end. glog.Infof("Updating the cache.") // Write the cache to disk *after* unlocking it. defer func() { rv = c.toFile() }() defer timer.New(" CommitCache locked").Stop() c.mutex.Lock() defer c.mutex.Unlock() c.BranchHeads = branchHeads c.Commits = allCommits c.buildCache.UpdateWithData(byId, byCommit, builderStatuses) glog.Infof("Finished updating the cache.") return nil }
// RemoveChange, see ExpectationsStore interface. func (s *SQLExpectationsStore) RemoveChange(changedDigests map[string][]string) (retErr error) { defer timer.New("removing exp change").Stop() const markRemovedStmt = `UPDATE exp_test_change SET removed = IF(removed IS NULL, ?, removed) WHERE (name=?) AND (digest=?)` // start a transaction tx, err := s.vdb.DB.Begin() if err != nil { return err } defer func() { retErr = database.CommitOrRollback(tx, retErr) }() // Mark all the digests as removed. now := util.TimeStampMs() for testName, digests := range changedDigests { for _, digest := range digests { if _, err = tx.Exec(markRemovedStmt, now, testName, digest); err != nil { return err } } } return nil }
func addBuildCommentHandler(w http.ResponseWriter, r *http.Request) { defer timer.New("addBuildCommentHandler").Stop() if !userHasEditRights(r) { util.ReportError(w, r, fmt.Errorf("User does not have edit rights."), "User does not have edit rights.") return } w.Header().Set("Content-Type", "application/json") cache, err := getCommitCache(w, r) if err != nil { return } buildId, err := strconv.ParseInt(mux.Vars(r)["buildId"], 10, 32) if err != nil { util.ReportError(w, r, err, fmt.Sprintf("Invalid build id: %v", err)) return } comment := struct { Comment string `json:"comment"` }{} if err := json.NewDecoder(r.Body).Decode(&comment); err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to add comment: %v", err)) return } defer util.Close(r.Body) c := buildbot.BuildComment{ BuildId: int(buildId), User: login.LoggedInAs(r), Timestamp: float64(time.Now().UTC().Unix()), Message: comment.Comment, } if err := cache.AddBuildComment(int(buildId), &c); err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to add comment: %v", err)) return } }
func deleteBuildCommentHandler(w http.ResponseWriter, r *http.Request) { defer timer.New("deleteBuildCommentHandler").Stop() if !userHasEditRights(r) { util.ReportError(w, r, fmt.Errorf("User does not have edit rights."), "User does not have edit rights.") return } w.Header().Set("Content-Type", "application/json") cache, err := getCommitCache(w, r) if err != nil { return } buildId, err := strconv.ParseInt(mux.Vars(r)["buildId"], 10, 32) if err != nil { util.ReportError(w, r, err, fmt.Sprintf("Invalid build id: %v", err)) return } commentId, err := strconv.ParseInt(mux.Vars(r)["commentId"], 10, 32) if err != nil { util.ReportError(w, r, err, fmt.Sprintf("Invalid comment id: %v", err)) return } if err := cache.DeleteBuildComment(int(buildId), int(commentId)); err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to delete comment: %v", err)) return } }
func addCommitCommentHandler(w http.ResponseWriter, r *http.Request) { defer timer.New("addCommitCommentHandler").Stop() if !userHasEditRights(r) { util.ReportError(w, r, fmt.Errorf("User does not have edit rights."), "User does not have edit rights.") return } w.Header().Set("Content-Type", "application/json") commit := mux.Vars(r)["commit"] comment := struct { Comment string `json:"comment"` }{} if err := json.NewDecoder(r.Body).Decode(&comment); err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to add comment: %v", err)) return } defer util.Close(r.Body) c := buildbot.CommitComment{ Commit: commit, User: login.LoggedInAs(r), Timestamp: time.Now().UTC(), Message: comment.Comment, } if err := db.PutCommitComment(&c); err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to add commit comment: %v", err)) return } }
// getCandidatesForBuilder finds all BuildCandidates for the given builder, in order. func (q *BuildQueue) getCandidatesForBuilder(bf *buildFinder, recentCommits []string, now time.Time) ([]*BuildCandidate, error) { defer timer.New(fmt.Sprintf("getCandidatesForBuilder(%s)", bf.Builder)).Stop() repo, err := q.repos.Repo(bf.Repo) if err != nil { return nil, err } candidates := []*BuildCandidate{} for { score, newBuild, stoleFrom, err := q.getBestCandidate(bf, recentCommits, now) if err != nil { return nil, fmt.Errorf("Failed to get build candidates for %s: %v", bf.Builder, err) } if score < q.scoreThreshold { break } d, err := repo.Details(newBuild.GotRevision) if err != nil { return nil, err } // "insert" the new build. bf.add(newBuild) if stoleFrom != nil { bf.add(stoleFrom) } candidates = append(candidates, &BuildCandidate{ Author: d.Author, Builder: newBuild.Builder, Commit: newBuild.GotRevision, Score: score, Repo: bf.Repo, }) } return candidates, nil }
// New creates and returns a new CommitCache which watches the given repo. // The initial update will load ALL commits from the repository, so expect // this to be slow. func New(repo *gitinfo.GitInfo, cacheFile string, requestSize int) (*CommitCache, error) { defer timer.New("commit_cache.New()").Stop() c, err := fromFile(cacheFile) if err != nil { glog.Warningf("Failed to read commit cache from file; starting from scratch. Error: %v", err) c = &CommitCache{} } c.buildCache = &build_cache.BuildCache{} c.cacheFile = cacheFile c.repo = repo c.requestSize = requestSize // Update the cache. if err := c.update(); err != nil { return nil, err } // Update in a loop. go func() { for _ = range time.Tick(time.Minute) { if err := c.update(); err != nil { glog.Errorf("Failed to update commit cache: %v", err) } } }() return c, nil }
// LoadData loads the build data for the given commits. func LoadData(commits []string) (map[int]*buildbot.Build, map[string]map[string]*buildbot.BuildSummary, map[string][]*buildbot.BuilderComment, error) { defer timer.New("build_cache.LoadData()").Stop() builds, err := buildbot.GetBuildsForCommits(commits, nil) if err != nil { return nil, nil, nil, err } byId := map[int]*buildbot.Build{} byCommit := map[string]map[string]*buildbot.BuildSummary{} builders := map[string]bool{} for hash, buildList := range builds { byBuilder := map[string]*buildbot.BuildSummary{} for _, b := range buildList { byId[b.Id] = b if !util.AnyMatch(BOT_BLACKLIST, b.Builder) { byBuilder[b.Builder] = b.GetSummary() builders[b.Builder] = true } } byCommit[hash] = byBuilder } builderList := make([]string, 0, len(builders)) for b, _ := range builders { builderList = append(builderList, b) } builderComments, err := buildbot.GetBuildersComments(builderList) if err != nil { return nil, nil, nil, err } return byId, byCommit, builderComments, nil }
// Run executes the function for the given test and returns an error if it fails. func (t test) Run() error { defer timer.New(t.Name).Stop() err, output := t.run() if err != nil { return fmt.Errorf(TEST_FAILURE, t.Name, t.Cmd, err, output) } return nil }
// UpdateWithData replaces the contents of the BuildCache with the given // data. Not intended to be used by consumers of BuildCache, but exists to // allow for loading and storing the cache data separately so that the cache // may be locked for the minimum amount of time. func (c *BuildCache) UpdateWithData(byId map[int]*buildbot.Build, byCommit map[string]map[string]*buildbot.BuildSummary, builders map[string][]*buildbot.BuilderComment) { defer timer.New(" BuildCache locked").Stop() c.mutex.Lock() defer c.mutex.Unlock() c.byId = byId c.byCommit = byCommit c.builders = builders }
// toFile saves the CommitCache to a file. func (c *CommitCache) toFile() error { defer timer.New("commit_cache.toFile()").Stop() f, err := os.Create(c.cacheFile) if err != nil { return err } defer util.Close(f) if err := gob.NewEncoder(f).Encode(c); err != nil { return err } return nil }
func infraHandler(w http.ResponseWriter, r *http.Request) { defer timer.New("infraHandler").Stop() w.Header().Set("Content-Type", "text/html") // Don't use cached templates in testing mode. if *testing { reloadTemplates() } if err := infraTemplate.Execute(w, struct{}{}); err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to expand template: %v", err)) } }
func hostsHandler(w http.ResponseWriter, r *http.Request) { defer timer.New("hostsHandler").Stop() w.Header().Set("Content-Type", "text/html") // Don't use cached templates in testing mode. if *testing { reloadTemplates() } if err := hostsTemplate.Execute(w, struct{}{}); err != nil { glog.Errorf("Failed to write or encode output: %s", err) } }
// fromFile attempts to load the CommitCache from the given file. func fromFile(cacheFile string) (*CommitCache, error) { defer timer.New("commit_cache.fromFile()").Stop() c := CommitCache{} if _, err := os.Stat(cacheFile); err != nil { return nil, fmt.Errorf("Could not stat cache file: %v", err) } f, err := os.Open(cacheFile) if err != nil { return nil, fmt.Errorf("Failed to open cache file %s: %v", cacheFile, err) } defer util.Close(f) if err := gob.NewDecoder(f).Decode(&c); err != nil { return nil, fmt.Errorf("Failed to read cache file %s: %v", cacheFile, err) } return &c, nil }
// GetBuildsFromDateRange retrieves all builds which finished in the given date range. func GetBuildsFromDateRange(start, end time.Time) ([]*Build, error) { defer timer.New("GetBuildsFromDateRange").Stop() var ids []int if err := DB.Select(&ids, fmt.Sprintf("SELECT id FROM %s WHERE started > ? and started < ?", TABLE_BUILDS), float64(start.UTC().Unix()), float64(end.UTC().Unix())); err != nil { return nil, fmt.Errorf("Failed to obtain builds from date range: %v", err) } builds, err := GetBuildsFromDB(ids) if err != nil { return nil, err } rv := make([]*Build, 0, len(builds)) for _, b := range builds { rv = append(rv, b) } return rv, nil }
// oneStep does a single step, calculating all the paramsets from the latest tile and tallies. // // Returns the paramsets for both the tile with and without ignored traces included. func oneStep(tallies *tally.Tallies, storages *storage.Storage) (map[string]map[string][]string, map[string]map[string][]string, error) { defer timer.New("paramsets").Stop() tile, err := storages.GetLastTileTrimmed(false) if err != nil { return nil, nil, fmt.Errorf("Failed to get tile: %s", err) } byTrace := byTraceForTile(tile, tallies.ByTrace()) tile, err = storages.GetLastTileTrimmed(true) if err != nil { return nil, nil, fmt.Errorf("Failed to get tile: %s", err) } byTraceIncludeIgnored := byTraceForTile(tile, tallies.ByTrace()) return byTrace, byTraceIncludeIgnored, nil }
// LoggingGzipRequestResponse records parts of the request and the response to the logs. func LoggingGzipRequestResponse(h http.Handler) http.Handler { // Closure to capture the request. f := func(w http.ResponseWriter, r *http.Request) { defer func() { if err := recover(); err != nil { const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] glog.Errorf("panic serving %v: %v\n%s", r.URL.Path, err, buf) } }() defer timer.New(fmt.Sprintf("Request: %s %s %#v Content Length: %d Latency:", r.URL.Path, r.Method, r.URL, r.ContentLength)).Stop() h.ServeHTTP(w, r) } return autogzip.Handle(recordResponse(http.HandlerFunc(f))) }
// newHistorian creates a new instance of historian, starts its background
// process for gathering digest info and, if nTilesToBackfill is positive,
// kicks off a backfill of digest info for that many tiles.
func newHistorian(storages *storage.Storage, nTilesToBackfill int) (*historian, error) {
	defer timer.New("historian").Stop()

	ret := &historian{
		storages: storages,
	}

	// Start running the background process to gather digestinfo.
	if err := ret.start(); err != nil {
		return nil, err
	}

	// If there is at least one tile to backfill digestinfo for, start the
	// backfill process.
	if nTilesToBackfill > 0 {
		ret.backFillDigestInfo(nTilesToBackfill)
	}

	return ret, nil
}
func commitsJsonHandler(w http.ResponseWriter, r *http.Request) { defer timer.New("commitsJsonHandler").Stop() w.Header().Set("Content-Type", "application/json") cache, err := getCommitCache(w, r) if err != nil { return } commitsToLoad := DEFAULT_COMMITS_TO_LOAD n, err := getIntParam("n", r) if err != nil { util.ReportError(w, r, err, fmt.Sprintf("Invalid parameter: %v", err)) return } if n != nil { commitsToLoad = *n } if err := cache.LastNAsJson(w, commitsToLoad); err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to load commits from cache: %v", err)) return } }
// update is the inner function which does all the work for Update. It accepts // a time.Time so that time.Now() can be faked for testing. func (q *BuildQueue) update(now time.Time) error { glog.Info("Updating build queue.") defer timer.New("BuildQueue.update()").Stop() queue := map[string][]*BuildCandidate{} errs := map[string]error{} mutex := sync.Mutex{} var wg sync.WaitGroup for _, repoUrl := range q.repos.Repos() { wg.Add(1) go func(repoUrl string) { defer wg.Done() candidates, err := q.updateRepo(repoUrl, now) mutex.Lock() defer mutex.Unlock() if err != nil { errs[repoUrl] = err return } for k, v := range candidates { queue[k] = v } }(repoUrl) } wg.Wait() if len(errs) > 0 { msg := "Failed to update repos:" for repoUrl, err := range errs { msg += fmt.Sprintf("\n%s: %v", repoUrl, err) } return fmt.Errorf(msg) } // Update the queues. q.lock.Lock() defer q.lock.Unlock() q.queue = queue return nil }
// update is the inner function which does all the work for Update. It accepts // a time.Time so that time.Now() can be faked for testing. func (q *BuildQueue) update(now time.Time) error { glog.Info("Updating build queue.") defer timer.New("BuildQueue.update()").Stop() if err := q.repos.Update(); err != nil { return err } queue := map[string][]*BuildCandidate{} for _, repoUrl := range REPOS { repo, err := q.repos.Repo(repoUrl) if err != nil { return err } candidates, err := q.updateRepo(repoUrl, repo, now) if err != nil { return err } for k, v := range candidates { if _, ok := queue[k]; !ok { queue[k] = v } else { queue[k] = append(queue[k], v...) } } } // Sort the priorities. for _, prioritiesForBuilder := range queue { sort.Sort(BuildCandidateSlice(prioritiesForBuilder)) } // Update the queues. q.lock.Lock() defer q.lock.Unlock() q.queue = queue return nil }
// tallyTile computes a map[tracename]Tally and map[testname]Tally from the given Tile. func tallyTile(tile *tiling.Tile) (map[string]Tally, map[string]Tally) { defer timer.New("tally").Stop() traceTally := map[string]Tally{} testTally := map[string]Tally{} for k, tr := range tile.Traces { gtr := tr.(*types.GoldenTrace) tally := Tally{} for _, s := range gtr.Values { if s == types.MISSING_DIGEST { continue } if n, ok := tally[s]; ok { tally[s] = n + 1 } else { tally[s] = 1 } } traceTally[k] = tally testName := tr.Params()[types.PRIMARY_KEY_FIELD] if t, ok := testTally[testName]; ok { for digest, n := range tally { if _, ok := t[digest]; ok { t[digest] += n } else { t[digest] = n } } } else { cp := Tally{} for k, v := range tally { cp[k] = v } testTally[testName] = cp } } return traceTally, testTally }
func addBuilderCommentHandler(w http.ResponseWriter, r *http.Request) { defer timer.New("addBuilderCommentHandler").Stop() if !userHasEditRights(r) { util.ReportError(w, r, fmt.Errorf("User does not have edit rights."), "User does not have edit rights.") return } w.Header().Set("Content-Type", "application/json") cache, err := getCommitCache(w, r) if err != nil { return } builder := mux.Vars(r)["builder"] comment := struct { Comment string `json:"comment"` Flaky bool `json:"flaky"` IgnoreFailure bool `json:"ignoreFailure"` }{} if err := json.NewDecoder(r.Body).Decode(&comment); err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to add comment: %v", err)) return } defer util.Close(r.Body) c := buildbot.BuilderComment{ Builder: builder, User: login.LoggedInAs(r), Timestamp: time.Now().UTC(), Flaky: comment.Flaky, IgnoreFailure: comment.IgnoreFailure, Message: comment.Comment, } if err := cache.AddBuilderComment(builder, &c); err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to add builder comment: %v", err)) return } }
// Search returns a slice of DigestInfo with all the digests that match the given query parameters.
//
// Note that unlike CalcSummaries the results aren't restricted by test name.
// Also note that the result can include positive and negative digests.
func (s *Summaries) Search(query string, includeIgnores bool, head bool, pos bool, neg bool, unt bool) ([]DigestInfo, error) {
	t := timer.New("Search:GetLastTileTrimmed")
	tile, err := s.storages.GetLastTileTrimmed(includeIgnores)
	t.Stop()
	if err != nil {
		return nil, fmt.Errorf("Couldn't retrieve tile: %s", err)
	}
	q, err := url.ParseQuery(query)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse Query in Search: %s", err)
	}
	t = timer.New("Search:Expectations")
	e, err := s.storages.ExpectationsStore.Get()
	t.Stop()
	if err != nil {
		return nil, fmt.Errorf("Couldn't get expectations: %s", err)
	}

	// Filter down to just the traces we are interested in, based on query.
	filtered := map[string]tiling.Trace{}
	t = timer.New("Filter Traces")
	for id, tr := range tile.Traces {
		if tiling.Matches(tr, q) {
			filtered[id] = tr
		}
	}
	t.Stop()

	traceTally := s.tallies.ByTrace()

	// Find all test:digest's in the filtered traces. Keys of the matches
	// set are "<testname>:<digest>" strings.
	matches := map[string]bool{}
	t = timer.New("Tally up the filtered traces")
	lastCommitIndex := tile.LastCommitIndex()
	for id, trace := range filtered {
		test := trace.Params()[types.PRIMARY_KEY_FIELD]
		if head {
			// Only consider the digest at head: find the last non-missing
			// value in the trace.
			for i := lastCommitIndex; i >= 0; i-- {
				if trace.IsMissing(i) {
					continue
				} else {
					matches[test+":"+trace.(*types.GoldenTrace).Values[i]] = true
					break
				}
			}
		} else {
			// Use the precomputed per-trace tally, if one exists, to find
			// every digest that ever appeared in this trace.
			if t, ok := traceTally[id]; ok {
				for d, _ := range t {
					matches[test+":"+d] = true
				}
			}
		}
	}
	t.Stop()

	// Now create DigestInfo for each test:digest found, filtering out
	// digests that don't match the requested triage classifications
	// (pos/neg/unt).
	ret := []DigestInfo{}
	for key, _ := range matches {
		testDigest := strings.Split(key, ":")
		if len(testDigest) != 2 {
			glog.Errorf("Invalid test name or digest value: %s", key)
			continue
		}
		test := testDigest[0]
		digest := testDigest[1]
		class := e.Classification(test, digest)
		switch {
		case class == types.NEGATIVE && !neg:
			continue
		case class == types.POSITIVE && !pos:
			continue
		case class == types.UNTRIAGED && !unt:
			continue
		}
		ret = append(ret, DigestInfo{
			Test:   test,
			Digest: digest,
		})
	}
	return ret, nil
}
// CalcSummaries returns a Summary for each test that matches the given input filters.
//
// testNames
//   If not nil or empty then restrict the results to only tests that appear in this slice.
// query
//   URL encoded paramset to use for filtering.
// includeIgnores
//   Boolean, if true then include all digests in the results, including ones normally hidden
//   by the ignores list.
// head
//   Only consider digests at head if true.
//
func (s *Summaries) CalcSummaries(testNames []string, query string, includeIgnores bool, head bool) (map[string]*Summary, error) {
	defer timer.New("CalcSummaries").Stop()
	glog.Infof("CalcSummaries: includeIgnores %v head %v", includeIgnores, head)

	t := timer.New("CalcSummaries:GetLastTileTrimmed")
	tile, err := s.storages.GetLastTileTrimmed(includeIgnores)
	t.Stop()
	if err != nil {
		return nil, fmt.Errorf("Couldn't retrieve tile: %s", err)
	}
	q, err := url.ParseQuery(query)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse Query in CalcSummaries: %s", err)
	}

	ret := map[string]*Summary{}
	t = timer.New("CalcSummaries:Expectations")
	e, err := s.storages.ExpectationsStore.Get()
	t.Stop()
	if err != nil {
		return nil, fmt.Errorf("Couldn't get expectations: %s", err)
	}

	// Filter down to just the traces we are interested in, based on query.
	// Traces are grouped by test name.
	filtered := map[string][]*TraceID{}
	t = timer.New("Filter Traces")
	for id, tr := range tile.Traces {
		name := tr.Params()[types.PRIMARY_KEY_FIELD]
		if len(testNames) > 0 && !util.In(name, testNames) {
			continue
		}
		if tiling.Matches(tr, q) {
			if slice, ok := filtered[name]; ok {
				filtered[name] = append(slice, &TraceID{tr: tr, id: id})
			} else {
				filtered[name] = []*TraceID{&TraceID{tr: tr, id: id}}
			}
		}
	}
	t.Stop()

	traceTally := s.tallies.ByTrace()

	// Now create summaries for each test using the filtered set of traces.
	t = timer.New("Tally up the filtered traces")
	lastCommitIndex := tile.LastCommitIndex()
	for name, traces := range filtered {
		digests := map[string]bool{}
		corpus := ""
		for _, trid := range traces {
			// NOTE(review): corpus takes the "source_type" of whichever
			// trace iterates last — this assumes all traces of a test share
			// one corpus; confirm against trace ingestion.
			corpus = trid.tr.Params()["source_type"]
			if head {
				// Find the last non-missing value in the trace.
				for i := lastCommitIndex; i >= 0; i-- {
					if trid.tr.IsMissing(i) {
						continue
					} else {
						digests[trid.tr.(*types.GoldenTrace).Values[i]] = true
						break
					}
				}
			} else {
				// Use the traceTally if available, otherwise just inspect the trace.
				if t, ok := traceTally[trid.id]; ok {
					for k, _ := range t {
						digests[k] = true
					}
				} else {
					for i := lastCommitIndex; i >= 0; i-- {
						if !trid.tr.IsMissing(i) {
							digests[trid.tr.(*types.GoldenTrace).Values[i]] = true
						}
					}
				}
			}
		}
		ret[name] = s.makeSummary(name, e, s.storages.DiffStore, corpus, util.KeysOfStringSet(digests))
	}
	t.Stop()

	return ret, nil
}
// buildProgressHandler returns the number of finished builds at the given // commit, compared to that of an older commit. func buildProgressHandler(w http.ResponseWriter, r *http.Request) { defer timer.New("buildProgressHandler").Stop() w.Header().Set("Content-Type", "application/json") w.Header().Set("Access-Control-Allow-Origin", "*") // Get the commit cache. cache, err := getCommitCache(w, r) if err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to get the commit cache.")) return } // Get the number of finished builds for the requested commit. hash := r.FormValue("commit") buildsAtNewCommit, err := cache.GetBuildsForCommit(hash) if err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to get the number of finished builds.")) return } finishedAtNewCommit := 0 for _, b := range buildsAtNewCommit { if b.Finished { finishedAtNewCommit++ } } // Find an older commit for which we'll assume that all builds have completed. oldCommit, err := cache.Get(cache.NumCommits() - DEFAULT_COMMITS_TO_LOAD) if err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to get an old commit from the cache.")) return } buildsAtOldCommit, err := cache.GetBuildsForCommit(oldCommit.Hash) if err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to get the number of finished builds.")) return } finishedAtOldCommit := 0 for _, b := range buildsAtOldCommit { if b.Finished { finishedAtOldCommit++ } } res := struct { OldCommit string `json:"oldCommit"` FinishedAtOldCommit int `json:"finishedAtOldCommit"` NewCommit string `json:"newCommit"` FinishedAtNewCommit int `json:"finishedAtNewCommit"` FinishedProportion float64 `json:"finishedProportion"` }{ OldCommit: oldCommit.Hash, FinishedAtOldCommit: finishedAtOldCommit, NewCommit: hash, FinishedAtNewCommit: finishedAtNewCommit, FinishedProportion: float64(finishedAtNewCommit) / float64(finishedAtOldCommit), } if err := json.NewEncoder(w).Encode(res); err != nil { util.ReportError(w, r, err, fmt.Sprintf("Failed to encode 
JSON.")) return } }
// buildsJsonHandler returns, as JSON, all builds which finished in the time
// range given by the optional "start" and "end" query parameters (Unix
// seconds). end defaults to now; start defaults to 24 hours before end.
func buildsJsonHandler(w http.ResponseWriter, r *http.Request) {
	defer timer.New("buildsHandler").Stop()
	w.Header().Set("Content-Type", "application/json")

	start, err := getIntParam("start", r)
	if err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Invalid value for parameter \"start\": %v", err))
		return
	}
	end, err := getIntParam("end", r)
	if err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Invalid value for parameter \"end\": %v", err))
		return
	}

	// Apply the defaults for any missing parameter.
	var startTime time.Time
	var endTime time.Time
	if end == nil {
		endTime = time.Now()
	} else {
		endTime = time.Unix(int64(*end), 0)
	}
	if start == nil {
		startTime = endTime.AddDate(0, 0, -1)
	} else {
		startTime = time.Unix(int64(*start), 0)
	}

	// Fetch the builds.
	builds, err := db.GetBuildsFromDateRange(startTime, endTime)
	if err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Failed to load builds: %v", err))
		return
	}

	// Shrink the builds.
	// TODO(borenet): Can we share build-shrinking code with the main status
	// page?

	// TinyBuildStep is a struct containing a small subset of a BuildStep's fields.
	type TinyBuildStep struct {
		Name     string
		Started  time.Time
		Finished time.Time
		Results  int
	}

	// TinyBuild is a struct containing a small subset of a Build's fields.
	type TinyBuild struct {
		Builder    string
		BuildSlave string
		Master     string
		Number     int
		Properties [][]interface{} `json:"properties"`
		Started    time.Time
		Finished   time.Time
		Results    int
		Steps      []*TinyBuildStep
	}

	// Project each build (and its steps) onto the reduced types.
	rv := make([]*TinyBuild, 0, len(builds))
	for _, b := range builds {
		steps := make([]*TinyBuildStep, 0, len(b.Steps))
		for _, s := range b.Steps {
			steps = append(steps, &TinyBuildStep{
				Name:     s.Name,
				Started:  s.Started,
				Finished: s.Finished,
				Results:  s.Results,
			})
		}
		rv = append(rv, &TinyBuild{
			Builder:    b.Builder,
			BuildSlave: b.BuildSlave,
			Master:     b.Master,
			Number:     b.Number,
			Properties: b.Properties,
			Started:    b.Started,
			Finished:   b.Finished,
			Results:    b.Results,
			Steps:      steps,
		})
	}

	// This defer is registered just before the final statement, so the timer
	// effectively measures the JSON encode until the function returns.
	defer timer.New("buildsHandler_encode").Stop()
	if err := json.NewEncoder(w).Encode(rv); err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
		return
	}
}
// updateBlame reads from the provided tile and updates the current
// blame lists.
func (b *Blamer) updateBlame(tile *tiling.Tile) error {
	exp, err := b.storages.ExpectationsStore.Get()
	if err != nil {
		return err
	}

	defer timer.New("blame").Stop()

	// Note: blameStart and blameEnd are continuously updated to contain the
	// smallest start and end index of the ranges for a testName/digest pair.
	blameStart := map[string]map[string]int{}
	blameEnd := map[string]map[string]int{}

	// blameRange stores the candidate ranges for a testName/digest pair.
	blameRange := map[string]map[string][][]int{}
	firstCommit := tile.Commits[0]
	tileLen := tile.LastCommitIndex() + 1
	ret := map[string]map[string]*BlameDistribution{}

	for _, trace := range tile.Traces {
		gtr := trace.(*types.GoldenTrace)
		testName := gtr.Params()[types.PRIMARY_KEY_FIELD]

		// lastIdx tracks the index of the last digest that is definitely
		// not in the blamelist.
		lastIdx := -1
		found := map[string]bool{}
		for idx, digest := range gtr.Values[:tileLen] {
			if digest == types.MISSING_DIGEST {
				continue
			}

			status := exp.Classification(testName, digest)
			if (status == types.UNTRIAGED) && !found[digest] {
				found[digest] = true

				var startIdx int
				endIdx := idx

				// If we have only seen empty digests, then we do not
				// consider any digest before the current one.
				if lastIdx == -1 {
					startIdx = idx
				} else {
					startIdx = lastIdx + 1
				}

				// Get the info about this digest.
				digestInfo, err := b.storages.GetOrUpdateDigestInfo(testName, digest, tile.Commits[idx])
				if err != nil {
					return err
				}

				// Check if the digest was first seen outside the current tile.
				isOld := digestInfo.First < firstCommit.CommitTime
				commitRange := []int{startIdx, endIdx}
				if blameStartFound, ok := blameStart[testName]; !ok {
					// First digest seen for this test: initialize all maps.
					blameStart[testName] = map[string]int{digest: startIdx}
					blameEnd[testName] = map[string]int{digest: endIdx}
					blameRange[testName] = map[string][][]int{digest: [][]int{commitRange}}
					ret[testName] = map[string]*BlameDistribution{digest: &BlameDistribution{Old: isOld}}
				} else if currentStart, ok := blameStartFound[digest]; !ok {
					// First time we see this digest for a known test.
					blameStart[testName][digest] = startIdx
					blameEnd[testName][digest] = endIdx
					blameRange[testName][digest] = [][]int{commitRange}
					ret[testName][digest] = &BlameDistribution{Old: isOld}
				} else {
					// Merge with the existing range for this test/digest.
					// NOTE(review): both bounds shrink via MinInt here —
					// verify that taking the minimum (not maximum) of the
					// end index is intended.
					blameStart[testName][digest] = util.MinInt(currentStart, startIdx)
					blameEnd[testName][digest] = util.MinInt(blameEnd[testName][digest], endIdx)
					blameRange[testName][digest] = append(blameRange[testName][digest], commitRange)
					ret[testName][digest].Old = isOld || ret[testName][digest].Old
				}
			}
			lastIdx = idx
		}
	}

	commits := tile.Commits[:tileLen]
	for testName, digests := range blameRange {
		for digest, commitRanges := range digests {
			start := blameStart[testName][digest]
			end := blameEnd[testName][digest]
			freq := make([]int, len(commits)-start)
			for _, commitRange := range commitRanges {
				// If the commit range is nil, we cannot calculate a
				// blamelist.
				if commitRange == nil {
					freq = []int{}
					break
				}

				// Calculate the blame.
				idxEnd := util.MinInt(commitRange[1], end)
				for i := commitRange[0]; i <= idxEnd; i++ {
					freq[i-start]++
				}
			}
			ret[testName][digest].Freq = freq
		}
	}

	// Swap out the old blame lists for the new ones.
	b.mutex.Lock()
	b.testBlameLists, b.commits = ret, commits
	b.mutex.Unlock()
	return nil
}
func Init(storages *storage.Storage, summaries *summary.Summaries, tallies *tally.Tallies) error { exp, err := storages.ExpectationsStore.Get() if err != nil { return err } go func() { oneRun := func() { t := timer.New("warmer one loop") for test, sum := range summaries.Get() { for _, digest := range sum.UntHashes { t := tallies.ByTest()[test] if t != nil { // Calculate the closest digest for the side effect of filling in the filediffstore cache. digesttools.ClosestDigest(test, digest, exp, t, storages.DiffStore, types.POSITIVE) digesttools.ClosestDigest(test, digest, exp, t, storages.DiffStore, types.NEGATIVE) } } } t.Stop() if newExp, err := storages.ExpectationsStore.Get(); err != nil { glog.Errorf("warmer: Failed to get expectations: %s", err) } else { exp = newExp } // Make sure all images are downloaded. This is necessary, because // the front-end doesn't get URLs (generated via DiffStore.AbsPath) // which ensures that the image has been downloaded. // TODO(stephana): Remove this once the new diffstore is in place. tile, err := storages.GetLastTileTrimmed(true) if err != nil { glog.Errorf("Error retrieving tile: %s", err) } tileLen := tile.LastCommitIndex() + 1 traceDigests := make(map[string]bool, tileLen) for _, trace := range tile.Traces { gTrace := trace.(*types.GoldenTrace) for _, digest := range gTrace.Values { if digest != types.MISSING_DIGEST { traceDigests[digest] = true } } } digests := util.KeysOfStringSet(traceDigests) glog.Infof("FOUND %d digests to fetch.", len(digests)) storages.DiffStore.AbsPath(digests) if err := warmTrybotDigests(storages, traceDigests); err != nil { glog.Errorf("Error retrieving trybot digests: %s", err) return } } oneRun() for _ = range time.Tick(time.Minute) { oneRun() } }() return nil }
// updateRepo syncs the given repo and returns a set of BuildCandidates for
// each builder which uses it, keyed by builder name.
func (q *BuildQueue) updateRepo(repoUrl string, now time.Time) (map[string][]*BuildCandidate, error) {
	defer timer.New("BuildQueue.updateRepo()").Stop()
	errMsg := "Failed to update the repo: %v"

	// Sync/update the code.
	repo, err := q.repos.Repo(repoUrl)
	if err != nil {
		return nil, fmt.Errorf(errMsg, err)
	}
	if err := repo.Update(true, true); err != nil {
		return nil, fmt.Errorf(errMsg, err)
	}

	// Get the details for all recent commits.
	from := now.Add(-q.period)
	if q.period == PERIOD_FOREVER {
		from = time.Unix(0, 0)
	}
	recentCommits := util.Reverse(repo.From(from))

	// Pre-load commit details, from a larger window than we actually care about.
	fromPreload := now.Add(time.Duration(int64(-1.5 * float64(q.period))))
	if q.period == PERIOD_FOREVER {
		fromPreload = time.Unix(0, 0)
	}
	recentCommitsPreload := repo.From(fromPreload)
	for _, c := range recentCommitsPreload {
		if _, err := repo.Details(c); err != nil {
			return nil, fmt.Errorf(errMsg, err)
		}
	}

	// Get all builds associated with the recent commits.
	buildsByCommit, err := buildbot.GetBuildsForCommits(recentCommitsPreload, map[int]bool{})
	if err != nil {
		return nil, fmt.Errorf(errMsg, err)
	}

	// Create buildCaches for each builder.
	buildCaches := map[string]*buildCache{}
	for _, buildsForCommit := range buildsByCommit {
		for _, build := range buildsForCommit {
			if util.AnyMatch(q.botBlacklist, build.Builder) {
				glog.Infof("Skipping blacklisted builder %s", build.Builder)
				continue
			}
			if _, ok := buildCaches[build.Builder]; !ok {
				bc, err := newBuildCache(build.Builder, build.Master, build.Repository)
				if err != nil {
					return nil, fmt.Errorf(errMsg, err)
				}
				buildCaches[build.Builder] = bc
			}
			if err := buildCaches[build.Builder].Put(build); err != nil {
				return nil, err
			}
		}
	}

	// Find candidates for each builder, in parallel; results and errors are
	// collected under the mutex.
	candidates := map[string][]*BuildCandidate{}
	errs := map[string]error{}
	mutex := sync.Mutex{}
	var wg sync.WaitGroup
	for builder, finder := range buildCaches {
		wg.Add(1)
		go func(b string, bc *buildCache) {
			defer wg.Done()
			c, err := q.getCandidatesForBuilder(bc, recentCommits, now)
			mutex.Lock()
			defer mutex.Unlock()
			if err != nil {
				errs[b] = err
				return
			}
			candidates[b] = c
		}(builder, finder)
	}
	wg.Wait()
	if len(errs) > 0 {
		msg := "Failed to update the repo:"
		for _, err := range errs {
			msg += fmt.Sprintf("\n%v", err)
		}
		// NOTE(review): fmt.Errorf(msg) passes a non-constant format string
		// (go vet printf); a '%' in an underlying error would be
		// misinterpreted — consider fmt.Errorf("%s", msg).
		return nil, fmt.Errorf(msg)
	}
	return candidates, nil
}