func testBuildQueue(t *testing.T, timeDecay24Hr float64, expectations []*buildQueueExpect, testInsert bool) {
	testutils.SkipIfShort(t)

	// Initialize the buildbot database.
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()
	repos := gitinfo.NewRepoMap(tr.Dir)
	repo, err := repos.Repo(TEST_REPO)
	assert.Nil(t, err)
	assert.Nil(t, repos.Update())

	// Create the BuildQueue.
	q, err := NewBuildQueue(PERIOD_FOREVER, repos, DEFAULT_SCORE_THRESHOLD, timeDecay24Hr, []*regexp.Regexp{}, d.db)
	assert.Nil(t, err)

	// Fake time.Now()
	details, err := repo.Details(hashes['I'], false)
	assert.Nil(t, err)
	now := details.Timestamp.Add(1 * time.Hour)

	// Update the queue.
	assert.Nil(t, q.update(now))

	// Ensure that we get the expected BuildCandidate at each step. Insert
	// each BuildCandidate into the buildbot database to simulate actually
	// running builds.
	buildNum := 0
	for _, expected := range expectations {
		bc, err := q.Pop([]string{TEST_BUILDER})
		assert.Equal(t, expected.err, err)
		if err != nil {
			break
		}
		glog.Infof("\n%v\n%v", expected.bc, bc)
		assert.True(t, reflect.DeepEqual(expected.bc, bc))
		if testInsert || buildNum == 0 {
			// Actually insert a build, as if we're really using the scheduler.
			// Do this even if we're not testing insertion, because if we don't,
			// the queue won't know about this builder.
			b := &buildbot.Build{
				Builder:     bc.Builder,
				Master:      "fake",
				Number:      buildNum,
				BuildSlave:  "fake",
				Branch:      "master",
				GotRevision: bc.Commit,
				Repository:  TEST_REPO,
			}
			assert.Nil(t, buildbot.IngestBuild(d.db, b, repos))
			buildNum++
			assert.Nil(t, q.update(now))
		}
	}
}
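// TestBuildQueueSketch is a minimal usage sketch, not taken from the original
// source, showing how a concrete test could drive the four-argument
// testBuildQueue above. Only the (t, timeDecay24Hr, expectations, testInsert)
// signature and the bc/err fields of buildQueueExpect come from the helper
// itself; the commit key 'A', the decay constant 1.0, and the candidate values
// are hypothetical, and a real test would populate every BuildCandidate field
// so that the reflect.DeepEqual check can succeed.
func TestBuildQueueSketch(t *testing.T) {
	expectations := []*buildQueueExpect{
		{
			// Hypothetical first expectation: Pop returns a candidate for
			// TEST_BUILDER at one of the test repo's commits.
			bc: &BuildCandidate{
				Builder: TEST_BUILDER,
				Commit:  hashes['A'], // hypothetical commit key
			},
			err: nil,
		},
	}
	// 1.0 is used here as a hypothetical decay constant; passing true exercises
	// the build-insertion path on every iteration.
	testBuildQueue(t, 1.0, expectations, true)
}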
func testBuildQueue(t *testing.T, timeDecay24Hr float64, expectations []*buildQueueExpect) {
	testutils.SkipIfShort(t)

	// Initialize the buildbot database.
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()
	repos := gitinfo.NewRepoMap(tr.Dir)
	repo, err := repos.Repo("/skia.git")
	assert.Nil(t, err)
	assert.Nil(t, repos.Update())

	// Create the BuildQueue.
	q, err := NewBuildQueue(PERIOD_FOREVER, tr.Dir, DEFAULT_SCORE_THRESHOLD, timeDecay24Hr, []string{TEST_BUILDER})
	assert.Nil(t, err)

	// Fake time.Now()
	details, err := repo.Details(hashes['I'])
	assert.Nil(t, err)
	now := details.Timestamp.Add(1 * time.Hour)

	// Ensure that we get the expected BuildCandidate at each step. Insert
	// each BuildCandidate into the buildbot database to simulate actually
	// running builds.
	buildNum := 0
	for _, expected := range expectations {
		assert.Nil(t, q.update(now))
		bc, err := q.Pop(TEST_BUILDER)
		assert.Equal(t, expected.err, err)
		if err != nil {
			break
		}
		hash, err := repo.FullHash(bc.Commit)
		assert.Nil(t, err)
		bc.Commit = hash
		assert.True(t, reflect.DeepEqual(expected.bc, bc))
		b := &buildbot.Build{
			Builder:     bc.Builder,
			Master:      "fake",
			Number:      buildNum,
			BuildSlave:  "fake",
			Branch:      "master",
			GotRevision: bc.Commit,
			Repository:  TEST_REPO,
		}
		assert.Nil(t, buildbot.IngestBuild(b, repos))
		buildNum++
	}
}
// ingestBuild encapsulates many of the steps of ingesting a build:
//  - Record the mapping between the codename (build.Builder) and the internal target name.
//  - If no matching build exists, assign a new build number for this build and insert it.
//  - Otherwise, update the existing build to match the given build.
func ingestBuild(build *buildbot.Build, commitHash, target string) error {
	// Store build.Builder (the codename) with its paired internal target name in a local leveldb to serve redirects.
	if err := codenameDB.Put([]byte(build.Builder), []byte(target), nil); err != nil {
		glog.Errorf("Failed to write codename to data store: %s", err)
	}

	buildNumber, err := db.GetBuildNumberForCommit(build.Master, build.Builder, commitHash)
	if err != nil {
		return fmt.Errorf("Failed to find the build in the database: %s", err)
	}
	glog.Infof("GetBuildForCommit at hash: %s returned %d", commitHash, buildNumber)

	var cachedBuild *buildbot.Build
	if buildNumber != -1 {
		cachedBuild, err = db.GetBuildFromDB(build.Master, build.Builder, buildNumber)
		if err != nil {
			return fmt.Errorf("Failed to retrieve build from database: %s", err)
		}
	}

	if cachedBuild == nil {
		// This is a new build we've never seen before, so add it to the buildbot database.

		// TODO(benjaminwagner): This logic won't work well for concurrent requests. Revisit
		// after borenet's "giant datahopper change."

		// First calculate a new unique build.Number.
		number, err := db.GetMaxBuildNumber(build.Master, build.Builder)
		if err != nil {
			return fmt.Errorf("Failed to find next build number: %s", err)
		}
		build.Number = number + 1
		glog.Infof("Writing new build to the database: %s %d", build.Builder, build.Number)
	} else {
		// If the state of the build has changed then write it to the buildbot database.
		glog.Infof("Writing updated build to the database: %s %d", build.Builder, build.Number)
	}

	if err := buildbot.IngestBuild(db, build, repos); err != nil {
		return fmt.Errorf("Failed to ingest build: %s", err)
	}
	return nil
}
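// ingestAndLog is a minimal sketch, not taken from the original source, of how a
// caller such as step's inner loop might delegate to the ingestBuild helper above:
// the arguments would come from buildFromCommit's result, c.Hash, and
// b.Target.Name. The wrapper name and the log-and-drop error handling are only
// illustrative.
func ingestAndLog(build *buildbot.Build, commitHash, target string) {
	// ingestBuild handles the codename mapping, the build-number lookup, and the
	// write to the buildbot database; log the error instead of propagating it,
	// mirroring how step handles ingestion failures today.
	if err := ingestBuild(build, commitHash, target); err != nil {
		glog.Errorf("Failed to ingest build: %s", err)
	}
}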
// step does a single step in ingesting builds from tradefed and pushing the results into the buildbot database.
func step(targets []string, buildService *androidbuildinternal.Service, repos *gitinfo.RepoMap) {
	glog.Errorf("step: Begin")
	if err := repos.Update(); err != nil {
		glog.Errorf("Failed to update repos: %s", err)
		return
	}

	// Loop over every target and look for skia commits in the builds.
	for _, target := range targets {
		r, err := buildService.Build.List().Branch(SKIA_BRANCH).BuildType("submitted").Target(target).ExtraFields("changeInfo").MaxResults(40).Do()
		if err != nil {
			glog.Errorf("Failed to load internal builds: %v", err)
			continue
		}
		// Iterate over the builds in reverse order so we ingest the earlier Git
		// hashes first and the more recent Git hashes later.
		for i := len(r.Builds) - 1; i >= 0; i-- {
			b := r.Builds[i]
			commits := androidbuild.CommitsFromChanges(b.Changes)
			glog.Infof("Commits: %#v", commits)
			if len(commits) > 0 {
				var cachedBuild *buildbot.Build = nil
				// Only look at the first commit in the list. The commits always appear in reverse chronological order, so
				// the 0th entry is the most recent commit.
				c := commits[0]

				// Create a buildbot.Build from the build info.
				key, build := buildFromCommit(b, c)
				glog.Infof("Key: %s Hash: %s", key, c.Hash)

				// Store build.Builder (the codename) with its pair build.Target.Name in a local leveldb to serve redirects.
				if err := codenameDB.Put([]byte(build.Builder), []byte(b.Target.Name), nil); err != nil {
					glog.Errorf("Failed to write codename to data store: %s", err)
				}

				buildNumber, err := buildbot.GetBuildForCommit(build.Builder, build.Master, c.Hash)
				if err != nil {
					glog.Errorf("Failed to find the build in the database: %s", err)
					continue
				}
				glog.Infof("GetBuildForCommit at hash: %s returned %d", c.Hash, buildNumber)

				if buildNumber != -1 {
					cachedBuild, err = buildbot.GetBuildFromDB(build.Builder, build.Master, buildNumber)
					if err != nil {
						glog.Errorf("Failed to retrieve build from database: %s", err)
						continue
					}
				}

				if cachedBuild == nil {
					// This is a new build we've never seen before, so add it to the buildbot database.

					// First calculate a new unique build.Number.
					number, err := buildbot.GetMaxBuildNumber(build.Builder)
					if err != nil {
						glog.Infof("Failed to find next build number: %s", err)
						continue
					}
					build.Number = number + 1
					if err := buildbot.IngestBuild(build, repos); err != nil {
						glog.Errorf("Failed to ingest build: %s", err)
						continue
					}
					cachedBuild = build
				}

				// If the state of the build has changed then write it to the buildbot database.
				if buildsDiffer(build, cachedBuild) {
					// If this was a failure then we need to check that there is a mirror
					// failure on the main branch, at which point we will say that this
					// is a warning.
					if build.Results == buildbot.BUILDBOT_FAILURE && brokenOnMaster(buildService, target, b.BuildId) {
						build.Results = buildbot.BUILDBOT_WARNING
					}
					cachedBuild.Results = build.Results
					cachedBuild.Finished = build.Finished
					glog.Infof("Writing updated build to the database: %s %d", cachedBuild.Builder, cachedBuild.Number)
					if err := buildbot.IngestBuild(cachedBuild, repos); err != nil {
						glog.Errorf("Failed to ingest build: %s", err)
					}
				}
			}
			liveness.Update()
		}
	}
}