Example #1
// testUnfinishedBuild verifies that we can write a build which is not yet
// finished, load the build back from the database, and update it when it
// finishes.
func testUnfinishedBuild(t *testing.T) {
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// Obtain and insert an unfinished build.
	httpClient = testHttpClient
	b, err := getBuildFromMaster("client.skia", "Test-Ubuntu12-ShuttleA-GTX550Ti-x86_64-Release-Valgrind", 152, repos)
	assert.Nil(t, err)
	assert.False(t, b.IsFinished(), "Unfinished build thinks it's finished!")
	dbSerializeAndCompare(t, b, true)

	// Ensure that the build is found by getUnfinishedBuilds.
	unfinished, err := getUnfinishedBuilds()
	assert.Nil(t, err)
	found := false
	for _, u := range unfinished {
		if u.Master == b.Master && u.Builder == b.Builder && u.Number == b.Number {
			found = true
			break
		}
	}
	assert.True(t, found, "Unfinished build was not found by getUnfinishedBuilds!")

	// Add another step to the build to "finish" it, ensure that we can
	// retrieve it as expected.
	b.Finished = b.Started + 1000
	b.Times[1] = b.Finished
	stepStarted := b.Started + 500
	s := &BuildStep{
		BuildID:    b.Id,
		Name:       "LastStep",
		Times:      []float64{stepStarted, b.Finished},
		Number:     len(b.Steps),
		Results:    0,
		ResultsRaw: []interface{}{0.0, []interface{}{}},
		Started:    b.Started + 500.0,
		Finished:   b.Finished,
	}
	b.Steps = append(b.Steps, s)
	assert.True(t, b.IsFinished(), "Finished build thinks it's unfinished!")
	dbSerializeAndCompare(t, b, true)

	// Ensure that the finished build is NOT found by getUnfinishedBuilds.
	unfinished, err = getUnfinishedBuilds()
	assert.Nil(t, err)
	found = false
	for _, u := range unfinished {
		if u.Master == b.Master && u.Builder == b.Builder && u.Number == b.Number {
			found = true
			break
		}
	}
	assert.False(t, found, "Finished build was found by getUnfinishedBuilds!")
}
Example #2
// testBuildDbSerialization verifies that we can write a build to the DB and
// pull it back out without losing or corrupting the data.
func testBuildDbSerialization(t *testing.T) {
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// Test case: an empty build. Tests null and empty values.
	emptyTime := 0.0
	emptyBuild := &Build{
		Steps:   []*BuildStep{},
		Times:   []float64{emptyTime, emptyTime},
		Commits: []string{},
	}

	// Test case: a completely filled-out build.
	buildFromFullJson, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)

	testCases := []*Build{emptyBuild, buildFromFullJson}
	for _, b := range testCases {
		dbSerializeAndCompare(t, b, true)
	}
}
Example #3
func testBuildQueue(t *testing.T, timeDecay24Hr float64, expectations []*buildQueueExpect, testInsert bool) {
	testutils.SkipIfShort(t)

	// Initialize the buildbot database.
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()
	repos := gitinfo.NewRepoMap(tr.Dir)
	repo, err := repos.Repo(TEST_REPO)
	assert.Nil(t, err)
	assert.Nil(t, repos.Update())

	// Create the BuildQueue.
	q, err := NewBuildQueue(PERIOD_FOREVER, repos, DEFAULT_SCORE_THRESHOLD, timeDecay24Hr, []*regexp.Regexp{}, d.db)
	assert.Nil(t, err)

	// Fake time.Now()
	details, err := repo.Details(hashes['I'], false)
	assert.Nil(t, err)
	now := details.Timestamp.Add(1 * time.Hour)

	// Update the queue.
	assert.Nil(t, q.update(now))

	// Ensure that we get the expected BuildCandidate at each step. Insert
	// each BuildCandidate into the buildbot database to simulate actually
	// running builds.
	buildNum := 0
	for _, expected := range expectations {
		bc, err := q.Pop([]string{TEST_BUILDER})
		assert.Equal(t, expected.err, err)
		if err != nil {
			break
		}
		glog.Infof("\n%v\n%v", expected.bc, bc)
		assert.True(t, reflect.DeepEqual(expected.bc, bc))
		if testInsert || buildNum == 0 {
			// Actually insert a build, as if we're really using the scheduler.
			// Do this even if we're not testing insertion, because if we don't,
			// the queue won't know about this builder.
			b := &buildbot.Build{
				Builder:     bc.Builder,
				Master:      "fake",
				Number:      buildNum,
				BuildSlave:  "fake",
				Branch:      "master",
				GotRevision: bc.Commit,
				Repository:  TEST_REPO,
			}
			assert.Nil(t, buildbot.IngestBuild(d.db, b, repos))
			buildNum++
			assert.Nil(t, q.update(now))
		}
	}
}
Example #4
// testUnfinishedBuild verifies that we can write a build which is not yet
// finished, load the build back from the database, and update it when it
// finishes.
func testUnfinishedBuild(t *testing.T, local bool) {
	testutils.SkipIfShort(t)
	d := clearDB(t, local)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// Obtain and insert an unfinished build.
	httpClient = testHttpClient
	b, err := getBuildFromMaster("client.skia", "Test-Ubuntu12-ShuttleA-GTX550Ti-x86_64-Release-Valgrind", 152, repos)
	assert.Nil(t, err)
	assert.False(t, b.IsFinished(), "Unfinished build thinks it's finished!")
	dbSerializeAndCompare(t, d, b, true)

	// Ensure that the build is found by GetUnfinishedBuilds.
	unfinished, err := d.DB().GetUnfinishedBuilds(b.Master)
	assert.Nil(t, err)
	found := false
	for _, u := range unfinished {
		if u.Master == b.Master && u.Builder == b.Builder && u.Number == b.Number {
			found = true
			break
		}
	}
	assert.True(t, found, "Unfinished build was not found by getUnfinishedBuilds!")

	// Add another step to the build to "finish" it, ensure that we can
	// retrieve it as expected.
	b.Finished = b.Started.Add(30 * time.Second)
	stepStarted := b.Started.Add(500 * time.Millisecond)
	s := &BuildStep{
		Name:     "LastStep",
		Number:   len(b.Steps),
		Results:  0,
		Started:  stepStarted,
		Finished: b.Finished,
	}
	b.Steps = append(b.Steps, s)
	assert.True(t, b.IsFinished(), "Finished build thinks it's unfinished!")
	dbSerializeAndCompare(t, d, b, true)

	// Ensure that the finished build is NOT found by getUnfinishedBuilds.
	unfinished, err = d.DB().GetUnfinishedBuilds(b.Master)
	assert.Nil(t, err)
	found = false
	for _, u := range unfinished {
		if u.Master == b.Master && u.Builder == b.Builder && u.Number == b.Number {
			found = true
			break
		}
	}
	assert.False(t, found, "Finished build was found by getUnfinishedBuilds!")
}
Example #5
func main() {
	defer common.LogPanic()
	// Setup flags.
	dbConf := buildbot.DBConfigFromFlags()

	// Global init.
	common.InitWithMetrics(APP_NAME, graphiteServer)

	// Parse the time period.
	period, err := human.ParseDuration(*timePeriod)
	if err != nil {
		glog.Fatal(err)
	}

	// Initialize the buildbot database.
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	if err := dbConf.InitDB(); err != nil {
		glog.Fatal(err)
	}

	// Initialize the BuildBucket client.
	c, err := auth.NewClient(*local, path.Join(*workdir, "oauth_token_cache"), buildbucket.DEFAULT_SCOPES...)
	if err != nil {
		glog.Fatal(err)
	}
	bb := buildbucket.NewClient(c)

	// Build the queue.
	repos := gitinfo.NewRepoMap(*workdir)
	for _, r := range REPOS {
		if _, err := repos.Repo(r); err != nil {
			glog.Fatal(err)
		}
	}
	q, err := build_queue.NewBuildQueue(period, repos, *scoreThreshold, *scoreDecay24Hr, BOT_BLACKLIST)
	if err != nil {
		glog.Fatal(err)
	}

	// Start scheduling builds in a loop.
	liveness := metrics.NewLiveness(APP_NAME)
	if err := scheduleBuilds(q, bb); err != nil {
		glog.Errorf("Failed to schedule builds: %v", err)
	}
	for range time.Tick(time.Minute) {
		liveness.Update()
		if err := scheduleBuilds(q, bb); err != nil {
			glog.Errorf("Failed to schedule builds: %v", err)
		}
	}
}
Example #6
func testBuildQueue(t *testing.T, timeDecay24Hr float64, expectations []*buildQueueExpect) {
	testutils.SkipIfShort(t)

	// Initialize the buildbot database.
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()
	repos := gitinfo.NewRepoMap(tr.Dir)
	repo, err := repos.Repo("/skia.git")
	assert.Nil(t, err)
	assert.Nil(t, repos.Update())

	// Create the BuildQueue.
	q, err := NewBuildQueue(PERIOD_FOREVER, tr.Dir, DEFAULT_SCORE_THRESHOLD, timeDecay24Hr, []string{TEST_BUILDER})
	assert.Nil(t, err)

	// Fake time.Now()
	details, err := repo.Details(hashes['I'])
	assert.Nil(t, err)
	now := details.Timestamp.Add(1 * time.Hour)

	// Ensure that we get the expected BuildCandidate at each step. Insert
	// each BuildCandidate into the buildbot database to simulate actually
	// running builds.
	buildNum := 0
	for _, expected := range expectations {
		assert.Nil(t, q.update(now))

		bc, err := q.Pop(TEST_BUILDER)
		assert.Equal(t, expected.err, err)
		if err != nil {
			break
		}
		hash, err := repo.FullHash(bc.Commit)
		assert.Nil(t, err)
		bc.Commit = hash
		assert.True(t, reflect.DeepEqual(expected.bc, bc))
		b := &buildbot.Build{
			Builder:     bc.Builder,
			Master:      "fake",
			Number:      buildNum,
			BuildSlave:  "fake",
			Branch:      "master",
			GotRevision: bc.Commit,
			Repository:  TEST_REPO,
		}
		assert.Nil(t, buildbot.IngestBuild(b, repos))
		buildNum++
	}
}
Example #7
// IngestNewBuildsLoop continually ingests new builds.
func IngestNewBuildsLoop(workdir string) {
	lv := metrics.NewLiveness("buildbot-ingest")
	repos := gitinfo.NewRepoMap(workdir)
	for range time.Tick(30 * time.Second) {
		glog.Info("Ingesting builds.")
		if err := ingestNewBuilds(repos); err != nil {
			glog.Errorf("Failed to ingest new builds: %v", err)
		} else {
			lv.Update()
		}
	}
}
Example #8
// NewBuildQueue creates and returns a BuildQueue instance which considers
// commits in the specified time period.
//
// Build candidates with a score below the given scoreThreshold are not added
// to the queue. The score for a build candidate is defined as the value added
// by running that build, which is the difference between the total scores for
// all commits on a given builder before and after the build candidate would
// run. Scoring for an individual commit/builder pair is as follows:
//
// -1.0    if no build has ever included this commit on this builder.
// 1.0     if this builder has built AT this commit.
// 1.0 / N if a build on this builder has included this commit, where N is the
//         number of commits included in the build.
//
// The scoring works out such that build candidates which include commits which
// have never been included in a build have a value-add of >= 2, and other build
// candidates (e.g. backfilling) have a value-add of < 2.
//
// Additionally, the scores include a time factor which serves to prioritize
// backfilling of more recent commits. The time factor is an exponential decay
// which is controlled by the timeDecay24Hr parameter. This parameter indicates
// what the time factor should be after 24 hours. For example, setting
// timeDecay24Hr equal to 0.5 causes the score for a build candidate with value
// 1.0 to be 0.5 if the commit is 24 hours old. At 48 hours with a value of 1.0
// the build candidate would receive a score of 0.25.
func NewBuildQueue(period time.Duration, workdir string, scoreThreshold, timeDecay24Hr float64, botWhitelist []string) (*BuildQueue, error) {
	if timeDecay24Hr <= 0.0 || timeDecay24Hr > 1.0 {
		return nil, fmt.Errorf("Time penalty must be 0 < p <= 1")
	}
	q := &BuildQueue{
		botWhitelist:   botWhitelist,
		lock:           sync.RWMutex{},
		period:         period,
		scoreThreshold: scoreThreshold,
		queue:          map[string][]*BuildCandidate{},
		repos:          gitinfo.NewRepoMap(workdir),
		timeLambda:     lambda(timeDecay24Hr),
	}
	return q, nil
}
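As a worked illustration of the decay described in the doc comment above, here is a minimal, self-contained sketch assuming the decay is a plain exponential in commit age; the names lambda and timeFactor are placeholders for illustration, not necessarily this package's actual helpers.

package main

import (
	"fmt"
	"math"
	"time"
)

// lambda converts the desired decay factor at 24 hours into an exponential
// decay constant: factor(t) = exp(-lambda * tHours), so
// lambda = -ln(timeDecay24Hr) / 24.
func lambda(timeDecay24Hr float64) float64 {
	return -math.Log(timeDecay24Hr) / 24.0
}

// timeFactor returns the multiplier applied to a candidate's score for a
// commit of the given age.
func timeFactor(age time.Duration, decayConst float64) float64 {
	return math.Exp(-decayConst * age.Hours())
}

func main() {
	l := lambda(0.5)
	fmt.Println(timeFactor(24*time.Hour, l)) // ≈ 0.5, by construction
	fmt.Println(timeFactor(48*time.Hour, l)) // ≈ 0.25, matching the doc comment above
}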
Example #9
// TestGetBuildFromMaster verifies that we can load JSON data from the build master and
// decode it into a Build object.
func TestGetBuildFromMaster(t *testing.T) {
	testutils.SkipIfShort(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// Default, complete build.
	_, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	// Incomplete build.
	_, err = getBuildFromMaster("client.skia", "Test-Ubuntu12-ShuttleA-GTX550Ti-x86_64-Release-Valgrind", 152, repos)
	assert.Nil(t, err)
}
Example #10
// TestBuildJsonSerialization verifies that we can serialize a build to JSON
// and back without losing or corrupting the data.
func TestBuildJsonSerialization(t *testing.T) {
	testutils.SkipIfShort(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	b1, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	bytes, err := json.Marshal(b1)
	assert.Nil(t, err)
	b2 := &Build{}
	assert.Nil(t, json.Unmarshal(bytes, b2))
	testutils.AssertDeepEqual(t, b1, b2)
}
Example #11
// IngestNewBuildsLoop continually ingests new builds.
func IngestNewBuildsLoop(workdir string) {
	repos := gitinfo.NewRepoMap(workdir)
	var wg sync.WaitGroup
	for _, m := range MASTER_NAMES {
		wg.Add(1)
		go func(master string) {
			defer wg.Done()
			lv := metrics.NewLiveness(fmt.Sprintf("buildbot-ingest-%s", master))
			for range time.Tick(30 * time.Second) {
				if err := ingestNewBuilds(master, repos); err != nil {
					glog.Errorf("Failed to ingest new builds: %v", err)
				} else {
					lv.Update()
				}
			}
		}(m)
	}
	wg.Wait()
}
Example #12
// IngestNewBuildsLoop continually ingests new builds.
func IngestNewBuildsLoop(db DB, workdir string) error {
	if _, ok := db.(*localDB); !ok {
		return fmt.Errorf("Can only ingest builds with a local DB instance.")
	}
	repos := gitinfo.NewRepoMap(workdir)
	go func() {
		var wg sync.WaitGroup
		for _, m := range MASTER_NAMES {
			wg.Add(1)
			go func(master string) {
				defer wg.Done()
				lv := metrics.NewLiveness(fmt.Sprintf("buildbot-ingest-%s", master))
				for range time.Tick(10 * time.Second) {
					if err := ingestNewBuilds(db.(*localDB), master, repos); err != nil {
						glog.Errorf("Failed to ingest new builds: %s", err)
					} else {
						lv.Update()
					}
				}
			}(m)
		}
		wg.Wait()
	}()
	return nil
}
Example #13
// testIngestNewBuilds verifies that we can successfully query the masters and
// the database for new and unfinished builds, respectively, and ingest them
// into the database.
func testIngestNewBuilds(t *testing.T) {
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// This builder needs to load a few builds.
	b1, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	b1.Master = "client.skia.android"
	b1.Builder = "Perf-Android-Venue8-PowerVR-x86-Release"
	b1.Number = 463
	b1.Steps = []*BuildStep{}
	assert.Nil(t, b1.ReplaceIntoDB())

	// This builder has no new builds, but the last one wasn't finished
	// at its time of ingestion.
	b2, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	b2.Master = "client.skia.fyi"
	b2.Builder = "Housekeeper-PerCommit"
	b2.Number = 1035
	b2.Finished = 0.0
	b2.Steps = []*BuildStep{}
	assert.Nil(t, b2.ReplaceIntoDB())

	// Subsequent builders are already up-to-date.
	b3, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	b3.Master = "client.skia.fyi"
	b3.Builder = "Housekeeper-Nightly-RecreateSKPs"
	b3.Number = 58
	b3.Steps = []*BuildStep{}
	assert.Nil(t, b3.ReplaceIntoDB())

	b4, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	b4.Master = "client.skia.android"
	b4.Builder = "Test-Android-Venue8-PowerVR-x86-Debug"
	b4.Number = 532
	b4.Steps = []*BuildStep{}
	assert.Nil(t, b4.ReplaceIntoDB())

	// IngestNewBuilds should process the above Venue8 Perf bot's builds
	// 464-466 as well as Housekeeper-PerCommit's unfinished build #1035.
	assert.Nil(t, ingestNewBuilds(repos))

	// Verify that the expected builds are now in the database.
	expected := []Build{
		{
			Master:  b1.Master,
			Builder: b1.Builder,
			Number:  464,
		},
		{
			Master:  b1.Master,
			Builder: b1.Builder,
			Number:  465,
		},
		{
			Master:  b1.Master,
			Builder: b1.Builder,
			Number:  466,
		},
		{
			Master:  b2.Master,
			Builder: b2.Builder,
			Number:  1035,
		},
	}
	for _, e := range expected {
		a, err := GetBuildFromDB(e.Builder, e.Master, e.Number)
		assert.Nil(t, err)
		if !(a.Master == e.Master && a.Builder == e.Builder && a.Number == e.Number) {
			t.Fatalf("Incorrect build was inserted!\n  %s == %s\n  %s == %s\n  %d == %d", a.Master, e.Master, a.Builder, e.Builder, a.Number, e.Number)
		}
		assert.True(t, a.IsFinished(), fmt.Sprintf("Failed to update build properly; it should be finished: %v", a))
	}
}
Example #14
// testGetUningestedBuilds verifies that getUningestedBuilds works as expected.
func testGetUningestedBuilds(t *testing.T) {
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// This builder is no longer found on the master.
	b1, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	b1.Master = "client.skia.compile"
	b1.Builder = "My-Builder"
	b1.Number = 115
	b1.Steps = []*BuildStep{}
	assert.Nil(t, b1.ReplaceIntoDB())

	// This builder needs to load a few builds.
	b2, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	b2.Master = "client.skia.android"
	b2.Builder = "Perf-Android-Venue8-PowerVR-x86-Release"
	b2.Number = 463
	b2.Steps = []*BuildStep{}
	assert.Nil(t, b2.ReplaceIntoDB())

	// This builder is already up-to-date.
	b3, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	b3.Master = "client.skia.fyi"
	b3.Builder = "Housekeeper-PerCommit"
	b3.Number = 1035
	b3.Steps = []*BuildStep{}
	assert.Nil(t, b3.ReplaceIntoDB())

	// This builder is already up-to-date.
	b4, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	b4.Master = "client.skia.android"
	b4.Builder = "Test-Android-Venue8-PowerVR-x86-Debug"
	b4.Number = 532
	b4.Steps = []*BuildStep{}
	assert.Nil(t, b4.ReplaceIntoDB())

	// Expectations. If the master or builder has no uningested builds,
	// we expect it not to be in the results, even with an empty map/slice.
	expected := map[string]map[string][]int{
		"client.skia.fyi": map[string][]int{
			"Housekeeper-Nightly-RecreateSKPs": []int{ // No already-ingested builds.
				0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
			},
		},
		"client.skia.android": map[string][]int{ // Some already-ingested builds.
			"Perf-Android-Venue8-PowerVR-x86-Release": []int{
				464, 465, 466,
			},
		},
	}
	httpClient = testHttpClient
	actual, err := getUningestedBuilds()
	assert.Nil(t, err)
	testutils.AssertDeepEqual(t, expected, actual)
}
Example #15
// testLastProcessedBuilds verifies that getLastProcessedBuilds gives us
// the expected result.
func testLastProcessedBuilds(t *testing.T) {
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	build, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)

	// Ensure that we get the right number for not-yet-processed
	// builder/master pair.
	builds, err := getLastProcessedBuilds()
	assert.Nil(t, err)
	if builds == nil || len(builds) != 0 {
		t.Fatal(fmt.Errorf("getLastProcessedBuilds returned an unacceptable value for no builds: %v", builds))
	}

	// Ensure that we get the right number for a single already-processed
	// builder/master pair.
	assert.Nil(t, build.ReplaceIntoDB())
	builds, err = getLastProcessedBuilds()
	assert.Nil(t, err)
	if builds == nil || len(builds) != 1 {
		t.Fatal(fmt.Errorf("getLastProcessedBuilds returned incorrect number of results: %v", builds))
	}
	if builds[0].Master != build.Master || builds[0].Builder != build.Builder || builds[0].Number != build.Number {
		t.Fatal(fmt.Errorf("getLastProcessedBuilds returned the wrong build: %v", builds[0]))
	}

	// Ensure that we get the correct result for multiple builders.
	build2, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	build2.Builder = "Other-Builder"
	build2.Number = build.Number + 10
	assert.Nil(t, build2.ReplaceIntoDB())
	builds, err = getLastProcessedBuilds()
	assert.Nil(t, err)
	compareBuildLists := func(expected []*Build, actual []*BuildID) bool {
		if len(expected) != len(actual) {
			return false
		}
		for _, e := range expected {
			found := false
			for _, a := range actual {
				if e.Builder == a.Builder && e.Master == a.Master && e.Number == a.Number {
					found = true
					break
				}
			}
			if !found {
				return false
			}
		}
		return true
	}
	assert.True(t, compareBuildLists([]*Build{build, build2}, builds), fmt.Sprintf("getLastProcessedBuilds returned incorrect results: %v", builds))

	// Add "older" build, ensure that only the newer ones are returned.
	build3, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)
	build3.Number -= 10
	assert.Nil(t, build3.ReplaceIntoDB())
	builds, err = getLastProcessedBuilds()
	assert.Nil(t, err)
	assert.True(t, compareBuildLists([]*Build{build, build2}, builds), fmt.Sprintf("getLastProcessedBuilds returned incorrect results: %v", builds))
}
Example #16
// testFindCommitsForBuild verifies that findCommitsForBuild correctly obtains
// the list of commits which were newly built in a given build.
func testFindCommitsForBuild(t *testing.T, local bool) {
	testutils.SkipIfShort(t)
	httpClient = testHttpClient
	d := clearDB(t, local)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// The test repo is laid out like this:
	//
	// *   06eb2a58139d3ff764f10232d5c8f9362d55e20f I (HEAD, master, Build #4)
	// *   ecb424466a4f3b040586a062c15ed58356f6590e F (Build #3)
	// |\
	// | * d30286d2254716d396073c177a754f9e152bbb52 H
	// | * 8d2d1247ef5d2b8a8d3394543df6c12a85881296 G (Build #2)
	// * | 67635e7015d74b06c00154f7061987f426349d9f E
	// * | 6d4811eddfa637fac0852c3a0801b773be1f260d D (Build #1)
	// * | d74dfd42a48325ab2f3d4a97278fc283036e0ea4 C (Build #6)
	// |/
	// *   4b822ebb7cedd90acbac6a45b897438746973a87 B (Build #0)
	// *   051955c355eb742550ddde4eccc3e90b6dc5b887 A
	//
	hashes := map[rune]string{
		'A': "051955c355eb742550ddde4eccc3e90b6dc5b887",
		'B': "4b822ebb7cedd90acbac6a45b897438746973a87",
		'C': "d74dfd42a48325ab2f3d4a97278fc283036e0ea4",
		'D': "6d4811eddfa637fac0852c3a0801b773be1f260d",
		'E': "67635e7015d74b06c00154f7061987f426349d9f",
		'F': "ecb424466a4f3b040586a062c15ed58356f6590e",
		'G': "8d2d1247ef5d2b8a8d3394543df6c12a85881296",
		'H': "d30286d2254716d396073c177a754f9e152bbb52",
		'I': "06eb2a58139d3ff764f10232d5c8f9362d55e20f",
	}

	// Test cases. Each test case builds on the previous cases.
	testCases := []struct {
		GotRevision string
		Expected    []string
		StoleFrom   int
		Stolen      []string
	}{
		// 0. The first build.
		{
			GotRevision: hashes['B'],
			Expected:    []string{hashes['A'], hashes['B']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 1. On a linear set of commits, with at least one previous build.
		{
			GotRevision: hashes['D'],
			Expected:    []string{hashes['D'], hashes['C']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 2. The first build on a new branch.
		{
			GotRevision: hashes['G'],
			Expected:    []string{hashes['G']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 3. After a merge.
		{
			GotRevision: hashes['F'],
			Expected:    []string{hashes['E'], hashes['H'], hashes['F']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 4. One last "normal" build.
		{
			GotRevision: hashes['I'],
			Expected:    []string{hashes['I']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 5. No GotRevision.
		{
			GotRevision: "",
			Expected:    []string{},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 6. Steal commits from a previously-ingested build.
		{
			GotRevision: hashes['C'],
			Expected:    []string{hashes['C']},
			StoleFrom:   1,
			Stolen:      []string{hashes['C']},
		},
	}
	master := "client.skia"
	builder := "Test-Ubuntu12-ShuttleA-GTX660-x86-Release"
	for buildNum, tc := range testCases {
		build, err := getBuildFromMaster(master, builder, buildNum, repos)
		assert.Nil(t, err)
		// Sanity check. Make sure we have the GotRevision we expect.
		assert.Equal(t, tc.GotRevision, build.GotRevision)
		gotRevProp, err := build.GetStringProperty("got_revision")
		assert.Nil(t, err)
		assert.Equal(t, tc.GotRevision, gotRevProp)

		assert.Nil(t, IngestBuild(d.DB(), build, repos))

		ingested, err := d.DB().GetBuildFromDB(master, builder, buildNum)
		assert.Nil(t, err)
		assert.NotNil(t, ingested)

		// Double-check the inserted build's GotRevision.
		assert.Equal(t, tc.GotRevision, ingested.GotRevision)
		gotRevProp, err = ingested.GetStringProperty("got_revision")
		assert.Nil(t, err)
		assert.Equal(t, tc.GotRevision, gotRevProp)

		// Verify that we got the build (and commits list) we expect.
		build.Commits = tc.Expected
		testutils.AssertDeepEqual(t, build, ingested)

		// Ensure that we can search by commit to find the build we inserted.
		for _, c := range hashes {
			expectBuild := util.In(c, tc.Expected)
			builds, err := d.DB().GetBuildsForCommits([]string{c}, nil)
			assert.Nil(t, err)
			if expectBuild {
				// Assert that we get the build we inserted.
				assert.Equal(t, 1, len(builds))
				assert.Equal(t, 1, len(builds[c]))
				testutils.AssertDeepEqual(t, ingested, builds[c][0])
			} else {
				// Assert that we didn't get the build we inserted.
				for _, gotBuild := range builds[c] {
					assert.NotEqual(t, ingested.Id(), gotBuild.Id())
				}
			}

			n, err := d.DB().GetBuildNumberForCommit(build.Master, build.Builder, c)
			assert.Nil(t, err)
			if expectBuild {
				assert.Equal(t, buildNum, n)
			} else {
				assert.NotEqual(t, buildNum, n)
			}
		}
	}

	// Extra: ensure that build #6 really stole the commit from #1.
	b, err := d.DB().GetBuildFromDB(master, builder, 1)
	assert.Nil(t, err)
	assert.NotNil(t, b)
	assert.False(t, util.In(hashes['C'], b.Commits), fmt.Sprintf("Expected not to find %s in %v", hashes['C'], b.Commits))
}
Example #17
// testBuildDbIdConsistency verifies that we maintain IDs across build updates.
func testBuildDbIdConsistency(t *testing.T) {
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// Retrieve a full build.
	b, err := testGetBuildFromMaster(repos)
	assert.Nil(t, err)

	// Assert that all IDs are zero, since we haven't yet inserted the build.
	assert.Equal(t, 0, b.Id)
	for _, s := range b.Steps {
		assert.Equal(t, 0, s.Id)
	}

	// Insert the build for the first time.
	err = b.ReplaceIntoDB()
	assert.Nil(t, err)

	// Rename a step.
	b.Steps[2].Name = "differentName"
	dbSerializeAndCompare(t, b, false)

	// Remove a step.
	b.Steps = append(b.Steps[:2], b.Steps[3:]...)
	dbSerializeAndCompare(t, b, false)

	// Keep track of the original IDs, verify that IDs are non-zero.
	origBuildId := b.Id
	assert.NotEqual(t, 0, b.Id)
	origStepIds := make([]int, 0, len(b.Steps))
	for _, s := range b.Steps {
		assert.NotEqual(t, 0, s.Id)
		origStepIds = append(origStepIds, s.Id)
	}

	// Add some comments.
	b.Comments = append(b.Comments, &BuildComment{
		BuildId:   b.Id,
		User:      "******",
		Timestamp: float64(time.Now().UTC().Unix()),
		Message:   "Hi.",
	})

	// Insert the build again.
	err = b.ReplaceIntoDB()
	assert.Nil(t, err)

	// Assert that the IDs were maintained.
	assert.Equal(t, origBuildId, b.Id)
	for i, s := range b.Steps {
		assert.Equal(t, origStepIds[i], s.Id)
	}

	// Assert that our comment was given an ID, record it for posterity.
	origCommentId := b.Comments[0].Id
	assert.NotEqual(t, 0, origCommentId)

	// Add another comment.
	b.Comments = append(b.Comments, &BuildComment{
		BuildId:   b.Id,
		User:      "******",
		Timestamp: float64(time.Now().UTC().Unix()),
		Message:   "Here's another comment",
	})

	// Insert the build again.
	err = b.ReplaceIntoDB()
	assert.Nil(t, err)

	// Assert that the IDs were maintained.
	assert.Equal(t, origBuildId, b.Id)
	for i, s := range b.Steps {
		assert.Equal(t, origStepIds[i], s.Id)
	}
	assert.Equal(t, b.Comments[0].Id, origCommentId)
}
Example #18
func main() {
	defer common.LogPanic()

	var err error

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("internal", graphiteServer)

	if !*local {
		*targetList = metadata.Must(metadata.ProjectGet("datahopper_internal_targets"))
	}
	targets := strings.Split(*targetList, " ")
	glog.Infof("Targets: %#v", targets)

	codenameDB, err = leveldb.OpenFile(*codenameDbDir, nil)
	if err != nil && errors.IsCorrupted(err) {
		codenameDB, err = leveldb.RecoverFile(*codenameDbDir, nil)
	}
	if err != nil {
		glog.Fatalf("Failed to open codename leveldb at %s: %s", *codenameDbDir, err)
	}
	// Initialize the buildbot database.
	db, err = buildbot.NewRemoteDB(*buildbotDbHost)
	if err != nil {
		glog.Fatal(err)
	}

	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		redirectURL = "https://internal.skia.org/oauth2callback/"
	}
	if err := login.InitFromMetadataOrJSON(redirectURL, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST); err != nil {
		glog.Fatalf("Failed to initialize login system: %s", err)

	}

	if *local {
		webhook.InitRequestSaltForTesting()
	} else {
		webhook.MustInitRequestSaltFromMetadata()
	}

	repos = gitinfo.NewRepoMap(*workdir)

	// Ingest Android framework builds.
	go func() {
		glog.Infof("Starting.")

		// In this case we don't want a backoff transport since the Apiary backend
		// seems to fail a lot, so we basically want to fall back to polling if a
		// call fails.
		client, err := auth.NewJWTServiceAccountClient("", "", &http.Transport{Dial: util.DialTimeout}, androidbuildinternal.AndroidbuildInternalScope, storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create authenticated client: %s", err)
		}

		buildService, err := androidbuildinternal.New(client)
		if err != nil {
			glog.Fatalf("Failed to obtain Android build service: %v", err)
		}
		glog.Infof("Ready to start loop.")
		step(targets, buildService)
		for range time.Tick(*period) {
			step(targets, buildService)
		}
	}()

	r := mux.NewRouter()
	r.HandleFunc("/", indexHandler)
	r.HandleFunc("/builders/{codename}/builds/{buildNumber}", redirectHandler)
	r.HandleFunc("/builders/{codename}", builderRedirectHandler)
	r.HandleFunc("/ingestBuild", ingestBuildHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #19
// TestFindCommitsForBuild verifies that findCommitsForBuild correctly obtains
// the list of commits which were newly built in a given build.
func TestFindCommitsForBuild(t *testing.T) {
	testutils.SkipIfShort(t)
	httpClient = testHttpClient
	d := clearDB(t)
	defer d.Close(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()

	repos := gitinfo.NewRepoMap(tr.Dir)

	// The test repo is laid out like this:
	//
	// *   06eb2a58139d3ff764f10232d5c8f9362d55e20f I (HEAD, master, Build #4)
	// *   ecb424466a4f3b040586a062c15ed58356f6590e F (Build #3)
	// |\
	// | * d30286d2254716d396073c177a754f9e152bbb52 H
	// | * 8d2d1247ef5d2b8a8d3394543df6c12a85881296 G (Build #2)
	// * | 67635e7015d74b06c00154f7061987f426349d9f E
	// * | 6d4811eddfa637fac0852c3a0801b773be1f260d D (Build #1)
	// * | d74dfd42a48325ab2f3d4a97278fc283036e0ea4 C (Build #6)
	// |/
	// *   4b822ebb7cedd90acbac6a45b897438746973a87 B (Build #0)
	// *   051955c355eb742550ddde4eccc3e90b6dc5b887 A
	//
	hashes := map[rune]string{
		'A': "051955c355eb742550ddde4eccc3e90b6dc5b887",
		'B': "4b822ebb7cedd90acbac6a45b897438746973a87",
		'C': "d74dfd42a48325ab2f3d4a97278fc283036e0ea4",
		'D': "6d4811eddfa637fac0852c3a0801b773be1f260d",
		'E': "67635e7015d74b06c00154f7061987f426349d9f",
		'F': "ecb424466a4f3b040586a062c15ed58356f6590e",
		'G': "8d2d1247ef5d2b8a8d3394543df6c12a85881296",
		'H': "d30286d2254716d396073c177a754f9e152bbb52",
		'I': "06eb2a58139d3ff764f10232d5c8f9362d55e20f",
	}

	// Test cases. Each test case builds on the previous cases.
	testCases := []struct {
		GotRevision string
		Expected    []string
		StoleFrom   int
		Stolen      []string
	}{
		// 0. The first build.
		{
			GotRevision: hashes['B'],
			Expected:    []string{hashes['B'], hashes['A']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 1. On a linear set of commits, with at least one previous build.
		{
			GotRevision: hashes['D'],
			Expected:    []string{hashes['D'], hashes['C']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 2. The first build on a new branch.
		{
			GotRevision: hashes['G'],
			Expected:    []string{hashes['G']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 3. After a merge.
		{
			GotRevision: hashes['F'],
			Expected:    []string{hashes['F'], hashes['E'], hashes['H']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 4. One last "normal" build.
		{
			GotRevision: hashes['I'],
			Expected:    []string{hashes['I']},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 5. No GotRevision.
		{
			GotRevision: "",
			Expected:    []string{},
			StoleFrom:   -1,
			Stolen:      []string{},
		},
		// 6. Steal commits from a previously-ingested build.
		{
			GotRevision: hashes['C'],
			Expected:    []string{hashes['C']},
			StoleFrom:   1,
			Stolen:      []string{hashes['C']},
		},
	}
	master := "client.skia"
	builder := "Test-Ubuntu12-ShuttleA-GTX660-x86-Release"
	for buildNum, tc := range testCases {
		build, err := getBuildFromMaster(master, builder, buildNum, repos)
		assert.Nil(t, err)
		assert.Nil(t, IngestBuild(build, repos))
		ingested, err := GetBuildFromDB(builder, master, buildNum)
		assert.Nil(t, err)
		assert.True(t, util.SSliceEqual(ingested.Commits, tc.Expected), fmt.Sprintf("Commits for build do not match expectation.\nGot:  %v\nWant: %v", ingested.Commits, tc.Expected))
	}

	// Extra: ensure that build #6 really stole the commit from #1.
	b, err := GetBuildFromDB(builder, master, 1)
	assert.Nil(t, err)
	assert.False(t, util.In(hashes['C'], b.Commits), fmt.Sprintf("Expected not to find %s in %v", hashes['C'], b.Commits))
}
Example #20
func main() {
	defer common.LogPanic()

	var err error
	// Setup flags.
	dbConf := buildbot.DBConfigFromFlags()

	// Global init to initialize glog and parse arguments.
	common.InitWithMetrics("internal", graphiteServer)

	if !*local {
		*targetList = metadata.Must(metadata.ProjectGet("datahopper_internal_targets"))
	}
	targets := strings.Split(*targetList, " ")
	glog.Infof("Targets: %#v", targets)

	codenameDB, err = leveldb.OpenFile(*codenameDbDir, nil)
	if err != nil && errors.IsCorrupted(err) {
		codenameDB, err = leveldb.RecoverFile(*codenameDbDir, nil)
	}
	if err != nil {
		glog.Fatalf("Failed to open codename leveldb at %s: %s", *codenameDbDir, err)
	}
	// Initialize the buildbot database.
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	if err := dbConf.InitDB(); err != nil {
		glog.Fatal(err)
	}

	var cookieSalt = "notverysecret"
	var clientID = "31977622648-1873k0c1e5edaka4adpv1ppvhr5id3qm.apps.googleusercontent.com"
	var clientSecret = "cw0IosPu4yjaG2KWmppj2guj"
	var redirectURL = fmt.Sprintf("http://localhost%s/oauth2callback/", *port)
	if !*local {
		cookieSalt = metadata.Must(metadata.ProjectGet(metadata.COOKIESALT))
		clientID = metadata.Must(metadata.ProjectGet(metadata.CLIENT_ID))
		clientSecret = metadata.Must(metadata.ProjectGet(metadata.CLIENT_SECRET))
		redirectURL = "https://internal.skia.org/oauth2callback/"
	}
	login.Init(clientID, clientSecret, redirectURL, cookieSalt, login.DEFAULT_SCOPE, login.DEFAULT_DOMAIN_WHITELIST, *local)

	// Ingest Android framework builds.
	go func() {
		glog.Infof("Starting.")
		repos := gitinfo.NewRepoMap(*workdir)

		// In this case we don't want a backoff transport since the Apiary backend
		// seems to fail a lot, so we basically want to fall back to polling if a
		// call fails.
		client, err := auth.NewClientWithTransport(*local, *oauthCacheFile, "", &http.Transport{Dial: util.DialTimeout}, androidbuildinternal.AndroidbuildInternalScope, storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create installed app oauth client:%s", err)
		}

		buildService, err := androidbuildinternal.New(client)
		if err != nil {
			glog.Fatalf("Failed to obtain Android build service: %v", err)
		}
		glog.Infof("Ready to start loop.")
		step(targets, buildService, repos)
		for range time.Tick(*period) {
			step(targets, buildService, repos)
		}
	}()

	r := mux.NewRouter()
	r.HandleFunc("/", indexHandler)
	r.HandleFunc("/builders/{codename}/builds/{buildNumber}", redirectHandler)
	r.HandleFunc("/builders/{codename}", builderRedirectHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/oauth2callback/", login.OAuth2CallbackHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #21
func TestBuildScoring(t *testing.T) {
	testutils.SkipIfShort(t)

	// Load the test repo.
	tr := util.NewTempRepo()
	defer tr.Cleanup()
	repos := gitinfo.NewRepoMap(tr.Dir)
	repo, err := repos.Repo(TEST_REPO)
	assert.Nil(t, err)
	assert.Nil(t, repos.Update())

	details := map[string]*vcsinfo.LongCommit{}
	for _, h := range hashes {
		d, err := repo.Details(h, false)
		assert.Nil(t, err)
		details[h] = d
	}

	now := details[hashes['I']].Timestamp.Add(1 * time.Hour)
	build1 := &buildbot.Build{
		GotRevision: hashes['A'],
		Commits:     []string{hashes['A'], hashes['B'], hashes['C']},
	}
	cases := []struct {
		commit        *vcsinfo.LongCommit
		build         *buildbot.Build
		expectedScore float64
		lambda        float64
	}{
		// Built at the given commit.
		{
			commit:        details[hashes['A']],
			build:         build1,
			expectedScore: 1.0,
			lambda:        lambda(1.0),
		},
		// Build included the commit.
		{
			commit:        details[hashes['B']],
			build:         build1,
			expectedScore: 1.0 / 3.0,
			lambda:        lambda(1.0),
		},
		// Build included the commit.
		{
			commit:        details[hashes['C']],
			build:         build1,
			expectedScore: 1.0 / 3.0,
			lambda:        lambda(1.0),
		},
		// Build did not include the commit.
		{
			commit:        details[hashes['D']],
			build:         build1,
			expectedScore: -1.0,
			lambda:        lambda(1.0),
		},
		// Build is nil.
		{
			commit:        details[hashes['A']],
			build:         nil,
			expectedScore: -1.0,
			lambda:        lambda(1.0),
		},
		// Same cases, but with lambda set to something interesting.
		// Built at the given commit.
		{
			commit:        details[hashes['A']],
			build:         build1,
			expectedScore: 0.958902488117383,
			lambda:        lambda(0.5),
		},
		// Build included the commit.
		{
			commit:        details[hashes['B']],
			build:         build1,
			expectedScore: 0.3228038362210165,
			lambda:        lambda(0.5),
		},
		// Build included the commit.
		{
			commit:        details[hashes['C']],
			build:         build1,
			expectedScore: 0.32299553133576475,
			lambda:        lambda(0.5),
		},
		// Build did not include the commit.
		{
			commit:        details[hashes['D']],
			build:         build1,
			expectedScore: -0.9690254634399716,
			lambda:        lambda(0.5),
		},
		// Build is nil.
		{
			commit:        details[hashes['A']],
			build:         nil,
			expectedScore: -0.958902488117383,
			lambda:        lambda(0.5),
		},
		// Same cases, but with an even more aggressive lambda.
		// Built at the given commit.
		{
			commit:        details[hashes['A']],
			build:         build1,
			expectedScore: 0.756679619938755,
			lambda:        lambda(0.01),
		},
		// Build included the commit.
		{
			commit:        details[hashes['B']],
			build:         build1,
			expectedScore: 0.269316526502904,
			lambda:        lambda(0.01),
		},
		// Build included the commit.
		{
			commit:        details[hashes['C']],
			build:         build1,
			expectedScore: 0.2703808739655321,
			lambda:        lambda(0.01),
		},
		// Build did not include the commit.
		{
			commit:        details[hashes['D']],
			build:         build1,
			expectedScore: -0.8113588225688924,
			lambda:        lambda(0.01),
		},
		// Build is nil.
		{
			commit:        details[hashes['A']],
			build:         nil,
			expectedScore: -0.756679619938755,
			lambda:        lambda(0.01),
		},
	}
	for _, tc := range cases {
		assert.Equal(t, tc.expectedScore, scoreBuild(tc.commit, tc.build, now, tc.lambda))
	}
}
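For context, here is a minimal sketch of the scoring these expectations imply, combining the base scores from the NewBuildQueue doc comment (Example #8) with the exponential time decay sketched after it. scoreBuildSketch is a hypothetical stand-in written for this page, not necessarily the repository's scoreBuild, and the Hash and Timestamp fields of vcsinfo.LongCommit are assumptions here.

// scoreBuildSketch mirrors the cases above: base score 1.0 when the build ran
// at the commit, 1.0/N when the build merely included the commit among its N
// commits, and -1.0 otherwise (including a nil build), all scaled by
// exp(-decayConst * ageInHours).
func scoreBuildSketch(commit *vcsinfo.LongCommit, build *buildbot.Build, now time.Time, decayConst float64) float64 {
	score := -1.0
	if build != nil {
		if build.GotRevision == commit.Hash {
			score = 1.0
		} else if util.In(commit.Hash, build.Commits) {
			score = 1.0 / float64(len(build.Commits))
		}
	}
	return score * math.Exp(-decayConst*now.Sub(commit.Timestamp).Hours())
}

With decayConst = lambda(1.0) the decay term is 1, which is why the first block of cases expects exactly 1.0, 1.0/3.0 and -1.0.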