Example #1
// ingestNewBuilds finds the set of uningested builds and ingests them.
func ingestNewBuilds(m string, repos *gitinfo.RepoMap) error {
	defer metrics.NewTimer("buildbot.ingestNewBuilds").Stop()
	glog.Infof("Ingesting builds for %s", m)
	// TODO(borenet): Investigate the use of channels here. We should be
	// able to start ingesting builds as the data becomes available rather
	// than waiting until the end.
	buildsToProcess, err := getUningestedBuilds(m)
	if err != nil {
		return fmt.Errorf("Failed to obtain the set of uningested builds: %v", err)
	}
	unfinished, err := getUnfinishedBuilds(m)
	if err != nil {
		return fmt.Errorf("Failed to obtain the set of unfinished builds: %v", err)
	}
	for _, b := range unfinished {
		if _, ok := buildsToProcess[b.Builder]; !ok {
			buildsToProcess[b.Builder] = []int{}
		}
		buildsToProcess[b.Builder] = append(buildsToProcess[b.Builder], b.Number)
	}

	if err := repos.Update(); err != nil {
		return err
	}

	// TODO(borenet): Can we ingest builders in parallel?
	errs := map[string]error{}
	for b, w := range buildsToProcess {
		for _, n := range w {
			if BUILD_BLACKLIST[b][n] {
				glog.Warningf("Skipping blacklisted build: %s # %d", b, n)
				continue
			}
			if IsTrybot(b) {
				continue
			}
			glog.Infof("Ingesting build: %s, %s, %d", m, b, n)
			build, err := retryGetBuildFromMaster(m, b, n, repos)
			if err != nil {
				errs[b] = fmt.Errorf("Failed to ingest build: %v", err)
				break
			}
			if err := IngestBuild(build, repos); err != nil {
				errs[b] = fmt.Errorf("Failed to ingest build: %v", err)
				break
			}
		}
	}
	if len(errs) > 0 {
		msg := fmt.Sprintf("Encountered errors ingesting builds for %s:", m)
		for b, err := range errs {
			msg += fmt.Sprintf("\n%s: %v", b, err)
		}
		return fmt.Errorf("%s", msg)
	}
	glog.Infof("Done ingesting builds for %s", m)
	return nil
}
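For orientation, here is a minimal sketch of how ingestNewBuilds might be driven from a polling loop. The MASTERS slice, the 30-second interval, and the already-constructed repos value are placeholders assumed for illustration, not part of the example above.

// Hypothetical driver loop; MASTERS and repos are assumed to exist elsewhere.
for range time.Tick(30 * time.Second) {
	for _, m := range MASTERS {
		if err := ingestNewBuilds(m, repos); err != nil {
			glog.Errorf("Failed to ingest new builds for %s: %v", m, err)
		}
	}
}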
Example #2
// step does a single step in ingesting builds from tradefed and pushing the results into the buildbot database.
func step(targets []string, buildService *androidbuildinternal.Service, repos *gitinfo.RepoMap) {
	glog.Errorf("step: Begin")

	if err := repos.Update(); err != nil {
		glog.Errorf("Failed to update repos: %s", err)
		return
	}
	// Loop over every target and look for skia commits in the builds.
	for _, target := range targets {
		r, err := buildService.Build.List().Branch(SKIA_BRANCH).BuildType("submitted").Target(target).ExtraFields("changeInfo").MaxResults(40).Do()
		if err != nil {
			glog.Errorf("Failed to load internal builds: %v", err)
			continue
		}
		// Iterate over the builds in reverse order so we ingest the earlier Git
		// hashes first and the more recent Git hashes later.
		for i := len(r.Builds) - 1; i >= 0; i-- {
			b := r.Builds[i]
			commits := androidbuild.CommitsFromChanges(b.Changes)
			glog.Infof("Commits: %#v", commits)
			if len(commits) > 0 {
				var cachedBuild *buildbot.Build
				// Only look at the first commit in the list. The commits always appear in reverse chronological order, so
				// the 0th entry is the most recent commit.
				c := commits[0]
				// Create a buildbot.Build from the build info.
				key, build := buildFromCommit(b, c)
				glog.Infof("Key: %s Hash: %s", key, c.Hash)

				// Store build.Builder (the codename) along with its corresponding build.Target.Name in a local leveldb so we can serve redirects.
				if err := codenameDB.Put([]byte(build.Builder), []byte(b.Target.Name), nil); err != nil {
					glog.Errorf("Failed to write codename to data store: %s", err)
				}

				buildNumber, err := buildbot.GetBuildForCommit(build.Builder, build.Master, c.Hash)
				if err != nil {
					glog.Errorf("Failed to find the build in the database: %s", err)
					continue
				}
				glog.Infof("GetBuildForCommit at hash: %s returned %d", c.Hash, buildNumber)
				if buildNumber != -1 {
					cachedBuild, err = buildbot.GetBuildFromDB(build.Builder, build.Master, buildNumber)
					if err != nil {
						glog.Errorf("Failed to retrieve build from database: %s", err)
						continue
					}
				}
				if cachedBuild == nil {
					// This is a new build we've never seen before, so add it to the buildbot database.

					// First calculate a new unique build.Number.
					number, err := buildbot.GetMaxBuildNumber(build.Builder)
					if err != nil {
						glog.Infof("Failed to find next build number: %s", err)
						continue
					}
					build.Number = number + 1
					if err := buildbot.IngestBuild(build, repos); err != nil {
						glog.Errorf("Failed to ingest build: %s", err)
						continue
					}
					cachedBuild = build
				}

				// If the state of the build has changed then write it to the buildbot database.
				if buildsDiffer(build, cachedBuild) {
					// If this was a failure then check whether the same failure also
					// exists on the main branch; if so, downgrade it to a warning.
					if build.Results == buildbot.BUILDBOT_FAILURE && brokenOnMaster(buildService, target, b.BuildId) {
						build.Results = buildbot.BUILDBOT_WARNING
					}
					cachedBuild.Results = build.Results
					cachedBuild.Finished = build.Finished
					glog.Infof("Writing updated build to the database: %s %d", cachedBuild.Builder, cachedBuild.Number)
					if err := buildbot.IngestBuild(cachedBuild, repos); err != nil {
						glog.Errorf("Failed to ingest build: %s", err)
					}
				}
			}
			liveness.Update()
		}
	}
}
Example #3
// ingestNewBuilds finds the set of uningested builds and ingests them.
func ingestNewBuilds(repos *gitinfo.RepoMap) error {
	// TODO(borenet): Investigate the use of channels here. We should be
	// able to start ingesting builds as the data becomes available rather
	// than waiting until the end.
	buildsToProcess, err := getUningestedBuilds()
	if err != nil {
		return fmt.Errorf("Failed to obtain the set of uningested builds: %v", err)
	}
	unfinished, err := getUnfinishedBuilds()
	if err != nil {
		return fmt.Errorf("Failed to obtain the set of unfinished builds: %v", err)
	}
	for _, b := range unfinished {
		if _, ok := buildsToProcess[b.Master]; !ok {
			buildsToProcess[b.Master] = map[string][]int{}
		}
		if _, ok := buildsToProcess[b.Master][b.Builder]; !ok {
			buildsToProcess[b.Master][b.Builder] = []int{}
		}
		buildsToProcess[b.Master][b.Builder] = append(buildsToProcess[b.Master][b.Builder], b.Number)
	}

	if err := repos.Update(); err != nil {
		return err
	}

	// TODO(borenet): Figure out how much of this is safe to parallelize.
	// We can definitely do different masters in parallel, and maybe we can
	// ingest different builders in parallel as well.
	var wg sync.WaitGroup
	// mtx protects errors, which is written from multiple goroutines below.
	var mtx sync.Mutex
	errors := map[string]error{}
	for m, v := range buildsToProcess {
		wg.Add(1)
		go func(master string, buildsToProcessForMaster map[string][]int) {
			defer wg.Done()
			for b, w := range buildsToProcessForMaster {
				for _, n := range w {
					if BUILD_BLACKLIST[b][n] {
						glog.Warningf("Skipping blacklisted build: %s # %d", b, n)
						continue
					}
					glog.Infof("Ingesting build: %s, %s, %d", master, b, n)
					build, err := retryGetBuildFromMaster(master, b, n, repos)
					if err != nil {
						err := fmt.Errorf("Failed to ingest build: %v", err)
						glog.Error(err)
						errors[master] = err
						return
					}
					if err := IngestBuild(build, repos); err != nil {
						err := fmt.Errorf("Failed to ingest build: %v", err)
						glog.Error(err)
						errors[master] = err
						return
					}
				}
			}
		}(m, v)
	}
	wg.Wait()
	if len(errors) > 0 {
		return fmt.Errorf("Errors: %v", errors)
	}
	return nil
}
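The BUILD_BLACKLIST[b][n] lookups in Examples #1 and #3 imply a nested map keyed by builder name and build number. A hypothetical declaration, using placeholder builder names and numbers, might look like:

// Hypothetical shape for BUILD_BLACKLIST; the names and numbers below are placeholders.
var BUILD_BLACKLIST = map[string]map[int]bool{
	"Some-Broken-Builder": {
		1234: true,
	},
}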