Example No. 1
func getCherrypickParentPRs(obj *github.MungeObject, config *github.Config) []*github.MungeObject {
	out := []*github.MungeObject{}
	if obj.Issue.Body == nil {
		glog.Errorf("Found a nil body in %d", *obj.Issue.Number)
		return nil
	}
	body := *obj.Issue.Body

	// Scan each line of the body for a cherry-pick reference. A PR without
	// any valid lines yields an empty slice and shouldn't get autolabeled.

	lines := strings.Split(body, "\n")
	for _, line := range lines {
		matches := cpRe.FindStringSubmatch(line)
		if len(matches) != 3 {
			glog.V(6).Infof("%d: line:%v len(matches)=%d", *obj.Issue.Number, line, len(matches))
			continue
		}
		parentPRNum, err := strconv.Atoi(matches[1])
		if err != nil {
			glog.Errorf("%d: Unable to convert %q to parent PR number", *obj.Issue.Number, matches[1])
			return nil
		}
		parentPR, err := config.GetObject(parentPRNum)
		if err != nil {
			glog.Errorf("Unable to get object for %d", parentPRNum)
			return nil
		}
		out = append(out, parentPR)
	}
	return out
}
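
The regular expression cpRe is defined elsewhere and is not shown in this example. For illustration only, here is a minimal standalone sketch of the kind of pattern that would produce the three submatches checked above; the actual pattern used by the munger may differ.

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Hypothetical stand-in for cpRe: matches lines such as
// "Cherry pick of #123 on release-1.1." and captures the parent PR number
// and the target branch.
var cpRe = regexp.MustCompile(`Cherry pick of #([[:digit:]]+) on ([[:alnum:].-]+)\.`)

func main() {
	line := "Cherry pick of #12345 on release-1.1."
	matches := cpRe.FindStringSubmatch(line)
	if len(matches) != 3 {
		fmt.Println("no cherry-pick reference on this line")
		return
	}
	parentPRNum, err := strconv.Atoi(matches[1])
	if err != nil {
		fmt.Printf("unable to convert %q to a PR number\n", matches[1])
		return
	}
	fmt.Printf("parent PR: %d, branch: %s\n", parentPRNum, matches[2])
}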
Example No. 2
func (sq *SubmitQueue) doGenCommitters(config *github_util.Config) error {
	pushUsers, pullUsers, err := config.UsersWithAccess()
	if err != nil {
		glog.Fatalf("Unable to read committers from github: %v", err)
	}

	pushSet := sets.NewString()
	for _, user := range pushUsers {
		pushSet.Insert(*user.Login)
	}
	pullSet := sets.NewString()
	for _, user := range pullUsers {
		pullSet.Insert(*user.Login)
	}

	if err = writeWhitelist(sq.Committers, "# auto-generated by "+os.Args[0]+" gen-committers; manual additions should go in the whitelist", pushSet); err != nil {
		glog.Fatalf("Unable to write committers: %v", err)
	}
	glog.Info("Successfully updated committers file.")

	existingWhitelist, err := loadWhitelist(sq.Whitelist)
	if err != nil {
		glog.Fatalf("error loading whitelist; it will not be updated: %v", err)
	}

	neededInWhitelist := existingWhitelist.Union(pullSet)
	neededInWhitelist = neededInWhitelist.Difference(pushSet)
	if err = writeWhitelist(sq.Whitelist, "# auto-generated by "+os.Args[0]+" gen-committers; manual additions may be added by hand", neededInWhitelist); err != nil {
		glog.Fatalf("Unable to write additional user whitelist: %v", err)
	}
	glog.Info("Successfully update whitelist file.")
	return nil
}
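
The committers/whitelist bookkeeping above leans on the String set type's Union and Difference operations. Below is a minimal sketch of the same calculation with invented users; the import path is an assumption, since these mungers use their own vendored copy of the sets package.

package main

import (
	"fmt"

	// Import path is an assumption for this sketch.
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Users with push access are committers; users with only pull access,
	// plus anyone already whitelisted by hand, belong in the whitelist file.
	pushSet := sets.NewString("alice", "bob")
	pullSet := sets.NewString("bob", "carol")
	existingWhitelist := sets.NewString("dave")

	neededInWhitelist := existingWhitelist.Union(pullSet).Difference(pushSet)
	fmt.Println(neededInWhitelist.List()) // [carol dave]
}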
Example No. 3
// getGeneratedFiles returns a list of all automatically generated files in the repo. These include
// docs, deep_copy, and conversions
//
// It would be 'better' to call this for every commit but that takes
// a whole lot of time for almost always the same information, and if
// our results are slightly wrong, who cares? Instead look for the
// generated files once and if someone changed what files are generated
// we'll size slightly wrong. No biggie.
func (s *SizeMunger) getGeneratedFiles(config *github_util.Config) {
	if s.genFiles != nil {
		return
	}
	files := sets.NewString()
	prefixes := []string{}
	s.genFiles = &files
	s.genPrefixes = &prefixes

	file := s.generatedFilesFile
	if len(file) == 0 {
		glog.Infof("No --generated-files-config= supplied, applying no labels")
		return
	}
	fp, err := os.Open(file)
	if err != nil {
		glog.Errorf("Unable to open %q: %v", file, err)
		return
	}

	defer fp.Close()
	scanner := bufio.NewScanner(fp)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "#") || line == "" {
			continue
		}
		fields := strings.Fields(line)
		if len(fields) != 2 {
			glog.Errorf("Invalid line in generated docs config %s: %q", file, line)
			continue
		}
		eType := fields[0]
		file := fields[1]
		if eType == "prefix" {
			prefixes = append(prefixes, file)
		} else if eType == "path" {
			files.Insert(file)
		} else if eType == "paths-from-repo" {
			docs, err := config.GetFileContents(file, "")
			if err != nil {
				glog.Errorf("Unable to get contents of %q: %v", file, err)
				continue
			}
			docSlice := strings.Split(docs, "\n")
			files.Insert(docSlice...)
		} else {
			glog.Errorf("Invalid line in generated docs config, unknown type: %s, %q", eType, line)
			continue
		}
	}
	if scanner.Err() != nil {
		glog.Errorf("Error scanning %s: %v", file, err)
		return
	}
	s.genFiles = &files
	s.genPrefixes = &prefixes

	return
}
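
For reference, the file handed to --generated-files-config is a plain two-field-per-line format: a type (prefix, path, or paths-from-repo) followed by a value, with '#' comments and blank lines ignored. A hypothetical example, with invented file names, might look like this:

# Everything under docs/ is generated
prefix docs/

# A single generated file
path pkg/api/deep_copy_generated.go

# A file in the repo that itself lists generated paths, one per line
paths-from-repo docs/.generated_docs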
Example No. 4
func gatherData(cfg *githubhelper.Config) (*reportData, error) {
	issues, err := cfg.ListAllIssues(&github.IssueListByRepoOptions{
		State:  "open",
		Sort:   "created",
		Labels: []string{"kind/flake"},
	})
	if err != nil {
		return nil, err
	}

	r := reportData{
		loginToEmail:  map[string]string{},
		loginToIssues: map[string][]issueReportData{},
	}
	for _, issue := range issues {
		assignee := "UNASSIGNED"
		if issue.Assignee != nil && issue.Assignee.Login != nil {
			assignee = *issue.Assignee.Login
			if _, ok := r.loginToEmail[assignee]; !ok {
				if u, err := cfg.GetUser(assignee); err == nil {
					if u.Email != nil {
						r.loginToEmail[assignee] = *u.Email
					} else {
						// Don't keep looking this up
						r.loginToEmail[assignee] = ""
					}
				}
			}
		}
		age := time.Duration(0)
		if issue.CreatedAt != nil {
			age = time.Now().Sub(*issue.CreatedAt)
		}
		priority := "??"
		priorityLabels := githubhelper.GetLabelsWithPrefix(issue.Labels, "priority/")
		if len(priorityLabels) == 1 {
			priority = strings.TrimPrefix(priorityLabels[0], "priority/")
		}
		if priority == "P2" || priority == "P3" {
			r.lowPriorityTests++
			continue
		}
		reportData := issueReportData{
			priority: priority,
			number:   *issue.Number,
			title:    *issue.Title,
			age:      age,
		}
		r.loginToIssues[assignee] = append(r.loginToIssues[assignee], reportData)
		if priority == "??" {
			const unprioritized = "UNPRIORITIZED"
			r.loginToIssues[unprioritized] = append(r.loginToIssues[unprioritized], reportData)
		}
		r.totalTests++
	}
	return &r, nil
}
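
The reportData and issueReportData types are defined elsewhere; the following is a hypothetical reconstruction consistent with the fields gatherData populates, so the real definitions may carry more fields.

package main

import (
	"fmt"
	"time"
)

// Hypothetical shapes inferred from how gatherData uses these types.
type issueReportData struct {
	priority string
	number   int
	title    string
	age      time.Duration
}

type reportData struct {
	loginToEmail     map[string]string
	loginToIssues    map[string][]issueReportData
	totalTests       int
	lowPriorityTests int
}

func main() {
	r := reportData{
		loginToEmail:  map[string]string{},
		loginToIssues: map[string][]issueReportData{},
	}
	r.loginToIssues["UNASSIGNED"] = append(r.loginToIssues["UNASSIGNED"], issueReportData{
		priority: "P1",
		number:   123,
		title:    "example flaky test",
		age:      48 * time.Hour,
	})
	r.totalTests++
	fmt.Printf("%+v\n", r)
}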
Example No. 5
// MungeIssues is the main function which asks that each munger be called
// for each Issue
func MungeIssues(config *github_util.Config) error {
	mfunc := func(issue *github_api.Issue) error {
		return mungeIssue(config, issue)
	}
	if err := config.ForEachIssueDo([]string{}, mfunc); err != nil {
		return err
	}
	return nil
}
Example No. 6
// MungePullRequest is the workhorse that will actually make updates to the PR
func (b *BlunderbussMunger) MungePullRequest(config *github_util.Config, pr *github.PullRequest, issue *github.Issue, commits []github.RepositoryCommit, events []github.IssueEvent) {
	if !b.blunderbussReassign && issue.Assignee != nil {
		glog.V(6).Infof("skipping %v: reassign: %v assignee: %v", *pr.Number, b.blunderbussReassign, describeUser(issue.Assignee))
		return
	}
	potentialOwners := weightMap{}
	weightSum := int64(0)
	for _, commit := range commits {
		if commit.Author == nil || commit.Author.Login == nil || commit.SHA == nil {
			glog.Warningf("Skipping invalid commit for %d: %#v", *pr.Number, commit)
			continue
		}
		for _, file := range commit.Files {
			fileWeight := int64(1)
			if file.Changes != nil && *file.Changes != 0 {
				fileWeight = int64(*file.Changes)
			}
			// Judge file size on a log scale -- effectively this makes
			// three buckets; we shouldn't have many 10k+ line changes.
			fileWeight = int64(math.Log10(float64(fileWeight))) + 1
			fileOwners := b.config.findOwners(*file.Filename)
			if len(fileOwners) == 0 {
				glog.Warningf("Couldn't find an owner for: %s", *file.Filename)
			}
			for owner, ownerWeight := range fileOwners {
				if owner == *pr.User.Login {
					continue
				}
				potentialOwners[owner] = potentialOwners[owner] + fileWeight*ownerWeight
				weightSum += fileWeight * ownerWeight
			}
		}
	}
	if len(potentialOwners) == 0 {
		glog.Errorf("No owners found for PR %d", *pr.Number)
		return
	}
	glog.V(4).Infof("Weights: %#v\nSum: %v", potentialOwners, weightSum)

	if issue.Assignee != nil {
		cur := *issue.Assignee.Login
		glog.Infof("Current assignee %v has a %02.2f%% chance of having been chosen", cur, 100.0*float64(potentialOwners[cur])/float64(weightSum))
	}
	selection := rand.Int63n(weightSum)
	owner := ""
	for o, w := range potentialOwners {
		owner = o
		selection -= w
		if selection <= 0 {
			break
		}
	}
	glog.Infof("Assigning %v to %v (previously assigned to %v)", *pr.Number, owner, describeUser(issue.Assignee))
	config.AssignPR(*pr.Number, owner)
}
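
The assignment at the end is a weighted random draw: rand.Int63n picks a point in [0, weightSum) and the loop walks owners until the running total covers it, so owners with more weight are chosen proportionally more often. Here is a standalone sketch of the same selection loop with invented weights; weightMap is assumed to be a map[string]int64.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	potentialOwners := map[string]int64{"alice": 5, "bob": 2, "carol": 1}
	weightSum := int64(0)
	for _, w := range potentialOwners {
		weightSum += w
	}

	// Pick a point in [0, weightSum) and subtract owner weights until the
	// running total covers it; larger weights are selected more often.
	selection := rand.Int63n(weightSum)
	owner := ""
	for o, w := range potentialOwners {
		owner = o
		selection -= w
		if selection <= 0 {
			break
		}
	}
	fmt.Println("assigning to", owner)
}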
Example No. 7
// MungePullRequests is the main function which asks that each munger be called
// for each PR
func MungePullRequests(config *github_util.Config) error {
	mfunc := func(pr *github_api.PullRequest, issue *github_api.Issue) error {
		return mungePR(config, pr, issue)
	}
	if err := config.ForEachPRDo([]string{}, mfunc); err != nil {
		return err
	}

	return nil
}
Example No. 8
// internalInitialize will initialize the munger for the given GCS bucket url.
func (sq *SubmitQueue) internalInitialize(config *github.Config, features *features.Features, GCSBucketUrl string) error {
	sq.Lock()
	defer sq.Unlock()

	sq.githubConfig = config
	if len(sq.JenkinsHost) == 0 {
		glog.Fatalf("--jenkins-host is required.")
	}

	if sq.FakeE2E {
		sq.e2e = &fake_e2e.FakeE2ETester{
			JobNames:           sq.JobNames,
			WeakStableJobNames: sq.WeakStableJobNames,
		}
	} else {
		sq.e2e = (&e2e.RealE2ETester{
			JobNames:             sq.JobNames,
			JenkinsHost:          sq.JenkinsHost,
			WeakStableJobNames:   sq.WeakStableJobNames,
			BuildStatus:          map[string]e2e.BuildInfo{},
			GoogleGCSBucketUtils: utils.NewUtils(GCSBucketUrl),
		}).Init()
	}

	if len(config.Address) > 0 {
		if len(config.WWWRoot) > 0 {
			http.Handle("/", gziphandler.GzipHandler(http.FileServer(http.Dir(config.WWWRoot))))
		}
		http.Handle("/prs", gziphandler.GzipHandler(http.HandlerFunc(sq.servePRs)))
		http.Handle("/history", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHistory)))
		http.Handle("/users", gziphandler.GzipHandler(http.HandlerFunc(sq.serveUsers)))
		http.Handle("/github-e2e-queue", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGithubE2EStatus)))
		http.Handle("/google-internal-ci", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGoogleInternalStatus)))
		http.Handle("/merge-info", gziphandler.GzipHandler(http.HandlerFunc(sq.serveMergeInfo)))
		http.Handle("/priority-info", gziphandler.GzipHandler(http.HandlerFunc(sq.servePriorityInfo)))
		http.Handle("/health", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHealth)))
		http.Handle("/sq-stats", gziphandler.GzipHandler(http.HandlerFunc(sq.serveSQStats)))
		http.Handle("/flakes", gziphandler.GzipHandler(http.HandlerFunc(sq.serveFlakes)))
		config.ServeDebugStats("/stats")
		go http.ListenAndServe(config.Address, nil)
	}

	if sq.githubE2EPollTime == 0 {
		sq.githubE2EPollTime = githubE2EPollTime
	}

	sq.health.StartTime = sq.clock.Now()
	sq.healthHistory = make([]healthRecord, 0)

	go sq.handleGithubE2EAndMerge()
	go sq.updateGoogleE2ELoop()
	return nil
}
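
Every endpoint above is wrapped in gziphandler.GzipHandler so responses are compressed when the client accepts gzip. A minimal standalone example of the same wrapping pattern follows; the /hello route and port are illustrative, and the import path assumes the NYTimes gziphandler package.

package main

import (
	"log"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	// GzipHandler wraps any http.Handler and gzips the response when the
	// request carries Accept-Encoding: gzip.
	http.Handle("/hello", gziphandler.GzipHandler(hello))
	log.Fatal(http.ListenAndServe(":8080", nil))
}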
Example No. 9
func (sq *SubmitQueue) addWhitelistCommand(root *cobra.Command, config *github_util.Config) {
	genCommitters := &cobra.Command{
		Use:   "gencommiters",
		Short: "Generate the list of people with commit access",
		RunE: func(_ *cobra.Command, _ []string) error {
			if err := config.PreExecute(); err != nil {
				return err
			}
			return sq.doGenCommitters(config)
		},
	}
	root.PersistentFlags().StringVar(&sq.Whitelist, "user-whitelist", "./whitelist.txt", "Path to a whitelist file that contains users to auto-merge.  Required.")
	root.PersistentFlags().StringVar(&sq.Committers, "committers", "./committers.txt", "File in which the list of authorized committers is stored; only used if this list cannot be gotten at run time.  (Merged with whitelist; separate so that it can be auto-generated)")

	root.AddCommand(genCommitters)
}
Example No. 10
// MungePullRequest is the workhorse that will actually make updates to the PR
func (OkToTestMunger) MungePullRequest(config *github_util.Config, pr *github.PullRequest, issue *github.Issue, commits []github.RepositoryCommit, events []github.IssueEvent) {
	if !github_util.HasLabel(issue.Labels, "lgtm") {
		return
	}
	status, err := config.GetStatus(pr, []string{"Jenkins GCE e2e"})
	if err != nil {
		glog.Errorf("unexpected error getting status: %v", err)
		return
	}
	if status == "incomplete" {
		glog.V(2).Infof("status is incomplete, adding ok to test")
		msg := `@k8s-bot ok to test

	pr builder appears to be missing, activating due to 'lgtm' label.`
		config.WriteComment(*pr.Number, msg)
	}
}
Example No. 11
// Initialize will initialize the munger
func (sq *SubmitQueue) Initialize(config *github.Config, features *features.Features) error {
	sq.Lock()
	defer sq.Unlock()

	sq.githubConfig = config
	if len(sq.JenkinsHost) == 0 {
		glog.Fatalf("--jenkins-host is required.")
	}

	sq.lastE2EStable = true
	e2e := &e2e.E2ETester{
		JobNames:           sq.JobNames,
		JenkinsHost:        sq.JenkinsHost,
		WeakStableJobNames: sq.WeakStableJobNames,
		BuildStatus:        map[string]e2e.BuildInfo{},
	}
	sq.e2e = e2e

	if len(config.Address) > 0 {
		if len(config.WWWRoot) > 0 {
			http.Handle("/", gziphandler.GzipHandler(http.FileServer(http.Dir(config.WWWRoot))))
		}
		http.Handle("/prs", gziphandler.GzipHandler(http.HandlerFunc(sq.servePRs)))
		http.Handle("/history", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHistory)))
		http.Handle("/users", gziphandler.GzipHandler(http.HandlerFunc(sq.serveUsers)))
		http.Handle("/github-e2e-queue", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGithubE2EStatus)))
		http.Handle("/google-internal-ci", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGoogleInternalStatus)))
		http.Handle("/merge-info", gziphandler.GzipHandler(http.HandlerFunc(sq.serveMergeInfo)))
		http.Handle("/priority-info", gziphandler.GzipHandler(http.HandlerFunc(sq.servePriorityInfo)))
		config.ServeDebugStats("/stats")
		go http.ListenAndServe(config.Address, nil)
	}

	sq.prStatus = map[string]submitStatus{}
	sq.lastPRStatus = map[string]submitStatus{}

	sq.githubE2EQueue = map[int]*github.MungeObject{}
	if sq.githubE2EPollTime == 0 {
		sq.githubE2EPollTime = githubE2EPollTime
	}

	go sq.handleGithubE2EAndMerge()
	go sq.updateGoogleE2ELoop()
	return nil
}
Example No. 12
// MungePullRequest is the workhorse that will actually make updates to the PR
func (p *PathLabelMunger) MungePullRequest(config *github_util.Config, pr *github.PullRequest, issue *github.Issue, commits []github.RepositoryCommit, events []github.IssueEvent) {
	labelMap := *p.labelMap

	needsLabels := sets.NewString()
	for _, c := range commits {
		for _, f := range c.Files {
			for prefix, label := range labelMap {
				if strings.HasPrefix(*f.Filename, prefix) && !github_util.HasLabel(issue.Labels, label) {
					needsLabels.Insert(label)
				}
			}
		}
	}

	if needsLabels.Len() != 0 {
		config.AddLabels(*pr.Number, needsLabels.List())
	}
}
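
p.labelMap is populated elsewhere from configuration; below is a hypothetical map of the prefix-to-label shape the loop above expects, with invented prefixes and labels.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical prefix-to-label mapping of the shape PathLabelMunger
	// iterates over; the real mappings come from configuration.
	labelMap := map[string]string{
		"docs/":    "kind/documentation",
		"pkg/api/": "kind/api-change",
	}

	filename := "docs/getting-started.md"
	for prefix, label := range labelMap {
		if strings.HasPrefix(filename, prefix) {
			fmt.Println("needs label:", label)
		}
	}
}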
Example No. 13
func mungePR(config *github_util.Config, pr *github_api.PullRequest, issue *github_api.Issue) error {
	if pr == nil {
		fmt.Printf("found nil pr\n")
		return nil
	}

	commits, err := config.GetFilledCommits(*pr.Number)
	if err != nil {
		return err
	}

	events, err := config.GetAllEventsForPR(*pr.Number)
	if err != nil {
		return err
	}

	for _, munger := range mungers {
		munger.MungePullRequest(config, pr, issue, commits, events)
	}
	return nil
}
Example No. 14
// MungePullRequest is the workhorse that will actually make updates to the PR
func (LGTMAfterCommitMunger) MungePullRequest(config *github_util.Config, pr *github.PullRequest, issue *github.Issue, commits []github.RepositoryCommit, events []github.IssueEvent) {
	if !github_util.HasLabel(issue.Labels, "lgtm") {
		return
	}

	lastModified := github_util.LastModifiedTime(commits)
	lgtmTime := github_util.LabelTime("lgtm", events)

	if lastModified == nil || lgtmTime == nil {
		glog.Errorf("PR %d unable to determine lastModified or lgtmTime", *pr.Number)
		return
	}

	if lastModified.After(*lgtmTime) {
		lgtmRemovedBody := "PR changed after LGTM, removing LGTM."
		if err := config.WriteComment(*pr.Number, lgtmRemovedBody); err != nil {
			return
		}
		config.RemoveLabel(*pr.Number, "lgtm")
	}
}
Example No. 15
// Initialize will initialize the munger
func (c *CherrypickQueue) Initialize(config *github.Config, features *features.Features) error {
	c.Lock()
	defer c.Unlock()

	if len(config.Address) > 0 {
		if len(config.WWWRoot) > 0 {
			http.Handle("/", http.FileServer(http.Dir(config.WWWRoot)))
		}
		http.HandleFunc("/queue", c.serveQueue)
		http.HandleFunc("/raw", c.serveRaw)
		http.HandleFunc("/queue-info", c.serveQueueInfo)
		config.ServeDebugStats("/stats")
		go http.ListenAndServe(config.Address, nil)
	}
	c.lastMergedAndApproved = map[int]*github.MungeObject{}
	c.lastMerged = map[int]*github.MungeObject{}
	c.lastUnmerged = map[int]*github.MungeObject{}
	c.mergedAndApproved = map[int]*github.MungeObject{}
	c.merged = map[int]*github.MungeObject{}
	c.unmerged = map[int]*github.MungeObject{}
	return nil
}
Example No. 16
// MungePullRequest is the workhorse that will actually make updates to the PR
func (PingCIMunger) MungePullRequest(config *github_util.Config, pr *github.PullRequest, issue *github.Issue, commits []github.RepositoryCommit, events []github.IssueEvent) {
	if !github_util.HasLabel(issue.Labels, "lgtm") {
		return
	}
	if mergeable, err := config.IsPRMergeable(pr); err != nil {
		glog.V(2).Infof("Skipping %d - problem determining mergeability", *pr.Number)
		return
	} else if !mergeable {
		glog.V(2).Infof("Skipping %d - not mergeable", *pr.Number)
		return
	}
	status, err := config.GetStatus(pr, []string{"Shippable", "continuous-integration/travis-ci/pr"})
	if err != nil {
		glog.Errorf("unexpected error getting status: %v", err)
		return
	}
	if status == "incomplete" {
		glog.V(2).Infof("status is incomplete, closing and re-opening")
		msg := "Continuous integration appears to have missed, closing and re-opening to trigger it"
		config.WriteComment(*pr.Number, msg)

		config.ClosePR(pr)
		time.Sleep(5 * time.Second)
		config.OpenPR(pr, 10)
	}
}
Example No. 17
// MungePullRequest is the workhorse that will actually make updates to the PR
func (s *SizeMunger) MungePullRequest(config *github_util.Config, pr *github.PullRequest, issue *github.Issue, commits []github.RepositoryCommit, events []github.IssueEvent) {
	s.getGeneratedFiles(config)
	genFiles := *s.genFiles
	genPrefixes := *s.genPrefixes

	if pr.Additions == nil {
		glog.Warningf("PR %d has nil Additions", *pr.Number)
		return
	}
	adds := *pr.Additions
	if pr.Deletions == nil {
		glog.Warningf("PR %d has nil Deletions", *pr.Number)
		return
	}
	dels := *pr.Deletions

	for _, c := range commits {
	FileLoop:
		for _, f := range c.Files {
			for _, p := range genPrefixes {
				if strings.HasPrefix(*f.Filename, p) {
					adds = adds - *f.Additions
					dels = dels - *f.Deletions
					// Skip to the next file so a generated file isn't
					// subtracted twice (once per matching prefix, and
					// again if it also appears in genFiles).
					continue FileLoop
				}
			}
			if genFiles.Has(*f.Filename) {
				adds = adds - *f.Additions
				dels = dels - *f.Deletions
			}
		}
	}

	newSize := calculateSize(adds, dels)
	newLabel := labelSizePrefix + newSize

	existing := github_util.GetLabelsWithPrefix(issue.Labels, labelSizePrefix)
	needsUpdate := true
	for _, l := range existing {
		if l == newLabel {
			needsUpdate = false
			continue
		}
		config.RemoveLabel(*pr.Number, l)
	}
	if needsUpdate {
		config.AddLabels(*pr.Number, []string{newLabel})

		body := fmt.Sprintf("Labelling this PR as %s", newLabel)
		config.WriteComment(*pr.Number, body)
	}
}
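
calculateSize and labelSizePrefix are defined elsewhere. Below is a hypothetical bucketing function of the kind the size labels imply; the thresholds and the "size/" prefix are assumptions for illustration only.

package main

import "fmt"

const labelSizePrefix = "size/"

// Hypothetical size bucketing; the real thresholds may differ.
func calculateSize(adds, dels int) string {
	lines := adds + dels
	switch {
	case lines < 10:
		return "XS"
	case lines < 30:
		return "S"
	case lines < 100:
		return "M"
	case lines < 500:
		return "L"
	case lines < 1000:
		return "XL"
	default:
		return "XXL"
	}
}

func main() {
	fmt.Println(labelSizePrefix + calculateSize(120, 40)) // prints "size/L"
}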
Example No. 18
// MungePullRequest is the workhorse that will actually make updates to the PR
func (NeedsRebaseMunger) MungePullRequest(config *github_util.Config, pr *github.PullRequest, issue *github.Issue, commits []github.RepositoryCommit, events []github.IssueEvent) {
	mergeable, err := config.IsPRMergeable(pr)
	if err != nil {
		glog.V(2).Infof("Skipping %d - problem determining mergeable", *pr.Number)
		return
	}
	if mergeable && github_util.HasLabel(issue.Labels, needsRebase) {
		config.RemoveLabel(*pr.Number, needsRebase)
	}
	if !mergeable && !github_util.HasLabel(issue.Labels, needsRebase) {
		config.AddLabels(*pr.Number, []string{needsRebase})
	}
}
Example No. 19
// internalInitialize will initialize the munger.
// If overrideUrl is specified, test GCS utils pointing at that URL are created
// instead of the presubmit-detecting ones.
func (sq *SubmitQueue) internalInitialize(config *github.Config, features *features.Features, overrideUrl string) error {
	sq.Lock()
	defer sq.Unlock()

	// Clean up all of the flags for which we want --flag="" to mean []string{}
	sq.BlockingJobNames = cleanStringSlice(sq.BlockingJobNames)
	sq.NonBlockingJobNames = cleanStringSlice(sq.NonBlockingJobNames)
	sq.PresubmitJobNames = cleanStringSlice(sq.PresubmitJobNames)
	sq.WeakStableJobNames = cleanStringSlice(sq.WeakStableJobNames)
	sq.RequiredStatusContexts = cleanStringSlice(sq.RequiredStatusContexts)
	sq.RequiredRetestContexts = cleanStringSlice(sq.RequiredRetestContexts)
	sq.DoNotMergeMilestones = cleanStringSlice(sq.DoNotMergeMilestones)
	sq.Metadata.RepoPullUrl = fmt.Sprintf("https://github.com/%s/%s/pulls/", config.Org, config.Project)
	sq.Metadata.ProjectName = strings.Title(config.Project)
	sq.githubConfig = config

	// TODO: This is not how injection for tests should work.
	if sq.FakeE2E {
		sq.e2e = &fake_e2e.FakeE2ETester{
			JobNames:           sq.BlockingJobNames,
			WeakStableJobNames: sq.WeakStableJobNames,
		}
	} else {
		var gcs *utils.Utils
		if overrideUrl != "" {
			gcs = utils.NewTestUtils("bucket", "logs", overrideUrl)
		} else {
			gcs = utils.NewWithPresubmitDetection(
				sq.features.GCSInfo.BucketName, sq.features.GCSInfo.LogDir,
				sq.features.GCSInfo.PullKey, sq.features.GCSInfo.PullLogDir,
			)
		}

		sq.e2e = (&e2e.RealE2ETester{
			BlockingJobNames:     sq.BlockingJobNames,
			NonBlockingJobNames:  sq.NonBlockingJobNames,
			WeakStableJobNames:   sq.WeakStableJobNames,
			BuildStatus:          map[string]e2e.BuildInfo{},
			GoogleGCSBucketUtils: gcs,
		}).Init(admin.Mux)
	}

	sq.lgtmTimeCache = mungerutil.NewLabelTimeCache(lgtmLabel)

	if len(config.Address) > 0 {
		if len(config.WWWRoot) > 0 {
			http.Handle("/", gziphandler.GzipHandler(http.FileServer(http.Dir(config.WWWRoot))))
		}
		http.Handle("/prs", gziphandler.GzipHandler(http.HandlerFunc(sq.servePRs)))
		http.Handle("/history", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHistory)))
		http.Handle("/github-e2e-queue", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGithubE2EStatus)))
		http.Handle("/google-internal-ci", gziphandler.GzipHandler(http.HandlerFunc(sq.serveGoogleInternalStatus)))
		http.Handle("/merge-info", gziphandler.GzipHandler(http.HandlerFunc(sq.serveMergeInfo)))
		http.Handle("/priority-info", gziphandler.GzipHandler(http.HandlerFunc(sq.servePriorityInfo)))
		http.Handle("/health", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHealth)))
		http.Handle("/health.svg", gziphandler.GzipHandler(http.HandlerFunc(sq.serveHealthSVG)))
		http.Handle("/sq-stats", gziphandler.GzipHandler(http.HandlerFunc(sq.serveSQStats)))
		http.Handle("/flakes", gziphandler.GzipHandler(http.HandlerFunc(sq.serveFlakes)))
		http.Handle("/metadata", gziphandler.GzipHandler(http.HandlerFunc(sq.serveMetadata)))
		config.ServeDebugStats("/stats")
		go http.ListenAndServe(config.Address, nil)
	}

	admin.Mux.HandleFunc("/api/emergency/stop", sq.EmergencyStopHTTP)
	admin.Mux.HandleFunc("/api/emergency/resume", sq.EmergencyStopHTTP)
	admin.Mux.HandleFunc("/api/emergency/status", sq.EmergencyStopHTTP)

	if sq.githubE2EPollTime == 0 {
		sq.githubE2EPollTime = githubE2EPollTime
	}

	sq.healthHistory = make([]healthRecord, 0)

	go sq.handleGithubE2EAndMerge()
	go sq.updateGoogleE2ELoop()

	if sq.AdminPort != 0 {
		go http.ListenAndServe(fmt.Sprintf("0.0.0.0:%v", sq.AdminPort), admin.Mux)
	}
	return nil
}
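
The emergency-stop endpoints are registered on admin.Mux, which is served on its own port via sq.AdminPort. Here is a minimal sketch of that public/admin split, assuming the admin mux is an ordinary *http.ServeMux (the ports and handler bodies are illustrative).

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Public endpoints go on http.DefaultServeMux, as in internalInitialize.
	http.HandleFunc("/prs", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "public queue state")
	})

	// Admin-only endpoints live on a dedicated mux served on its own port,
	// mirroring admin.Mux and sq.AdminPort.
	adminMux := http.NewServeMux()
	adminMux.HandleFunc("/api/emergency/stop", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "emergency stop requested")
	})

	go func() {
		log.Fatal(http.ListenAndServe("0.0.0.0:9999", adminMux))
	}()
	log.Fatal(http.ListenAndServe(":8080", nil))
}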
Example No. 20
// MungePullRequest is the workhorse that will actually make updates to the PR
func (sq *SubmitQueue) MungePullRequest(config *github_util.Config, pr *github_api.PullRequest, issue *github_api.Issue, commits []github_api.RepositoryCommit, events []github_api.IssueEvent) {
	e2e := sq.e2e
	userSet := sq.userWhitelist

	if !github_util.HasLabels(issue.Labels, []string{"cla: yes"}) {
		sq.SetPRStatus(pr, noCLA)
		return
	}

	if mergeable, err := config.IsPRMergeable(pr); err != nil {
		glog.V(2).Infof("Skipping %d - unable to determine mergeability", *pr.Number)
		sq.SetPRStatus(pr, undeterminedMergability)
		return
	} else if !mergeable {
		glog.V(4).Infof("Skipping %d - not mergable", *pr.Number)
		sq.SetPRStatus(pr, unmergeable)
		return
	}

	// Validate the status information for this PR
	contexts := sq.RequiredStatusContexts
	if len(sq.E2EStatusContext) > 0 && (len(sq.DontRequireE2ELabel) == 0 || !github_util.HasLabel(issue.Labels, sq.DontRequireE2ELabel)) {
		contexts = append(contexts, sq.E2EStatusContext)
	}
	if ok := config.IsStatusSuccess(pr, contexts); !ok {
		glog.Errorf("PR# %d Github CI status is not success", *pr.Number)
		sq.SetPRStatus(pr, ciFailure)
		return
	}

	if !github_util.HasLabel(issue.Labels, sq.WhitelistOverride) && !userSet.Has(*pr.User.Login) {
		glog.V(4).Infof("Dropping %d since %s isn't in whitelist and %s isn't present", *pr.Number, *pr.User.Login, sq.WhitelistOverride)
		if !github_util.HasLabel(issue.Labels, needsOKToMergeLabel) {
			config.AddLabels(*pr.Number, []string{needsOKToMergeLabel})
			body := "The author of this PR is not in the whitelist for merge, can one of the admins add the 'ok-to-merge' label?"
			config.WriteComment(*pr.Number, body)
		}
		sq.SetPRStatus(pr, needsok)
		return
	}

	// Tidy up the issue list.
	if github_util.HasLabel(issue.Labels, needsOKToMergeLabel) {
		config.RemoveLabel(*pr.Number, needsOKToMergeLabel)
	}

	if !github_util.HasLabels(issue.Labels, []string{"lgtm"}) {
		sq.SetPRStatus(pr, noLGTM)
		return
	}

	lastModifiedTime := github_util.LastModifiedTime(commits)
	lgtmTime := github_util.LabelTime("lgtm", events)

	if lastModifiedTime == nil || lgtmTime == nil {
		glog.Errorf("PR %d was unable to determine when LGTM was added or when last modified", *pr.Number)
		sq.SetPRStatus(pr, unknown)
		return
	}

	if lastModifiedTime.After(*lgtmTime) {
		glog.V(4).Infof("PR %d changed after LGTM. Will not merge", *pr.Number)
		sq.SetPRStatus(pr, lgtmEarly)
		return
	}

	if !e2e.Stable() {
		sq.SetPRStatus(pr, e2eFailure)
		return
	}

	// If there is an 'e2e-not-required' label, just merge it.
	if len(sq.DontRequireE2ELabel) > 0 && github_util.HasLabel(issue.Labels, sq.DontRequireE2ELabel) {
		config.MergePR(pr, "submit-queue")
		sq.SetPRStatus(pr, merged)
		return
	}

	sq.SetPRStatus(pr, githube2e)
	sq.Lock()
	sq.githubE2ERequest <- true
	sq.needsGithubE2E[*pr.Number] = pr
	sq.Unlock()

	return
}
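
The hand-off at the end (signal the e2e worker over a channel, then record the PR in a mutex-guarded map) is a small producer/worker pattern. Below is a standalone sketch of the same shape; all names are illustrative, not the submit queue's real fields.

package main

import (
	"fmt"
	"sync"
	"time"
)

// queue mirrors the tail of MungePullRequest: a signal channel plus a
// mutex-guarded map of PRs waiting for the e2e worker.
type queue struct {
	sync.Mutex
	request chan bool
	needs   map[int]string
}

func (q *queue) worker() {
	for range q.request {
		q.Lock()
		for num, title := range q.needs {
			fmt.Println("running e2e for", num, title)
			delete(q.needs, num)
		}
		q.Unlock()
	}
}

func main() {
	q := &queue{request: make(chan bool, 10), needs: map[int]string{}}
	go q.worker()

	// Same shape as the enqueue at the end of MungePullRequest: signal the
	// worker and record the PR, all under the lock.
	q.Lock()
	q.request <- true
	q.needs[42] = "example PR"
	q.Unlock()

	time.Sleep(100 * time.Millisecond)
}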
Example No. 21
// RefreshWhitelist updates the whitelist, refetching the list of committers.
func (sq *SubmitQueue) RefreshWhitelist(config *github_util.Config) {
	info := map[string]userInfo{}
	if sq.additionalUserWhitelist == nil {
		users, err := loadWhitelist(sq.Whitelist)
		if err != nil {
			glog.Fatalf("error loading user whitelist: %v", err)
		}
		sq.additionalUserWhitelist = &users
	}

	if sq.committerList == nil {
		committerList, err := loadWhitelist(sq.Committers)
		if err != nil {
			glog.Fatalf("error loading committers whitelist: %v", err)
		}
		sq.committerList = &committerList
	}

	// We must use the values on disk in case the whitelist contains users
	// who don't have explicit "pull" permission in the API
	allUsers := sets.NewString(sq.additionalUserWhitelist.List()...)

	pushUsers, pullUsers, err := config.UsersWithAccess()
	if err != nil {
		glog.Info("Falling back to static committers list.")
		allUsers = allUsers.Union(*sq.committerList)
		sq.userWhitelist = &allUsers
		return
	}
	for _, user := range pullUsers {
		allUsers.Insert(*user.Login)
		info[*user.Login] = userInfo{
			Access:    "pull access",
			AvatarURL: *user.AvatarURL,
		}
	}
	for _, user := range pushUsers {
		allUsers.Insert(*user.Login)
		info[*user.Login] = userInfo{
			Access:    "push access",
			AvatarURL: *user.AvatarURL,
		}
	}

	// Anyone we got from a config file and not from the API still needs their info looked up
	for _, login := range allUsers.List() {
		if _, ok := info[login]; ok {
			continue
		}
		user, err := config.GetUser(login)
		if err != nil {
			glog.Errorf("Unable to get user information about %q", login)
			continue
		}
		info[login] = userInfo{
			Access:    "explicitly whitelisted",
			AvatarURL: *user.AvatarURL,
		}
	}
	sq.userWhitelist = &allUsers
	sq.userInfo = info
	return
}