Example #1
// Update refreshes the history that GitInfo stores for the repo. If pull is
// true then git pull is performed before refreshing.
func (g *GitInfo) Update(pull, allBranches bool) error {
	g.mutex.Lock()
	defer g.mutex.Unlock()
	glog.Info("Beginning Update.")
	if pull {
		cmd := exec.Command("git", "pull")
		cmd.Dir = g.dir
		b, err := cmd.Output()
		if err != nil {
			return fmt.Errorf("Failed to sync to HEAD: %s - %s", err, string(b))
		}
	}
	glog.Info("Finished pull.")
	var hashes []string
	var timestamps map[string]time.Time
	var err error
	if allBranches {
		hashes, timestamps, err = readCommitsFromGitAllBranches(g.dir)
	} else {
		hashes, timestamps, err = readCommitsFromGit(g.dir, "HEAD")
	}
	glog.Infof("Finished reading commits: %s", g.dir)
	if err != nil {
		return fmt.Errorf("Failed to read commits from: %s : %s", g.dir, err)
	}
	g.hashes = hashes
	g.timestamps = timestamps
	return nil
}
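A minimal standard-library sketch of the shell-out pattern used in Update above: run a git command in a chosen working directory and include the command's output in the error when it fails. CombinedOutput is used here so stderr is captured as well, and the repository path in main is a placeholder.

package main

import (
	"fmt"
	"log"
	"os/exec"
)

// gitPull runs "git pull" in dir and returns the combined output on failure,
// mirroring the error handling in Update above.
func gitPull(dir string) error {
	cmd := exec.Command("git", "pull")
	cmd.Dir = dir
	b, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("git pull in %s failed: %s - %s", dir, err, string(b))
	}
	return nil
}

func main() {
	// "/tmp/myrepo" is a placeholder path.
	if err := gitPull("/tmp/myrepo"); err != nil {
		log.Fatal(err)
	}
}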
Example #2
// StartCleaner is a process that periodically checks the status of every issue
// that has been previewed and removes all the local files for closed issues.
func StartCleaner(workDir string) {
	glog.Info("Starting Cleaner")
	c := reitveld.NewClient()
	for range time.Tick(config.REFRESH) {
		matches, err := filepath.Glob(workDir + "/patches/*")
		glog.Infof("Matches: %v", matches)
		if err != nil {
			glog.Errorf("Failed to retrieve list of patched checkouts: %s", err)
			continue
		}
		for _, filename := range matches {
			_, file := filepath.Split(filename)
			glog.Info(file)
			m := issueAndPatch.FindStringSubmatch(file)
			if len(m) < 2 {
				continue
			}
			issue, err := strconv.ParseInt(m[1], 10, 64)
			if err != nil {
				glog.Errorf("Failed to parse %q as int: %s", m[1], err)
				continue
			}
			issueInfo, err := c.Issue(issue)
			if err != nil {
				glog.Errorf("Failed to retrieve issue status %d: %s", issue, err)
				continue
			}
			if issueInfo.Closed {
				if err := os.RemoveAll(filename); err != nil {
					glog.Errorf("Failed to remove %q: %s", filename, err)
				}
			}
		}
	}
}
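StartCleaner's core loop (glob a directory on every tick and delete entries that meet some condition) can be sketched with just the standard library. The pattern, interval, and shouldRemove predicate below are placeholders; the real code decides based on whether the corresponding issue is closed.

package main

import (
	"log"
	"os"
	"path/filepath"
	"time"
)

// cleanLoop periodically globs pattern and removes matches for which
// shouldRemove returns true. Pattern and interval are caller-supplied.
func cleanLoop(pattern string, interval time.Duration, shouldRemove func(string) bool) {
	for range time.Tick(interval) {
		matches, err := filepath.Glob(pattern)
		if err != nil {
			log.Printf("glob failed: %s", err)
			continue
		}
		for _, m := range matches {
			if shouldRemove(m) {
				if err := os.RemoveAll(m); err != nil {
					log.Printf("failed to remove %q: %s", m, err)
				}
			}
		}
	}
}

func main() {
	// Placeholder predicate: remove nothing. A real one would check issue status.
	cleanLoop("/tmp/patches/*", time.Minute, func(string) bool { return false })
}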
Example #3
// Running benchmarks in parallel leads to multiple Chrome instances coming up
// at the same time. When there are crashes, Chrome processes stick around,
// which can severely impact the machine's performance. To prevent this,
// zombie Chrome processes are periodically killed.
func ChromeProcessesCleaner(locker sync.Locker, chromeCleanerTimer time.Duration) {
	for range time.Tick(chromeCleanerTimer) {
		glog.Info("The chromeProcessesCleaner goroutine has started")
		glog.Info("Waiting for all existing tasks to complete before killing zombie chrome processes")
		locker.Lock()
		util.LogErr(ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, PKILL_TIMEOUT, nil, nil))
		locker.Unlock()
	}
}
Example #4
// SKPs are captured in parallel, leading to multiple Chrome instances coming up
// at the same time. When there are crashes, Chrome processes stick around,
// which can severely impact the machine's performance. To prevent this,
// zombie Chrome processes are periodically killed.
func chromeProcessesCleaner(mutex *sync.RWMutex) {
	for range time.Tick(*chromeCleanerTimer) {
		glog.Info("The chromeProcessesCleaner goroutine has started")
		glog.Info("Waiting for all existing tasks to complete before killing zombie chrome processes")
		mutex.Lock()
		skutil.LogErr(util.ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{},
			util.PKILL_TIMEOUT, nil, nil))
		mutex.Unlock()
	}
}
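Examples #3 and #4 are the same idea with different lock types. Below is a small stdlib-only sketch of how such a cleaner might be wired up next to tasks that share the lock; the cleanup function is a logging placeholder rather than a real pkill.

package main

import (
	"log"
	"sync"
	"time"
)

// cleaner periodically takes the lock so it never runs concurrently with a
// task, then performs its cleanup action (a placeholder here).
func cleaner(locker sync.Locker, interval time.Duration, cleanup func()) {
	for range time.Tick(interval) {
		locker.Lock()
		cleanup()
		locker.Unlock()
	}
}

func main() {
	var mu sync.Mutex
	go cleaner(&mu, 30*time.Second, func() { log.Println("cleanup placeholder") })

	// Tasks take the same lock while they run.
	mu.Lock()
	log.Println("running a task")
	mu.Unlock()

	time.Sleep(time.Minute)
}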
Example #5
func step(client *http.Client, store *storage.Service, hostname string) {
	glog.Info("About to read package list.")
	// Read the old and new packages from their respective storage locations.
	serverList, err := packages.InstalledForServer(client, store, hostname)
	if err != nil {
		glog.Errorf("Failed to retrieve remote package list: %s", err)
		return
	}
	localList, err := packages.FromLocalFile(*installedPackagesFile)
	if err != nil {
		glog.Errorf("Failed to retrieve local package list: %s", err)
		return
	}

	// Install any new or updated packages.
	newPackages, installed := differences(serverList.Names, localList)
	glog.Infof("New: %v, Installed: %v", newPackages, installed)

	for _, name := range newPackages {
		// If just an app name appears without a package name then that
		// package hasn't been selected yet, so just skip it for now.
		if len(strings.Split(name, "/")) == 1 {
			continue
		}
		installed = append(installed, name)
		if err := packages.ToLocalFile(installed, *installedPackagesFile); err != nil {
			glog.Errorf("Failed to write local package list: %s", err)
			continue
		}
		if err := packages.Install(client, store, name); err != nil {
			glog.Errorf("Failed to install package %s: %s", name, err)
			// Pop last name from 'installed' then rewrite the file since the
			// install failed.
			installed = installed[:len(installed)-1]
			if err := packages.ToLocalFile(installed, *installedPackagesFile); err != nil {
				glog.Errorf("Failed to rewrite local package list after install failure for %s: %s", name, err)
			}
			continue
		}

		// The pulld application is special in that it's not restarted by the
		// postinstall script of the Debian package, because that might kill
		// pulld while it is updating itself. Instead pulld will just exit when
		// it notices that it has been updated and count on systemd to restart it.
		if containsPulld(newPackages) {
			glog.Info("The pulld package has been updated, exiting to allow a restart.")
			glog.Flush()
			os.Exit(0)
		}
	}
}
Example #6
// New creates a new TileStats. The eventbus is monitored for new tiles
// and the stats are recalculated every time the tile is updated.
func New(evt *eventbus.EventBus) *TileStats {
	ret := &TileStats{
		evt:   evt,
		stats: map[string]*TraceStats{},
	}
	evt.SubscribeAsync(db.NEW_TILE_AVAILABLE_EVENT, func(it interface{}) {
		tile := it.(*tiling.Tile)
		glog.Info("TileStats: Beginning.")
		ret.calcStats(tile)
		glog.Info("TileStats: Finished.")
	})

	return ret
}
Example #7
// See the ingester.ResultIngester interface.
func (t *TrybotResultIngester) BatchFinished(_ metrics.Counter) error {
	// Reset this instance regardless of the outcome of this call.
	defer func() {
		t.resultsByIssue = map[string]types.TryBotResults{}
	}()

	for issue, tries := range t.resultsByIssue {
		// Get the current results.
		pastTries, err := t.tbrStorage.Get(issue)
		if err != nil {
			return err
		}

		// Update the results with the results of this batch.
		needsUpdating := false
		for key, newTry := range tries {
			if found, ok := pastTries[key]; !ok || (ok && (found.TS < newTry.TS)) {
				pastTries[key] = newTry
				needsUpdating = true
			}
		}

		if needsUpdating {
			if err := t.tbrStorage.Write(issue, pastTries); err != nil {
				return err
			}
		}
	}

	glog.Info("Finished processing ingestion batch.")
	return nil
}
Example #8
// LogFileInfo logs the FileInfoSlice in human-readable form, namely each file's
// name and whether it is a directory.
func (s FileInfoSlice) LogFileInfo() {
	glog.Infof("Slice contains %d file elements", len(s))
	for _, fi := range s {
		glog.Infof("Name %s, Is directory: %t", fi.Name(), fi.IsDir())
	}
	glog.Info("End File Infos")
}
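A self-contained variant of the same idea using only the standard library: a named slice type over os.FileInfo with a logging method. The fileInfoSlice type and the standard log package are stand-ins for the original FileInfoSlice and glog.

package main

import (
	"io/ioutil"
	"log"
	"os"
)

// fileInfoSlice mirrors the FileInfoSlice type above (local stand-in).
type fileInfoSlice []os.FileInfo

func (s fileInfoSlice) logFileInfo() {
	log.Printf("Slice contains %d file elements", len(s))
	for _, fi := range s {
		log.Printf("Name %s, Is directory: %t", fi.Name(), fi.IsDir())
	}
	log.Print("End File Infos")
}

func main() {
	fis, err := ioutil.ReadDir(".")
	if err != nil {
		log.Fatal(err)
	}
	fileInfoSlice(fis).logFileInfo()
}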
Example #9
func pullInit() {
	hostname, err := os.Hostname()
	if err != nil {
		// Never call glog before common.Init*.
		os.Exit(1)
	}
	common.InitWithMetrics("pulld."+hostname, graphiteServer)
	glog.Infof("Running with hostname: %s", hostname)

	client, err := auth.NewClient(*doOauth, *oauthCacheFile,
		storage.DevstorageFullControlScope,
		compute.ComputeReadonlyScope)
	if err != nil {
		glog.Fatalf("Failed to create authenticated HTTP client: %s", err)
	}
	glog.Info("Got authenticated client.")

	store, err = storage.New(client)
	if err != nil {
		glog.Fatalf("Failed to create storage service client: %s", err)
	}

	step(client, store, hostname)
	timeCh := time.Tick(time.Second * 60)
	go func() {
		for {
			select {
			case <-timeCh:
			case <-httpTriggerCh:
			}
			step(client, store, hostname)
		}
	}()
}
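pullInit ends with a common pattern: run step once up front (time.Tick does not fire immediately), then re-run it on either a timer tick or an explicit trigger. Below is a stdlib-only sketch of that select loop; step and the trigger channel are placeholders for the real step function and httpTriggerCh.

package main

import (
	"log"
	"time"
)

func main() {
	step := func() { log.Print("step placeholder") }
	trigger := make(chan struct{}, 1)

	step() // Run once up front; the ticker won't fire for another full interval.
	ticker := time.Tick(60 * time.Second)
	go func() {
		for {
			select {
			case <-ticker:
			case <-trigger:
			}
			step()
		}
	}()

	trigger <- struct{}{} // e.g. sent from an HTTP handler in the real code
	time.Sleep(time.Second)
}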
Example #10
// makeBugChomperPage builds and serves the BugChomper page.
func makeBugChomperPage(w http.ResponseWriter, r *http.Request) {
	// Redirect for login if needed.
	user := login.LoggedInAs(r)
	if user == "" {
		http.Redirect(w, r, login.LoginURL(w, r), http.StatusFound)
		return
	}
	glog.Infof("Logged in as %s", user)

	issueTracker := issue_tracker.New(login.GetHttpClient(r))
	w.Header().Set("Content-Type", "text/html")
	glog.Info("Loading bugs for " + user)
	bugList, err := issueTracker.GetBugs(PROJECT_NAME, user)
	if err != nil {
		reportError(w, err.Error(), http.StatusInternalServerError)
		return
	}
	bugsById := make(map[string]*issue_tracker.Issue)
	bugsByPriority := make(map[string][]*issue_tracker.Issue)
	for _, bug := range bugList.Items {
		bugsById[strconv.Itoa(bug.Id)] = bug
		var bugPriority string
		for _, label := range bug.Labels {
			if strings.HasPrefix(label, PRIORITY_PREFIX) {
				bugPriority = label[len(PRIORITY_PREFIX):]
			}
		}
		if _, ok := bugsByPriority[bugPriority]; !ok {
			bugsByPriority[bugPriority] = make(
				[]*issue_tracker.Issue, 0)
		}
		bugsByPriority[bugPriority] = append(
			bugsByPriority[bugPriority], bug)
	}
	bugsJson, err := json.Marshal(bugsById)
	if err != nil {
		reportError(w, err.Error(), http.StatusInternalServerError)
		return
	}
	data := struct {
		Title          string
		User           string
		BugsJson       template.JS
		BugsByPriority *map[string][]*issue_tracker.Issue
		Priorities     []string
		PriorityPrefix string
	}{
		Title:          "BugChomper",
		User:           user,
		BugsJson:       template.JS(string(bugsJson)),
		BugsByPriority: &bugsByPriority,
		Priorities:     issue_tracker.BugPriorities,
		PriorityPrefix: PRIORITY_PREFIX,
	}

	if err := templates.ExecuteTemplate(w, "bug_chomper.html", data); err != nil {
		reportError(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
Example #11
// Update does a single full update, first updating the commits and creating
// new tiles if necessary, and then pulling in new data from Google Storage to
// populate the traces.
func (i *Ingester) Update() error {
	glog.Info("Beginning ingest.")
	begin := time.Now()
	if err := i.UpdateCommitInfo(true); err != nil {
		glog.Errorf("Update: Failed to update commit info: %s", err)
		return err
	}
	if err := i.UpdateTiles(); err != nil {
		glog.Errorf("Update: Failed to update tiles: %s", err)
		return err
	}
	i.lastSuccessfulUpdate = time.Now()
	i.elapsedTimePerUpdate.Update(int64(time.Since(begin).Seconds()))
	glog.Info("Finished ingest.")
	return nil
}
Example #12
// Start calculating and reporting statistics on the repo and tiles.
//
// We presume the git.Update(true) is called somewhere else, usually this is done
// in the trace/db.Builder, so the repo is always as good as the loaded tiles.
func Start(nanoTileStore *db.Builder, git *gitinfo.GitInfo) {
	coverage := metrics.NewRegisteredGaugeFloat64("stats.tests.bench_runs_per_changelist", metrics.DefaultRegistry)
	skpLatency := metrics.NewRegisteredTimer("stats.skp.update_latency", metrics.DefaultRegistry)
	commits := metrics.NewRegisteredGauge("stats.commits.total", metrics.DefaultRegistry)

	go func() {
		for range time.Tick(2 * time.Minute) {
			tile := nanoTileStore.GetTile()
			numCommits := tile.LastCommitIndex() + 1
			numTraces := len(tile.Traces)
			total := 0
			for _, tr := range tile.Traces {
				for i := 0; i < numCommits; i++ {
					if !tr.IsMissing(i) {
						total += 1
					}
				}
			}
			cov := float64(total) / float64(numCommits*numTraces)
			glog.Info("Coverage: ", cov)
			coverage.Update(cov)

			last, err := git.LastSkpCommit()
			if err != nil {
				glog.Warning("Failed to read last SKP commit: %s", err)
				continue
			}
			skpLatency.Update(time.Since(last))
			commits.Update(int64(git.NumCommits()))
		}
	}()
}
Example #13
// GetBranches returns the list of branch heads in a Git repository.
// In order to separate local working branches from published branches, only
// remote branches in 'origin' are returned.
func GetBranches(dir string) ([]*GitBranch, error) {
	cmd := exec.Command("git", "show-ref")
	cmd.Dir = dir
	b, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("Failed to get branch list: %v", err)
	}
	branchPrefix := "refs/remotes/origin/"
	branches := []*GitBranch{}
	lines := strings.Split(string(b), "\n")
	for _, line := range lines {
		if line == "" {
			continue
		}
		glog.Info(line)
		parts := strings.SplitN(line, " ", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("Could not parse output of 'git show-ref'.")
		}
		if strings.HasPrefix(parts[1], branchPrefix) {
			branches = append(branches, &GitBranch{
				Name: parts[1][len(branchPrefix):],
				Head: parts[0],
			})
		}
	}
	return branches, nil
}
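The parsing loop in GetBranches is easy to exercise on its own. The sketch below runs the same prefix and split logic over a hard-coded sample of `git show-ref` output, so it can be executed without a repository.

package main

import (
	"fmt"
	"strings"
)

type branch struct{ Name, Head string }

func main() {
	// Sample lines in the "<hash> <ref>" format produced by `git show-ref`.
	sample := "1111111111111111111111111111111111111111 refs/heads/master\n" +
		"2222222222222222222222222222222222222222 refs/remotes/origin/master\n" +
		"3333333333333333333333333333333333333333 refs/remotes/origin/feature-x\n"

	const branchPrefix = "refs/remotes/origin/"
	branches := []branch{}
	for _, line := range strings.Split(sample, "\n") {
		if line == "" {
			continue
		}
		parts := strings.SplitN(line, " ", 2)
		if len(parts) != 2 {
			continue
		}
		// Keep only remote branches in 'origin', as GetBranches does.
		if strings.HasPrefix(parts[1], branchPrefix) {
			branches = append(branches, branch{Name: strings.TrimPrefix(parts[1], branchPrefix), Head: parts[0]})
		}
	}
	fmt.Printf("%+v\n", branches)
}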
Example #14
// scanForNewCandidates runs scanHelper once every config.Aggregator.RescanPeriod, which scans the
// config.Generator.AflOutputPath for new fuzzes.
// If scanHelper returns an error, this method will terminate.
func (agg *BinaryAggregator) scanForNewCandidates() {
	defer agg.monitoringWaitGroup.Done()

	alreadyFoundBinaries := &SortedStringSlice{}
	// time.Tick does not fire immediately, so we fire it manually once.
	if err := agg.scanHelper(alreadyFoundBinaries); err != nil {
		glog.Errorf("Scanner terminated due to error: %v", err)
		return
	}
	glog.Infof("Sleeping for %s, then waking up to find new crashes again", config.Aggregator.RescanPeriod)

	t := time.Tick(config.Aggregator.RescanPeriod)
	for {
		select {
		case <-t:
			if err := agg.scanHelper(alreadyFoundBinaries); err != nil {
				glog.Errorf("Aggregator scanner terminated due to error: %v", err)
				return
			}
			glog.Infof("Sleeping for %s, then waking up to find new crashes again", config.Aggregator.RescanPeriod)
		case <-agg.monitoringShutdown:
			glog.Info("Aggregator scanner got signal to shut down")
			return
		}
	}
}
Example #15
// waitForBugReporting waits for packages to arrive on the forBugReporting
// channel and files a bug report for each one. If any unrecoverable errors
// happen, this method terminates.
func (agg *BinaryAggregator) waitForBugReporting() {
	defer agg.pipelineWaitGroup.Done()
	glog.Info("Spawning bug reporting routine")
	for {
		select {
		case p := <-agg.forBugReporting:
			if err := agg.bugReportingHelper(p); err != nil {
				glog.Errorf("Bug reporting terminated due to error: %s", err)
				return
			}
		case <-agg.pipelineShutdown:
			glog.Info("Bug reporting routine recieved shutdown signal")
			return
		}
	}
}
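The two goroutines above share one shape: a for/select over a work or tick channel plus a shutdown channel. Below is a compact stdlib-only sketch of that shape with placeholder channels and placeholder work.

package main

import (
	"log"
	"sync"
	"time"
)

// worker processes items until the shutdown channel is closed.
func worker(work <-chan string, shutdown <-chan struct{}, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case item := <-work:
			log.Printf("processing %q", item)
		case <-shutdown:
			log.Print("worker got signal to shut down")
			return
		}
	}
}

func main() {
	work := make(chan string)
	shutdown := make(chan struct{})
	var wg sync.WaitGroup

	wg.Add(1)
	go worker(work, shutdown, &wg)

	work <- "example item"
	time.Sleep(100 * time.Millisecond)
	close(shutdown)
	wg.Wait()
}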
Example #16
// Flush writes the current Tile out, should be called once all updates are
// done. Note that Move() writes out the former Tile as it moves to a new Tile,
// so this only needs to be called at the end of looping over a set of work.
func (tt TileTracker) Flush() {
	glog.Info("Flushing Tile.")
	if tt.lastTileNum != -1 {
		if err := tt.tileStore.Put(0, tt.lastTileNum, tt.currentTile); err != nil {
			glog.Errorf("Failed to write Tile: %s", err)
		}
	}
}
Example #17
// update syncs the source code repository and loads any new commits.
func (c *CommitCache) update() (rv error) {
	defer timer.New("CommitCache.update()").Stop()
	glog.Info("Reloading commits.")
	if err := c.repo.Update(true, true); err != nil {
		return fmt.Errorf("Failed to update the repo: %v", err)
	}
	from := time.Time{}
	n := c.NumCommits()
	if n > 0 {
		last, err := c.Get(n - 1)
		if err != nil {
			return fmt.Errorf("Failed to get last commit: %v", err)
		}
		from = last.Timestamp
	}
	newCommitHashes := c.repo.From(from)
	glog.Infof("Processing %d new commits.", len(newCommitHashes))
	newCommits := make([]*gitinfo.LongCommit, len(newCommitHashes))
	if len(newCommitHashes) > 0 {
		for i, h := range newCommitHashes {
			d, err := c.repo.Details(h)
			if err != nil {
				return fmt.Errorf("Failed to obtain commit details for %s: %v", h, err)
			}
			newCommits[i] = d
		}
	}
	branchHeads, err := c.repo.GetBranches()
	if err != nil {
		return fmt.Errorf("Failed to read branch information from the repo: %v", err)
	}

	// Load new builds for the BuildCache.
	allCommits := append(c.Commits, newCommits...)
	buildCacheHashes := make([]string, 0, c.requestSize)
	for _, commit := range allCommits[len(allCommits)-c.requestSize:] {
		buildCacheHashes = append(buildCacheHashes, commit.Hash)
	}
	byId, byCommit, builderStatuses, err := build_cache.LoadData(buildCacheHashes)
	if err != nil {
		return fmt.Errorf("Failed to update BuildCache: %v", err)
	}

	// Update the cached values all at once at the end.
	glog.Infof("Updating the cache.")
	// Write the cache to disk *after* unlocking it.
	defer func() {
		rv = c.toFile()
	}()
	defer timer.New("  CommitCache locked").Stop()
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.BranchHeads = branchHeads
	c.Commits = allCommits
	c.buildCache.UpdateWithData(byId, byCommit, builderStatuses)
	glog.Infof("Finished updating the cache.")
	return nil
}
Example #18
// SSH connects to the specified workers and runs the specified command. If the
// command does not complete in the given duration then all remaining workers are
// considered timed out. SSH also automatically substitutes the sequential number
// of the worker for the WORKER_NUM_KEYWORD since it is a common use case.
func SSH(cmd string, workers []string, timeout time.Duration) (map[string]string, error) {
	glog.Infof("Running \"%s\" on %s with timeout of %s", cmd, workers, timeout)
	numWorkers := len(workers)

	// Ensure that the key file exists.
	key, err := getKeyFile()
	if err != nil {
		return nil, fmt.Errorf("Failed to get key file: %s", err)
	}

	// Initialize the structure with the configuration for ssh.
	config := &ssh.ClientConfig{
		User: CtUser,
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(key),
		},
	}

	var wg sync.WaitGroup
	// m protects workersWithOutputs and remainingWorkers
	var m sync.Mutex
	// Will be populated and returned by this function.
	workersWithOutputs := map[string]string{}
	// Keeps track of which workers are still pending.
	remainingWorkers := map[string]int{}

	// Kick off a goroutine on all workers.
	for i, hostname := range workers {
		wg.Add(1)
		m.Lock()
		remainingWorkers[hostname] = 1
		m.Unlock()
		go func(index int, hostname string) {
			defer wg.Done()
			updatedCmd := strings.Replace(cmd, WORKER_NUM_KEYWORD, strconv.Itoa(index+1), -1)
			output, err := executeCmd(updatedCmd, hostname, config, timeout)
			if err != nil {
				glog.Errorf("Could not execute ssh cmd: %s", err)
			}
			m.Lock()
			defer m.Unlock()
			workersWithOutputs[hostname] = output
			delete(remainingWorkers, hostname)
			glog.Infoln()
			glog.Infof("[%d/%d] Worker %s has completed execution", numWorkers-len(remainingWorkers), numWorkers, hostname)
			glog.Infof("Remaining workers: %v", remainingWorkers)
		}(i, hostname)
	}

	wg.Wait()
	glog.Infoln()
	glog.Infof("Finished running \"%s\" on all %d workers", cmd, numWorkers)
	glog.Info("========================================")

	m.Lock()
	defer m.Unlock()
	return workersWithOutputs, nil
}
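The concurrency skeleton inside SSH (one goroutine per host, a WaitGroup to join them, and a mutex around the shared results map) can be shown without any ssh at all. In the stdlib-only sketch below, run stands in for executeCmd.

package main

import (
	"fmt"
	"strings"
	"sync"
)

// fanOut runs run() once per host concurrently and collects outputs keyed by
// host, protecting the shared map with a mutex.
func fanOut(hosts []string, run func(index int, host string) string) map[string]string {
	var wg sync.WaitGroup
	var mu sync.Mutex
	out := map[string]string{}
	for i, host := range hosts {
		wg.Add(1)
		go func(i int, host string) {
			defer wg.Done()
			res := run(i, host)
			mu.Lock()
			defer mu.Unlock()
			out[host] = res
		}(i, host)
	}
	wg.Wait()
	return out
}

func main() {
	// Placeholder work; the real code substitutes the worker number into an
	// ssh command instead.
	results := fanOut([]string{"host-a", "host-b"}, func(i int, host string) string {
		return strings.ToUpper(fmt.Sprintf("%s ran as worker %d", host, i+1))
	})
	fmt.Println(results)
}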
Example #19
// Validate that the given chromiumBuild exists in the DB.
func Validate(chromiumBuild DBTask) error {
	buildCount := []int{}
	query := fmt.Sprintf("SELECT COUNT(*) FROM %s WHERE chromium_rev = ? AND skia_rev = ? AND ts_completed IS NOT NULL AND failure = 0", db.TABLE_CHROMIUM_BUILD_TASKS)
	if err := db.DB.Select(&buildCount, query, chromiumBuild.ChromiumRev, chromiumBuild.SkiaRev); err != nil || len(buildCount) < 1 || buildCount[0] == 0 {
		glog.Info(err)
		return fmt.Errorf("Unable to validate chromium_build parameter %v", chromiumBuild)
	}
	return nil
}
Example #20
// Put writes a tile to the drive, and also updates the cache entry for it
// if one exists. It uses the mutex to ensure thread safety.
func (store *FileTileStore) Put(scale, index int, tile *tiling.Tile) error {
	glog.Info("Put()")
	// Make sure the scale and tile index are correct.
	if tile.Scale != scale || tile.TileIndex != index {
		return fmt.Errorf("Tile scale %d and index %d do not match real tile scale %d and index %d", scale, index, tile.Scale, tile.TileIndex)
	}

	if index < 0 {
		return fmt.Errorf("Can't write Tiles with an index < 0: %d", index)
	}

	// Begin by writing the Tile out into a temporary location.
	f, err := store.fileTileTemp(scale, index)
	if err != nil {
		return err
	}
	enc := gob.NewEncoder(f)
	if err := enc.Encode(tile); err != nil {
		return fmt.Errorf("Failed to encode tile %s: %s", f.Name(), err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("Failed to close temporary file: %v", err)
	}

	// Now rename the completed file to the real tile name. This is atomic and
	// doesn't affect current readers of the old tile contents.
	targetName, err := store.tileFilename(scale, index)
	if err != nil {
		return err
	}
	if err := os.MkdirAll(filepath.Dir(targetName), 0755); err != nil {
		return fmt.Errorf("Error creating directory for tile %s: %s", targetName, err)
	}
	glog.Infof("Renaming: %q %q", f.Name(), targetName)
	if err := os.Rename(f.Name(), targetName); err != nil {
		return fmt.Errorf("Failed to rename tile: %s", err)
	}
	filedata, err := os.Stat(targetName)
	if err != nil {
		return fmt.Errorf("Failed to stat new tile: %s", err)

	}
	store.lock.Lock()
	defer store.lock.Unlock()

	entry := &CacheEntry{
		tile:         tile,
		lastModified: filedata.ModTime(),
	}
	key := CacheKey{
		startIndex: index,
		scale:      scale,
	}
	store.cache.Add(key, entry)

	return nil
}
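Put relies on the write-to-a-temp-file-then-rename trick so readers never observe a partially written tile. Below is a stdlib-only sketch of that trick with gob encoding; the payload type and target path are placeholders for the real Tile and tile filename.

package main

import (
	"encoding/gob"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

type payload struct{ Values []int } // placeholder for a Tile

// writeAtomically gob-encodes v into a temp file and then renames it over
// target, so concurrent readers only ever see a complete file.
func writeAtomically(target string, v interface{}) error {
	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
		return err
	}
	f, err := ioutil.TempFile(filepath.Dir(target), "tmp-")
	if err != nil {
		return err
	}
	if err := gob.NewEncoder(f).Encode(v); err != nil {
		f.Close()
		return fmt.Errorf("failed to encode: %s", err)
	}
	if err := f.Close(); err != nil {
		return err
	}
	return os.Rename(f.Name(), target)
}

func main() {
	// "/tmp/tiles/0/demo.gob" is a placeholder target path.
	if err := writeAtomically("/tmp/tiles/0/demo.gob", payload{Values: []int{1, 2, 3}}); err != nil {
		log.Fatal(err)
	}
	log.Print("wrote tile atomically")
}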
Example #21
// Validate that the given skpRepository exists in the DB.
func Validate(skpRepository DBTask) error {
	rowCount := []int{}
	query := fmt.Sprintf("SELECT COUNT(*) FROM %s WHERE page_sets = ? AND chromium_rev = ? AND skia_rev = ? AND ts_completed IS NOT NULL AND failure = 0", db.TABLE_CAPTURE_SKPS_TASKS)
	if err := db.DB.Select(&rowCount, query, skpRepository.PageSets, skpRepository.ChromiumRev, skpRepository.SkiaRev); err != nil || len(rowCount) < 1 || rowCount[0] == 0 {
		glog.Info(err)
		return fmt.Errorf("Unable to validate skp_repository parameter %v", skpRepository)
	}
	return nil
}
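Both Validate functions boil down to a COUNT(*) query where zero rows or an error means invalid. Below is a hedged sketch of the same check using the standard database/sql package; the driver name, DSN, and table and column names are placeholders, and a real MySQL driver would need to be imported for it to actually connect.

package main

import (
	"database/sql"
	"fmt"
	"log"
	// A real MySQL driver would be blank-imported here (placeholder).
)

// validateBuild returns an error unless a completed, non-failed build row
// exists for the given revisions. Table and column names are placeholders.
func validateBuild(db *sql.DB, chromiumRev, skiaRev string) error {
	var count int
	query := "SELECT COUNT(*) FROM chromium_build_tasks WHERE chromium_rev = ? AND skia_rev = ? AND ts_completed IS NOT NULL AND failure = 0"
	if err := db.QueryRow(query, chromiumRev, skiaRev).Scan(&count); err != nil || count == 0 {
		return fmt.Errorf("unable to validate build %s/%s: %v", chromiumRev, skiaRev, err)
	}
	return nil
}

func main() {
	// sql.Open needs a registered driver; "mysql" and the DSN are placeholders.
	db, err := sql.Open("mysql", "user:password@/ctdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := validateBuild(db, "deadbeef", "cafef00d"); err != nil {
		log.Fatal(err)
	}
}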
Example #22
// getBinaryReportsFromGS pulls all files in baseFolder from the skia-fuzzer bucket and
// groups them by fuzz.  It parses these groups of files into a FuzzReportBinary and returns
// the slice of all reports generated in this way.
func getBinaryReportsFromGS(storageService *storage.Service, baseFolder string) ([]fuzz.FuzzReportBinary, error) {
	contents, err := storageService.Objects.List(config.Aggregator.Bucket).Prefix(baseFolder).Fields("nextPageToken", "items(name,size,timeCreated)").MaxResults(100000).Do()
	// Assumption: files are sorted alphabetically and have the structure
	// [baseFolder]/[filetype]/[fuzzname]/[fuzzname][suffix]
	// where suffix is one of _debug.dump, _debug.err, _release.dump or _release.err
	if err != nil {
		return nil, fmt.Errorf("Problem reading from Google Storage: %v", err)
	}

	glog.Infof("Loading %d files from gs://%s/%s", len(contents.Items), config.Aggregator.Bucket, baseFolder)

	reports := make([]fuzz.FuzzReportBinary, 0)

	var debugDump, debugErr, releaseDump, releaseErr string
	isInitialized := false
	currFuzzFolder := "" // will be something like binary_fuzzes/bad/skp/badbeef
	currFuzzName := ""
	currFuzzType := ""
	for _, item := range contents.Items {
		name := item.Name
		if strings.Count(name, "/") <= 3 {
			continue
		}

		if !isInitialized || !strings.HasPrefix(name, currFuzzFolder) {
			if isInitialized {
				reports = append(reports, fuzz.ParseBinaryReport(currFuzzType, currFuzzName, debugDump, debugErr, releaseDump, releaseErr))
			} else {
				isInitialized = true
			}

			parts := strings.Split(name, "/")
			currFuzzFolder = strings.Join(parts[0:4], "/")
			currFuzzType = parts[2]
			currFuzzName = parts[3]
			// reset for next one
			debugDump, debugErr, releaseDump, releaseErr = "", "", "", ""

		}
		if strings.HasSuffix(name, "_debug.dump") {
			debugDump = emptyStringOnError(gs.FileContentsFromGS(storageService, config.Aggregator.Bucket, name))
		} else if strings.HasSuffix(name, "_debug.err") {
			debugErr = emptyStringOnError(gs.FileContentsFromGS(storageService, config.Aggregator.Bucket, name))
		} else if strings.HasSuffix(name, "_release.dump") {
			releaseDump = emptyStringOnError(gs.FileContentsFromGS(storageService, config.Aggregator.Bucket, name))
		} else if strings.HasSuffix(name, "_release.err") {
			releaseErr = emptyStringOnError(gs.FileContentsFromGS(storageService, config.Aggregator.Bucket, name))
		}
	}

	if currFuzzName != "" {
		reports = append(reports, fuzz.ParseBinaryReport(currFuzzType, currFuzzName, debugDump, debugErr, releaseDump, releaseErr))
	}
	glog.Info("Done loading")
	return reports, nil
}
Example #23
// IngestNewBuildsLoop continually ingests new builds.
func IngestNewBuildsLoop(workdir string) {
	lv := metrics.NewLiveness("buildbot-ingest")
	repos := gitinfo.NewRepoMap(workdir)
	for range time.Tick(30 * time.Second) {
		glog.Info("Ingesting builds.")
		if err := ingestNewBuilds(repos); err != nil {
			glog.Errorf("Failed to ingest new builds: %v", err)
		} else {
			lv.Update()
		}
	}
}
Example #24
func runServer(serverURL string) {
	r := mux.NewRouter()
	r.PathPrefix("/res/").HandlerFunc(util.MakeResourceHandler(*resourcesDir))
	r.HandleFunc("/", makeBugChomperPage).Methods("GET")
	r.HandleFunc("/", submitData).Methods("POST")
	r.HandleFunc("/json/version", skiaversion.JsonHandler)
	r.HandleFunc(OAUTH_CALLBACK_PATH, login.OAuth2CallbackHandler)
	r.HandleFunc("/logout/", login.LogoutHandler)
	r.HandleFunc("/loginstatus/", login.StatusHandler)
	http.Handle("/", util.LoggingGzipRequestResponse(r))
	glog.Info("Server is running at " + serverURL)
	glog.Fatal(http.ListenAndServe(*port, nil))
}
Example #25
// StopBinaryGenerator terminates all afl-fuzz processes that were spawned,
// logging any errors.
func StopBinaryGenerator() {
	glog.Infof("Trying to stop %d fuzz processes", len(fuzzProcesses))
	for _, p := range fuzzProcesses {
		if p != nil {
			if err := p.Kill(); err != nil {
				glog.Warningf("Error while trying to kill afl process: %s", err)
			} else {
				glog.Info("Quietly shutdown fuzz process.")
			}
		}
	}
	fuzzProcesses = nil
}
Example #26
// CachedTileFromCommits returns a tile built from the given commits. The tiles are
// cached to speed up subsequent requests.
func (b *tileBuilder) CachedTileFromCommits(commits []*CommitID) (*tiling.Tile, error) {
	key := ""
	for _, cid := range commits {
		key += cid.String()
	}
	md5 := ""
	if hashes, err := b.db.ListMD5(commits); err == nil {
		md5 = strings.Join(hashes, "")
		glog.Infof("Got md5: %s", md5)
	} else {
		glog.Errorf("Failed to load the md5 hashes for a slice of commits: %s", err)
	}

	// Determine if we need to fetch a fresh tile from tracedb.
	getFreshTile := false
	b.mutex.Lock()
	interfaceCacheEntry, ok := b.tcache.Get(key)
	b.mutex.Unlock()
	var tileCacheEntry *cachedTile
	if !ok {
		getFreshTile = true
	} else {
		tileCacheEntry, ok = interfaceCacheEntry.(*cachedTile)
		if !ok {
			getFreshTile = true
		} else if md5 != tileCacheEntry.md5 {
			getFreshTile = true
		}
	}
	if getFreshTile {
		glog.Info("Tile is missing or expired.")
		tile, hashes, err := b.db.TileFromCommits(commits)
		if err != nil {
			return nil, fmt.Errorf("Unable to create fresh tile: %s", err)
		}
		md5 := strings.Join(hashes, "")
		if md5 == "" {
			glog.Errorf("Not caching, didn't get a valid set of hashes, is traceserverd out of date? : %s", key)
		} else {
			b.mutex.Lock()
			b.tcache.Add(key, &cachedTile{
				tile: tile,
				md5:  strings.Join(hashes, ""),
			})
			b.mutex.Unlock()
		}
		return tile, nil
	} else {
		return tileCacheEntry.tile, nil
	}
}
Example #27
// submitData attempts to submit data from a POST request to the IssueTracker.
func submitData(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	issueTracker := issue_tracker.New(login.GetHttpClient(r))
	edits := r.FormValue("all_edits")
	var editsMap map[string]*issue_tracker.Issue
	if err := json.Unmarshal([]byte(edits), &editsMap); err != nil {
		errMsg := "Could not parse edits from form response: " + err.Error()
		reportError(w, errMsg, http.StatusInternalServerError)
		return
	}
	data := struct {
		Title    string
		Message  string
		BackLink string
	}{}
	if len(editsMap) == 0 {
		data.Title = "No Changes Submitted"
		data.Message = "You didn't change anything!"
		data.BackLink = ""
		if err := templates.ExecuteTemplate(w, "submitted.html", data); err != nil {
			reportError(w, err.Error(), http.StatusInternalServerError)
			return
		}
		return
	}
	errorList := make([]error, 0)
	for issueId, newIssue := range editsMap {
		glog.Info("Editing issue " + issueId)
		if err := issueTracker.SubmitIssueChanges(newIssue, ISSUE_COMMENT); err != nil {
			errorList = append(errorList, err)
		}
	}
	if len(errorList) > 0 {
		errorStrings := ""
		for _, err := range errorList {
			errorStrings += err.Error() + "\n"
		}
		errMsg := "Not all changes could be submitted: \n" + errorStrings
		reportError(w, errMsg, http.StatusInternalServerError)
		return
	}
	data.Title = "Submitted Changes"
	data.Message = "Your changes were submitted to the issue tracker."
	data.BackLink = ""
	if err := templates.ExecuteTemplate(w, "submitted.html", data); err != nil {
		reportError(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
Example #28
// Init runs commonly-used initialization steps.
func Init() {
	flag.Parse()
	defer glog.Flush()
	flag.VisitAll(func(f *flag.Flag) {
		glog.Infof("Flags: --%s=%v", f.Name, f.Value)
	})

	// See skbug.com/4386 for details on why the below section exists.
	glog.Info("Initializing logserver for log level INFO.")
	glog.Warning("Initializing logserver for log level WARNING.")
	glog.Error("Initializing logserver for log level ERROR.")

	// Use all cores.
	runtime.GOMAXPROCS(runtime.NumCPU())
}
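A tiny runnable version of the flag-logging idiom in Init, with the standard log package standing in for glog and a throwaway name flag added purely for demonstration.

package main

import (
	"flag"
	"log"
	"runtime"
)

var name = flag.String("name", "world", "who to greet")

func main() {
	flag.Parse()
	// Log every flag and its effective value, as Init does above.
	flag.VisitAll(func(f *flag.Flag) {
		log.Printf("Flags: --%s=%v", f.Name, f.Value)
	})
	runtime.GOMAXPROCS(runtime.NumCPU())
	log.Printf("hello, %s", *name)
}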
Example #29
// New creates a new instance of Summaries.
func New(storages *storage.Storage, tallies *tally.Tallies, blamer *blame.Blamer) (*Summaries, error) {
	s := &Summaries{
		storages: storages,
		tallies:  tallies,
		blamer:   blamer,
	}

	var err error
	s.summaries, err = s.CalcSummaries(nil, "", false, true)
	if err != nil {
		return nil, fmt.Errorf("Failed to calculate summaries in New: %s", err)
	}

	// TODO(jcgregorio) Move to a channel for tallies and then combine
	// this and the expStore handling into a single switch statement.
	tallies.OnChange(func() {
		summaries, err := s.CalcSummaries(nil, "", false, true)
		if err != nil {
			glog.Errorf("Failed to refresh summaries: %s", err)
			return
		}
		s.mutex.Lock()
		s.summaries = summaries
		s.mutex.Unlock()
	})

	storages.EventBus.SubscribeAsync(expstorage.EV_EXPSTORAGE_CHANGED, func(e interface{}) {
		testNames := e.([]string)
		glog.Info("Updating summaries after expectations change.")
		partialSummaries, err := s.CalcSummaries(testNames, "", false, true)
		if err != nil {
			glog.Errorf("Failed to refresh summaries: %s", err)
			return
		}
		s.mutex.Lock()
		for k, v := range partialSummaries {
			s.summaries[k] = v
		}
		s.mutex.Unlock()
	})
	return s, nil
}
Example #30
func step(client *http.Client, store *storage.Service, hostname string) {
	glog.Info("About to read package list.")
	// Read the old and new packages from their respective storage locations.
	serverList, err := packages.InstalledForServer(client, store, hostname)
	if err != nil {
		glog.Errorf("Failed to retrieve remote package list: %s", err)
		return
	}
	localList, err := packages.FromLocalFile(*installedPackagesFile)
	if err != nil {
		glog.Errorf("Failed to retrieve local package list: %s", err)
		return
	}

	// Install any new or updated packages.
	newPackages, installed := differences(serverList.Names, localList)
	glog.Infof("New: %v, Installed: %v", newPackages, installed)

	for _, name := range newPackages {
		// If just an app name appears without a package name then that
		// package hasn't been selected yet, so just skip it for now.
		if len(strings.Split(name, "/")) == 1 {
			continue
		}
		installed = append(installed, name)
		if err := packages.ToLocalFile(installed, *installedPackagesFile); err != nil {
			glog.Errorf("Failed to write local package list: %s", err)
			continue
		}
		if err := packages.Install(client, store, name); err != nil {
			glog.Errorf("Failed to install package %s: %s", name, err)
			// Pop last name from 'installed' then rewrite the file since the
			// install failed.
			installed = installed[:len(installed)-1]
			if err := packages.ToLocalFile(installed, *installedPackagesFile); err != nil {
				glog.Errorf("Failed to rewrite local package list after install failure for %s: %s", name, err)
			}
			continue
		}
	}
}