// Fetch retrieves the file contents.
//
// Callers must call Close() on the returned io.ReadCloser.
func (b ResultsFileLocation) Fetch() (io.ReadCloser, error) {
	if strings.HasPrefix(b.URI, "file://") {
		return os.Open(b.URI[6:])
	} else {
		for i := 0; i < MAX_URI_GET_TRIES; i++ {
			glog.Infof("Fetching: %s", b.Name)
			request, err := gs.RequestForStorageURL(b.URI)
			if err != nil {
				glog.Warningf("Unable to create Storage MediaURI request: %s\n", err)
				continue
			}
			resp, err := client.Do(request)
			if err != nil {
				glog.Warningf("Unable to retrieve URI while creating file iterator: %s", err)
				continue
			}
			if resp.StatusCode != 200 {
				glog.Errorf("Failed to retrieve: %d  %s", resp.StatusCode, resp.Status)
			}
			glog.Infof("GS FETCH %s", b.URI)
			return resp.Body, nil
		}
		return nil, fmt.Errorf("Failed fetching JSON after %d attempts", MAX_URI_GET_TRIES)
	}
}
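For reference, a caller might consume the reader like this (a sketch: readResults and its use of ioutil.ReadAll are illustrative, not part of the original API):

func readResults(loc ResultsFileLocation) ([]byte, error) {
	// Fetch opens either the local file or the GS object.
	r, err := loc.Fetch()
	if err != nil {
		return nil, err
	}
	// The doc comment requires the caller to close the reader.
	defer util.Close(r)
	return ioutil.ReadAll(r)
}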
Example #2
// readTitle reads the first line from a Markdown file, returning def if the
// file cannot be opened.
func readTitle(filename, def string) string {
	f, err := os.Open(filename)
	if err != nil {
		glog.Warningf("Failed to open file %s: %s", filename, err)
		return def
	}
	defer util.Close(f)
	reader := bufio.NewReader(f)
	title, err := reader.ReadString('\n')
	if err != nil {
		glog.Warningf("Failed to read title %s: %s", filename, err)
	}
	return strings.TrimSpace(title)
}
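A call site can then fall back to a default when the file is missing or unreadable (the file name and default here are illustrative):

	title := readTitle("content/index.md", "(untitled)")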
Example #3
func startMetrics(appName, graphiteServer string) {
	if graphiteServer == "" {
		glog.Warningf("No metrics server specified.")
		return
	}

	addr, err := net.ResolveTCPAddr("tcp", graphiteServer)
	if err != nil {
		glog.Fatalf("Unable to resolve metrics server address: %s", err)
	}

	// Get the hostname and create the app-prefix.
	hostName, err := os.Hostname()
	if err != nil {
		glog.Fatalf("Unable to retrieve hostname: %s", err)
	}
	appPrefix := fmt.Sprintf("%s.%s", appName, strings.Replace(hostName, ".", "-", -1))

	// Runtime metrics.
	metrics.RegisterRuntimeMemStats(metrics.DefaultRegistry)
	go metrics.CaptureRuntimeMemStats(metrics.DefaultRegistry, SAMPLE_PERIOD)
	go graphite.Graphite(metrics.DefaultRegistry, SAMPLE_PERIOD, appPrefix, addr)

	// Uptime.
	uptimeGauge := metrics.GetOrRegisterGaugeFloat64("uptime", metrics.DefaultRegistry)
	go func() {
		startTime := time.Now()
		uptimeGauge.Update(0)
		for range time.Tick(SAMPLE_PERIOD) {
			uptimeGauge.Update(time.Since(startTime).Seconds())
		}
	}()
}
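A typical call, assuming a Graphite server listening on its default plaintext port 2003 (the app name and host are illustrative):

	startMetrics("skiaperf", "graphite.example.com:2003")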
// emptyStringOnError returns the passed-in bytes as a string, or the empty
// string if err is non-nil.
func emptyStringOnError(b []byte, err error) string {
	if err != nil {
		glog.Warningf("Ignoring error when fetching file contents: %v", err)
		return ""
	}
	return string(b)
}
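Because the signature mirrors a (bytes, error) return pair, a call can be wrapped inline (the path is illustrative):

	contents := emptyStringOnError(ioutil.ReadFile("/etc/hostname"))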
// LoadBinaryFuzzesFromGoogleStorage pulls all fuzzes out of GCS that are on the given whitelist
// and loads them into memory (as staging).  After loading them, it updates the cache
// and moves them from staging to the current copy.
func (g *GSLoader) LoadBinaryFuzzesFromGoogleStorage(whitelist []string) error {
	revision := config.FrontEnd.SkiaVersion.Hash
	sort.Strings(whitelist)
	reports, err := g.getBinaryReportsFromGS(fmt.Sprintf("binary_fuzzes/%s/bad/", revision), whitelist)
	if err != nil {
		return err
	}
	fuzz.StagingFromCurrent()
	n := 0
	for report := range reports {
		if g.finder != nil {
			report.DebugStackTrace.LookUpFunctions(g.finder)
			report.ReleaseStackTrace.LookUpFunctions(g.finder)
		}
		fuzz.NewBinaryFuzzFound(report)
		n++
	}
	glog.Infof("%d new fuzzes loaded from Google Storage", n)
	fuzz.StagingToCurrent()

	_, oldBinaryFuzzNames, err := g.Cache.Load(revision)
	if err != nil {
		glog.Warningf("Could not read old binary fuzz names from cache.  Continuing...", err)
		oldBinaryFuzzNames = []string{}
	}

	return g.Cache.Store(fuzz.StagingCopy(), append(oldBinaryFuzzNames, whitelist...), revision)
}
// bugReportingHelper is a helper function to report bugs if the aggregator is configured to.
func (agg *BinaryAggregator) bugReportingHelper(p bugReportingPackage) error {
	defer atomic.AddInt64(&agg.bugReportCount, int64(1))
	if agg.MakeBugOnBadFuzz && p.IsBadFuzz {
		glog.Warningf("Should create bug for %s", p.FuzzName)
	}
	return nil
}
// New creates and returns a new CommitCache which watches the given repo.
// The initial update will load ALL commits from the repository, so expect
// this to be slow.
func New(repo *gitinfo.GitInfo, cacheFile string, requestSize int) (*CommitCache, error) {
	defer timer.New("commit_cache.New()").Stop()
	c, err := fromFile(cacheFile)
	if err != nil {
		glog.Warningf("Failed to read commit cache from file; starting from scratch. Error: %v", err)
		c = &CommitCache{}
	}
	c.buildCache = &build_cache.BuildCache{}
	c.cacheFile = cacheFile
	c.repo = repo
	c.requestSize = requestSize

	// Update the cache.
	if err := c.update(); err != nil {
		return nil, err
	}

	// Update in a loop.
	go func() {
		for range time.Tick(time.Minute) {
			if err := c.update(); err != nil {
				glog.Errorf("Failed to update commit cache: %v", err)
			}
		}
	}()
	return c, nil
}
Example #8
// monitorIssueTracker reads the counts for all the types of issues in the skia
// issue tracker (code.google.com/p/skia) and stuffs the counts into Graphite.
func monitorIssueTracker() {
	c := &http.Client{
		Transport: &http.Transport{
			Dial: dialTimeout,
		},
	}

	if *useMetadata {
		*apikey = metadata.Must(metadata.ProjectGet(metadata.APIKEY))
	}

	// Create a new metrics registry for the issue tracker metrics.
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	issueRegistry := metrics.NewRegistry()
	go graphite.Graphite(issueRegistry, common.SAMPLE_PERIOD, "issues", addr)

	// IssueStatus has all the info we need to capture and record a single issue status. I.e. capture
	// the count of all issues with a status of "New".
	type IssueStatus struct {
		Name   string
		Metric metrics.Gauge
		URL    string
	}

	allIssueStatusLabels := []string{
		"New", "Accepted", "Unconfirmed", "Started", "Fixed", "Verified", "Invalid", "WontFix", "Done", "Available", "Assigned",
	}

	issueStatus := []*IssueStatus{}
	for _, issueName := range allIssueStatusLabels {
		issueStatus = append(issueStatus, &IssueStatus{
			Name:   issueName,
			Metric: metrics.NewRegisteredGauge(strings.ToLower(issueName), issueRegistry),
			URL:    "https://www.googleapis.com/projecthosting/v2/projects/skia/issues?fields=totalResults&key=" + *apikey + "&status=" + issueName,
		})
	}

	liveness := imetrics.NewLiveness("issue-tracker")
	for range time.Tick(ISSUE_TRACKER_PERIOD) {
		for _, issue := range issueStatus {
			resp, err := c.Get(issue.URL)
			if err != nil {
				glog.Errorf("Failed to retrieve response from %s: %s", issue.URL, err)
				continue
			}
			jsonResp := map[string]int64{}
			dec := json.NewDecoder(resp.Body)
			if err := dec.Decode(&jsonResp); err != nil {
				glog.Warningf("Failed to decode JSON response: %s", err)
				util.Close(resp.Body)
				continue
			}
			issue.Metric.Update(jsonResp["totalResults"])
			glog.Infof("Num Issues: %s - %d", issue.Name, jsonResp["totalResults"])
			util.Close(resp.Body)
		}
		liveness.Update()
	}
}
Example #9
// rasterizeOnce applies a single rasterizer to the given pdf file.
// If the rasterizer fails, use the errorImage.  If everything
// succeeds, upload the PNG.
func (xformer *pdfXformer) rasterizeOnce(pdfPath string, rasterizerIndex int) (string, error) {
	rasterizer := xformer.rasterizers[rasterizerIndex]
	tempdir := filepath.Dir(pdfPath)
	pngPath := filepath.Join(tempdir, fmt.Sprintf("%s.%s", rasterizer.String(), PNG_EXT))
	defer removeIfExists(pngPath)
	glog.Infof("> > > > rasterizing with %s", rasterizer)
	err := rasterizer.Rasterize(pdfPath, pngPath)
	if err != nil {
		glog.Warningf("rasterizing %s with %s failed: %s", filepath.Base(pdfPath), rasterizer.String(), err)
		return xformer.errorImageMd5, nil
	}
	md5, err := md5OfFile(pngPath)
	if err != nil {
		return "", err
	}
	f, err := os.Open(pngPath)
	if err != nil {
		return "", err
	}
	defer util.Close(f)
	pngUploadPath := fmt.Sprintf("%s/%s.%s", *storageImagesDirectory, md5, PNG_EXT)
	didUpload, err := uploadFile(xformer.client, f, *storageBucket, pngUploadPath, *accessControlEntity)
	if err != nil {
		return "", err
	}
	if didUpload {
		glog.Infof("> > > > uploaded %s", pngUploadPath)
	}
	return md5, nil
}
Example #10
// ingestNewBuilds finds the set of uningested builds and ingests them.
func ingestNewBuilds(m string, repos *gitinfo.RepoMap) error {
	defer metrics.NewTimer("buildbot.ingestNewBuilds").Stop()
	glog.Infof("Ingesting builds for %s", m)
	// TODO(borenet): Investigate the use of channels here. We should be
	// able to start ingesting builds as the data becomes available rather
	// than waiting until the end.
	buildsToProcess, err := getUningestedBuilds(m)
	if err != nil {
		return fmt.Errorf("Failed to obtain the set of uningested builds: %v", err)
	}
	unfinished, err := getUnfinishedBuilds(m)
	if err != nil {
		return fmt.Errorf("Failed to obtain the set of unfinished builds: %v", err)
	}
	for _, b := range unfinished {
		if _, ok := buildsToProcess[b.Builder]; !ok {
			buildsToProcess[b.Builder] = []int{}
		}
		buildsToProcess[b.Builder] = append(buildsToProcess[b.Builder], b.Number)
	}

	if err := repos.Update(); err != nil {
		return err
	}

	// TODO(borenet): Can we ingest builders in parallel?
	errs := map[string]error{}
	for b, w := range buildsToProcess {
		for _, n := range w {
			if BUILD_BLACKLIST[b][n] {
				glog.Warningf("Skipping blacklisted build: %s # %d", b, n)
				continue
			}
			if IsTrybot(b) {
				continue
			}
			glog.Infof("Ingesting build: %s, %s, %d", m, b, n)
			build, err := retryGetBuildFromMaster(m, b, n, repos)
			if err != nil {
				errs[b] = fmt.Errorf("Failed to ingest build: %v", err)
				break
			}
			if err := IngestBuild(build, repos); err != nil {
				errs[b] = fmt.Errorf("Failed to ingest build: %v", err)
				break
			}
		}
	}
	if len(errs) > 0 {
		msg := fmt.Sprintf("Encountered errors ingesting builds for %s:", m)
		for b, err := range errs {
			msg += fmt.Sprintf("\n%s: %v", b, err)
		}
		return fmt.Errorf("%s", msg)
	}
	glog.Infof("Done ingesting builds for %s", m)
	return nil
}
Example #11
// monitorIssueTracker reads the counts for all the types of issues in the Skia
// issue tracker (bugs.chromium.org/p/skia) and stuffs the counts into Graphite.
func monitorIssueTracker(c *http.Client) {
	// Create a new metrics registry for the issue tracker metrics.
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	issueRegistry := metrics.NewRegistry()
	go graphite.Graphite(issueRegistry, common.SAMPLE_PERIOD, "issues", addr)

	// IssueStatus has all the info we need to capture and record a single issue status. I.e. capture
	// the count of all issues with a status of "New".
	type IssueStatus struct {
		Name   string
		Metric metrics.Gauge
		URL    string
	}

	allIssueStatusLabels := []string{
		"New", "Accepted", "Unconfirmed", "Started", "Fixed", "Verified", "Invalid", "WontFix", "Done", "Available", "Assigned",
	}

	issueStatus := []*IssueStatus{}
	for _, issueName := range allIssueStatusLabels {
		q := url.Values{}
		q.Set("fields", "totalResults")
		q.Set("status", issueName)
		issueStatus = append(issueStatus, &IssueStatus{
			Name:   issueName,
			Metric: metrics.NewRegisteredGauge(strings.ToLower(issueName), issueRegistry),
			URL:    issues.MONORAIL_BASE_URL + "?" + q.Encode(),
		})
	}

	liveness := imetrics.NewLiveness("issue-tracker")
	for range time.Tick(ISSUE_TRACKER_PERIOD) {
		for _, issue := range issueStatus {
			resp, err := c.Get(issue.URL)
			if err != nil {
				glog.Errorf("Failed to retrieve response from %s: %s", issue.URL, err)
				continue
			}
			jsonResp := map[string]int64{}
			dec := json.NewDecoder(resp.Body)
			if err := dec.Decode(&jsonResp); err != nil {
				glog.Warningf("Failed to decode JSON response: %s", err)
				util.Close(resp.Body)
				continue
			}
			issue.Metric.Update(jsonResp["totalResults"])
			glog.Infof("Num Issues: %s - %d", issue.Name, jsonResp["totalResults"])
			if err == nil && resp.Body != nil {
				util.Close(resp.Body)
			}
		}
		liveness.Update()
	}
}
// download runs as a goroutine: it waits for names of files to download from
// Google Storage and downloads them to downloadPath.  When it is done (on
// error or when the channel is closed), it signals to the WaitGroup that it
// is done.  It also logs the progress of downloading the fuzzes.
func (v *VersionUpdater) download(toDownload <-chan string, downloadPath string, wg *sync.WaitGroup) {
	defer wg.Done()
	for file := range toDownload {
		contents, err := gs.FileContentsFromGS(v.storageClient, config.GS.Bucket, file)
		if err != nil {
			glog.Warningf("Problem downloading fuzz %s, continuing anyway: %s", file, err)
		}
		hash := file[strings.LastIndex(file, "/")+1:]
		onDisk := filepath.Join(downloadPath, hash)
		if err = ioutil.WriteFile(onDisk, contents, 0644); err != nil && !os.IsExist(err) {
			glog.Warningf("Problem writing fuzz to %s, continuing anyway: %s", onDisk, err)
		}
		atomic.AddInt32(&completedCounter, 1)
		if completedCounter%100 == 0 {
			glog.Infof("%d fuzzes downloaded", completedCounter)
		}
	}
}
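A sketch of how a caller might fan the downloads out over several workers (the worker count, files slice, and downloadPath are illustrative; v is a *VersionUpdater):

	var wg sync.WaitGroup
	toDownload := make(chan string)
	// Start a fixed pool of download workers.
	for i := 0; i < 16; i++ {
		wg.Add(1)
		go v.download(toDownload, downloadPath, &wg)
	}
	// Feed the work queue, then close it so the workers exit.
	for _, file := range files {
		toDownload <- file
	}
	close(toDownload)
	wg.Wait()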
Example #13
// kernelJSONHandler returns the data needed for displaying Kernel Density Estimates.
//
// The return format is JSON of the form:
//
// {
//   "commit1": {
//     "key1": 1.234,
//     "key2": 1.235,
//     "key3": 1.230,
//     "key4": 1.239,
//     ...
//   },
//   "commit2": {
//     "key1": 1.434,
//     "key2": 1.834,
//     "key3": 1.234,
//     "key4": 1.134,
//     ...
//   },
//   "missing": 5 // Number of missing values.
// }
//
// Takes the following query parameters:
//
//   commit1 - The hash for the first commit.
//   commit2 - The hash for the second commit.
//   query   - A paramset in URI query format used to filter the results at each commit.
//
func kernelJSONHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(jcgregorio) Determine the tile(s) to load based on the commit hashes,
	// possibly loading two different tiles, one for each hash.
	tile := masterTileBuilder.GetTile()
	commit1 := r.FormValue("commit1")
	commit2 := r.FormValue("commit2")

	// Calculate the indices where the commits fall in the tile.
	commit1Index := -1
	commit2Index := -1

	// Confirm that the two commits appear in the tile.
	for i, c := range tile.Commits {
		if c.Hash == commit1 {
			commit1Index = i
		}
		if c.Hash == commit2 {
			commit2Index = i
		}
	}
	if commit1Index == -1 || commit2Index == -1 {
		glog.Warningf("Commits %s[%d] %s[%d]", commit1, commit1Index, commit2, commit2Index)
		util.ReportError(w, r, fmt.Errorf("Failed to find commits in tile."), fmt.Sprintf("Failed to find commits in tile."))
		return
	}
	w.Header().Set("Content-Type", "application/json")
	query := r.FormValue("query")
	q, err := url.ParseQuery(query)
	if err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Failed to parse query parameters."))
		return
	}
	ret := struct {
		Commit1 map[string]float64 `json:"commit1"`
		Commit2 map[string]float64 `json:"commit2"`
		Missing int32              `json:"missing"`
	}{
		Commit1: map[string]float64{},
		Commit2: map[string]float64{},
	}
	for key, tr := range tile.Traces {
		if tiling.Matches(tr, q) {
			if tr.IsMissing(commit1Index) || tr.IsMissing(commit2Index) {
				ret.Missing++
				continue
			}
			ret.Commit1[key] = tr.(*types.PerfTrace).Values[commit1Index]
			ret.Commit2[key] = tr.(*types.PerfTrace).Values[commit2Index]
		}
	}
	enc := json.NewEncoder(w)
	if err := enc.Encode(ret); err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
		return
	}
}
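Wiring the handler into the default mux might look like this (the route is illustrative):

	http.HandleFunc("/kernel/json/", kernelJSONHandler)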
Example #14
// getUningestedBuilds returns a map for the given master whose keys are
// builder names and values are slices of ints representing the numbers of
// builds which have not yet been ingested.
func getUningestedBuilds(db DB, m string) (map[string][]int, error) {
	defer metrics.NewTimer("buildbot.getUningestedBuilds").Stop()
	// Get the latest and last-processed builds for all builders.
	latest, err := getLatestBuilds(m)
	if err != nil {
		return nil, fmt.Errorf("Failed to get latest builds: %s", err)
	}
	lastProcessed, err := db.GetLastProcessedBuilds(m)
	if err != nil {
		return nil, fmt.Errorf("Failed to get last-processed builds: %s", err)
	}
	// Find the range of uningested builds for each builder.
	type numRange struct {
		Start int // The last-ingested build number.
		End   int // The latest build number.
	}
	ranges := map[string]*numRange{}
	for _, id := range lastProcessed {
		b, err := db.GetBuild(id)
		if err != nil {
			return nil, err
		}
		ranges[b.Builder] = &numRange{
			Start: b.Number,
			End:   b.Number,
		}
	}
	for b, n := range latest {
		if _, ok := ranges[b]; !ok {
			ranges[b] = &numRange{
				Start: -1,
				End:   n,
			}
		} else {
			ranges[b].End = n
		}
	}
	// Create a slice of build numbers for the uningested builds.
	unprocessed := map[string][]int{}
	for b, r := range ranges {
		if r.End < r.Start {
			glog.Warningf("Cannot create slice of builds to ingest for %q; invalid range (%d, %d)", b, r.Start, r.End)
			continue
		}
		builds := make([]int, r.End-r.Start)
		for i := r.Start + 1; i <= r.End; i++ {
			builds[i-r.Start-1] = i
		}
		if len(builds) > 0 {
			unprocessed[b] = builds
		}
	}
	return unprocessed, nil
}
Example #15
// ReadLinesFromPipe reads from the given pipe, splits its output into lines,
// and pushes each line into the given channel.
func ReadLinesFromPipe(pipe io.Reader, lines chan string) {
	buf := []byte{}

	// writeLine recovers from a panic when writing to the channel.
	writeLine := func(s string) {
		defer func() {
			if r := recover(); r != nil {
				glog.Warningf("Panic writing to channel... are we exiting?")
			}
		}()
		lines <- s
	}

	// readlines reads output lines from the buffer and pushes them into the channel.
	readlines := func() {
		readIdx := 0
		// Read lines from buf.
		for i, b := range buf {
			if b == '\n' {
				s := string(buf[readIdx:i])
				writeLine(s)
				readIdx = i + 1
			}
		}
		// Remove the lines we read.
		buf = buf[readIdx:]
	}

	// Loop forever, reading bytes from the pipe.
	readbuf := make([]byte, 4096)
	for {
		read, err := pipe.Read(readbuf)
		if read > 0 {
			buf = append(buf, readbuf[:read]...)

			// Read lines from the buffers.
			readlines()
		}
		if err != nil {
			if err == io.EOF {
				break
			} else {
				glog.Error(err)
			}
		} else if read == 0 {
			time.Sleep(20 * time.Millisecond)
		}
	}
	// Read any remaining lines from the buffers.
	readlines()
	// "Flush" any incomplete lines from the buffers.
	writeLine(string(buf))
}
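A sketch of streaming a subprocess's stdout through this function (the command is illustrative):

	cmd := exec.Command("make", "all")
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		glog.Fatal(err)
	}
	lines := make(chan string)
	// Drain the channel so ReadLinesFromPipe never blocks on a full channel.
	go func() {
		for line := range lines {
			glog.Info(line)
		}
	}()
	if err := cmd.Start(); err != nil {
		glog.Fatal(err)
	}
	ReadLinesFromPipe(stdout, lines) // Returns once the pipe hits EOF.
	close(lines)
	if err := cmd.Wait(); err != nil {
		glog.Error(err)
	}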
// StopBinaryGenerator terminates all afl-fuzz processes that were spawned,
// logging any errors.
func StopBinaryGenerator() {
	glog.Infof("Trying to stop %d fuzz processes", len(fuzzProcesses))
	for _, p := range fuzzProcesses {
		if p != nil {
			if err := p.Kill(); err != nil {
				glog.Warningf("Error while trying to kill afl process: %s", err)
			} else {
				glog.Info("Quietly shutdown fuzz process.")
			}
		}
	}
	fuzzProcesses = nil
}
Example #17
// getBuildFromMaster retrieves the given build from the build master's JSON
// interface as specified by the master, builder, and build number.
func getBuildFromMaster(master, builder string, buildNumber int, repos *gitinfo.RepoMap) (*Build, error) {
	var build Build
	url := fmt.Sprintf("%s%s/json/builders/%s/builds/%d", BUILDBOT_URL, master, builder, buildNumber)
	err := get(url, &build)
	if err != nil {
		return nil, fmt.Errorf("Failed to retrieve build #%d for %s: %s", buildNumber, builder, err)
	}
	build.Branch = build.branch()
	build.GotRevision = build.gotRevision()
	build.Master = master
	build.Builder = builder
	// Use comma-ok assertions so a missing or malformed property cannot panic.
	slaveProp, ok := build.GetProperty("slavename").([]interface{})
	if ok && len(slaveProp) == 3 {
		if name, ok := slaveProp[1].(string); ok {
			build.BuildSlave = name
		}
	}
	build.Started = build.Times[0]
	build.Finished = build.Times[1]
	propBytes, err := json.Marshal(&build.Properties)
	if err != nil {
		return nil, fmt.Errorf("Unable to convert build properties to JSON: %s", err)
	}
	build.PropertiesStr = string(propBytes)
	build.Repository = build.repository()
	if build.Repository == "" {
		// Attempt to determine the repository.
		glog.Infof("No repository set for %s #%d; attempting to find it.", build.Builder, build.Number)
		r, err := repos.RepoForCommit(build.GotRevision)
		if err == nil {
			glog.Infof("Found %s for %s", r, build.GotRevision)
			build.Repository = r
		} else {
			glog.Warningf("Encountered error determining repo for %s: %s", build.GotRevision, err)
		}
	}

	// Fixup each step.
	for _, s := range build.Steps {
		if len(s.ResultsRaw) > 0 {
			if s.ResultsRaw[0] == nil {
				s.ResultsRaw[0] = 0.0
			}
			s.Results = int(s.ResultsRaw[0].(float64))
		} else {
			s.Results = 0
		}
		s.Started = s.Times[0]
		s.Finished = s.Times[1]
	}

	return &build, nil
}
Example #18
func cleanupAppLogs(dir string, appLogLevelToSpace map[string]int64, filesToState map[string]fileState) bool {
	deletedFiles := false
	for appLogLevel := range appLogLevelToSpace {
		if appLogLevelToSpace[appLogLevel] > *appLogThreshold {
			glog.Infof("App %s is above the threshold. Usage: %d. Threshold: %d", appLogLevel, appLogLevelToSpace[appLogLevel], *appLogThreshold)
			tokens := strings.Split(appLogLevel, ".")
			app := tokens[0]
			logLevel := tokens[1]
			logGlob := filepath.Join(dir, app+".*"+logLevel+".*")
			matches, err := filepath.Glob(logGlob)
			if err != nil {
				glog.Fatalf("Could not glob for %s: %s", logGlob, err)
			}
			fileInfos := make([]os.FileInfo, len(matches))
			for i, match := range matches {
				fileInfo, err := os.Stat(match)
				if err != nil {
					glog.Fatalf("Could not stat %s: %s", match, err)
				}
				fileInfos[i] = fileInfo
			}

			// Sort by Modified time and keep deleting till we are at
			// (threshold - buffer) space left.
			sort.Sort(FileInfoModifiedSlice{fileInfos: fileInfos, reverseSort: false})
			index := 0
			for appLogLevelToSpace[appLogLevel] > *appLogThreshold-*appLogThresholdBuffer {
				if index+1 == len(fileInfos) {
					glog.Warningf("App %s is above the threshold and has only one file remaining: %s. Not deleting it.", appLogLevel, fileInfos[index].Name())
					break
				}
				fileName := fileInfos[index].Name()
				appLogLevelToSpace[appLogLevel] -= fileInfos[index].Size()
				if err = os.Remove(filepath.Join(dir, fileName)); err != nil {
					glog.Fatalf("Could not delete %s: %s", fileName, err)
				}
				// Remove the entry from the filesToState map.
				delete(filesToState, filepath.Join(dir, fileName))
				deletedFiles = true
				glog.Infof("Deleted %s", fileName)
				index++
			}
			// Just in case we delete a massive log file.
			if appLogLevelToSpace[appLogLevel] < 0 {
				appLogLevelToSpace[appLogLevel] = 0
			}
		}
	}
	return deletedFiles
}
Example #19
// Returns the response body of the specified GS object. Tries MAX_URI_GET_TRIES
// times if download is unsuccessful. Client must close the response body when
// finished with it.
func getRespBody(res *storage.Object, client *http.Client) (io.ReadCloser, error) {
	for i := 0; i < MAX_URI_GET_TRIES; i++ {
		glog.Infof("Fetching: %s", res.Name)
		request, err := gs.RequestForStorageURL(res.MediaLink)
		if err != nil {
			glog.Warningf("Unable to create Storage MediaURI request: %s\n", err)
			continue
		}

		resp, err := client.Do(request)
		if err != nil {
			glog.Warningf("Unable to retrieve Storage MediaURI: %s", err)
			continue
		}
		if resp.StatusCode != 200 {
			glog.Warningf("Failed to retrieve: %d  %s", resp.StatusCode, resp.Status)
			util.Close(resp.Body)
			continue
		}
		return resp.Body, nil
	}
	return nil, fmt.Errorf("Failed fetching file after %d attempts", MAX_URI_GET_TRIES)
}
// Get returns the build with the given ID.
func (c *BuildCache) Get(id buildbot.BuildID) (*buildbot.Build, error) {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	b, ok := c.byId[string(id)]
	if ok {
		return b, nil
	}
	glog.Warningf("Missing build with id %v; loading now.", id)
	build, err := c.db.GetBuild(id)
	if err != nil {
		return nil, err
	}
	return build, nil
}
Example #21
// newCachingTokenSource creates a new instance of CachingTokenSource that
// caches the token in cacheFilePath. ctx and config are used to create and
// retrieve the token in the first place.
// If no token is available it will run though the oauth flow for an
// installed app.
func newCachingTokenSource(cacheFilePath string, ctx context.Context, config *oauth2.Config) (oauth2.TokenSource, error) {
	var tok *oauth2.Token = nil
	var err error

	if cacheFilePath == "" {
		glog.Warningf("cacheFilePath is empty. Not caching auth token.")
	} else if _, err = os.Stat(cacheFilePath); err == nil {
		// The file exists; load the token from disk.
		f, err := os.Open(cacheFilePath)
		if err != nil {
			return nil, err
		}
		defer util.Close(f)
		tok = &oauth2.Token{}
		if err = json.NewDecoder(f).Decode(tok); err != nil {
			return nil, err
		}
	} else if !os.IsNotExist(err) {
		return nil, err
	}

	// If there was no token, we run through the flow.
	if tok == nil {
		// Run through the flow.
		url := config.AuthCodeURL("state", oauth2.AccessTypeOffline)
		fmt.Printf("Your browser has been opened to visit:\n\n%s\n\nEnter the verification code:", url)

		var code string
		if _, err = fmt.Scan(&code); err != nil {
			return nil, err
		}
		tok, err = config.Exchange(ctx, code)
		if err != nil {
			return nil, err
		}

		if err = saveToken(cacheFilePath, tok); err != nil {
			return nil, err
		}
		glog.Infof("token: %v", tok)
	}

	// We have a token at this point.
	tokenSource := config.TokenSource(ctx, tok)
	return &cachingTokenSource{
		cacheFilePath: cacheFilePath,
		tokenSource:   tokenSource,
		lastToken:     tok,
	}, nil
}
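A sketch of wiring the cached source into an HTTP client (client ID, secret, scope, and cache path are placeholders; google.Endpoint assumes the golang.org/x/oauth2/google import):

	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		Endpoint:     google.Endpoint,
		Scopes:       []string{"https://www.googleapis.com/auth/devstorage.read_only"},
		RedirectURL:  "urn:ietf:wg:oauth:2.0:oob",
	}
	ctx := context.Background()
	ts, err := newCachingTokenSource("google_storage_token.data", ctx, conf)
	if err != nil {
		glog.Fatal(err)
	}
	client := oauth2.NewClient(ctx, ts)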
Example #22
// RoundTrip implements the RoundTripper interface.
func (t *BackOffTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// Initialize the exponential backoff client.
	backOffClient := &backoff.ExponentialBackOff{
		InitialInterval:     t.backOffConfig.initialInterval,
		RandomizationFactor: t.backOffConfig.randomizationFactor,
		Multiplier:          t.backOffConfig.backOffMultiplier,
		MaxInterval:         t.backOffConfig.maxInterval,
		MaxElapsedTime:      t.backOffConfig.maxElapsedTime,
		Clock:               backoff.SystemClock,
	}
	// Make a copy of the request's Body so that we can reuse it if the request
	// needs to be backed off and retried.
	bodyBuf := bytes.Buffer{}
	if req.Body != nil {
		if _, err := bodyBuf.ReadFrom(req.Body); err != nil {
			return nil, fmt.Errorf("Failed to read request body: %v", err)
		}
	}

	var resp *http.Response
	var err error
	roundTripOp := func() error {
		if req.Body != nil {
			req.Body = ioutil.NopCloser(bytes.NewReader(bodyBuf.Bytes()))
		}
		resp, err = t.Transport.RoundTrip(req)
		if err != nil {
			return fmt.Errorf("Error while making the round trip: %s", err)
		}
		if resp != nil {
			if resp.StatusCode >= 500 && resp.StatusCode <= 599 {
				return fmt.Errorf("Got server error statuscode %d while making the HTTP %s request to %s", resp.StatusCode, req.Method, req.URL)
			} else if resp.StatusCode < 200 || resp.StatusCode > 299 {
				// Stop backing off if there are non server errors.
				backOffClient.MaxElapsedTime = backoff.Stop
				return fmt.Errorf("Got non server error statuscode %d while making the HTTP %s request to %s", resp.StatusCode, req.Method, req.URL)
			}
		}
		return nil
	}
	notifyFunc := func(err error, wait time.Duration) {
		glog.Warningf("Got error: %s. Retrying HTTP request after sleeping for %s", err, wait)
	}

	if err := backoff.RetryNotify(roundTripOp, backOffClient, notifyFunc); err != nil {
		return nil, fmt.Errorf("HTTP request failed inspite of exponential backoff: %s", err)
	}
	return resp, nil
}
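Installed as a client's Transport, the retries are transparent to callers (a sketch; construction of the *BackOffTransport value is not shown in this example):

	client := &http.Client{
		Transport: backOffTransport, // a *BackOffTransport value
		Timeout:   5 * time.Minute,
	}
	resp, err := client.Get("https://example.com/api")
	if err != nil {
		glog.Errorf("Request failed after retries: %s", err)
	} else {
		util.Close(resp.Body)
	}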
Example #23
// putBuild inserts the build into the database, replacing any previous version.
func putBuild(tx *bolt.Tx, b *Build) error {
	id := b.Id()
	if tx.Bucket(BUCKET_BUILDS).Get(id) == nil {
		// Measure the time between build start and first DB insertion.
		latency := time.Since(b.Started)
		if latency > INGEST_LATENCY_ALERT_THRESHOLD {
			// This is probably going to trigger an alert. Log the build for debugging.
			glog.Warningf("Build start to ingestion latency is greater than %s (%s): %s %s #%d", INGEST_LATENCY_ALERT_THRESHOLD, latency, b.Master, b.Builder, b.Number)
		}
		metrics.GetOrRegisterSlidingWindow("buildbot.startToIngestLatency", metrics.DEFAULT_WINDOW).Update(int64(latency))
	} else {
		if err := deleteBuild(tx, id); err != nil {
			return err
		}
	}
	return insertBuild(tx, b)
}
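Since putBuild needs an open read-write transaction, a caller wraps it in a bolt Update (a sketch: db is an opened *bolt.DB, b the *Build to store):

	if err := db.Update(func(tx *bolt.Tx) error {
		return putBuild(tx, b)
	}); err != nil {
		glog.Errorf("Failed to store build: %s", err)
	}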
// Get returns the build with the given ID.
func (c *BuildCache) Get(id int) (*buildbot.Build, error) {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	b, ok := c.byId[id]
	if ok {
		return b, nil
	}
	glog.Warningf("Missing build with id %d; loading now.", id)
	builds, err := buildbot.GetBuildsFromDB([]int{id})
	if err != nil {
		return nil, err
	}
	if b, ok := builds[id]; ok {
		return b, nil
	}
	return nil, fmt.Errorf("No such build: %d", id)
}
// GetBuildsForCommits returns the build data for the given commits.
func (c *BuildCache) GetBuildsForCommits(commits []string) (map[string]map[string]*buildbot.BuildSummary, map[string][]*buildbot.BuilderComment, error) {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	builders := map[string][]*buildbot.BuilderComment{}
	for k, v := range c.builders {
		builders[k] = v
	}
	byCommit := map[string]map[string]*buildbot.BuildSummary{}
	missing := []string{}
	for _, hash := range commits {
		builds, ok := c.byCommit[hash]
		if ok {
			byCommit[hash] = builds
		} else {
			missing = append(missing, hash)
		}
	}
	missingBuilders := map[string]bool{}
	if len(missing) > 0 {
		glog.Warningf("Missing build data for some commits; loading now (%v)", missing)
		// Use a distinct name for the builders returned by LoadData so it does
		// not shadow the copy of c.builders made above.
		_, missingByCommit, loadedBuilders, err := LoadData(missing)
		if err != nil {
			return nil, nil, fmt.Errorf("Failed to load missing builds: %v", err)
		}
		for hash, byBuilder := range missingByCommit {
			byCommit[hash] = byBuilder
			for builder := range byBuilder {
				if _, ok := loadedBuilders[builder]; !ok {
					missingBuilders[builder] = true
				}
			}
		}
	}
	missingBuilderList := make([]string, 0, len(missingBuilders))
	for b := range missingBuilders {
		missingBuilderList = append(missingBuilderList, b)
	}
	missingComments, err := buildbot.GetBuildersComments(missingBuilderList)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to load missing builder comments: %v", err)
	}
	for b, s := range missingComments {
		builders[b] = s
	}
	return byCommit, builders, nil
}
// refreshLastTiles reloads the last (-1) tile.
func (store *FileTileStore) refreshLastTiles() {
	// Read tile -1.
	tile, err := store.getLastTile(0)
	if err != nil {
		glog.Warningf("Unable to retrieve last tile for scale %d: %s", 0, err)
		return
	}
	store.lock.Lock()
	defer store.lock.Unlock()

	entry := &CacheEntry{
		tile:         tile,
		lastModified: time.Now(),
	}
	key := CacheKey{
		startIndex: -1,
		scale:      0,
	}
	store.cache.Add(key, entry)
}
Example #27
// getBuildFromMaster retrieves the given build from the build master's JSON
// interface as specified by the master, builder, and build number.
func getBuildFromMaster(master, builder string, buildNumber int, repos *gitinfo.RepoMap) (*Build, error) {
	var build Build
	url := fmt.Sprintf("%s%s/json/builders/%s/builds/%d", BUILDBOT_URL, master, builder, buildNumber)
	err := get(url, &build)
	if err != nil {
		return nil, fmt.Errorf("Failed to retrieve build #%d for %s: %s", buildNumber, builder, err)
	}
	build.fixup()
	if build.Repository == "" {
		// Attempt to determine the repository.
		glog.Infof("No repository set for %s #%d; attempting to find it.", build.Builder, build.Number)
		r, err := repos.RepoForCommit(build.GotRevision)
		if err == nil {
			glog.Infof("Found %s for %s", r, build.GotRevision)
			build.Repository = r
		} else {
			glog.Warningf("Encountered error determining repo for %s: %s", build.GotRevision, err)
		}
	}

	return &build, nil
}
Example #28
// SyncDir runs "git pull" and "gclient sync" on the specified directory.
func SyncDir(dir string) error {
	err := os.Chdir(dir)
	if err != nil {
		return fmt.Errorf("Could not chdir to %s: %s", dir, err)
	}

	for i := 0; i < MAX_SYNC_TRIES; i++ {
		if i > 0 {
			glog.Warningf("%d. retry for syncing %s", i, dir)
		}

		err = syncDirStep()
		if err == nil {
			break
		}
		glog.Errorf("Error syncing %s", dir)
	}

	if err != nil {
		glog.Errorf("Failed to sync %s after %d attempts", dir, MAX_SYNC_TRIES)
	}
	return err
}
// This method looks for and returns DiffMetrics of the specified digests from the
// local diffmetrics dir. It is thread safe because it locks the diff store's
// mutex before accessing the digest cache.
func (fs *FileDiffStore) getDiffMetricsFromFileCache(baseName string) (*diff.DiffMetrics, error) {
	diffMetricsFilePath := fs.getDiffMetricPath(baseName)

	// Lock the mutex before reading from the local diff directory.
	fs.diffDirLock.Lock()
	defer fs.diffDirLock.Unlock()
	if _, err := os.Stat(diffMetricsFilePath); err != nil {
		if os.IsNotExist(err) {
			// File does not exist.
			return nil, nil
		} else {
			// There was some other error.
			glog.Warningf("Some other error: %s: %s", baseName, err)
			return nil, err
		}
	}

	diffMetrics, err := openDiffMetrics(diffMetricsFilePath)
	if err != nil {
		glog.Warning("Some error opening: %s: %s", baseName, err)
		return nil, err
	}
	return diffMetrics, nil
}
// cacheImageFromGS downloads an image file from Google Storage and caches it
// in a local directory. It is thread safe because it locks the diff store's
// mutex before accessing the digest cache. If the provided digest does not
// exist in Google Storage then downloadFailureCount is incremented.
func (fs *FileDiffStore) cacheImageFromGS(d string) error {
	// Use a distinct name so the service handle does not shadow the storage package.
	gsService, err := storage.New(fs.client)
	if err != nil {
		return fmt.Errorf("Failed to create interface to Google Storage: %s\n", err)
	}

	objLocation := filepath.Join(fs.storageBaseDir, fmt.Sprintf("%s.%s", d, IMG_EXTENSION))
	res, err := gsService.Objects.Get(fs.gsBucketName, objLocation).Do()
	if err != nil {
		downloadFailureCount.Inc(1)
		return err
	}

	for i := 0; i < MAX_URI_GET_TRIES; i++ {
		if i > 0 {
			glog.Warningf("%d. retry for digest %s", i, d)
		}

		err = func() error {
			respBody, err := fs.getRespBody(res)
			if err != nil {
				return err
			}
			defer util.Close(respBody)

			// TODO(stephana): Creating and renaming temporary files this way
			// should be made into a generic utility function.
			// See also FileTileStore for a similar implementation.
			// Create a temporary file.
			tempOut, err := ioutil.TempFile(fs.localTempFileDir, fmt.Sprintf("tempfile-%s", d))
			if err != nil {
				return fmt.Errorf("Unable to create temp file: %s", err)
			}

			md5Hash := md5.New()
			multiOut := io.MultiWriter(md5Hash, tempOut)

			if _, err = io.Copy(multiOut, respBody); err != nil {
				return err
			}
			err = tempOut.Close()
			if err != nil {
				return fmt.Errorf("Error closing temp file: %s", err)
			}

			// Check the MD5.
			objMD5, err := base64.StdEncoding.DecodeString(res.Md5Hash)
			if err != nil {
				return fmt.Errorf("Unable to decode MD5 hash from %s", d)
			}

			if !bytes.Equal(md5Hash.Sum(nil), objMD5) {
				return fmt.Errorf("MD5 hash for digest %s incorrect.", d)
			}

			// Rename the file after acquiring the lock.
			outputBaseName := fs.getImageBaseName(d)
			outputFile, err := fs.createRadixPath(fs.localImgDir, outputBaseName)
			if err != nil {
				return fmt.Errorf("Error creating output file: %s", err)
			}

			fs.digestDirLock.Lock()
			defer fs.digestDirLock.Unlock()
			if err := os.Rename(tempOut.Name(), outputFile); err != nil {
				return fmt.Errorf("Unable to move file: %s", err)
			}

			downloadSuccessCount.Inc(1)
			return nil
		}()

		if err == nil {
			break
		}
		glog.Errorf("Error fetching file for digest %s: %s", d, err)
	}

	if err != nil {
		glog.Errorf("Failed fetching file after %d attempts", MAX_URI_GET_TRIES)
		downloadFailureCount.Inc(1)
	}
	return err
}