Example #1
0
// download fetches the content of the given location in GS and stores it at the given
// output path.
func (p *pdfProcessor) download(bucket, dir, fileName, outputPath string) error {
	objectPath := dir + "/" + fileName
	r, err := p.storageClient.Bucket(bucket).Object(objectPath).NewReader(context.Background())
	if err != nil {
		return err
	}
	defer util.Close(r)

	tempFile, err := ioutil.TempFile(p.pdfCacheDir, "pdfingestion-download")
	if err != nil {
		return err
	}

	_, err = io.Copy(tempFile, r)
	util.Close(tempFile)
	if err != nil {
		util.Remove(tempFile.Name())
		return err
	}

	if err := os.Rename(tempFile.Name(), outputPath); err != nil {
		util.Remove(tempFile.Name())
		return err
	}

	glog.Infof("Downloaded: %s/%s", bucket, objectPath)
	return nil
}
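A minimal usage sketch for download; the bucket, directory, file name, and output path below are hypothetical, and a pdfProcessor with a working storageClient and pdfCacheDir is assumed to exist:

func exampleDownload(p *pdfProcessor) error {
	// All of these names are placeholders for illustration only.
	return p.download("skia-pdf", "ingest", "report.pdf", "/tmp/report.pdf")
}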
Example #2
0
// monitorIssueTracker reads the counts for all the types of issues in the skia
// issue tracker (code.google.com/p/skia) and stuffs the counts into Graphite.
func monitorIssueTracker() {
	c := &http.Client{
		Transport: &http.Transport{
			Dial: dialTimeout,
		},
	}

	if *useMetadata {
		*apikey = metadata.Must(metadata.ProjectGet(metadata.APIKEY))
	}

	// Create a new metrics registry for the issue tracker metrics.
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	issueRegistry := metrics.NewRegistry()
	go graphite.Graphite(issueRegistry, common.SAMPLE_PERIOD, "issues", addr)

	// IssueStatus has all the info we need to capture and record a single issue status. I.e. capture
	// the count of all issues with a status of "New".
	type IssueStatus struct {
		Name   string
		Metric metrics.Gauge
		URL    string
	}

	allIssueStatusLabels := []string{
		"New", "Accepted", "Unconfirmed", "Started", "Fixed", "Verified", "Invalid", "WontFix", "Done", "Available", "Assigned",
	}

	issueStatus := []*IssueStatus{}
	for _, issueName := range allIssueStatusLabels {
		issueStatus = append(issueStatus, &IssueStatus{
			Name:   issueName,
			Metric: metrics.NewRegisteredGauge(strings.ToLower(issueName), issueRegistry),
			URL:    "https://www.googleapis.com/projecthosting/v2/projects/skia/issues?fields=totalResults&key=" + *apikey + "&status=" + issueName,
		})
	}

	liveness := imetrics.NewLiveness("issue-tracker")
	for _ = range time.Tick(ISSUE_TRACKER_PERIOD) {
		for _, issue := range issueStatus {
			resp, err := c.Get(issue.URL)
			if err != nil {
				glog.Errorf("Failed to retrieve response from %s: %s", issue.URL, err)
				continue
			}
			jsonResp := map[string]int64{}
			dec := json.NewDecoder(resp.Body)
			if err := dec.Decode(&jsonResp); err != nil {
				glog.Warningf("Failed to decode JSON response: %s", err)
				util.Close(resp.Body)
				continue
			}
			issue.Metric.Update(jsonResp["totalResults"])
			glog.Infof("Num Issues: %s - %d", issue.Name, jsonResp["totalResults"])
			util.Close(resp.Body)
		}
		liveness.Update()
	}
}
Example #3
0
// monitorIssueTracker reads the counts for all the types of issues in the Skia
// issue tracker (bugs.chromium.org/p/skia) and stuffs the counts into Graphite.
func monitorIssueTracker(c *http.Client) {
	// Create a new metrics registry for the issue tracker metrics.
	addr, err := net.ResolveTCPAddr("tcp", *graphiteServer)
	if err != nil {
		glog.Fatalln("Failed to resolve the Graphite server: ", err)
	}
	issueRegistry := metrics.NewRegistry()
	go graphite.Graphite(issueRegistry, common.SAMPLE_PERIOD, "issues", addr)

	// IssueStatus has all the info we need to capture and record a single issue status. I.e. capture
	// the count of all issues with a status of "New".
	type IssueStatus struct {
		Name   string
		Metric metrics.Gauge
		URL    string
	}

	allIssueStatusLabels := []string{
		"New", "Accepted", "Unconfirmed", "Started", "Fixed", "Verified", "Invalid", "WontFix", "Done", "Available", "Assigned",
	}

	issueStatus := []*IssueStatus{}
	for _, issueName := range allIssueStatusLabels {
		q := url.Values{}
		q.Set("fields", "totalResults")
		q.Set("status", issueName)
		issueStatus = append(issueStatus, &IssueStatus{
			Name:   issueName,
			Metric: metrics.NewRegisteredGauge(strings.ToLower(issueName), issueRegistry),
			URL:    issues.MONORAIL_BASE_URL + "?" + q.Encode(),
		})
	}

	liveness := imetrics.NewLiveness("issue-tracker")
	for _ = range time.Tick(ISSUE_TRACKER_PERIOD) {
		for _, issue := range issueStatus {
			resp, err := c.Get(issue.URL)
			if err != nil {
				glog.Errorf("Failed to retrieve response from %s: %s", issue.URL, err)
				continue
			}
			jsonResp := map[string]int64{}
			dec := json.NewDecoder(resp.Body)
			if err := dec.Decode(&jsonResp); err != nil {
				glog.Warningf("Failed to decode JSON response: %s", err)
				util.Close(resp.Body)
				continue
			}
			issue.Metric.Update(jsonResp["totalResults"])
			glog.Infof("Num Issues: %s - %d", issue.Name, jsonResp["totalResults"])
			util.Close(resp.Body)
		}
		liveness.Update()
	}
}
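A sketch of how monitorIssueTracker might be launched; since it loops forever over time.Tick it is assumed to run in its own goroutine, and the client timeout below is illustrative:

func startIssueTrackerMonitoring() {
	c := &http.Client{Timeout: time.Minute}
	go monitorIssueTracker(c)
}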
Example #4
0
// This does the following:
//   `pdftoppm -r 72 -f 1 -l 1 < $PDF 2>/dev/null | pnmtopng 2> /dev/null > $PNG`
func (Poppler) Rasterize(pdfInputPath, pngOutputPath string) error {
	if !(Poppler{}).Enabled() {
		return fmt.Errorf("pdftoppm or pnmtopng is missing")
	}

	pdftoppm := exec.Command(pdftoppmExecutable, "-r", "72", "-f", "1", "-l", "1")
	pnmtopng := exec.Command(pnmtopngExecutable)

	defer processKiller(pdftoppm)
	defer processKiller(pnmtopng)

	pr, pw, err := os.Pipe()
	if err != nil {
		return err
	}
	defer fileCloser(pw)
	defer fileCloser(pr)
	pdftoppm.Stdout = pw
	pnmtopng.Stdin = pr

	iFile, err := os.Open(pdfInputPath)
	if err != nil {
		return err
	}
	defer util.Close(iFile)
	pdftoppm.Stdin = iFile

	oFile, err := os.Create(pngOutputPath)
	if err != nil {
		return err
	}
	defer util.Close(oFile)
	pnmtopng.Stdout = oFile

	if err := pdftoppm.Start(); err != nil {
		return err
	}
	if err := pnmtopng.Start(); err != nil {
		return err
	}

	go func() {
		time.Sleep(5 * time.Second)
		_ = pdftoppm.Process.Kill()
	}()
	if err := pdftoppm.Wait(); err != nil {
		return err
	}
	if err := pw.Close(); err != nil {
		return err
	}
	if err := pnmtopng.Wait(); err != nil {
		return err
	}
	return nil
}
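A usage sketch for the rasterizer above; both file paths are hypothetical:

func examplePopplerRasterize() error {
	var p Poppler
	// Render page 1 of the (made-up) input PDF to a PNG at 72 DPI.
	return p.Rasterize("/tmp/input.pdf", "/tmp/output.png")
}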
Example #5
0
// findBadBinaryPaths looks through all the afl-fuzz directories under the configured AFL output
// path (config.Generator.AflOutputPath) and returns the paths of all files in a crashes folder
// that are not already in 'alreadyFoundBinaries'.
// The output from afl-fuzz looks like:
// $AFL_ROOT/
//		-fuzzer0/
//			-crashes/  <-- bad binary fuzzes end up here
//			-hangs/
//			-queue/
//			-fuzzer_stats
//		-fuzzer1/
//		...
func findBadBinaryPaths(alreadyFoundBinaries *SortedStringSlice) ([]string, error) {
	badBinaryPaths := make([]string, 0)

	aflDir, err := os.Open(config.Generator.AflOutputPath)
	if err != nil {
		return nil, err
	}
	defer util.Close(aflDir)

	fuzzerFolders, err := aflDir.Readdir(-1)
	if err != nil {
		return nil, err
	}

	for _, fuzzerFolderInfo := range fuzzerFolders {
		// fuzzerFolderInfo is an os.FileInfo for a directory like fuzzer0, fuzzer1.
		path := filepath.Join(config.Generator.AflOutputPath, fuzzerFolderInfo.Name())
		fuzzerDir, err := os.Open(path)
		if err != nil {
			return nil, err
		}
		defer util.Close(fuzzerDir)

		fuzzerContents, err := fuzzerDir.Readdir(-1)
		if err != nil {
			return nil, err
		}
		for _, info := range fuzzerContents {
			// Look through fuzzerN/crashes
			if info.IsDir() && strings.HasPrefix(info.Name(), "crashes") {
				crashPath := filepath.Join(path, info.Name())
				crashDir, err := os.Open(crashPath)
				if err != nil {
					return nil, err
				}
				defer util.Close(crashDir)

				crashContents, err := crashDir.Readdir(-1)
				if err != nil {
					return nil, err
				}
				for _, crash := range crashContents {
					// Make sure the files are actually crashable files we haven't found before
					if crash.Name() != "README.txt" {
						if fuzzPath := filepath.Join(crashPath, crash.Name()); !alreadyFoundBinaries.Contains(fuzzPath) {
							badBinaryPaths = append(badBinaryPaths, fuzzPath)
						}
					}
				}
			}
		}
	}
	return badBinaryPaths, nil
}
Example #6
0
// RequestBuild adds a request for the given build.
func (c *Client) RequestBuild(builder, master, commit, repo, author string) (*Build, error) {
	p := buildBucketParameters{
		BuilderName: builder,
		Changes: []*buildBucketChange{
			{
				Author: &buildBucketAuthor{
					Email: author,
				},
				Repository: repo,
				Revision:   commit,
			},
		},
		Properties: map[string]string{
			"reason": "Triggered by SkiaScheduler",
		},
	}
	jsonParams, err := json.Marshal(p)
	if err != nil {
		return nil, err
	}
	body := buildBucketRequest{
		Bucket:         fmt.Sprintf("master.%s", master),
		ParametersJSON: string(jsonParams),
	}
	jsonBody, err := json.Marshal(body)
	if err != nil {
		return nil, err
	}
	url := apiUrl + "/builds"
	req, err := http.NewRequest("PUT", url, bytes.NewReader(jsonBody))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	resp, err := c.Do(req)
	if err != nil {
		return nil, err
	}
	defer util.Close(resp.Body)
	if resp.StatusCode != http.StatusOK {
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return nil, fmt.Errorf("Failed to schedule build (code %s); couldn't read response body: %v", resp.Status, err)
		}
		return nil, fmt.Errorf("Response code is %s. Response body:\n%s", resp.Status, string(b))
	}
	var res buildBucketResponse
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		return nil, fmt.Errorf("Failed to decode response body: %v", err)
	}
	return res.Build, nil
}
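A hedged example of calling RequestBuild; every argument below is a placeholder, and the *Client is assumed to already be authenticated against buildbucket:

func exampleRequestBuild(c *Client) {
	b, err := c.RequestBuild(
		"Build-Ubuntu-GCC-x86_64-Release", // builder (placeholder)
		"client.skia",                     // master (placeholder)
		"abc123",                          // commit (placeholder)
		"https://skia.googlesource.com/skia",
		"user@example.com")
	if err != nil {
		glog.Errorf("Failed to request build: %s", err)
		return
	}
	glog.Infof("Scheduled build: %+v", b)
}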
Example #7
0
func mergeUploadCSVFiles(runID string, gs *util.GsUtil) ([]string, error) {
	localOutputDir := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runID)
	skutil.MkdirAll(localOutputDir, 0700)
	noOutputSlaves := []string{}
	// Copy outputs from all slaves locally.
	for i := 0; i < util.NumWorkers(); i++ {
		workerNum := i + 1
		workerLocalOutputPath := filepath.Join(localOutputDir, fmt.Sprintf("slave%d", workerNum)+".csv")
		workerRemoteOutputPath := filepath.Join(util.BenchmarkRunsDir, runID, fmt.Sprintf("slave%d", workerNum), "outputs", runID+".output")
		respBody, err := gs.GetRemoteFileContents(workerRemoteOutputPath)
		if err != nil {
			glog.Errorf("Could not fetch %s: %s", workerRemoteOutputPath, err)
			noOutputSlaves = append(noOutputSlaves, fmt.Sprintf(util.WORKER_NAME_TEMPLATE, workerNum))
			continue
		}
		defer skutil.Close(respBody)
		out, err := os.Create(workerLocalOutputPath)
		if err != nil {
			return noOutputSlaves, fmt.Errorf("Unable to create file %s: %s", workerLocalOutputPath, err)
		}
		defer skutil.Close(out)
		defer skutil.Remove(workerLocalOutputPath)
		if _, err = io.Copy(out, respBody); err != nil {
			return noOutputSlaves, fmt.Errorf("Unable to copy to file %s: %s", workerLocalOutputPath, err)
		}
	}
	// Call csv_merger.py to merge all results into a single results CSV.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")
	pathToCsvMerger := filepath.Join(pathToPyFiles, "csv_merger.py")
	outputFileName := runID + ".output"
	args := []string{
		pathToCsvMerger,
		"--csv_dir=" + localOutputDir,
		"--output_csv_name=" + filepath.Join(localOutputDir, outputFileName),
	}
	err := util.ExecuteCmd("python", args, []string{}, util.CSV_MERGER_TIMEOUT, nil, nil)
	if err != nil {
		return noOutputSlaves, fmt.Errorf("Error running csv_merger.py: %s", err)
	}
	// Copy the output file to Google Storage.
	remoteOutputDir := filepath.Join(util.BenchmarkRunsDir, runID, "consolidated_outputs")
	if err := gs.UploadFile(outputFileName, localOutputDir, remoteOutputDir); err != nil {
		return noOutputSlaves, fmt.Errorf("Unable to upload %s to %s: %s", outputFileName, remoteOutputDir, err)
	}
	return noOutputSlaves, nil
}
Example #8
0
func mergeUploadCSVFiles(runID string, gs *util.GsUtil) error {
	localOutputDir := filepath.Join(util.StorageDir, util.BenchmarkRunsDir, runID)
	skutil.MkdirAll(localOutputDir, 0700)
	// Copy outputs from all slaves locally.
	for i := 0; i < util.NUM_WORKERS; i++ {
		workerNum := i + 1
		workerLocalOutputPath := filepath.Join(localOutputDir, fmt.Sprintf("slave%d", workerNum)+".csv")
		workerRemoteOutputPath := filepath.Join(util.BenchmarkRunsDir, runID, fmt.Sprintf("slave%d", workerNum), "outputs", runID+".output")
		respBody, err := gs.GetRemoteFileContents(workerRemoteOutputPath)
		if err != nil {
			glog.Errorf("Could not fetch %s: %s", workerRemoteOutputPath, err)
			// TODO(rmistry): Should we instead return here? We can only return
			// here if all 100 slaves reliably run without any failures which they
			// really should.
			continue
		}
		defer skutil.Close(respBody)
		out, err := os.Create(workerLocalOutputPath)
		if err != nil {
			return fmt.Errorf("Unable to create file %s: %s", workerLocalOutputPath, err)
		}
		defer skutil.Close(out)
		defer skutil.Remove(workerLocalOutputPath)
		if _, err = io.Copy(out, respBody); err != nil {
			return fmt.Errorf("Unable to copy to file %s: %s", workerLocalOutputPath, err)
		}
	}
	// Call csv_merger.py to merge all results into a single results CSV.
	_, currentFile, _, _ := runtime.Caller(0)
	pathToPyFiles := filepath.Join(
		filepath.Dir(filepath.Dir(filepath.Dir(filepath.Dir(currentFile)))),
		"py")
	pathToCsvMerger := filepath.Join(pathToPyFiles, "csv_merger.py")
	outputFileName := runID + ".output"
	args := []string{
		pathToCsvMerger,
		"--csv_dir=" + localOutputDir,
		"--output_csv_name=" + filepath.Join(localOutputDir, outputFileName),
	}
	if err := util.ExecuteCmd("python", args, []string{}, 1*time.Hour, nil, nil); err != nil {
		return fmt.Errorf("Error running csv_merger.py: %s", err)
	}
	// Copy the output file to Google Storage.
	remoteOutputDir := filepath.Join(util.BenchmarkRunsDir, runID, "consolidated_outputs")
	if err := gs.UploadFile(outputFileName, localOutputDir, remoteOutputDir); err != nil {
		return fmt.Errorf("Unable to upload %s to %s: %s", outputFileName, remoteOutputDir, err)
	}
	return nil
}
Example #9
0
// uploadResultsToPerfDashboard decodes the provided resultsFile as JSON and uploads the results to chromeperf.appspot.com.
func uploadResultsToPerfDashboard(resultsFile string, client *http.Client) error {
	jsonFile, err := os.Open(resultsFile)
	if err != nil {
		return fmt.Errorf("Could not open %s: %s", resultsFile, err)
	}
	defer skutil.Close(jsonFile)
	// Read the JSON and convert to dashboard JSON v1.
	var chartData interface{}
	if err := json.NewDecoder(jsonFile).Decode(&chartData); err != nil {
		return fmt.Errorf("Could not parse %s: %s", resultsFile, err)
	}
	// TODO(rmistry): Populate the below with data that can be monitored.
	versions := map[string]string{
		"chromium": *gitHash,
	}
	supplemental := map[string]string{}
	// Create a custom dictionary and convert it to JSON.
	dashboardFormat := map[string]interface{}{
		"master":       getCamelCaseMasterName(*buildbotMaster),
		"bot":          *buildbotBuilder,
		"chart_data":   chartData,
		"point_id":     time.Now().Unix(),
		"versions":     versions,
		"supplemental": supplemental,
	}
	marshalledData, err := json.Marshal(dashboardFormat)
	if err != nil {
		return fmt.Errorf("Could not create dashboard JSON for %s: %s", resultsFile, err)
	}

	// Post the above to https://chromeperf.appspot.com/add_point with one parameter called data.
	postData := url.Values{}
	postData.Set("data", string(marshalledData))
	req, err := http.NewRequest("POST", "https://chromeperf.appspot.com/add_point", strings.NewReader(postData.Encode()))
	if err != nil {
		return fmt.Errorf("Could not create HTTP request for %s: %s", resultsFile, err)
	}
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("Could not post to chromeperf for %s: %s", resultsFile, err)
	}
	defer skutil.Close(resp.Body)
	if resp.StatusCode != 200 {
		return fmt.Errorf("Could not post to chromeperf for %s, response status code was %d", resultsFile, resp.StatusCode)
	}
	glog.Infof("Successfully uploaded the following to chromeperf: %s", string(marshalledData))
	return nil
}
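A minimal sketch of invoking the uploader above; the results path is hypothetical and the *http.Client is assumed to carry any authentication chromeperf needs:

func exampleUploadResults(client *http.Client) {
	if err := uploadResultsToPerfDashboard("/tmp/results.json", client); err != nil {
		glog.Errorf("Upload to chromeperf failed: %s", err)
	}
}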
Example #10
0
func (fs *FileDiffStore) writeDiffMetricsToFileCache(baseName string, diffMetrics *diff.DiffMetrics) error {
	// Lock the mutex before writing to the local diff directory.
	fs.diffDirLock.Lock()
	defer fs.diffDirLock.Unlock()

	// Make paths relative. This has to be reversed in getDiffMetricsFromFileCache.
	fName, err := fs.createDiffMetricPath(baseName)
	if err != nil {
		return err
	}

	f, err := os.Create(fName)
	if err != nil {
		return fmt.Errorf("Unable to create file %s: %s", fName, err)
	}
	defer util.Close(f)

	d, err := json.MarshalIndent(diffMetrics, "", "    ")
	if err != nil {
		return fmt.Errorf("Failed to encode to JSON: %s", err)
	}
	if _, err := f.Write(d); err != nil {
		return fmt.Errorf("Failed to write to file: %v", err)
	}
	return nil
}
Example #11
0
// shortcutHandler handles the POST requests of the shortcut page.
//
// Shortcuts are of the form:
//
//    {
//       "scale": 0,
//       "tiles": [-1],
//       "hash": "a1092123890...",
//       "ids": [
//            "x86:...",
//            "x86:...",
//            "x86:...",
//       ]
//    }
//
// hash - The git hash of where a step was detected. Can be null.
//
func shortcutHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(jcgregorio): Add unit tests.
	match := shortcutHandlerPath.FindStringSubmatch(r.URL.Path)
	if match == nil {
		http.NotFound(w, r)
		return
	}
	if r.Method == "POST" {
		// check header
		if ct := r.Header.Get("Content-Type"); ct != "application/json" {
			util.ReportError(w, r, fmt.Errorf("Error: received %s", ct), "Invalid content type.")
			return
		}
		defer util.Close(r.Body)
		id, err := shortcut.Insert(r.Body)
		if err != nil {
			util.ReportError(w, r, err, "Error inserting shortcut.")
			return
		}
		w.Header().Set("Content-Type", "application/json")
		enc := json.NewEncoder(w)
		if err := enc.Encode(map[string]string{"id": id}); err != nil {
			glog.Errorf("Failed to write or encode output: %s", err)
		}
	} else {
		http.NotFound(w, r)
	}
}
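A sketch of wiring the handler above into an HTTP server; the route is illustrative, and shortcutHandlerPath is assumed to be a regexp that matches it:

func registerShortcutHandler() {
	// Hypothetical registration; the real application may use its own router.
	http.HandleFunc("/shortcuts/", shortcutHandler)
}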
Example #12
0
// See the ingester.ResultIngester interface.
func (i GoldIngester) Ingest(tt *ingester.TileTracker, opener ingester.Opener, fileInfo *ingester.ResultsFileLocation, counter metrics.Counter) error {
	r, err := opener()
	if err != nil {
		return err
	}
	defer util.Close(r)

	res, err := ParseDMResultsFromReader(r)
	if err != nil {
		return err
	}

	// Run the pre-ingestion hook.
	if i.preIngestionHook != nil {
		if err := i.preIngestionHook(res); err != nil {
			return fmt.Errorf("Error running pre-ingestion hook: %s", err)
		}
	}

	if res.GitHash != "" {
		glog.Infof("Got Git hash: %s", res.GitHash)
		if err := tt.Move(res.GitHash); err != nil {
			return fmt.Errorf("Failed to move to correct Tile: %s: %s", res.GitHash, err)
		}
		addResultToTile(res, tt.Tile(), tt.Offset(res.GitHash), counter)
	} else {
		return fmt.Errorf("Missing hash.")
	}

	return nil
}
Example #13
0
func addBuildCommentHandler(w http.ResponseWriter, r *http.Request) {
	defer timer.New("addBuildCommentHandler").Stop()
	if !userHasEditRights(r) {
		util.ReportError(w, r, fmt.Errorf("User does not have edit rights."), "User does not have edit rights.")
		return
	}
	w.Header().Set("Content-Type", "application/json")
	cache, err := getCommitCache(w, r)
	if err != nil {
		return
	}
	buildId, err := strconv.ParseInt(mux.Vars(r)["buildId"], 10, 32)
	if err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Invalid build id: %v", err))
		return
	}
	comment := struct {
		Comment string `json:"comment"`
	}{}
	if err := json.NewDecoder(r.Body).Decode(&comment); err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Failed to add comment: %v", err))
		return
	}
	defer util.Close(r.Body)
	c := buildbot.BuildComment{
		BuildId:   int(buildId),
		User:      login.LoggedInAs(r),
		Timestamp: float64(time.Now().UTC().Unix()),
		Message:   comment.Comment,
	}
	if err := cache.AddBuildComment(int(buildId), &c); err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Failed to add comment: %v", err))
		return
	}
}
Example #14
0
// AreTimeStampsEqual checks whether the TIMESTAMP in the local dir matches the
// TIMESTAMP in the remote Google Storage dir.
func (gs *GsUtil) AreTimeStampsEqual(localDir, gsDir string) (bool, error) {
	// Get timestamp from the local directory.
	localTimestampPath := filepath.Join(localDir, TIMESTAMP_FILE_NAME)
	fileContent, err := ioutil.ReadFile(localTimestampPath)
	if err != nil {
		return false, fmt.Errorf("Could not read %s: %s", localTimestampPath, err)
	}
	localTimestamp := strings.Trim(string(fileContent), "\n")

	// Get timestamp from the Google Storage directory.
	gsTimestampPath := filepath.Join(gsDir, TIMESTAMP_FILE_NAME)
	respBody, err := gs.GetRemoteFileContents(gsTimestampPath)
	if err != nil {
		return false, err
	}
	defer util.Close(respBody)
	resp, err := ioutil.ReadAll(respBody)
	if err != nil {
		return false, err
	}
	gsTimestamp := strings.Trim(string(resp), "\n")

	// Return the comparison of the two timestamps.
	return localTimestamp == gsTimestamp, nil
}
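A usage sketch for AreTimeStampsEqual; both directories are placeholders and gs is assumed to be an initialized GsUtil:

func exampleTimestampCheck(gs *GsUtil) {
	equal, err := gs.AreTimeStampsEqual("/b/storage/skps", "skps")
	if err != nil {
		glog.Errorf("Could not compare timestamps: %s", err)
		return
	}
	if !equal {
		glog.Info("Local copy is stale; a fresh download is needed.")
	}
}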
Example #15
0
func writeOutAllSourceImages() {
	// Pull all the source images from the db and write them out to inout.
	rows, err := db.Query("SELECT id, image, create_ts FROM source_images ORDER BY create_ts DESC")
	if err != nil {
		glog.Errorf("Failed to open connection to SQL server: %q\n", err)
		panic(err)
	}
	defer util.Close(rows)
	for rows.Next() {
		var id int
		var image []byte
		var create_ts time.Time
		if err := rows.Scan(&id, &image, &create_ts); err != nil {
			glog.Errorf("failed to fetch from database: %q", err)
			continue
		}
		filename := fmt.Sprintf(filepath.Join(config.Fiddle.InoutPath, "image-%d.png"), id)
		// os.Stat returns a nil error when the file already exists, so check err == nil here.
		if _, err := os.Stat(filename); err == nil {
			glog.Infof("Skipping write since file exists: %q", filename)
			continue
		}
		if err := ioutil.WriteFile(filename, image, 0666); err != nil {
			glog.Errorf("failed to write image file: %q", err)
		}
	}
}
Example #16
0
// isCommitted returns true iff the issue has been committed.
func (r *Rietveld) isCommitted(i *Issue) (bool, error) {
	committed, err := regexp.MatchString(COMMITTED_ISSUE_REGEXP, i.Description)
	if err != nil {
		return false, err
	}
	if committed {
		return true, nil
	}

	// The description sometimes doesn't get updated in time. Check the
	// commit queue status for its result.
	url := fmt.Sprintf(CQ_STATUS_URL, i.Issue, i.Patchsets[len(i.Patchsets)-1])
	resp, err := r.client.Get(url)
	if err != nil {
		return false, fmt.Errorf("Failed to GET %s: %s", url, err)
	}
	defer util.Close(resp.Body)
	dec := json.NewDecoder(resp.Body)
	var rv struct {
		Success bool `json:"success"`
	}
	if err := dec.Decode(&rv); err != nil {
		return false, fmt.Errorf("Failed to decode JSON: %s", err)
	}
	return rv.Success, nil
}
Example #17
0
// GetBugs retrieves all Issues with the given owner from the IssueTracker,
// returning an IssueList.
func (it IssueTracker) GetBugs(project string, owner string) (*IssueList, error) {
	errFmt := "error retrieving issues: %s"
	params := map[string]string{
		"owner":      url.QueryEscape(owner),
		"can":        "open",
		"maxResults": "9999",
	}
	requestURL := ISSUE_API_URL + project + "/issues?"
	first := true
	for k, v := range params {
		if first {
			first = false
		} else {
			requestURL += "&"
		}
		requestURL += k + "=" + v
	}
	resp, err := it.client.Get(requestURL)
	if err != nil {
		return nil, fmt.Errorf(errFmt, err)
	}
	defer util.Close(resp.Body)
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf(errFmt, err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf(errFmt, fmt.Sprintf(
			"issue tracker returned code %d:%v", resp.StatusCode, string(body)))
	}

	var bugList IssueList
	if err := json.Unmarshal(body, &bugList); err != nil {
		return nil, fmt.Errorf(errFmt, err)
	}
	return &bugList, nil
}
Example #18
0
// GetRecent returns the most recent n activity records in types.Activity struct format.
func GetRecent(n int) ([]*types.Activity, error) {
	ret := []*types.Activity{}
	rows, err := db.DB.Query("SELECT id, timestamp, userid, action, url FROM activitylog ORDER BY id DESC LIMIT ?", n)
	if err != nil {
		return nil, fmt.Errorf("Failed to read from database: %s", err)
	}
	defer util.Close(rows)
	glog.Infoln("Processing activity rows.")
	for rows.Next() {
		var id int
		var timestamp int64
		var userid string
		var action string
		var url string
		if err := rows.Scan(&id, &timestamp, &userid, &action, &url); err != nil {
			return nil, fmt.Errorf("Failed to read row from database: %s", err)
		}
		r := &types.Activity{
			ID:     id,
			TS:     timestamp,
			UserID: userid,
			Action: action,
			URL:    url,
		}
		ret = append(ret, r)
	}

	return ret, nil
}
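A short sketch of calling GetRecent, assuming db.DB has been initialized elsewhere:

func exampleRecentActivity() {
	activities, err := GetRecent(20)
	if err != nil {
		glog.Errorf("Failed to load recent activity: %s", err)
		return
	}
	for _, a := range activities {
		glog.Infof("%d %s %s", a.TS, a.UserID, a.Action)
	}
}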
Example #19
0
// FileSystemResult returns a ResultFileLocation for files. path is the path
// where the target file resides and rootDir is the root of all paths.
func FileSystemResult(path, rootDir string) (ResultFileLocation, error) {
	// Read file into buffer and calculate the md5 in the process.
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer util.Close(file)

	var buf bytes.Buffer
	md5, err := util.MD5FromReader(file, &buf)
	if err != nil {
		return nil, fmt.Errorf("Unable to get MD5 hash of %s: %s", path, err)
	}

	absRootDir, err := filepath.Abs(rootDir)
	if err != nil {
		return nil, err
	}

	absPath, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}

	return &fsResultFileLocation{
		path: strings.TrimPrefix(absPath, absRootDir+"/"),
		buf:  buf.Bytes(),
		md5:  hex.EncodeToString(md5),
	}, nil
}
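A minimal usage sketch for FileSystemResult; the file and root directory below are made up:

func exampleFileSystemResult() {
	loc, err := FileSystemResult("/tmp/ingest/dm.json", "/tmp/ingest")
	if err != nil {
		glog.Errorf("Could not create result file location: %s", err)
		return
	}
	glog.Infof("Created result location: %v", loc)
}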
Example #20
0
func postAlertsJsonHandler(w http.ResponseWriter, r *http.Request) {
	// Get the alert ID.
	alertIdStr, ok := mux.Vars(r)["alertId"]
	if !ok {
		util.ReportError(w, r, fmt.Errorf("No alert ID provided."), "No alert ID provided.")
		return
	}
	alertId, err := strconv.ParseInt(alertIdStr, 10, 64)
	if err != nil {
		util.ReportError(w, r, fmt.Errorf("Invalid alert ID %s", alertIdStr), "Not found.")
		return
	}

	var req struct {
		Until   int    `json:"until"`
		Comment string `json:"comment"`
	}
	defer util.Close(r.Body)
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		util.ReportError(w, r, err, "Failed to decode request body.")
		return
	}

	handleAlert(alertId, req.Comment, req.Until, w, r)
}
Example #21
0
func AddTaskHandler(w http.ResponseWriter, r *http.Request, task AddTaskVars) {
	if !ctfeutil.UserHasEditRights(r) {
		skutil.ReportError(w, r, fmt.Errorf("Must have google or chromium account to add tasks"), "")
		return
	}
	if task.IsAdminTask() && !ctfeutil.UserHasAdminRights(r) {
		skutil.ReportError(w, r, fmt.Errorf("Must be admin to add admin tasks; contact rmistry@"), "")
		return
	}
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewDecoder(r.Body).Decode(&task); err != nil {
		skutil.ReportError(w, r, err, fmt.Sprintf("Failed to add %T task", task))
		return
	}
	defer skutil.Close(r.Body)

	task.GetAddTaskCommonVars().Username = login.LoggedInAs(r)
	task.GetAddTaskCommonVars().TsAdded = ctutil.GetCurrentTs()
	if len(task.GetAddTaskCommonVars().Username) > 255 {
		skutil.ReportError(w, r, fmt.Errorf("Username is too long, limit 255 bytes"), "")
		return
	}

	if _, err := AddTask(task); err != nil {
		skutil.ReportError(w, r, err, fmt.Sprintf("Failed to insert %T task", task))
		return
	}
}
Example #22
0
func RedoTaskHandler(prototype Task, w http.ResponseWriter, r *http.Request) {
	if !ctfeutil.UserHasEditRights(r) {
		skutil.ReportError(w, r, fmt.Errorf("Must have google or chromium account to redo tasks"), "")
		return
	}
	w.Header().Set("Content-Type", "application/json")
	vars := struct{ Id int64 }{}
	if err := json.NewDecoder(r.Body).Decode(&vars); err != nil {
		skutil.ReportError(w, r, err, "Failed to parse redo request")
		return
	}
	defer skutil.Close(r.Body)

	rowQuery := fmt.Sprintf("SELECT * FROM %s WHERE id = ? AND ts_completed IS NOT NULL", prototype.TableName())
	binds := []interface{}{vars.Id}
	data, err := prototype.Select(rowQuery, binds...)
	if err != nil {
		skutil.ReportError(w, r, err, "Unable to find requested task.")
		return
	}
	tasks := AsTaskSlice(data)
	if len(tasks) != 1 {
		skutil.ReportError(w, r, fmt.Errorf("Expected one task with id %v but found %d", vars.Id, len(tasks)), "Unable to find requested task.")
		return
	}

	addTaskVars := tasks[0].GetPopulatedAddTaskVars()
	// Replace the username with the new requester.
	addTaskVars.GetAddTaskCommonVars().Username = login.LoggedInAs(r)
	if _, err := AddTask(addTaskVars); err != nil {
		skutil.ReportError(w, r, err, "Could not redo the task.")
		return
	}
}
Example #23
0
// changeHandler handles actions on individual services.
//
// The actions are forwarded off to the pulld service
// running on the machine hosting that service.
func changeHandler(w http.ResponseWriter, r *http.Request) {
	if login.LoggedInAs(r) == "" {
		util.ReportError(w, r, fmt.Errorf("You must be logged on to push."), "")
		return
	}
	if err := r.ParseForm(); err != nil {
		util.ReportError(w, r, err, "Failed to parse form.")
		return
	}
	action := r.Form.Get("action")
	name := r.Form.Get("name")
	machine := ip.Resolve(r.Form.Get("machine"))
	url := fmt.Sprintf("http://%s:10114/_/change?name=%s&action=%s", machine, name, action)
	resp, err := client.Post(url, "", nil)
	if err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Failed to reach %s: %v %s", machine, resp, err))
		return
	}
	defer util.Close(resp.Body)
	if resp.StatusCode != 200 {
		util.ReportError(w, r, fmt.Errorf("Got status %s from %s", resp.Status, machine), fmt.Sprintf("Failed to reach %s", machine))
		return
	}
	w.Header().Set("Content-Type", "application/json")
	if _, err := io.Copy(w, resp.Body); err != nil {
		glog.Errorf("Failed to copy JSON error out: %s", err)
	}
}
Example #24
0
func getCLDetail(clURLString string) (clDetail, error) {
	if clURLString == "" {
		return clDetail{}, fmt.Errorf("No CL specified")
	}

	matches := clURLRegexp.FindStringSubmatch(clURLString)
	if len(matches) < 2 || matches[1] == "" {
		// Don't return error, since user could still be typing.
		return clDetail{}, nil
	}
	clString := matches[1]
	detailJsonUrl := "https://codereview.chromium.org/api/" + clString
	glog.Infof("Reading CL detail from %s", detailJsonUrl)
	detailResp, err := httpClient.Get(detailJsonUrl)
	if err != nil {
		return clDetail{}, fmt.Errorf("Unable to retrieve CL detail: %v", err)
	}
	defer skutil.Close(detailResp.Body)
	if detailResp.StatusCode == 404 {
		// Don't return error, since user could still be typing.
		return clDetail{}, nil
	}
	if detailResp.StatusCode != 200 {
		return clDetail{}, fmt.Errorf("Unable to retrieve CL detail; status code %d", detailResp.StatusCode)
	}
	detail := clDetail{}
	err = json.NewDecoder(detailResp.Body).Decode(&detail)
	return detail, err
}
Example #25
0
func addCommitCommentHandler(w http.ResponseWriter, r *http.Request) {
	defer timer.New("addCommitCommentHandler").Stop()
	if !userHasEditRights(r) {
		util.ReportError(w, r, fmt.Errorf("User does not have edit rights."), "User does not have edit rights.")
		return
	}
	w.Header().Set("Content-Type", "application/json")
	commit := mux.Vars(r)["commit"]
	comment := struct {
		Comment string `json:"comment"`
	}{}
	if err := json.NewDecoder(r.Body).Decode(&comment); err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Failed to add comment: %v", err))
		return
	}
	defer util.Close(r.Body)

	c := buildbot.CommitComment{
		Commit:    commit,
		User:      login.LoggedInAs(r),
		Timestamp: time.Now().UTC(),
		Message:   comment.Comment,
	}
	if err := db.PutCommitComment(&c); err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Failed to add commit comment: %v", err))
		return
	}
}
Example #26
0
func getCLPatch(detail clDetail, patchsetID int) (string, error) {
	if len(detail.Patchsets) == 0 {
		return "", fmt.Errorf("CL has no patchsets")
	}
	if patchsetID <= 0 {
		// If no valid patchsetID has been specified then use the last patchset.
		patchsetID = detail.Patchsets[len(detail.Patchsets)-1]
	}
	patchUrl := fmt.Sprintf("https://codereview.chromium.org/download/issue%d_%d.diff", detail.Issue, patchsetID)
	glog.Infof("Downloading CL patch from %s", patchUrl)
	patchResp, err := httpClient.Get(patchUrl)
	if err != nil {
		return "", fmt.Errorf("Unable to retrieve CL patch: %v", err)
	}
	defer skutil.Close(patchResp.Body)
	if patchResp.StatusCode != 200 {
		return "", fmt.Errorf("Unable to retrieve CL patch; status code %d", patchResp.StatusCode)
	}
	if patchResp.ContentLength > db.LONG_TEXT_MAX_LENGTH {
		return "", fmt.Errorf("Patch is too large; length is %d bytes.", patchResp.ContentLength)
	}
	patchBytes, err := ioutil.ReadAll(patchResp.Body)
	if err != nil {
		return "", fmt.Errorf("Unable to retrieve CL patch: %v", err)
	}
	// Double-check length in case ContentLength was -1.
	if int64(len(patchBytes)) > db.LONG_TEXT_MAX_LENGTH {
		return "", fmt.Errorf("Patch is too large; length is %d bytes.", len(patchBytes))
	}
	return string(patchBytes), nil
}
Example #27
0
// UploadFile uploads the specified file to the remote dir in Google Storage. It
// also sets the appropriate ACLs on the uploaded file.
func (gs *GsUtil) UploadFile(fileName, localDir, gsDir string) error {
	localFile := filepath.Join(localDir, fileName)
	gsFile := filepath.Join(gsDir, fileName)
	object := &storage.Object{Name: gsFile}
	f, err := os.Open(localFile)
	if err != nil {
		return fmt.Errorf("Error opening %s: %s", localFile, err)
	}
	defer util.Close(f)
	if _, err := gs.service.Objects.Insert(GS_BUCKET_NAME, object).Media(f).Do(); err != nil {
		return fmt.Errorf("Objects.Insert failed: %s", err)
	}
	glog.Infof("Copied %s to %s", localFile, fmt.Sprintf("gs://%s/%s", GS_BUCKET_NAME, gsFile))

	// All objects uploaded to CT's bucket via this util must be readable by
	// the google.com domain. This will be fine tuned later if required.
	objectAcl := &storage.ObjectAccessControl{
		Bucket: GS_BUCKET_NAME, Entity: "domain-google.com", Object: gsFile, Role: "READER",
	}
	if _, err := gs.service.ObjectAccessControls.Insert(GS_BUCKET_NAME, gsFile, objectAcl).Do(); err != nil {
		return fmt.Errorf("Could not update ACL of %s: %s", object.Name, err)
	}
	glog.Infof("Updated ACL of %s", fmt.Sprintf("gs://%s/%s", GS_BUCKET_NAME, gsFile))

	return nil
}
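A hedged example of calling UploadFile; all names are placeholders and gs is assumed to be authorized for the CT bucket:

func exampleUploadFile(gs *GsUtil) {
	if err := gs.UploadFile("results.csv", "/tmp/outputs", "benchmark_runs/run123"); err != nil {
		glog.Errorf("Upload failed: %s", err)
	}
}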
Example #28
0
// rasterizeOnce applies a single rasterizer to the given pdf file.
// If the rasterizer fails, use the errorImage.  If everything
// succeeds, upload the PNG.
func (xformer *pdfXformer) rasterizeOnce(pdfPath string, rasterizerIndex int) (string, error) {
	rasterizer := xformer.rasterizers[rasterizerIndex]
	tempdir := filepath.Dir(pdfPath)
	pngPath := path.Join(tempdir, fmt.Sprintf("%s.%s", rasterizer.String(), PNG_EXT))
	defer removeIfExists(pngPath)
	glog.Infof("> > > > rasterizing with %s", rasterizer)
	err := rasterizer.Rasterize(pdfPath, pngPath)
	if err != nil {
		glog.Warningf("rasterizing %s with %s failed: %s", filepath.Base(pdfPath), rasterizer.String(), err)
		return xformer.errorImageMd5, nil
	}
	md5, err := md5OfFile(pngPath)
	if err != nil {
		return "", err
	}
	f, err := os.Open(pngPath)
	if err != nil {
		return "", err
	}
	defer util.Close(f)
	pngUploadPath := fmt.Sprintf("%s/%s.%s", *storageImagesDirectory, md5, PNG_EXT)
	didUpload, err := uploadFile(xformer.client, f, *storageBucket, pngUploadPath, *accessControlEntity)
	if err != nil {
		return "", err
	}
	if didUpload {
		glog.Infof("> > > > uploaded %s", pngUploadPath)
	}
	return md5, nil
}
Example #29
0
// List returns the last N Rietveld issue IDs that have been ingested.
func (t *TrybotResultStorage) List(offset, size int) ([]*IssueListItem, int, error) {
	var total int
	if err := t.vdb.DB.QueryRow("SELECT count(*) FROM tries").Scan(&total); err != nil {
		return nil, 0, err
	}

	if total == 0 {
		return []*IssueListItem{}, 0, nil
	}

	rows, err := t.vdb.DB.Query("SELECT issue, max_patchset, last_updated FROM tries ORDER BY last_updated DESC LIMIT ?,?", offset, size)
	if err != nil {
		return nil, 0, fmt.Errorf("Failed to read try data from database: %s", err)
	}
	defer util.Close(rows)

	ret := make([]*IssueListItem, 0, size)
	for rows.Next() {
		listItem := &IssueListItem{}
		if err := rows.Scan(&listItem.Issue, &listItem.MaxPatchset, &listItem.LastUpdated); err != nil {
			return nil, 0, fmt.Errorf("List: Failed to read issue from row: %s", err)
		}
		ret = append(ret, listItem)
	}
	return ret, total, nil
}
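A sketch of paging through ingested issues with List; the offset and page size are illustrative and t is assumed to wrap an open database:

func exampleListIssues(t *TrybotResultStorage) {
	items, total, err := t.List(0, 20)
	if err != nil {
		glog.Errorf("Failed to list issues: %s", err)
		return
	}
	glog.Infof("Showing %d of %d issues", len(items), total)
}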
Example #30
0
// FromQuery is part of the IssueTracker interface. See documentation there.
func (c *CodesiteIssueTracker) FromQuery(q string) ([]Issue, error) {
	url := fmt.Sprintf(URL_TEMPLATE, url.QueryEscape(q), c.apiKey)

	//  This will return a JSON response of the form:
	//
	//  {
	//   "items": [
	//    {
	//     "id": 2874,
	//     "title": "this is a bug with..."
	//     "state": "open"
	//    }
	//   ]
	//  }
	resp, err := c.client.Get(url)
	if err != nil {
		return nil, fmt.Errorf("Failed to retrieve issue tracker response: %s", err)
	}
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("Issue tracker returned status code %d", resp.StatusCode)
	}
	defer util.Close(resp.Body)

	issueResponse := &IssueResponse{
		Items: []Issue{},
	}
	if err := json.NewDecoder(resp.Body).Decode(&issueResponse); err != nil {
		return nil, err
	}

	return issueResponse.Items, nil
}
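A usage sketch for FromQuery; the query string is hypothetical and c is assumed to hold a valid API key and HTTP client:

func exampleFromQuery(c *CodesiteIssueTracker) {
	issues, err := c.FromQuery("is:open label:FromSkiaPerf")
	if err != nil {
		glog.Errorf("Issue tracker query failed: %s", err)
		return
	}
	for _, issue := range issues {
		glog.Infof("Found issue: %+v", issue)
	}
}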