Example 1
// startWorkers creates a worker pool of numWorkers workers to make requests.
//
// Requests to make arrive over the targets channel, latency measurements in
// millis go out over the latencies channel, and wg is used to synchronize the
// workers' completion.
func startWorkers(targets <-chan Target, latencies chan<- float64, wg *sync.WaitGroup) error {

	wg.Add(*numWorkers)
	for i := 0; i < *numWorkers; i++ {
		c := util.NewTimeoutClient()
		go func(c *http.Client) {
			for t := range targets {
				t0 := time.Now()
				resp, err := c.Get(t.URL)
				t1 := time.Now()

				if err != nil {
					fmt.Printf("Failure for Get: %v %v\n", t.URL, err)
					continue
				}
				// Close the response body so the connection is not leaked.
				util.Close(resp.Body)
				// TODO(jcgregorio) Add stats for failures if we start seeing them.
				if resp.StatusCode != t.Code {
					fmt.Printf("Wrong status code expected %v, got %v at %v\n", t.Code, resp.StatusCode, t.URL)
				}
				duration := t1.Sub(t0)
				latencies <- float64(duration.Nanoseconds() / 1000000)
			}
			wg.Done()
		}(c)
	}

	return nil
}
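A possible driver for the worker pool above (a sketch, not taken from the original source): it assumes Target carries the URL to fetch and the expected status Code, and that numWorkers is a command-line flag, since neither definition appears in the example.

import (
	"flag"
	"fmt"
	"sync"
)

// Assumed definitions: Target and numWorkers are not shown in the example.
var numWorkers = flag.Int("num_workers", 10, "Number of request workers.")

type Target struct {
	URL  string // URL to fetch.
	Code int    // Expected HTTP status code.
}

// runLoadTest is a hypothetical caller that feeds targets to the pool and
// drains the latency measurements once all workers have finished.
func runLoadTest(targetList []Target) {
	targets := make(chan Target)
	// Buffer so workers never block on sending latencies.
	latencies := make(chan float64, len(targetList))
	wg := &sync.WaitGroup{}

	if err := startWorkers(targets, latencies, wg); err != nil {
		fmt.Printf("Failed to start workers: %v\n", err)
		return
	}
	for _, t := range targetList {
		targets <- t
	}
	// Closing the channel ends each worker's range loop.
	close(targets)
	wg.Wait()
	close(latencies)
	for ms := range latencies {
		fmt.Printf("latency: %.0f ms\n", ms)
	}
}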
Example 2
func UpdateWebappTaskV2(vars task_common.UpdateTaskVars) error {
	postUrl := WebappRoot + vars.UriPath()
	glog.Infof("Updating %v on %s", vars, postUrl)

	payload, err := json.Marshal(vars)
	if err != nil {
		return fmt.Errorf("Failed to marshal %v: %s", vars, err)
	}
	req, err := http.NewRequest("POST", postUrl, bytes.NewReader(payload))
	if err != nil {
		return fmt.Errorf("Could not create HTTP request: %s", err)
	}
	hash, err := webhook.ComputeAuthHashBase64(payload)
	if err != nil {
		return fmt.Errorf("Could not compute authentication hash: %s", err)
	}
	req.Header.Set(webhook.REQUEST_AUTH_HASH_HEADER, hash)
	client := util.NewTimeoutClient()
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("Could not update webapp task: %s", err)
	}
	defer util.Close(resp.Body)
	if resp.StatusCode != 200 {
		response, _ := ioutil.ReadAll(resp.Body)
		return fmt.Errorf("Could not update webapp task, response status code was %d: %s", resp.StatusCode, response)
	}
	return nil
}
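For context, the only thing UpdateWebappTaskV2 needs from vars is a JSON-marshallable value that reports the URL path to POST to; the real task_common.UpdateTaskVars interface lives in the CT codebase and may declare more than this guessed subset.

// Guessed subset of task_common.UpdateTaskVars as used above; the actual
// interface may contain additional methods.
type updateTaskVars interface {
	UriPath() string // Relative path appended to WebappRoot for the POST.
}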
Example 3
func main() {
	common.InitWithMetrics("certpoller", graphiteServer)
	client := util.NewTimeoutClient()
	certs := []*cert{}
	// Populate certs based on cmd-line args.
	for _, metadata := range flag.Args() {
		c := &cert{
			metadata: metadata,
			file:     fileFromMetadata(metadata),
			etag:     "",
		}
		err := get(client, c)
		if err != nil {
			glog.Fatalf("Failed to retrieve the cert %s: %s", c, err)
		}
		certs = append(certs, c)
	}

	for range time.Tick(30 * time.Minute) {
		for _, c := range certs {
			if err := get(client, c); err != nil {
				glog.Errorf("Failed to update cert %s: %s", c.metadata, err)
			}
		}
	}
}
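The cert type and the get helper are defined elsewhere in the poller; based on the fields used above, cert presumably looks roughly like the sketch below (field meanings are inferred, not confirmed by the source).

// Inferred shape of the cert struct used by the polling loop above.
type cert struct {
	metadata string // Metadata key the cert is fetched from.
	file     string // Local file path the cert is written to.
	etag     string // Last ETag seen, so unchanged certs can be skipped.
}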
Example 4
func UpdateWebappTask(gaeTaskID int64, webappURL string, extraData map[string]string) error {
	glog.Infof("Updating %s on %s with %s", gaeTaskID, webappURL, extraData)
	pwdBytes, err := ioutil.ReadFile(ctutil.WebappPasswordPath)
	if err != nil {
		return fmt.Errorf("Could not read the webapp password file: %s", err)
	}
	pwd := strings.TrimSpace(string(pwdBytes))
	postData := url.Values{}
	postData.Set("key", strconv.FormatInt(gaeTaskID, 10))
	postData.Add("password", pwd)
	for k, v := range extraData {
		postData.Add(k, v)
	}
	req, err := http.NewRequest("POST", webappURL, strings.NewReader(postData.Encode()))
	if err != nil {
		return fmt.Errorf("Could not create HTTP request: %s", err)
	}
	client := util.NewTimeoutClient()
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("Could not update webapp task: %s", err)
	}
	defer util.Close(resp.Body)
	if resp.StatusCode != 200 {
		response, _ := ioutil.ReadAll(resp.Body)
		return fmt.Errorf("Could not update webapp task, response status code was %d: %s", resp.StatusCode, response)
	}
	return nil
}
Example 5
func init() {
	var err error
	st, err = storage.New(util.NewTimeoutClient())
	if err != nil {
		panic("Can't construct HTTP client")
	}

	ingester.Register(config.CONSTRUCTOR_NANO_TRYBOT, NewTrybotResultIngester)
}
Example 6
// NewBranchTileBuilder returns an instance of BranchTileBuilder that allows
// creating tiles from the given VCS or code review system by querying db.
//
// TODO(stephana): The EventBus is used to update the internal cache as commits are updated.
func NewBranchTileBuilder(db DB, git *gitinfo.GitInfo, reviewURL string, evt *eventbus.EventBus) BranchTileBuilder {
	return &tileBuilder{
		db:        db,
		vcs:       git,
		review:    rietveld.New(reviewURL, util.NewTimeoutClient()),
		reviewURL: reviewURL,
		cache:     lru.New(MAX_ISSUE_CACHE_SIZE),
		tcache:    lru.New(MAX_TILE_CACHE_SIZE),
	}
}
Example 7
// NewFileDiffStore initializes and returns a file-based implementation of
// DiffStore. The optional http.Client is used to make HTTP requests to Google
// Storage. If nil is supplied then a default client is used. The baseDir is
// the local base directory where the DEFAULT_IMG_DIR_NAME,
// DEFAULT_DIFF_DIR_NAME and the DEFAULT_DIFFMETRICS_DIR_NAME directories
// exist. gsBucketName is the bucket images will be downloaded from.
// storageBaseDir is the directory in the bucket (if empty
// DEFAULT_GS_IMG_DIR_NAME is used).  workerPoolSize is the max number of
// simultaneous goroutines that will be created when running Get or AbsPath.
// Use RECOMMENDED_WORKER_POOL_SIZE if unsure what this value should be.
func NewFileDiffStore(client *http.Client, baseDir, gsBucketName string, storageBaseDir string, cacheFactory CacheFactory, workerPoolSize int) (diff.DiffStore, error) {
	if client == nil {
		client = util.NewTimeoutClient()
	}

	if storageBaseDir == "" {
		storageBaseDir = DEFAULT_GS_IMG_DIR_NAME
	}

	imageCache, err := lru.New(IMAGE_LRU_CACHE_SIZE)
	if err != nil {
		return nil, fmt.Errorf("Unable to alloace image LRU cache: %s", err)
	}

	diffCache := cacheFactory("di", DiffMetricsCodec(0))
	unavailableChan := make(chan *diff.DigestFailure, 10)

	statusDir := fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_STATUS_DIR_NAME)))
	failureDB, err := bolt.Open(filepath.Join(statusDir, FAILUREDB_NAME), 0600, nil)
	if err != nil {
		return nil, fmt.Errorf("Unable to open failuredb: %s", err)
	}

	fs := &FileDiffStore{
		client:              client,
		localImgDir:         fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_IMG_DIR_NAME))),
		localDiffDir:        fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_DIFF_DIR_NAME))),
		localDiffMetricsDir: fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_DIFFMETRICS_DIR_NAME))),
		localTempFileDir:    fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_TEMPFILE_DIR_NAME))),
		gsBucketName:        gsBucketName,
		storageBaseDir:      storageBaseDir,
		imageCache:          imageCache,
		diffCache:           diffCache,
		unavailableDigests:  map[string]*diff.DigestFailure{},
		unavailableChan:     unavailableChan,
		failureDB:           failureDB,
	}

	if err := fs.loadDigestFailures(); err != nil {
		return nil, err
	}
	go func() {
		for {
			digestFailure := <-unavailableChan
			if err := fs.addDigestFailure(digestFailure); err != nil {
				glog.Errorf("Unable to store digest failure: %s", err)
			} else if err = fs.loadDigestFailures(); err != nil {
				glog.Errorf("Unable to load failures: %s", err)
			}
		}
	}()

	fs.activateWorkers(workerPoolSize)
	return fs, nil
}
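Putting the doc comment into practice, construction might look like the sketch below: a nil client selects the default timeout client, an empty storageBaseDir falls back to DEFAULT_GS_IMG_DIR_NAME, and RECOMMENDED_WORKER_POOL_SIZE is the suggested pool size. The base directory, bucket name, and cache factory are made-up placeholders.

// Hypothetical construction of a FileDiffStore; newMemCacheFactory, the
// bucket name, and the base directory are placeholders, not real values.
func newDiffStore() (diff.DiffStore, error) {
	return NewFileDiffStore(
		nil,                   // Use the default util.NewTimeoutClient().
		"/tmp/diffstore",      // Local base directory.
		"example-gold-bucket", // GS bucket images are downloaded from.
		"",                    // Empty: fall back to DEFAULT_GS_IMG_DIR_NAME.
		newMemCacheFactory(),  // Placeholder CacheFactory.
		RECOMMENDED_WORKER_POOL_SIZE,
	)
}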
Example 8
func TestAreTimestampsEqual(t *testing.T) {
	gs, err := NewGsUtil(util.NewTimeoutClient())
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}

	tmpDir := filepath.Join(os.TempDir(), "util_test")
	util.Mkdir(tmpDir, 0777)
	defer util.RemoveAll(tmpDir)

	f, err := os.Create(filepath.Join(tmpDir, TIMESTAMP_FILE_NAME))
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	defer util.Close(f)

	// Test with matching timestamps.
	if _, err := f.WriteString(GS_TEST_TIMESTAMP_VALUE); err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	result1, err := gs.AreTimeStampsEqual(tmpDir, "unit-tests/util/")
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	assert.True(t, result1)

	// Test with differing timestamps.
	if _, err := f.WriteString(GS_TEST_TIMESTAMP_VALUE); err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	result2, err := gs.AreTimeStampsEqual(tmpDir, "unit-tests/util/")
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
	}
	assert.False(t, result2)

	// Test with Google Storage timestamp missing.
	result3, err := gs.AreTimeStampsEqual(tmpDir, "unit-tests/util/dummy_name/")
	if err == nil {
		t.Error("Expected an error")
	}
	assert.False(t, result3)

	// Test with local timestamp missing.
	result4, err := gs.AreTimeStampsEqual(tmpDir+"dummy_name", "unit-tests/util/")
	if err == nil {
		t.Error("Expected an error")
	}
	assert.False(t, result4)
}
Example 9
// NewFileDiffStore initializes and returns a file-based implementation of
// DiffStore. The optional http.Client is used to make HTTP requests to Google
// Storage. If nil is supplied then a default client is used. The baseDir is the
// local base directory where the DEFAULT_IMG_DIR_NAME, DEFAULT_DIFF_DIR_NAME and
// the DEFAULT_DIFFMETRICS_DIR_NAME directories exist. gsBucketName is the bucket
// images will be downloaded from. storageBaseDir is the directory in the
// bucket (if empty DEFAULT_GS_IMG_DIR_NAME is used).
// workerPoolSize is the max number of simultaneous goroutines that will be
// created when running Get or AbsPath.
// Use RECOMMENDED_WORKER_POOL_SIZE if unsure what this value should be.
func NewFileDiffStore(client *http.Client, baseDir, gsBucketName string, storageBaseDir string, cacheFactory CacheFactory, workerPoolSize int) (diff.DiffStore, error) {
	if client == nil {
		client = util.NewTimeoutClient()
	}

	if storageBaseDir == "" {
		storageBaseDir = DEFAULT_GS_IMG_DIR_NAME
	}

	imageCache, err := lru.New(IMAGE_LRU_CACHE_SIZE)
	if err != nil {
		return nil, fmt.Errorf("Unable to alloace image LRU cache: %s", err)
	}

	diffCache := cacheFactory("di", DiffMetricsCodec(0))
	unavailableChan := make(chan string, 10)

	fs := &FileDiffStore{
		client:              client,
		localImgDir:         fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_IMG_DIR_NAME))),
		localDiffDir:        fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_DIFF_DIR_NAME))),
		localDiffMetricsDir: fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_DIFFMETRICS_DIR_NAME))),
		localTempFileDir:    fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_TEMPFILE_DIR_NAME))),
		gsBucketName:        gsBucketName,
		storageBaseDir:      storageBaseDir,
		imageCache:          imageCache,
		diffCache:           diffCache,
		unavailableDigests:  map[string]bool{},
		unavailableChan:     unavailableChan,
	}

	// TODO(stephana): Clean this up and store digests to ignore in the
	// database and expose them on the front-end.
	// This is the hash of empty content; we should ignore it right away.
	unavailableChan <- "d41d8cd98f00b204e9800998ecf8427e"
	go func() {
		var ignoreDigest string
		for {
			ignoreDigest = <-unavailableChan
			func() {
				fs.unavailableMutex.Lock()
				defer fs.unavailableMutex.Unlock()
				fs.unavailableDigests[ignoreDigest] = true
			}()
		}
	}()

	fs.activateWorkers(workerPoolSize)
	return fs, nil
}
Example 10
func NewBuilder(git *gitinfo.GitInfo, address string, tileSize int, traceBuilder tiling.TraceBuilder) (*Builder, error) {
	review := rietveld.New(rietveld.RIETVELD_SKIA_URL, util.NewTimeoutClient())
	builder, err := db.NewBuilder(git, address, tileSize, traceBuilder)
	if err != nil {
		return nil, fmt.Errorf("Failed to construct Builder: %s", err)
	}

	return &Builder{
		Builder: builder,
		vcs:     git,
		review:  review,
		cache:   map[string]*rietveld.Issue{},
	}, nil
}
Example 11
func openUri(uriPath string) (*http.Response, error) {
	uri := GS_TEST_DATA_ROOT_URI + uriPath

	client := util.NewTimeoutClient()
	request, err := gs.RequestForStorageURL(uri)
	if err != nil {
		return nil, err
	}

	resp, err := client.Do(request)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("Downloading %s failed. Got response status: %d", uri, resp.StatusCode)
	}

	return resp, nil
}
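A sketch of how openUri might be consumed; the relative path is made up, and the body is closed with the same util.Close helper used elsewhere in these examples.

// Hypothetical caller of openUri; "images/testdata.json" is a made-up path
// under GS_TEST_DATA_ROOT_URI.
func readTestData() ([]byte, error) {
	resp, err := openUri("images/testdata.json")
	if err != nil {
		return nil, err
	}
	defer util.Close(resp.Body)
	return ioutil.ReadAll(resp.Body)
}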
Example 12
// get retrieves the named value from the Metadata server. See
// https://developers.google.com/compute/docs/metadata
//
// level should be either "instance" or "project" for the kind of
// metadata to retrieve.
func get(name string, level string) (string, error) {
	req, err := http.NewRequest("GET", "http://metadata/computeMetadata/v1/"+level+"/attributes/"+name, nil)
	if err != nil {
		return "", fmt.Errorf("metadata.Get() failed to build request: %s", err)
	}
	c := util.NewTimeoutClient()
	req.Header.Add("Metadata-Flavor", "Google")
	resp, err := c.Do(req)
	if err != nil {
		return "", fmt.Errorf("metadata.Get() failed to make HTTP request for %s: %s", name, err)
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("HTTP response has status %d", resp.StatusCode)
	}
	defer util.Close(resp.Body)
	value, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("Failed to read %s from metadata server: %s", name, err)
	}
	return string(value), nil
}
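Calling get is then a one-liner; the attribute name below is hypothetical, and level must be either "instance" or "project" as noted in the comment above.

// Hypothetical use of get; "owner" is a made-up attribute name.
func logOwner() {
	owner, err := get("owner", "instance")
	if err != nil {
		glog.Errorf("Could not read instance metadata: %s", err)
		return
	}
	glog.Infof("owner = %s", owner)
}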
Example 13
func main() {
	defer common.LogPanic()
	common.Init()
	client := util.NewTimeoutClient()
	retVal := 255

	// Retrieve each cert specified by the cmd-line args.
	for _, metadata := range flag.Args() {
		c := &cert{
			metadata: metadata,
			file:     fileFromMetadata(metadata),
			etag:     "",
		}
		err := get(client, c)
		if err != nil {
			glog.Fatalf("Failed to retrieve the cert %s: %s", c, err)
		}
		retVal = 0
	}

	os.Exit(retVal)
}
Example 14
	"github.com/gorilla/mux"
	"github.com/skia-dev/glog"

	"go.skia.org/infra/ct/go/ctfe/task_common"
	ctfeutil "go.skia.org/infra/ct/go/ctfe/util"
	"go.skia.org/infra/ct/go/db"
	ctutil "go.skia.org/infra/ct/go/util"
	skutil "go.skia.org/infra/go/util"
	"go.skia.org/infra/go/webhook"
)

var (
	addTaskTemplate     *template.Template = nil
	runsHistoryTemplate *template.Template = nil

	httpClient = skutil.NewTimeoutClient()
)

func ReloadTemplates(resourcesDir string) {
	addTaskTemplate = template.Must(template.ParseFiles(
		filepath.Join(resourcesDir, "templates/chromium_perf.html"),
		filepath.Join(resourcesDir, "templates/header.html"),
		filepath.Join(resourcesDir, "templates/titlebar.html"),
	))
	runsHistoryTemplate = template.Must(template.ParseFiles(
		filepath.Join(resourcesDir, "templates/chromium_perf_runs_history.html"),
		filepath.Join(resourcesDir, "templates/header.html"),
		filepath.Join(resourcesDir, "templates/titlebar.html"),
	))
}
Example 15
func NewIssueTracker(apiKey string) IssueTracker {
	return &CodesiteIssueTracker{
		apiKey: apiKey,
		client: util.NewTimeoutClient(),
	}
}
Example 16
// NewClient creates a new client for interacting with Rietveld.
func NewClient() *Reitveld {
	return &Reitveld{
		client: util.NewTimeoutClient(),
	}
}
Example 17
func TestPertTrace(t *testing.T) {
	b, err := ioutil.ReadFile(filepath.Join("testdata", "rietveld_response.txt"))
	assert.Nil(t, err)
	m := mockhttpclient.NewURLMock()
	// Mock this only once to confirm that caching works.
	m.MockOnce("https://codereview.chromium.org/api/1490543002", b)

	review := rietveld.New(rietveld.RIETVELD_SKIA_URL, util.NewTimeoutClient())

	vcsCommits := []*vcsinfo.LongCommit{
		&vcsinfo.LongCommit{
			ShortCommit: &vcsinfo.ShortCommit{
				Hash:    "foofoofoo",
				Author:  "*****@*****.**",
				Subject: "some commit",
			},
		},
	}
	vcs := ingestion.MockVCS(vcsCommits)

	builder := &Builder{
		Builder: nil,
		vcs:     vcs,
		review:  review,
		cache:   map[string]*rietveld.Issue{},
	}

	commits := []*db.CommitID{
		&db.CommitID{
			Timestamp: time.Now(),
			ID:        "1",
			Source:    "https://codereview.chromium.org/1490543002",
		},
		&db.CommitID{
			Timestamp: time.Now(),
			ID:        "foofoofoo",
			Source:    "master",
		},
	}

	long := builder.convertToLongCommits(commits, "master")
	assert.Equal(t, 1, len(long), "Only one commit should match master.")
	assert.Equal(t, "foofoofoo", long[0].ID)
	assert.Equal(t, "some commit", long[0].Desc)
	assert.Equal(t, "*****@*****.**", long[0].Author)

	long = builder.convertToLongCommits(commits, "https://codereview.chromium.org/1490543002")
	assert.Equal(t, 1, len(long), "Only one commit should match the trybot.")
	assert.Equal(t, "1", long[0].ID)
	assert.Equal(t, "no merge conflicts here.", long[0].Desc)
	assert.Equal(t, "jcgregorio", long[0].Author)

	long = builder.convertToLongCommits(commits, "")
	assert.Equal(t, 2, len(long), "Both commits should now appear.")
	assert.Equal(t, "1", long[0].ID)
	assert.Equal(t, "no merge conflicts here.", long[0].Desc)
	assert.Equal(t, "jcgregorio", long[0].Author)
	assert.Equal(t, "foofoofoo", long[1].ID)
	assert.Equal(t, "some commit", long[1].Desc)
	assert.Equal(t, "*****@*****.**", long[1].Author)

	badCommits := []*db.CommitID{
		&db.CommitID{
			Timestamp: time.Now(),
			ID:        "2",
			Source:    "https://codereview.chromium.org/99999999",
		},
		&db.CommitID{
			Timestamp: time.Now(),
			ID:        "barbarbar",
			Source:    "master",
		},
	}
	long = builder.convertToLongCommits(badCommits, "")
	assert.Equal(t, 2, len(long), "Both commits should now appear.")
	assert.Equal(t, "2", long[0].ID)
	assert.Equal(t, "", long[0].Desc)
	assert.Equal(t, "", long[0].Author)
	assert.Equal(t, "barbarbar", long[1].ID)
	assert.Equal(t, "", long[1].Desc)
	assert.Equal(t, "", long[1].Author)

}
Example 18
func main() {
	defer common.LogPanic()
	worker_common.Init()

	defer util.TimeTrack(time.Now(), "Running Chromium Perf on Swarming")
	defer glog.Flush()

	// Validate required arguments.
	if *chromiumBuild == "" {
		glog.Error("Must specify --chromium_build")
		return
	}
	if *benchmarkName == "" {
		glog.Error("Must specify --benchmark_name")
		return
	}
	if *telemetryBinariesDir == "" {
		glog.Error("Must specify --telemetry_binaries_dir")
		return
	}
	if *pageSetsDir == "" {
		glog.Error("Must specify --page_sets_dir")
		return
	}
	if *buildbotMaster == "" {
		glog.Error("Must specify --master")
		return
	}
	if *buildbotBuilder == "" {
		glog.Error("Must specify --builder")
		return
	}
	if *gitHash == "" {
		glog.Error("Must specify --git_hash")
		return
	}
	chromiumBinary := filepath.Join(*chromiumBuild, util.BINARY_CHROME)

	// Establish output paths.
	localOutputDir := util.BenchmarkRunsDir
	skutil.MkdirAll(localOutputDir, 0700)

	fileInfos, err := ioutil.ReadDir(*pageSetsDir)
	if err != nil {
		glog.Errorf("Unable to read the pagesets dir %s: %s", *pageSetsDir, err)
		return
	}

	for _, fileInfo := range fileInfos {
		if fileInfo.IsDir() {
			continue
		}
		if err := runBenchmark(fileInfo.Name(), *pageSetsDir, localOutputDir, *chromiumBuild, chromiumBinary, *browserExtraArgs); err != nil {
			glog.Errorf("Error while running benchmark: %s", err)
			return
		}
	}

	// Convert output to dashboard JSON v1 in order to upload to chromeperf.
	// More information is in http://www.chromium.org/developers/speed-infra/performance-dashboard/sending-data-to-the-performance-dashboard
	client := skutil.NewTimeoutClient()
	outputFileInfos, err := ioutil.ReadDir(localOutputDir)
	if err != nil {
		glog.Errorf("Unable to read %s: %s", localOutputDir, err)
		return
	}
	for _, fileInfo := range outputFileInfos {
		if !fileInfo.IsDir() {
			continue
		}
		resultsFile := filepath.Join(localOutputDir, fileInfo.Name(), "results-chart.json")
		if err := uploadResultsToPerfDashboard(resultsFile, client); err != nil {
			glog.Errorf("Could not upload to perf dashboard: %s", err)
			continue
		}
	}
}