Example #1
// processRows reads all the rows from the clusters table and constructs a
// slice of ClusterSummary values from them.
func processRows(rows *sql.Rows, err error) ([]*types.ClusterSummary, error) {
	if err != nil {
		return nil, fmt.Errorf("Failed to read from database: %s", err)
	}
	defer util.Close(rows)

	glog.Infof("Found rows %v", rows)

	ret := []*types.ClusterSummary{}

	for rows.Next() {
		var body string
		var id int64
		if err := rows.Scan(&id, &body); err != nil {
			return nil, fmt.Errorf("Failed to read row from database: %s", err)
		}
		c := &types.ClusterSummary{}
		if err := json.Unmarshal([]byte(body), c); err != nil {
			glog.Errorf("Found invalid JSON in clusters table: %d %s", id, err)
			return nil, fmt.Errorf("Failed to read row from database: %s", err)
		}
		c.ID = id
		glog.Infof("ID: %d", id)
		ret = append(ret, c)
	}

	return ret, nil
}
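A minimal caller sketch (hypothetical: the SQL text and column names are assumed to mirror the id/body pair scanned above); passing db.Query's two return values straight through is exactly what the (rows, err) signature allows.
// getClusterSummaries is a hypothetical caller: it forwards the (*sql.Rows, error)
// pair from Query directly into processRows.
func getClusterSummaries(db *sql.DB) ([]*types.ClusterSummary, error) {
	return processRows(db.Query("SELECT id, body FROM clusters ORDER BY id"))
}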
Example #2
// UploadFile uploads the specified file to the remote dir in Google Storage. It
// also sets the appropriate ACLs on the uploaded file.
func (gs *GsUtil) UploadFile(fileName, localDir, gsDir string) error {
	localFile := filepath.Join(localDir, fileName)
	gsFile := filepath.Join(gsDir, fileName)
	object := &storage.Object{Name: gsFile}
	f, err := os.Open(localFile)
	if err != nil {
		return fmt.Errorf("Error opening %s: %s", localFile, err)
	}
	defer util.Close(f)
	if _, err := gs.service.Objects.Insert(GS_BUCKET_NAME, object).Media(f).Do(); err != nil {
		return fmt.Errorf("Objects.Insert failed: %s", err)
	}
	glog.Infof("Copied %s to %s", localFile, fmt.Sprintf("gs://%s/%s", GS_BUCKET_NAME, gsFile))

	// All objects uploaded to CT's bucket via this util must be readable by
	// the google.com domain. This will be fine tuned later if required.
	objectAcl := &storage.ObjectAccessControl{
		Bucket: GS_BUCKET_NAME, Entity: "domain-google.com", Object: gsFile, Role: "READER",
	}
	if _, err := gs.service.ObjectAccessControls.Insert(GS_BUCKET_NAME, gsFile, objectAcl).Do(); err != nil {
		return fmt.Errorf("Could not update ACL of %s: %s", object.Name, err)
	}
	glog.Infof("Updated ACL of %s", fmt.Sprintf("gs://%s/%s", GS_BUCKET_NAME, gsFile))

	return nil
}
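A hedged usage sketch; the file name and directory values are placeholders, not paths from the original code.
// uploadOneResult is a hypothetical wrapper showing the argument order: the bare
// file name, the local directory containing it, and the destination directory
// inside GS_BUCKET_NAME.
func uploadOneResult(gs *GsUtil) error {
	return gs.UploadFile("results.json", "/tmp/ct_output", "tasks/run_output")
}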
Example #3
func _main(ts db.DB) {
	week := time.Hour * 24 * 7
	commits, err := ts.List(time.Now().Add(-week), time.Now())
	if err != nil {
		glog.Errorf("Failed to load commits: %s", err)
		return
	}
	if len(commits) > 50 {
		commits = commits[:50]
	}

	begin := time.Now()
	_, _, err = ts.TileFromCommits(commits)
	if err != nil {
		glog.Errorf("Failed to load Tile: %s", err)
		return
	}
	glog.Infof("Time to load tile: %v", time.Now().Sub(begin))
	// Now load a second time.
	begin = time.Now()
	_, _, err = ts.TileFromCommits(commits)
	if err != nil {
		glog.Errorf("Failed to load Tile: %s", err)
		return
	}
	glog.Infof("Time to load tile the second time: %v", time.Now().Sub(begin))
}
Example #4
func main() {
	defer common.LogPanic()
	common.Init()

	args := flag.Args()
	if len(args) != 2 {
		flag.Usage()
		os.Exit(1)
	}

	bucket, prefix := args[0], args[1]
	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}

	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}

	eventBus := eventbus.New(globalEventBus)
	eventBus.SubscribeAsync(event.StorageEvent(bucket, prefix), func(evData interface{}) {
		data := evData.(*event.GoogleStorageEventData)
		glog.Infof("Google Storage notification from bucket\n %s:  %s : %s", data.Updated, data.Bucket, data.Name)
	})
	select {}
}
Example #5
// processTimeRange calls gs.GetLatestGSDirs to get a list of Google Storage
// directories covering the given time range, then lists and processes the JSON
// objects found under each of them.
func (xformer *pdfXformer) processTimeRange(start time.Time, end time.Time) {
	glog.Infof("Processing time range: (%s, %s)", start.Truncate(time.Second), end.Truncate(time.Second))
	for _, dir := range gs.GetLatestGSDirs(start.Unix(), end.Unix(), *storageJsonDirectory) {
		glog.Infof("> Reading gs://%s/%s\n", *storageBucket, dir)
		requestedObjects := xformer.client.storageService.Objects.List(*storageBucket).Prefix(dir).Fields(
			"nextPageToken", "items/updated", "items/md5Hash", "items/mediaLink", "items/name", "items/metadata")
		for requestedObjects != nil {
			responseObjects, err := requestedObjects.Do()
			if err != nil {
				// Stop paging this directory rather than dereferencing a nil response below.
				glog.Errorf("request %#v failed: %s", requestedObjects, err)
				break
			}
			for _, jsonObject := range responseObjects.Items {
				xformer.counter++
				glog.Infof("> > Processing object:  gs://%s/%s {%d}", *storageBucket, jsonObject.Name, xformer.counter)
				xformer.processJsonFile(jsonObject)
			}
			if len(responseObjects.NextPageToken) > 0 {
				requestedObjects.PageToken(responseObjects.NextPageToken)
			} else {
				requestedObjects = nil
			}
		}
	}
	glog.Infof("finished time range.")
}
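A short hypothetical driver for the function above; the 24-hour window is an assumption, as the original code does not show how processTimeRange is invoked.
// processLastDay processes everything uploaded during the previous 24 hours.
func (xformer *pdfXformer) processLastDay() {
	end := time.Now()
	xformer.processTimeRange(end.Add(-24*time.Hour), end)
}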
Example #6
// get retrieves the metadata file if it's changed and writes it to the correct location.
func get(client *http.Client, cert *cert) error {
	// We aren't using the metadata package here because we need to set/get etags.
	r, err := http.NewRequest("GET", "http://metadata/computeMetadata/v1/project/attributes/"+cert.metadata, nil)
	if err != nil {
		return fmt.Errorf("Failed to create request for metadata: %s", err)
	}
	r.Header.Set("Metadata-Flavor", "Google")
	if cert.etag != "" {
		r.Header.Set("If-None-Match", cert.etag)
	}
	resp, err := client.Do(r)
	if err != nil {
		return fmt.Errorf("Failed to retrieve metadata for %s: %s", cert.metadata, err)
	}
	if resp.StatusCode != 200 {
		if cert.etag != "" && resp.StatusCode == 304 {
			// The etag is set and matches what we've already seen, so the file is
			// unchanged. Note that this can't happen the first time get() is called
			// for each file as etag won't be set, so we'll fall through to the code
			// below in that case.
			glog.Infof("etag unchanged for %s: %s", cert.file, cert.etag)
			return nil
		} else {
			return fmt.Errorf("Unexpected status response: %d: %s", resp.StatusCode, resp.Status)
		}
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("Failed to read metadata for %s: %s", cert.metadata, err)
	}
	glog.Infof("Read body for %s: len %d", cert.file, len(body))

	// Store the etag to be used for the next time through get().
	cert.etag = resp.Header.Get("ETag")

	newMD5Hash := fmt.Sprintf("%x", md5.Sum(body))
	if cert.etag != "" || newMD5Hash != md5File(cert.file) {
		// Write out the file to a temp file then sudo mv it over to the right location.
		f, err := ioutil.TempFile("", "certpoller")
		if err != nil {
			return fmt.Errorf("Failed to create tmp cert file for %s: %s", cert.metadata, err)
		}
		n, err := f.Write(body)
		if err != nil || n != len(body) {
			return fmt.Errorf("Failed to write cert len(body)=%d, n=%d: %s", len(body), n, err)
		}
		tmpName := f.Name()
		if err := f.Close(); err != nil {
			return fmt.Errorf("Failed to close temporary file: %v", err)
		}
		cmd := exec.Command("sudo", "mv", tmpName, cert.file)
		err = cmd.Run()
		if err != nil {
			return fmt.Errorf("Failed to mv certfile into place for %s: %s", cert.metadata, err)
		}
	}
	glog.Infof("Successfully wrote %s", cert.file)
	return nil
}
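Because the etag/If-None-Match logic only pays off across repeated calls, here is a hypothetical polling loop; the interval and the error handling are assumptions, not part of the original poller.
// pollCert re-fetches one cert's metadata periodically; unchanged metadata is
// skipped cheaply via the etag check inside get.
func pollCert(client *http.Client, c *cert) {
	for _ = range time.Tick(10 * time.Minute) {
		if err := get(client, c); err != nil {
			glog.Errorf("Failed metadata poll for %s: %s", c.metadata, err)
		}
	}
}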
Example #7
func main() {
	common.Init()

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}

	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}

	eventBus := eventbus.New(globalEventBus)

	// Send events every so often.
	for _ = range time.Tick(2 * time.Second) {
		evData := &event.GoogleStorageEventData{
			Bucket:  "test-bucket",
			Name:    "test-name",
			Updated: time.Now().String(),
		}
		eventBus.Publish(event.GLOBAL_GOOGLE_STORAGE, evData)
		glog.Infof("Sent Event: %#v ", evData)
	}
}
Example #8
func main() {
	defer common.LogPanic()
	common.Init()

	args := flag.Args()
	if len(args) != 3 {
		glog.Errorf("Expected arguments: branch target buildID")
		glog.Errorf("i.e.:  git_master-skia razor-userdebug 1772442")
		os.Exit(1)
	}

	// Set the arguments necessary to lookup the git hash.
	branch := args[0]
	target := args[1]
	buildID := args[2]
	glog.Infof("Branch, target, buildID: %s, %s, %s", branch, target, buildID)

	// Set up the oauth client.
	var client *http.Client
	var err error

	// In this case we don't want a backoff transport since the Apiary backend
	// seems to fail a lot, so we basically want to fall back to polling if a
	// call fails.
	transport := &http.Transport{
		Dial: util.DialTimeout,
	}

	if *local {
		// Use a local client secret file to load data.
		client, err = auth.InstalledAppClient(OAUTH_CACHE_FILEPATH, CLIENT_SECRET_FILEPATH,
			transport,
			androidbuildinternal.AndroidbuildInternalScope,
			storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create installed app oauth client:%s", err)
		}
	} else {
		// Use compute engine service account.
		client = auth.GCEServiceAccountClient(transport)
	}

	f, err := androidbuild.New("/tmp/android-gold-ingest", client)
	if err != nil {
		glog.Fatalf("Failed to construct client: %s", err)
	}
	for {
		r, err := f.Get(branch, target, buildID)
		if err != nil {
			glog.Errorf("Failed to get requested info: %s", err)
			time.Sleep(1 * time.Minute)
			continue
		}
		if r != nil {
			glog.Infof("Successfully found: %#v", *r)
		}

		time.Sleep(1 * time.Minute)
	}
}
Example #9
func main() {
	common.Init()

	v, err := skiaversion.GetVersion()
	if err != nil {
		glog.Fatal(err)
	}
	glog.Infof("Version %s, built at %s", v.Commit, v.Date)

	if *nsqdAddress == "" {
		glog.Fatal("Missing address of nsqd server.")
	}

	globalEventBus, err := geventbus.NewNSQEventBus(*nsqdAddress)
	if err != nil {
		glog.Fatalf("Unable to connect to NSQ server: %s", err)
	}

	eventBus := eventbus.New(globalEventBus)
	eventBus.SubscribeAsync(event.GLOBAL_GOOGLE_STORAGE, func(evData interface{}) {
		data := evData.(*event.GoogleStorageEventData)
		glog.Infof("Google Storage notification from bucket\n %s:  %s : %s", data.Updated, data.Bucket, data.Name)
	})
	select {}
}
Example #10
func pollAndExecOnce() {
	pending, err := frontend.GetOldestPendingTaskV2()
	if err != nil {
		glog.Error(err)
		return
	}
	task := asPollerTask(pending)
	if task == nil {
		return
	}
	taskName, id := task.GetTaskName(), task.GetCommonCols().Id
	glog.Infof("Preparing to execute task %s %d", taskName, id)
	if err = updateAndBuild(); err != nil {
		glog.Error(err)
		return
	}
	glog.Infof("Executing task %s %d", taskName, id)
	if err = task.Execute(); err == nil {
		glog.Infof("Completed task %s %d", taskName, id)
	} else {
		glog.Errorf("Task %s %d failed: %v", taskName, id, err)
		if !*dryRun {
			if err := updateWebappTaskSetFailed(task); err != nil {
				glog.Error(err)
			}
		}
	}
}
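A hypothetical outer loop for the single-shot poller above; the 30-second pause is an assumption.
// pollAndExecLoop repeatedly runs one poll/execute cycle with a fixed pause
// between iterations.
func pollAndExecLoop() {
	for {
		pollAndExecOnce()
		time.Sleep(30 * time.Second)
	}
}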
// LogFileInfo logs the FileInfoSlice in human-readable form: each entry's name and whether it is a directory.
func (s FileInfoSlice) LogFileInfo() {
	glog.Infof("Slice contains %d file elements", len(s))
	for _, fi := range s {
		glog.Infof("Name %s, Is directory: %t", fi.Name(), fi.IsDir())
	}
	glog.Info("End File Infos")
}
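A hedged example of building the slice that LogFileInfo expects; it assumes FileInfoSlice is declared as a slice of os.FileInfo so the conversion below compiles.
// logDirContents lists a directory and logs each entry through LogFileInfo.
func logDirContents(dir string) {
	infos, err := ioutil.ReadDir(dir)
	if err != nil {
		glog.Errorf("Failed to read directory %s: %s", dir, err)
		return
	}
	FileInfoSlice(infos).LogFileInfo()
}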
Example #12
// scheduleBuilds finds builders with no pending builds, pops the
// highest-priority builds for each from the queue, and requests builds using
// buildbucket.
func scheduleBuilds(q *build_queue.BuildQueue, bb *buildbucket.Client) error {
	errMsg := "Failed to schedule builds: %v"

	// Get the list of idle buildslaves, update the BuildQueue.
	var wg sync.WaitGroup
	var free []*buildslave
	var freeErr error
	wg.Add(1)
	go func() {
		defer wg.Done()
		free, freeErr = getFreeBuildslaves()
	}()

	var updateErr error
	wg.Add(1)
	go func() {
		defer wg.Done()
		updateErr = q.Update()
	}()

	wg.Wait()
	if updateErr != nil {
		return fmt.Errorf(errMsg, updateErr)
	}
	if freeErr != nil {
		return fmt.Errorf(errMsg, freeErr)
	}

	// Sort the list of idle buildslaves, for saner log viewing.
	sort.Sort(buildslaveSlice(free))

	// Schedule builds on free buildslaves.
	errs := []error{}
	glog.Infof("Free buildslaves:")
	for _, s := range free {
		glog.Infof("\t%s", s.Name)
		build, err := q.Pop(s.Builders)
		if err == build_queue.ERR_EMPTY_QUEUE {
			continue
		}
		if *local {
			glog.Infof("Would schedule: %s @ %s, score = %0.3f", build.Builder, build.Commit[0:7], build.Score)
		} else {
			scheduled, err := bb.RequestBuild(build.Builder, s.Master, build.Commit, build.Repo, build.Author)
			if err != nil {
				errs = append(errs, err)
			} else {
				glog.Infof("Scheduled: %s @ %s, score = %0.3f, id = %s", build.Builder, build.Commit[0:7], build.Score, scheduled.Id)
			}
		}
	}
	if len(errs) > 0 {
		errString := "Got errors when scheduling builds:"
		for _, err := range errs {
			errString += fmt.Sprintf("\n%v", err)
		}
		return fmt.Errorf(errString)
	}
	return nil
}
Example #13
// rasterizeOnce applies a single rasterizer to the given pdf file.
// If the rasterizer fails, use the errorImage.  If everything
// succeeds, upload the PNG.
func (xformer *pdfXformer) rasterizeOnce(pdfPath string, rasterizerIndex int) (string, error) {
	rasterizer := xformer.rasterizers[rasterizerIndex]
	tempdir := filepath.Dir(pdfPath)
	pngPath := path.Join(tempdir, fmt.Sprintf("%s.%s", rasterizer.String(), PNG_EXT))
	defer removeIfExists(pngPath)
	glog.Infof("> > > > rasterizing with %s", rasterizer)
	err := rasterizer.Rasterize(pdfPath, pngPath)
	if err != nil {
		glog.Warningf("rasterizing %s with %s failed: %s", filepath.Base(pdfPath), rasterizer.String(), err)
		return xformer.errorImageMd5, nil
	}
	md5, err := md5OfFile(pngPath)
	if err != nil {
		return "", err
	}
	f, err := os.Open(pngPath)
	if err != nil {
		return "", err
	}
	defer util.Close(f)
	pngUploadPath := fmt.Sprintf("%s/%s.%s", *storageImagesDirectory, md5, PNG_EXT)
	didUpload, err := uploadFile(xformer.client, f, *storageBucket, pngUploadPath, *accessControlEntity)
	if err != nil {
		return "", err
	}
	if didUpload {
		glog.Infof("> > > > uploaded %s", pngUploadPath)
	}
	return md5, nil
}
Example #14
// Fetch retrieves the file contents.
//
// Callers must call Close() on the returned io.ReadCloser.
func (b ResultsFileLocation) Fetch() (io.ReadCloser, error) {
	if strings.HasPrefix(b.URI, "file://") {
		return os.Open(b.URI[6:])
	} else {
		for i := 0; i < MAX_URI_GET_TRIES; i++ {
			glog.Infof("Fetching: %s", b.Name)
			request, err := gs.RequestForStorageURL(b.URI)
			if err != nil {
				glog.Warningf("Unable to create Storage MediaURI request: %s\n", err)
				continue
			}
			resp, err := client.Do(request)
			if err != nil {
				glog.Warningf("Unable to retrieve URI while creating file iterator: %s", err)
				continue
			}
			if resp.StatusCode != 200 {
				glog.Errorf("Failed to retrieve: %d  %s", resp.StatusCode, resp.Status)
			}
			glog.Infof("GS FETCH %s", b.URI)
			return resp.Body, nil
		}
		return nil, fmt.Errorf("Failed fetching JSON after %d attempts", MAX_URI_GET_TRIES)
	}
}
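A caller sketch that honors the contract in the doc comment (callers must close the returned io.ReadCloser); util.Close mirrors the idiom used elsewhere in these examples.
// readResultsFile is a hypothetical caller: fetch, read everything, then close.
func readResultsFile(b ResultsFileLocation) ([]byte, error) {
	r, err := b.Fetch()
	if err != nil {
		return nil, err
	}
	defer util.Close(r)
	return ioutil.ReadAll(r)
}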
Example #15
// NewIngestionProcess creates a Process for ingesting data.
func NewIngestionProcess(git *gitinfo.GitInfo, tileDir, datasetName string, ri ingester.ResultIngester, config map[string]string, every time.Duration, nCommits int, minDuration time.Duration, statusDir, metricName string) ProcessStarter {
	return func() {
		i, err := ingester.NewIngester(git, tileDir, datasetName, ri, nCommits, minDuration, config, statusDir, metricName)
		if err != nil {
			glog.Fatalf("Failed to create Ingester: %s", err)
		}

		glog.Infof("Starting %s ingester. Run every %s.", datasetName, every.String())

		// oneStep is a single round of ingestion.
		oneStep := func() {
			glog.Infof("Running ingester: %s", datasetName)
			err := i.Update()
			if err != nil {
				glog.Error(err)
			}
			glog.Infof("Finished running ingester: %s", datasetName)
		}

		// Start the ingester.
		go func() {
			oneStep()
			for _ = range time.Tick(every) {
				oneStep()
			}
		}()
	}
}
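The returned ProcessStarter is just a func(); a hypothetical caller builds one and invokes it, after which the goroutine inside keeps the ticker running. Every argument value below is an assumption chosen only to show the call shape.
// startNanoIngester is a hypothetical caller of NewIngestionProcess.
func startNanoIngester(git *gitinfo.GitInfo, ri ingester.ResultIngester) {
	start := NewIngestionProcess(git, "/tmp/tiles", "nano", ri,
		map[string]string{}, 5*time.Minute, 50, time.Hour, "/tmp/status", "nano-ingest")
	start()
}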
// waitForAnalysis waits for files that need to be analyzed (from forAnalysis) and makes a copy of
// them in config.Aggregator.BinaryFuzzPath with their hash as a file name.
// It then analyzes each file using the supplied AnalysisPackage and then signals that the results should be
// uploaded. If any unrecoverable errors happen, this method terminates.
func (agg *BinaryAggregator) waitForAnalysis(identifier int, analysisPackage AnalysisPackage) {
	defer agg.pipelineWaitGroup.Done()
	defer go_metrics.GetOrRegisterCounter("analysis_process_count", go_metrics.DefaultRegistry).Dec(int64(1))
	glog.Infof("Spawning analyzer %d", identifier)

	// our own unique working folder
	executableDir := filepath.Join(config.Aggregator.ExecutablePath, fmt.Sprintf("analyzer%d", identifier))
	if err := analysisPackage.Setup(executableDir); err != nil {
		glog.Errorf("Analyzer %d terminated due to error: %s", identifier, err)
		return
	}
	for {
		select {
		case badBinaryPath := <-agg.forAnalysis:
			atomic.AddInt64(&agg.analysisCount, int64(1))
			err := agg.analysisHelper(executableDir, badBinaryPath, analysisPackage)
			if err != nil {
				glog.Errorf("Analyzer %d terminated due to error: %s", identifier, err)
				return
			}
		case <-agg.pipelineShutdown:
			glog.Infof("Analyzer %d recieved shutdown signal", identifier)
			return
		}
	}
}
Example #17
func main() {
	// Set up flags.
	dbConf := database.ConfigFromFlags(buildbot.PROD_DB_HOST, buildbot.PROD_DB_PORT, database.USER_ROOT, buildbot.PROD_DB_NAME, buildbot.MigrationSteps())

	// Global init to initialize glog and parse arguments.
	common.Init()

	if err := dbConf.PromptForPassword(); err != nil {
		glog.Fatal(err)
	}
	vdb, err := dbConf.NewVersionedDB()
	if err != nil {
		glog.Fatal(err)
	}

	// Get the current database version
	maxDBVersion := vdb.MaxDBVersion()
	glog.Infof("Latest database version: %d", maxDBVersion)

	dbVersion, err := vdb.DBVersion()
	if err != nil {
		glog.Fatalf("Unable to retrieve database version. Error: %s", err)
	}
	glog.Infof("Current database version: %d", dbVersion)

	if dbVersion < maxDBVersion {
		glog.Infof("Migrating to version: %d", maxDBVersion)
		err = vdb.Migrate(maxDBVersion)
		if err != nil {
			glog.Fatalf("Unable to retrieve database version. Error: %s", err)
		}
	}

	glog.Infoln("Database migration finished.")
}
// waitForUploads waits for uploadPackages to be sent through the forUpload channel
// and then uploads them.  If any unrecoverable errors happen, this method terminates.
func (agg *BinaryAggregator) waitForUploads(identifier int) {
	defer agg.pipelineWaitGroup.Done()
	defer go_metrics.GetOrRegisterCounter("upload_process_count", go_metrics.DefaultRegistry).Dec(int64(1))
	glog.Infof("Spawning uploader %d", identifier)
	for {
		select {
		case p := <-agg.forUpload:
			atomic.AddInt64(&agg.uploadCount, int64(1))
			if !agg.UploadGreyFuzzes && p.FuzzType == GREY_FUZZ {
				glog.Infof("Skipping upload of grey fuzz %s", p.Name)
				continue
			}
			if err := agg.upload(p); err != nil {
				glog.Errorf("Uploader %d terminated due to error: %s", identifier, err)
				return
			}
			agg.forBugReporting <- bugReportingPackage{
				FuzzName:   p.Name,
				CommitHash: config.Generator.SkiaVersion.Hash,
				IsBadFuzz:  p.FuzzType == BAD_FUZZ,
			}
		case <-agg.pipelineShutdown:
			glog.Infof("Uploader %d recieved shutdown signal", identifier)
			return
		}
	}
}
// scanForNewCandidates runs scanHelper once every config.Aggregator.RescanPeriod, which scans the
// config.Generator.AflOutputPath for new fuzzes.
// If scanHelper returns an error, this method will terminate.
func (agg *BinaryAggregator) scanForNewCandidates() {
	defer agg.monitoringWaitGroup.Done()

	alreadyFoundBinaries := &SortedStringSlice{}
	// time.Tick does not fire immediately, so we fire it manually once.
	if err := agg.scanHelper(alreadyFoundBinaries); err != nil {
		glog.Errorf("Scanner terminated due to error: %v", err)
		return
	}
	glog.Infof("Sleeping for %s, then waking up to find new crashes again", config.Aggregator.RescanPeriod)

	t := time.Tick(config.Aggregator.RescanPeriod)
	for {
		select {
		case <-t:
			if err := agg.scanHelper(alreadyFoundBinaries); err != nil {
				glog.Errorf("Aggregator scanner terminated due to error: %v", err)
				return
			}
			glog.Infof("Sleeping for %s, then waking up to find new crashes again", config.Aggregator.RescanPeriod)
		case <-agg.monitoringShutdown:
			glog.Info("Aggregator scanner got signal to shut down")
			return
		}

	}
}
Example #20
// UpdateCommitInfo finds all the new commits since the last time we ran and
// adds them to the tiles, creating new tiles if necessary.
func (i *Ingester) UpdateCommitInfo(pull bool) error {
	glog.Infof("Ingest %s: Starting UpdateCommitInfo", i.datasetName)
	if err := i.git.Update(pull, false); err != nil {
		return fmt.Errorf("Ingest %s: Failed git pull for during UpdateCommitInfo: %s", i.datasetName, err)
	}

	// Compute Git CL number for each Git hash.
	allHashes := i.git.From(time.Time(BEGINNING_OF_TIME))
	hashToNumber := map[string]int{}
	for i, h := range allHashes {
		hashToNumber[h] = i
	}
	i.hashToNumber = hashToNumber

	// Find the time of the last Commit seen.
	ts := time.Time(BEGINNING_OF_TIME)
	lastTile, err := i.tileStore.Get(0, -1)
	if err == nil && lastTile != nil {
		ts = i.lastCommitTimeInTile(lastTile)
	} else {
		// Boundary condition; just started making Tiles and none exist.
		newTile := tiling.NewTile()
		newTile.Scale = 0
		newTile.TileIndex = 0
		if err := i.tileStore.Put(0, 0, newTile); err != nil {
			return fmt.Errorf("Ingest %s: UpdateCommitInfo: Failed to write new tile: %s", i.datasetName, err)
		}
	}
	glog.Infof("Ingest %s: UpdateCommitInfo: Last commit timestamp: %s", i.datasetName, ts)

	// Find all the Git hashes that are new to us.
	newHashes := i.git.From(ts)

	glog.Infof("Ingest %s: len(newHashes): from %d", i.datasetName, len(newHashes))

	// Add Commit info to the Tiles for each new hash.
	tt := NewTileTracker(i.tileStore, i.hashToNumber)
	for _, hash := range newHashes {
		if err := tt.Move(hash); err != nil {
			glog.Errorf("UpdateCommitInfo Move(%s) failed with: %s", hash, err)
			continue
		}
		details, err := i.git.Details(hash, true)
		if err != nil {
			glog.Errorf("Failed to get details for hash: %s: %s", hash, err)
			continue
		}
		tt.Tile().Commits[tt.Offset(hash)] = &tiling.Commit{
			CommitTime: details.Timestamp.Unix(),
			Hash:       hash,
			Author:     details.Author,
		}
	}
	glog.Infof("Ingest %s: Starting to flush tile.", i.datasetName)
	tt.Flush()

	glog.Infof("Ingest %s: Finished UpdateCommitInfo", i.datasetName)
	return nil
}
Example #21
// tileHandler accepts URIs like /tiles/0/1
// where the URI format is /tiles/<tile-scale>/<tile-number>
//
// It returns JSON of the form:
//
//  {
//    tiles: [20],
//    scale: 0,
//    paramset: {
//      "os": ["Android", "ChromeOS", ..],
//      "arch": ["Arm7", "x86", ...],
//    },
//    commits: [
//      {
//        "commit_time": 140329432,
//        "hash": "0e03478100ea",
//        "author": "*****@*****.**",
//        "commit_msg": "The subject line of the commit.",
//      },
//      ...
//    ],
//    ticks: [
//      [1.5, "Mon"],
//      [3.5, "Tue"]
//    ],
//    skps: [
//      5, 13, 24
//    ]
//  }
//
//  Where skps are the commit indices where the SKPs were updated.
//
func tileHandler(w http.ResponseWriter, r *http.Request) {
	glog.Infof("Tile Handler: %q\n", r.URL.Path)
	handlerStart := time.Now()
	match := tileHandlerPath.FindStringSubmatch(r.URL.Path)
	if r.Method != "GET" || match == nil || len(match) != 3 {
		http.NotFound(w, r)
		return
	}
	tileScale, err := strconv.ParseInt(match[1], 10, 0)
	if err != nil {
		util.ReportError(w, r, err, "Failed parsing tile scale.")
		return
	}
	tileNumber, err := strconv.ParseInt(match[2], 10, 0)
	if err != nil {
		util.ReportError(w, r, err, "Failed parsing tile number.")
		return
	}
	glog.Infof("tile: %d %d", tileScale, tileNumber)
	tile, err := getTile(int(tileScale), int(tileNumber))
	if err != nil {
		util.ReportError(w, r, err, "Failed retrieving tile.")
		return
	}

	guiTile := tiling.NewTileGUI(tile.Scale, tile.TileIndex)
	guiTile.Commits = tile.Commits
	guiTile.ParamSet = tile.ParamSet
	// SkpCommits goes out to the git repo, add caching if this turns out to be
	// slow.
	if skps, err := git.SkpCommits(tile); err != nil {
		guiTile.Skps = []int{}
		glog.Errorf("Failed to calculate skps: %s", err)
	} else {
		guiTile.Skps = skps
	}

	ts := []int64{}
	for _, c := range tile.Commits {
		if c.CommitTime != 0 {
			ts = append(ts, c.CommitTime)
		}
	}
	glog.Infof("%#v", ts)
	guiTile.Ticks = human.FlotTickMarks(ts)

	// Marshal and send
	marshaledResult, err := json.Marshal(guiTile)
	if err != nil {
		util.ReportError(w, r, err, "Failed to marshal JSON.")
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_, err = w.Write(marshaledResult)
	if err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
	}
	glog.Infoln("Total handler time: ", time.Since(handlerStart).Nanoseconds())
}
Example #22
// SSH connects to the specified workers and runs the specified command. If the
// command does not complete in the given duration then all remaining workers are
// considered timed out. SSH also automatically substitutes the sequential number
// of the worker for the WORKER_NUM_KEYWORD since it is a common use case.
func SSH(cmd string, workers []string, timeout time.Duration) (map[string]string, error) {
	glog.Infof("Running \"%s\" on %s with timeout of %s", cmd, workers, timeout)
	numWorkers := len(workers)

	// Ensure that the key file exists.
	key, err := getKeyFile()
	if err != nil {
		return nil, fmt.Errorf("Failed to get key file: %s", err)
	}

	// Initialize the structure with the configuration for ssh.
	config := &ssh.ClientConfig{
		User: CtUser,
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(key),
		},
	}

	var wg sync.WaitGroup
	// m protects workersWithOutputs and remainingWorkers
	var m sync.Mutex
	// Will be populated and returned by this function.
	workersWithOutputs := map[string]string{}
	// Keeps track of which workers are still pending.
	remainingWorkers := map[string]int{}

	// Kick off a goroutine on all workers.
	for i, hostname := range workers {
		wg.Add(1)
		m.Lock()
		remainingWorkers[hostname] = 1
		m.Unlock()
		go func(index int, hostname string) {
			defer wg.Done()
			updatedCmd := strings.Replace(cmd, WORKER_NUM_KEYWORD, strconv.Itoa(index+1), -1)
			output, err := executeCmd(updatedCmd, hostname, config, timeout)
			if err != nil {
				glog.Errorf("Could not execute ssh cmd: %s", err)
			}
			m.Lock()
			defer m.Unlock()
			workersWithOutputs[hostname] = output
			delete(remainingWorkers, hostname)
			glog.Infoln()
			glog.Infof("[%d/%d] Worker %s has completed execution", numWorkers-len(remainingWorkers), numWorkers, hostname)
			glog.Infof("Remaining workers: %v", remainingWorkers)
		}(i, hostname)
	}

	wg.Wait()
	glog.Infoln()
	glog.Infof("Finished running \"%s\" on all %d workers", cmd, numWorkers)
	glog.Info("========================================")

	m.Lock()
	defer m.Unlock()
	return workersWithOutputs, nil
}
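A hypothetical call site; the hostnames are placeholders, and WORKER_NUM_KEYWORD inside the command is what SSH substitutes with each worker's sequential number.
// logWorkerNumbers runs a trivial command on two placeholder workers and logs
// each worker's output.
func logWorkerNumbers() {
	workers := []string{"build101-m5", "build102-m5"}
	outputs, err := SSH("echo worker "+WORKER_NUM_KEYWORD, workers, 5*time.Minute)
	if err != nil {
		glog.Errorf("SSH failed: %s", err)
		return
	}
	for hostname, out := range outputs {
		glog.Infof("%s: %s", hostname, out)
	}
}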
Example #23
// update syncs the source code repository and loads any new commits.
func (c *CommitCache) update() (rv error) {
	defer timer.New("CommitCache.update()").Stop()
	glog.Info("Reloading commits.")
	if err := c.repo.Update(true, true); err != nil {
		return fmt.Errorf("Failed to update the repo: %v", err)
	}
	from := time.Time{}
	n := c.NumCommits()
	if n > 0 {
		last, err := c.Get(n - 1)
		if err != nil {
			return fmt.Errorf("Failed to get last commit: %v", err)
		}
		from = last.Timestamp
	}
	newCommitHashes := c.repo.From(from)
	glog.Infof("Processing %d new commits.", len(newCommitHashes))
	newCommits := make([]*gitinfo.LongCommit, len(newCommitHashes))
	if len(newCommitHashes) > 0 {
		for i, h := range newCommitHashes {
			d, err := c.repo.Details(h)
			if err != nil {
				return fmt.Errorf("Failed to obtain commit details for %s: %v", h, err)
			}
			newCommits[i] = d
		}
	}
	branchHeads, err := c.repo.GetBranches()
	if err != nil {
		return fmt.Errorf("Failed to read branch information from the repo: %v", err)
	}

	// Load new builds for the BuildCache.
	allCommits := append(c.Commits, newCommits...)
	buildCacheHashes := make([]string, 0, c.requestSize)
	for _, commit := range allCommits[len(allCommits)-c.requestSize:] {
		buildCacheHashes = append(buildCacheHashes, commit.Hash)
	}
	byId, byCommit, builderStatuses, err := build_cache.LoadData(buildCacheHashes)
	if err != nil {
		return fmt.Errorf("Failed to update BuildCache: %v", err)
	}

	// Update the cached values all at once at the end.
	glog.Infof("Updating the cache.")
	// Write the cache to disk *after* unlocking it.
	defer func() {
		rv = c.toFile()
	}()
	defer timer.New("  CommitCache locked").Stop()
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.BranchHeads = branchHeads
	c.Commits = allCommits
	c.buildCache.UpdateWithData(byId, byCommit, builderStatuses)
	glog.Infof("Finished updating the cache.")
	return nil
}
Example #24
// ingestNewBuilds finds the set of uningested builds and ingests them.
func ingestNewBuilds(m string, repos *gitinfo.RepoMap) error {
	defer metrics.NewTimer("buildbot.ingestNewBuilds").Stop()
	glog.Infof("Ingesting builds for %s", m)
	// TODO(borenet): Investigate the use of channels here. We should be
	// able to start ingesting builds as the data becomes available rather
	// than waiting until the end.
	buildsToProcess, err := getUningestedBuilds(m)
	if err != nil {
		return fmt.Errorf("Failed to obtain the set of uningested builds: %v", err)
	}
	unfinished, err := getUnfinishedBuilds(m)
	if err != nil {
		return fmt.Errorf("Failed to obtain the set of unfinished builds: %v", err)
	}
	for _, b := range unfinished {
		if _, ok := buildsToProcess[b.Builder]; !ok {
			buildsToProcess[b.Builder] = []int{}
		}
		buildsToProcess[b.Builder] = append(buildsToProcess[b.Builder], b.Number)
	}

	if err := repos.Update(); err != nil {
		return err
	}

	// TODO(borenet): Can we ingest builders in parallel?
	errs := map[string]error{}
	for b, w := range buildsToProcess {
		for _, n := range w {
			if BUILD_BLACKLIST[b][n] {
				glog.Warningf("Skipping blacklisted build: %s # %d", b, n)
				continue
			}
			if IsTrybot(b) {
				continue
			}
			glog.Infof("Ingesting build: %s, %s, %d", m, b, n)
			build, err := retryGetBuildFromMaster(m, b, n, repos)
			if err != nil {
				errs[b] = fmt.Errorf("Failed to ingest build: %v", err)
				break
			}
			if err := IngestBuild(build, repos); err != nil {
				errs[b] = fmt.Errorf("Failed to ingest build: %v", err)
				break
			}
		}
	}
	if len(errs) > 0 {
		msg := fmt.Sprintf("Encountered errors ingesting builds for %s:", m)
		for b, err := range errs {
			msg += fmt.Sprintf("\n%s: %v", b, err)
		}
		return fmt.Errorf(msg)
	}
	glog.Infof("Done ingesting builds for %s", m)
	return nil
}
Example #25
func printnode(n *node, depth int) {
	glog.Infof("Node: %*s%#v\n", depth*2, "", n.Index)
	for _, f := range n.Files {
		glog.Infof("File: %*s%#v\n", (depth+1)*2, "", *f)
	}
	for _, d := range n.Dirs {
		printnode(d, depth+1)
	}
}
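A trivial hypothetical entry point that dumps a whole tree starting at its root.
// printtree logs every file and directory reachable from root.
func printtree(root *node) {
	printnode(root, 0)
}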
Example #26
// singleStep does a single round of alerting.
func singleStep(issueTracker issues.IssueTracker) {
	latencyBegin := time.Now()
	tile := tileBuilder.GetTile()
	summary, err := clustering.CalculateClusterSummaries(tile, CLUSTER_SIZE, CLUSTER_STDDEV, skpOnly)
	if err != nil {
		glog.Errorf("Alerting: Failed to calculate clusters: %s", err)
		return
	}
	fresh := []*types.ClusterSummary{}
	for _, c := range summary.Clusters {
		if math.Abs(c.StepFit.Regression) > clustering.INTERESTING_THRESHHOLD {
			fresh = append(fresh, c)
		}
	}
	old, err := ListFrom(tile.Commits[0].CommitTime)
	if err != nil {
		glog.Errorf("Alerting: Failed to get existing clusters: %s", err)
		return
	}
	glog.Infof("Found %d old", len(old))
	glog.Infof("Found %d fresh", len(fresh))
	updated := CombineClusters(fresh, old)
	glog.Infof("Found %d to update", len(updated))

	for _, c := range updated {
		if c.Status == "" {
			c.Status = "New"
		}
		if err := Write(c); err != nil {
			glog.Errorf("Alerting: Failed to write updated cluster: %s", err)
		}
	}

	current, err := ListFrom(tile.Commits[0].CommitTime)
	if err != nil {
		glog.Errorf("Alerting: Failed to get existing clusters: %s", err)
		return
	}
	count := 0
	for _, c := range current {
		if c.Status == "New" {
			count++
		}
		if issueTracker != nil {
			if err := updateBugs(c, issueTracker); err != nil {
				glog.Errorf("Error retrieving bugs: %s", err)
				return
			}
		} else {
			glog.Infof("Skipping ClusterSummary.Bugs update because apiKey is missing.")
			return
		}
	}
	newClustersGauge.Update(int64(count))
	runsCounter.Inc(1)
	alertingLatency.UpdateSince(latencyBegin)
}
Example #27
func handleAlert(alertId int64, comment string, until int, w http.ResponseWriter, r *http.Request) {
	email := login.LoggedInAs(r)
	if !userHasEditRights(email) {
		util.ReportError(w, r, fmt.Errorf("User does not have edit rights."), "You must be logged in to an account with edit rights to do that.")
		return
	}

	action, ok := mux.Vars(r)["action"]
	if !ok {
		util.ReportError(w, r, fmt.Errorf("No action provided."), "No action provided.")
		return
	}

	if action == "dismiss" {
		glog.Infof("%s %d", action, alertId)
		if err := alertManager.Dismiss(alertId, email, comment); err != nil {
			util.ReportError(w, r, err, "Failed to dismiss alert.")
			return
		}
		return
	} else if action == "snooze" {
		if until == 0 {
			util.ReportError(w, r, fmt.Errorf("Invalid snooze time."), fmt.Sprintf("Invalid snooze time"))
			return
		}
		until := time.Unix(int64(until), 0)
		glog.Infof("%s %d until %v", action, alertId, until.String())
		if err := alertManager.Snooze(alertId, until, email, comment); err != nil {
			util.ReportError(w, r, err, "Failed to snooze alert.")
			return
		}
		return
	} else if action == "unsnooze" {
		glog.Infof("%s %d", action, alertId)
		if err := alertManager.Unsnooze(alertId, email, comment); err != nil {
			util.ReportError(w, r, err, "Failed to unsnooze alert.")
			return
		}
		return
	} else if action == "addcomment" {
		if !StringIsInteresting(comment) {
			util.ReportError(w, r, fmt.Errorf("Invalid comment text."), comment)
			return
		}
		glog.Infof("%s %d: %s", action, alertId, comment)
		if err := alertManager.AddComment(alertId, email, comment); err != nil {
			util.ReportError(w, r, err, "Failed to add comment.")
			return
		}
		return
	} else {
		util.ReportError(w, r, fmt.Errorf("Invalid action %s", action), "The requested action is invalid.")
		return
	}

}
Example #28
// singleHandler is similar to /query/0/-1/traces?<param filters>, but takes an
// optional commit hash and returns a single value for each trace at that commit,
// or the latest value if a hash is not given or found. The resulting JSON is in
// SingleResponse format that looks like:
//
//  {
//    "traces": [
//      {
//        val: 1.1,
//        params: {"os: "Android", ...}
//      },
//      ...
//    ],
//    "hash": "abc123",
//  }
//
func singleHandler(w http.ResponseWriter, r *http.Request) {
	glog.Infof("Single Handler: %q\n", r.URL.Path)
	handlerStart := time.Now()
	match := singleHandlerPath.FindStringSubmatch(r.URL.Path)
	if r.Method != "GET" || match == nil || len(match) != 2 {
		http.NotFound(w, r)
		return
	}
	if err := r.ParseForm(); err != nil {
		util.ReportError(w, r, err, "Failed to parse query params.")
	}
	hash := match[1]

	tileNum, idx, err := git.TileAddressFromHash(hash, time.Time(ingester.BEGINNING_OF_TIME))
	if err != nil {
		glog.Infof("Did not find hash '%s', use latest: %q.\n", hash, err)
		tileNum = -1
		idx = -1
	}
	glog.Infof("Hash: %s tileNum: %d, idx: %d\n", hash, tileNum, idx)
	tile, err := getTile(0, tileNum)
	if err != nil {
		util.ReportError(w, r, err, "Failed retrieving tile.")
		return
	}

	if idx < 0 {
		idx = len(tile.Commits) - 1 // Defaults to the last slice element.
	}
	glog.Infof("Tile: %d; Idx: %d\n", tileNum, idx)

	ret := SingleResponse{
		Traces: []*SingleTrace{},
		Hash:   tile.Commits[idx].Hash,
	}
	for _, tr := range tile.Traces {
		if tiling.Matches(tr, r.Form) {
			v, err := vec.FillAt(tr.(*types.PerfTrace).Values, idx)
			if err != nil {
				util.ReportError(w, r, err, "Error while getting value at slice index.")
				return
			}
			t := &SingleTrace{
				Val:    v,
				Params: tr.Params(),
			}
			ret.Traces = append(ret.Traces, t)
		}
	}
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	if err := enc.Encode(ret); err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
	}
	glog.Infoln("Total handler time: ", time.Since(handlerStart).Nanoseconds())
}
// UpdateToNewSkiaVersion runs a series of commands to update the fuzzer to a new Skia Version.
// It will stop the Generator, pause the Aggregator, update to the
// new version, re-scan all previous fuzzes and then start the Generator and the Aggregator
// again.  It re-uses the Aggregator pipeline to do the re-analysis.
func (v *VersionUpdater) UpdateToNewSkiaVersion(newHash string) (*vcsinfo.LongCommit, error) {
	oldHash := config.Generator.SkiaVersion.Hash
	// stop afl-fuzz
	StopBinaryGenerator()

	// sync skia to version, which sets config.Generator.SkiaVersion
	if err := common.DownloadSkia(newHash, config.Generator.SkiaRoot, &config.Generator); err != nil {
		return nil, fmt.Errorf("Could not sync skia to %s: %s", newHash, err)
	}

	// download all bad and grey fuzzes
	badFuzzNames, greyFuzzNames, err := v.downloadAllBadAndGreyFuzzes(oldHash, config.Aggregator.BinaryFuzzPath)
	if err != nil {
		return nil, fmt.Errorf("Problem downloading all previous fuzzes: %s", err)
	}
	glog.Infof("There are %d badFuzzNames and %d greyFuzzNames to rescan.", len(badFuzzNames), len(greyFuzzNames))
	// This is a soft shutdown, i.e. it waits for aggregator's queues to be empty
	v.agg.ShutDown()
	if err := ClearBinaryGenerator(); err != nil {
		return nil, fmt.Errorf("Could not remove previous afl-fuzz results: %s", err)
	}

	if err := v.agg.RestartAnalysis(); err != nil {
		return nil, fmt.Errorf("Had problem restarting analysis/upload chain: %s", err)
	}
	// Reanalyze and reupload the fuzzes, making a bug on regressions.
	glog.Infof("Reanalyzing bad fuzzes")
	v.agg.MakeBugOnBadFuzz = false
	v.agg.UploadGreyFuzzes = true
	for _, name := range badFuzzNames {
		v.agg.ForceAnalysis(name)
	}
	v.agg.WaitForEmptyQueues()
	glog.Infof("Reanalyzing grey fuzzes")
	v.agg.MakeBugOnBadFuzz = true
	for _, name := range greyFuzzNames {
		v.agg.ForceAnalysis(name)
	}
	v.agg.WaitForEmptyQueues()
	v.agg.MakeBugOnBadFuzz = false
	v.agg.UploadGreyFuzzes = false
	glog.Infof("Done reanalyzing")

	// redownload samples (in case any are new)
	if err := DownloadBinarySeedFiles(v.storageClient); err != nil {
		return nil, fmt.Errorf("Could not download binary seed files: %s", err)
	}
	// change GCS version to have the current be up to date (fuzzer-fe will need to see that with its polling)
	if err := v.replaceCurrentSkiaVersionWith(oldHash, config.Generator.SkiaVersion.Hash); err != nil {
		return nil, fmt.Errorf("Could not update skia error: %s", err)
	}

	// restart afl-fuzz
	return config.Generator.SkiaVersion, StartBinaryGenerator()
}
Example #30
// UploadWorkerArtifacts uploads artifacts from a local dir to Google Storage.
func (gs *GsUtil) UploadWorkerArtifacts(dirName, pagesetType string, workerNum int) error {
	localDir := filepath.Join(StorageDir, dirName, pagesetType)
	gsDir := filepath.Join(dirName, pagesetType, fmt.Sprintf("slave%d", workerNum))

	if equal, _ := gs.AreTimeStampsEqual(localDir, gsDir); equal {
		glog.Infof("Not uploading %s because TIMESTAMPS match", localDir)
		return nil
	}
	glog.Infof("Timestamps between %s and %s are different. Uploading to Google Storage", localDir, gsDir)
	return gs.UploadDir(localDir, gsDir, true)
}
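A hedged call sketch; the directory name, pageset type, and worker number are placeholders.
// uploadCaptureOutput is a hypothetical caller uploading worker 3's artifacts.
func uploadCaptureOutput(gs *GsUtil) error {
	return gs.UploadWorkerArtifacts("webpage_archives", "10k", 3)
}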