Example #1
// Add, see the diff.LRUCache interface for details.
func (c *RedisLRUCache) Add(key, value interface{}) bool {
	prefixedKey, rawKey, err := c.encodeKey(key)
	if err != nil {
		glog.Errorf("Unable to create redis key: %s", err)
		return false
	}

	byteVal, err := c.encodeVal(value)
	if err != nil {
		glog.Errorf("Unable to create redis value: %s", err)
		return false
	}

	conn := c.pool.Get()
	defer util.Close(conn)

	util.LogErr(conn.Send("MULTI"))
	util.LogErr(conn.Send("SET", prefixedKey, byteVal))
	util.LogErr(conn.Send("SADD", c.indexSetKey, rawKey))
	_, err = conn.Do("EXEC")
	if err != nil {
		glog.Errorf("Unable to add key: %s", err)
		return false
	}
	return true
}
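The same MULTI/EXEC pipeline can be mirrored for deletion. A minimal sketch of a hypothetical inverse operation, reusing the field and helper names assumed by the example above (not part of the original code):

// Remove is a sketch only; it mirrors Add by queuing DEL and SREM atomically.
func (c *RedisLRUCache) Remove(key interface{}) bool {
	prefixedKey, rawKey, err := c.encodeKey(key)
	if err != nil {
		glog.Errorf("Unable to create redis key: %s", err)
		return false
	}

	conn := c.pool.Get()
	defer util.Close(conn)

	util.LogErr(conn.Send("MULTI"))
	util.LogErr(conn.Send("DEL", prefixedKey))
	util.LogErr(conn.Send("SREM", c.indexSetKey, rawKey))
	if _, err := conn.Do("EXEC"); err != nil {
		glog.Errorf("Unable to remove key: %s", err)
		return false
	}
	return true
}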
Example #2
// DownloadBinarySeedFiles downloads the seed skp files stored in Google
// Storage to be used by afl-fuzz.  It places them in
// config.Generator.FuzzSamples after cleaning the folder out.
// It returns an error on failure.
func DownloadBinarySeedFiles(storageClient *storage.Client) error {
	if err := os.RemoveAll(config.Generator.FuzzSamples); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("Could not clean binary seed path %s: %s", config.Generator.FuzzSamples, err)
	}
	if err := os.MkdirAll(config.Generator.FuzzSamples, 0755); err != nil {
		return fmt.Errorf("Could not create binary seed path %s: %s", config.Generator.FuzzSamples, err)
	}

	err := gs.AllFilesInDir(storageClient, config.GS.Bucket, "skp_samples", func(item *storage.ObjectAttrs) {
		name := item.Name
		// skip the parent folder
		if name == "skp_samples/" {
			return
		}
		content, err := gs.FileContentsFromGS(storageClient, config.GS.Bucket, name)
		if err != nil {
			glog.Errorf("Problem downloading %s from Google Storage, continuing anyway", item.Name)
			return
		}
		fileName := filepath.Join(config.Generator.FuzzSamples, strings.SplitAfter(name, "skp_samples/")[1])
		if err = ioutil.WriteFile(fileName, content, 0644); err != nil && !os.IsExist(err) {
			glog.Errorf("Problem creating binary seed file %s, continuing anyway", fileName)
		}
	})
	return err
}
Example #3
func _main(ts db.DB) {
	week := time.Hour * 24 * 7
	commits, err := ts.List(time.Now().Add(-week), time.Now())
	if err != nil {
		glog.Errorf("Failed to load commits: %s", err)
		return
	}
	if len(commits) > 50 {
		commits = commits[:50]
	}

	begin := time.Now()
	_, _, err = ts.TileFromCommits(commits)
	if err != nil {
		glog.Errorf("Failed to load Tile: %s", err)
		return
	}
	glog.Infof("Time to load tile: %v", time.Now().Sub(begin))
	// Now load a second time.
	begin = time.Now()
	_, _, err = ts.TileFromCommits(commits)
	if err != nil {
		glog.Errorf("Failed to load Tile: %s", err)
		return
	}
	glog.Infof("Time to load tile the second time: %v", time.Now().Sub(begin))
}
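time.Now().Sub(begin) is equivalent to the more idiomatic time.Since(begin); a minimal self-contained sketch of the same timing pattern:

package main

import (
	"log"
	"time"
)

func main() {
	begin := time.Now()
	time.Sleep(100 * time.Millisecond) // stands in for the expensive call being timed
	log.Printf("Time to load tile: %v", time.Since(begin))
}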
Example #4
func getPreviousState() (map[string]fileState, map[string]int64, map[string]int64, time.Time, error) {
	if _, err := os.Stat(*stateFile); os.IsNotExist(err) {
		// State file does not exist, return empty values.
		return map[string]fileState{}, map[string]int64{}, map[string]int64{}, time.Time{}, nil
	}
	f, err := os.Open(*stateFile)
	if err != nil {
		glog.Errorf("Failed to open old state file %s for reading: %s", *stateFile, err)
		// Delete it and return empty values.
		if err := os.Remove(*stateFile); err != nil {
			return nil, nil, nil, time.Time{}, fmt.Errorf("Could not delete old state file %s: %s", *stateFile, err)
		}
		glog.Errorf("Deleted old state file %s", *stateFile)
		return map[string]fileState{}, map[string]int64{}, map[string]int64{}, time.Time{}, nil
	}
	defer util.Close(f)
	state := &logserverState{}
	dec := gob.NewDecoder(f)
	if err := dec.Decode(state); err != nil {
		glog.Errorf("Failed to decode old state file %s: %s", *stateFile, err)
		// Delete it and return empty values.
		if err := os.Remove(*stateFile); err != nil {
			return nil, nil, nil, time.Time{}, fmt.Errorf("Could not delete old state file %s: %s", *stateFile, err)
		}
		glog.Errorf("Deleted old state file %s", *stateFile)
		return map[string]fileState{}, map[string]int64{}, map[string]int64{}, time.Time{}, nil
	}
	return state.FilesToState, state.AppLogLevelToSpace, state.AppLogLevelToCount, state.LastCompletedRun, nil
}
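The state file is a gob-encoded logserverState value; a minimal self-contained sketch of the write/read round trip (the struct fields here are illustrative assumptions, not the real logserverState):

package main

import (
	"encoding/gob"
	"log"
	"os"
	"time"
)

// state has illustrative fields only; the real logserverState has more.
type state struct {
	LastCompletedRun   time.Time
	AppLogLevelToSpace map[string]int64
}

func main() {
	// Write the state out, as the logserver would at the end of a run.
	f, err := os.Create("/tmp/state.gob")
	if err != nil {
		log.Fatal(err)
	}
	if err := gob.NewEncoder(f).Encode(&state{LastCompletedRun: time.Now()}); err != nil {
		log.Fatal(err)
	}
	f.Close()

	// Read it back, as getPreviousState does on startup.
	f, err = os.Open("/tmp/state.gob")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	s := &state{}
	if err := gob.NewDecoder(f).Decode(s); err != nil {
		log.Fatal(err)
	}
	log.Printf("last completed run: %v", s.LastCompletedRun)
}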
Example #5
func (e *EventBus) publishEvent(topic string, arg interface{}, globally bool) {
	e.mutex.Lock()
	defer e.mutex.Unlock()
	if th, ok := e.handlers[topic]; ok {
		for _, callback := range th.callbacks {
			th.wg.Add(1)
			go func(callback CallbackFn) {
				defer th.wg.Done()
				callback(arg)
			}(callback)
		}
	}

	if globally {
		if eventCodec, ok := globalEvents[topic]; ok {
			go func() {
				byteData, err := eventCodec.Encode(arg)
				if err != nil {
					glog.Errorf("Unable to encode event data for topic %s:  %s", topic, err)
					return
				}

				if err := e.globalEventBus.Publish(topic, byteData); err != nil {
					glog.Errorf("Unable to publish global event for topic %s:  %s", topic, err)
				}
			}()
		}
	}
}
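The per-topic fan-out above is the standard WaitGroup pattern: pass the callback to the goroutine as an argument so each one captures its own copy. A self-contained sketch:

package main

import (
	"fmt"
	"sync"
)

type CallbackFn func(arg interface{})

func main() {
	callbacks := []CallbackFn{
		func(arg interface{}) { fmt.Println("first handler:", arg) },
		func(arg interface{}) { fmt.Println("second handler:", arg) },
	}
	var wg sync.WaitGroup
	for _, callback := range callbacks {
		wg.Add(1)
		// Pass the callback as an argument so each goroutine gets its own copy.
		go func(cb CallbackFn) {
			defer wg.Done()
			cb("some event payload")
		}(callback)
	}
	wg.Wait()
}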
Example #6
func (p FileInfoModifiedSlice) Less(i, j int) bool {
	iFileInfo := p.fileInfos[i]
	iModTime := iFileInfo.ModTime()
	if iFileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
		destFileInfo, err := getSymlinkFileInfo(iFileInfo)
		if err != nil {
			glog.Errorf("Could not follow %s: %s", iFileInfo.Name(), err)
		} else {
			iModTime = destFileInfo.ModTime()
		}
	}

	jFileInfo := p.fileInfos[j]
	jModTime := jFileInfo.ModTime()
	if jFileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
		destFileInfo, err := getSymlinkFileInfo(jFileInfo)
		if err != nil {
			glog.Errorf("Could not follow %s: %s", jFileInfo.Name(), err)
		} else {
			jModTime = destFileInfo.ModTime()
		}
	}

	if p.reverseSort {
		return iModTime.After(jModTime)
	} else {
		return iModTime.Before(jModTime)
	}
}
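getSymlinkFileInfo presumably resolves the link target; the underlying distinction is that os.Lstat describes the symlink itself while os.Stat follows it. A small self-contained sketch:

package main

import (
	"log"
	"os"
)

func main() {
	if err := os.Symlink("/etc/hostname", "/tmp/hostname-link"); err != nil && !os.IsExist(err) {
		log.Fatal(err)
	}
	linkInfo, err := os.Lstat("/tmp/hostname-link") // the symlink itself
	if err != nil {
		log.Fatal(err)
	}
	destInfo, err := os.Stat("/tmp/hostname-link") // the file it points to
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("link mod time: %v, target mod time: %v", linkInfo.ModTime(), destInfo.ModTime())
}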
Example #7
// performAnalysis waits for files that need to be analyzed (from forAnalysis) and makes a copy of
// them in config.Aggregator.BinaryFuzzPath with their hash as a file name.
// It then analyzes it using the supplied AnalysisPackage and then signals the results should be
// uploaded. If any unrecoverable errors happen, this method terminates.
func performAnalysis(identifier int, analysisPackage AnalysisPackage, forAnalysis <-chan string, forUpload chan<- uploadPackage, terminated chan<- string) {
	glog.Infof("Spawning analyzer %d", identifier)
	prepareForExit := func(err error) {
		glog.Errorf("Analyzer %d terminated due to error: %s", identifier, err)
		terminated <- fmt.Sprintf("analyzer%d", identifier)
	}
	// our own unique working folder
	executableDir := filepath.Join(config.Aggregator.ExecutablePath, fmt.Sprintf("analyzer%d", identifier))

	if err := analysisPackage.Setup(executableDir); err != nil {
		prepareForExit(err)
		return
	}

	for {
		badBinaryPath := <-forAnalysis
		hash, data, err := calculateHash(badBinaryPath)
		if err != nil {
			prepareForExit(err)
			return
		}
		newFuzzPath := filepath.Join(config.Aggregator.BinaryFuzzPath, hash)
		if err := ioutil.WriteFile(newFuzzPath, data, 0644); err != nil {
			prepareForExit(err)
			return
		}
		if upload, err := analysisPackage.Analyze(executableDir, hash); err != nil {
			glog.Errorf("Problem analyzing %s", newFuzzPath)
			prepareForExit(err)
			return
		} else {
			forUpload <- upload
		}
	}
}
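calculateHash is not shown in the example; one plausible implementation, assuming it returns the SHA-256 hex digest of the file contents along with the raw bytes (a sketch only, the real helper may differ):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"log"
)

// calculateHash reads the file and returns its SHA-256 hex digest plus the bytes.
func calculateHash(path string) (string, []byte, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return "", nil, fmt.Errorf("Problem reading %s for hashing: %s", path, err)
	}
	sum := sha256.Sum256(data)
	return hex.EncodeToString(sum[:]), data, nil
}

func main() {
	hash, data, err := calculateHash("/etc/hostname")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes, sha256 %s", len(data), hash)
}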
Example #8
// processTileStream processes the first tile instantly and starts a background
// process to recalculate the blame lists as tiles and expectations change.
func (b *Blamer) processTileStream() error {
	expChanges := make(chan []string)
	b.storages.EventBus.SubscribeAsync(expstorage.EV_EXPSTORAGE_CHANGED, func(e interface{}) {
		expChanges <- e.([]string)
	})
	tileStream := b.storages.GetTileStreamNow(2*time.Minute, false)

	lastTile := <-tileStream
	if err := b.updateBlame(lastTile); err != nil {
		return err
	}

	// Schedule a background process to keep updating the blame lists.
	go func() {
		for {
			select {
			case tile := <-tileStream:
				if err := b.updateBlame(tile); err != nil {
					glog.Errorf("Error updating blame lists: %s", err)
				} else {
					lastTile = tile
				}
			case <-expChanges:
				storage.DrainChangeChannel(expChanges)
				if err := b.updateBlame(lastTile); err != nil {
					glog.Errorf("Error updating blame lists: %s", err)
				}
			}
		}
	}()

	return nil
}
Example #9
func (h *historian) backFillDigestInfo(tilesToBackfill int) {
	go func() {
		// Get the first tile and determine the tile id of the first tile
		var err error
		lastTile, err := h.storages.TileStore.Get(0, -1)
		if err != nil {
			glog.Errorf("Unable to retrieve last tile. Quiting backfill. Error: %s", err)
			return
		}

		var tile *tiling.Tile
		firstTileIndex := util.MaxInt(lastTile.TileIndex-tilesToBackfill+1, 0)
		for idx := firstTileIndex; idx <= lastTile.TileIndex; idx++ {
			if tile, err = h.storages.TileStore.Get(0, idx); err != nil {
				glog.Errorf("Unable to read tile %d from tile store. Quiting backfill. Error: %s", idx, err)
				return
			}

			// Process the tile, but giving higher priority to digests from the
			// latest tile.
			if err = h.processTile(tile); err != nil {
				glog.Errorf("Error processing tile: %s", err)
			}

			// Read the last tile, just to make sure it has not changed.
			if lastTile, err = h.storages.TileStore.Get(0, -1); err != nil {
				glog.Errorf("Unable to retrieve last tile. Quiting backfill. Error: %s", err)
				return
			}
		}
	}()
}
Example #10
// polyAllHashesHandler returns the list of all hashes we currently know about regardless of triage status.
//
// Endpoint used by the Android buildbots to avoid transferring already known images.
func polyAllHashesHandler(w http.ResponseWriter, r *http.Request) {
	unavailableDigests := storages.DiffStore.UnavailableDigests()

	byTest := tallies.ByTest()
	hashes := map[string]bool{}
	for _, test := range byTest {
		for k, _ := range test {
			if _, ok := unavailableDigests[k]; !ok {
				hashes[k] = true
			}
		}
	}

	w.Header().Set("Content-Type", "text/plain")
	for k, _ := range hashes {
		if _, err := w.Write([]byte(k)); err != nil {
			glog.Errorf("Failed to write or encode result: %s", err)
			return
		}
		if _, err := w.Write([]byte("\n")); err != nil {
			glog.Errorf("Failed to write or encode result: %s", err)
			return
		}
	}
}
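A minimal sketch of exercising a text/plain handler like this with httptest; the handler body is simplified to stand in for polyAllHashesHandler:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func allHashesHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain")
	for _, h := range []string{"abc123", "def456"} {
		fmt.Fprintln(w, h)
	}
}

func main() {
	req := httptest.NewRequest("GET", "/_/hashes", nil)
	rec := httptest.NewRecorder()
	allHashesHandler(rec, req)
	fmt.Print(rec.Body.String())
}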
Example #11
func writeOutAllSourceImages() {
	// Pull all the source images from the db and write them out to inout.
	rows, err := db.Query("SELECT id, image, create_ts FROM source_images ORDER BY create_ts DESC")
	if err != nil {
		glog.Errorf("Failed to open connection to SQL server: %q\n", err)
		panic(err)
	}
	defer util.Close(rows)
	for rows.Next() {
		var id int
		var image []byte
		var create_ts time.Time
		if err := rows.Scan(&id, &image, &create_ts); err != nil {
			glog.Errorf("failed to fetch from database: %q", err)
			continue
		}
		filename := fmt.Sprintf(filepath.Join(config.Fiddle.InoutPath, "image-%d.png"), id)
		if _, err := os.Stat(filename); err == nil {
			glog.Infof("Skipping write since file exists: %q", filename)
			continue
		}
		if err := ioutil.WriteFile(filename, image, 0666); err != nil {
			glog.Errorf("failed to write image file: %q", err)
		}
	}
}
Example #12
// scanForNewCandidates runs scanHelper once every config.Aggregator.RescanPeriod, which scans the
// config.Generator.AflOutputPath for new fuzzes.
// If scanHelper returns an error, this method will terminate.
func (agg *BinaryAggregator) scanForNewCandidates() {
	defer agg.monitoringWaitGroup.Done()

	alreadyFoundBinaries := &SortedStringSlice{}
	// time.Tick does not fire immediately, so we fire it manually once.
	if err := agg.scanHelper(alreadyFoundBinaries); err != nil {
		glog.Errorf("Scanner terminated due to error: %v", err)
		return
	}
	glog.Infof("Sleeping for %s, then waking up to find new crashes again", config.Aggregator.RescanPeriod)

	t := time.Tick(config.Aggregator.RescanPeriod)
	for {
		select {
		case <-t:
			if err := agg.scanHelper(alreadyFoundBinaries); err != nil {
				glog.Errorf("Aggregator scanner terminated due to error: %v", err)
				return
			}
			glog.Infof("Sleeping for %s, then waking up to find new crashes again", config.Aggregator.RescanPeriod)
		case <-agg.monitoringShutdown:
			glog.Info("Aggregator scanner got signal to shut down")
			return
		}

	}
}
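The scan loop above is the common ticker-plus-shutdown-channel pattern; a self-contained sketch:

package main

import (
	"log"
	"time"
)

func main() {
	shutdown := make(chan struct{})
	go func() {
		time.Sleep(350 * time.Millisecond)
		close(shutdown)
	}()

	t := time.Tick(100 * time.Millisecond)
	for {
		select {
		case <-t:
			log.Println("scanning for new candidates")
		case <-shutdown:
			log.Println("got signal to shut down")
			return
		}
	}
}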
Example #13
func AttemptLoadCertFromMetadata() {
	if *certChainFile == "" {
		// Try loading from GCE project level metadata.
		certChainContents, err := metadata.ProjectGet("ca_cert_chain")
		if err != nil {
			glog.Errorf("Failed to load ca_cert_chain from metadata: %s", err)
			return
		}
		keyContents, err := metadata.ProjectGet("ca_key")
		if err != nil {
			glog.Errorf("Failed to load ca_key from metadata: %s", err)
			return
		}
		fullCertChainFilename := filepath.Join(*workDir, CA_CERT_CHAIN_FILENAME)
		fullKeyFilename := filepath.Join(*workDir, CA_KEY_FILENAME)
		if err := ioutil.WriteFile(fullCertChainFilename, []byte(certChainContents), 0600); err != nil {
			glog.Errorf("Failed to write %s: %s", fullCertChainFilename, err)
			return
		}
		if err := ioutil.WriteFile(fullKeyFilename, []byte(keyContents), 0600); err != nil {
			glog.Errorf("Failed to write %s: %s", fullKeyFilename, err)
			return
		}
		*keyFile = fullKeyFilename
		*certChainFile = fullCertChainFilename
		glog.Infof("SUCCESS: Loaded cert from metadata.")
	}
}
Example #14
func main() {
	defer common.LogPanic()
	common.Init()

	args := flag.Args()
	if len(args) != 3 {
		glog.Errorf("Expected arguments: branch target buildID")
		glog.Errorf("i.e.:  git_master-skia razor-userdebug 1772442")
		os.Exit(1)
	}

	// Set the arguments necessary to lookup the git hash.
	branch := args[0]
	target := args[1]
	buildID := args[2]
	glog.Infof("Branch, target, buildID: %s, %s, %s", branch, target, buildID)

	// Set up the oauth client.
	var client *http.Client
	var err error

	// In this case we don't want a backoff transport since the Apiary backend
	// seems to fail a lot, so we basically want to fall back to polling if a
	// call fails.
	transport := &http.Transport{
		Dial: util.DialTimeout,
	}

	if *local {
		// Use a local client secret file to load data.
		client, err = auth.InstalledAppClient(OAUTH_CACHE_FILEPATH, CLIENT_SECRET_FILEPATH,
			transport,
			androidbuildinternal.AndroidbuildInternalScope,
			storage.CloudPlatformScope)
		if err != nil {
			glog.Fatalf("Unable to create installed app oauth client:%s", err)
		}
	} else {
		// Use compute engine service account.
		client = auth.GCEServiceAccountClient(transport)
	}

	f, err := androidbuild.New("/tmp/android-gold-ingest", client)
	if err != nil {
		glog.Fatalf("Failed to construct client: %s", err)
	}
	for {
		r, err := f.Get(branch, target, buildID)
		if err != nil {
			glog.Errorf("Failed to get requested info: %s", err)
			time.Sleep(1 * time.Minute)
			continue
		}
		if r != nil {
			glog.Infof("Successfully found: %#v", *r)
		}

		time.Sleep(1 * time.Minute)
	}
}
Example #15
// buildDM builds the test harness for parsing skp (and other) files.
// First it creates a hard link for the gyp and cpp files. The gyp file is linked into Skia's gyp folder and the cpp file is linked into SKIA_ROOT/../fuzzer_cache/src, which is where the gyp file is configured to point.
// Then it activates Skia's gyp command, which creates the build (ninja) files.
// Finally, it runs those build files.
// If any step fails in unexpected ways, it returns an error.
func buildDM(buildType string, isClean bool) error {
	glog.Infof("Building %s dm", buildType)

	// Change directory to skia folder
	if err := os.Chdir(config.Generator.SkiaRoot); err != nil {
		return err
	}

	// clean previous build if specified
	buildLocation := filepath.Join("out", buildType)
	if isClean {
		if err := os.RemoveAll(buildLocation); err != nil {
			return err
		}
	}

	// run gyp
	if message, err := exec.RunSimple("./gyp_skia"); err != nil {
		glog.Errorf("Failed gyp message: %s", message)
		return err
	}

	// run ninja
	cmd := fmt.Sprintf("ninja -C %s dm", buildLocation)
	if message, err := exec.RunSimple(cmd); err != nil {
		glog.Errorf("Failed ninja message: %s", message)
		return err
	}
	return nil
}
Example #16
// StartCleaner is a process that periodically checks the status of every issue
// that has been previewed and removes all the local files for closed issues.
func StartCleaner(workDir string) {
	glog.Info("Starting Cleaner")
	c := reitveld.NewClient()
	for _ = range time.Tick(config.REFRESH) {
		matches, err := filepath.Glob(workDir + "/patches/*")
		glog.Infof("Matches: %v", matches)
		if err != nil {
			glog.Errorf("Failed to retrieve list of patched checkouts: %s", err)
			continue
		}
		for _, filename := range matches {
			_, file := filepath.Split(filename)
			glog.Info(file)
			m := issueAndPatch.FindStringSubmatch(file)
			if len(m) < 2 {
				continue
			}
			issue, err := strconv.ParseInt(m[1], 10, 64)
			if err != nil {
				glog.Errorf("Failed to parse %q as int: %s", m[1], err)
				continue
			}
			issueInfo, err := c.Issue(issue)
			if err != nil {
				glog.Errorf("Failed to retrieve issue status %d: %s", issue, err)
			}
			if issueInfo.Closed {
				if err := os.RemoveAll(filename); err != nil {
					glog.Errorf("Failed to remove %q: %s", filename, err)
				}
			}
		}
	}
}
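issueAndPatch is presumably a package-level regexp; a self-contained sketch of the parse step, assuming checkout directory names like issue1234_patch5 (the pattern below is hypothetical):

package main

import (
	"log"
	"regexp"
	"strconv"
)

// Hypothetical pattern; the real issueAndPatch regexp may differ.
var issueAndPatch = regexp.MustCompile(`^issue(\d+)(?:_patch(\d+))?$`)

func main() {
	m := issueAndPatch.FindStringSubmatch("issue1499543002_patch1")
	if len(m) < 2 {
		log.Fatal("no match")
	}
	issue, err := strconv.ParseInt(m[1], 10, 64)
	if err != nil {
		log.Fatalf("Failed to parse %q as int: %s", m[1], err)
	}
	log.Printf("issue: %d", issue)
}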
Example #17
// waitForAnalysis waits for files that need to be analyzed (from forAnalysis) and makes a copy of
// them in config.Aggregator.BinaryFuzzPath with their hash as a file name.
// It then analyzes it using the supplied AnalysisPackage and then signals the results should be
// uploaded. If any unrecoverable errors happen, this method terminates.
func (agg *BinaryAggregator) waitForAnalysis(identifier int, analysisPackage AnalysisPackage) {
	defer agg.pipelineWaitGroup.Done()
	defer go_metrics.GetOrRegisterCounter("analysis_process_count", go_metrics.DefaultRegistry).Dec(int64(1))
	glog.Infof("Spawning analyzer %d", identifier)

	// our own unique working folder
	executableDir := filepath.Join(config.Aggregator.ExecutablePath, fmt.Sprintf("analyzer%d", identifier))
	if err := analysisPackage.Setup(executableDir); err != nil {
		glog.Errorf("Analyzer %d terminated due to error: %s", identifier, err)
		return
	}
	for {
		select {
		case badBinaryPath := <-agg.forAnalysis:
			atomic.AddInt64(&agg.analysisCount, int64(1))
			err := agg.analysisHelper(executableDir, badBinaryPath, analysisPackage)
			if err != nil {
				glog.Errorf("Analyzer %d terminated due to error: %s", identifier, err)
				return
			}
		case <-agg.pipelineShutdown:
			glog.Infof("Analyzer %d recieved shutdown signal", identifier)
			return
		}
	}
}
Example #18
func runBenchmark(fileInfoName, pathToPagesets, pathToPyFiles, localOutputDir, chromiumBuildName, chromiumBinary, runID, browserExtraArgs string) error {
	pagesetBaseName := filepath.Base(fileInfoName)
	if pagesetBaseName == util.TIMESTAMP_FILE_NAME || filepath.Ext(pagesetBaseName) == ".pyc" {
		// Ignore timestamp files and .pyc files.
		return nil
	}

	// Convert the filename into a format consumable by the run_benchmarks
	// binary.
	pagesetName := strings.TrimSuffix(pagesetBaseName, filepath.Ext(pagesetBaseName))
	pagesetPath := filepath.Join(pathToPagesets, fileInfoName)

	glog.Infof("===== Processing %s for %s =====", pagesetPath, runID)

	skutil.LogErr(os.Chdir(pathToPyFiles))
	args := []string{
		util.BINARY_RUN_BENCHMARK,
		fmt.Sprintf("%s.%s", *benchmarkName, util.BenchmarksToPagesetName[*benchmarkName]),
		"--page-set-name=" + pagesetName,
		"--page-set-base-dir=" + pathToPagesets,
		"--also-run-disabled-tests",
	}

	// Need to capture output for all benchmarks.
	outputDirArgValue := filepath.Join(localOutputDir, pagesetName)
	args = append(args, "--output-dir="+outputDirArgValue)
	// Figure out which browser should be used.
	if *targetPlatform == util.PLATFORM_ANDROID {
		if err := installChromeAPK(chromiumBuildName); err != nil {
			return fmt.Errorf("Error while installing APK: %s", err)
		}
		args = append(args, "--browser=android-chrome-shell")
	} else {
		args = append(args, "--browser=exact", "--browser-executable="+chromiumBinary)
	}
	// Split benchmark args if not empty and append to args.
	if *benchmarkExtraArgs != "" {
		for _, benchmarkArg := range strings.Split(*benchmarkExtraArgs, " ") {
			args = append(args, benchmarkArg)
		}
	}
	// Add the number of times to repeat.
	args = append(args, fmt.Sprintf("--page-repeat=%d", *repeatBenchmark))
	// Add browserArgs if not empty to args.
	if browserExtraArgs != "" {
		args = append(args, "--extra-browser-args="+browserExtraArgs)
	}
	// Set the PYTHONPATH to the pagesets and the telemetry dirs.
	env := []string{
		fmt.Sprintf("PYTHONPATH=%s:%s:%s:$PYTHONPATH", pathToPagesets, util.TelemetryBinariesDir, util.TelemetrySrcDir),
		"DISPLAY=:0",
	}
	timeoutSecs := util.PagesetTypeToInfo[*pagesetType].RunChromiumPerfTimeoutSecs
	if err := util.ExecuteCmd("python", args, env, time.Duration(timeoutSecs)*time.Second, nil, nil); err != nil {
		glog.Errorf("Run benchmark command failed with: %s", err)
		glog.Errorf("Killing all running chrome processes in case there is a non-recoverable error.")
		skutil.LogErr(util.ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, 5*time.Minute, nil, nil))
	}
	return nil
}
Example #19
// tileHandler accepts URIs like /tiles/0/1
// where the URI format is /tiles/<tile-scale>/<tile-number>
//
// It returns JSON of the form:
//
//  {
//    tiles: [20],
//    scale: 0,
//    paramset: {
//      "os": ["Android", "ChromeOS", ..],
//      "arch": ["Arm7", "x86", ...],
//    },
//    commits: [
//      {
//        "commit_time": 140329432,
//        "hash": "0e03478100ea",
//        "author": "*****@*****.**",
//        "commit_msg": "The subject line of the commit.",
//      },
//      ...
//    ],
//    ticks: [
//      [1.5, "Mon"],
//      [3.5, "Tue"]
//    ],
//    skps: [
//      5, 13, 24
//    ]
//  }
//
//  Where skps are the commit indices where the SKPs were updated.
//
func tileHandler(w http.ResponseWriter, r *http.Request) {
	glog.Infof("Tile Handler: %q\n", r.URL.Path)
	handlerStart := time.Now()
	match := tileHandlerPath.FindStringSubmatch(r.URL.Path)
	if r.Method != "GET" || match == nil || len(match) != 3 {
		http.NotFound(w, r)
		return
	}
	tileScale, err := strconv.ParseInt(match[1], 10, 0)
	if err != nil {
		util.ReportError(w, r, err, "Failed parsing tile scale.")
		return
	}
	tileNumber, err := strconv.ParseInt(match[2], 10, 0)
	if err != nil {
		util.ReportError(w, r, err, "Failed parsing tile number.")
		return
	}
	glog.Infof("tile: %d %d", tileScale, tileNumber)
	tile, err := getTile(int(tileScale), int(tileNumber))
	if err != nil {
		util.ReportError(w, r, err, "Failed retrieving tile.")
		return
	}

	guiTile := tiling.NewTileGUI(tile.Scale, tile.TileIndex)
	guiTile.Commits = tile.Commits
	guiTile.ParamSet = tile.ParamSet
	// SkpCommits goes out to the git repo, add caching if this turns out to be
	// slow.
	if skps, err := git.SkpCommits(tile); err != nil {
		guiTile.Skps = []int{}
		glog.Errorf("Failed to calculate skps: %s", err)
	} else {
		guiTile.Skps = skps
	}

	ts := []int64{}
	for _, c := range tile.Commits {
		if c.CommitTime != 0 {
			ts = append(ts, c.CommitTime)
		}
	}
	glog.Infof("%#v", ts)
	guiTile.Ticks = human.FlotTickMarks(ts)

	// Marshal and send
	marshaledResult, err := json.Marshal(guiTile)
	if err != nil {
		util.ReportError(w, r, err, "Failed to marshal JSON.")
		return
	}
	w.Header().Set("Content-Type", "application/json")
	_, err = w.Write(marshaledResult)
	if err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
	}
	glog.Infoln("Total handler time: ", time.Since(handlerStart).Nanoseconds())
}
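The marshal-then-Write step at the end can also be done with a streaming encoder. A small self-contained sketch of a JSON handler in that style; the struct fields and JSON tags are illustrative, not the real TileGUI:

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

type tileGUI struct {
	Scale     int   `json:"scale"`
	TileIndex int   `json:"tileIndex"`
	Skps      []int `json:"skps"`
}

func tileHandler(w http.ResponseWriter, r *http.Request) {
	guiTile := tileGUI{Scale: 0, TileIndex: 1, Skps: []int{5, 13, 24}}

	w.Header().Set("Content-Type", "application/json")
	// Either marshal explicitly and Write...
	marshaledResult, err := json.Marshal(guiTile)
	if err != nil {
		http.Error(w, "Failed to marshal JSON.", http.StatusInternalServerError)
		return
	}
	if _, err := w.Write(marshaledResult); err != nil {
		log.Printf("Failed to write or encode output: %s", err)
	}
	// ...or stream directly with json.NewEncoder(w).Encode(guiTile).
}

func main() {
	http.HandleFunc("/tiles/", tileHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}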
Example #20
// UpdateCommitInfo finds all the new commits since the last time we ran and
// adds them to the tiles, creating new tiles if necessary.
func (i *Ingester) UpdateCommitInfo(pull bool) error {
	glog.Infof("Ingest %s: Starting UpdateCommitInfo", i.datasetName)
	if err := i.git.Update(pull, false); err != nil {
		return fmt.Errorf("Ingest %s: Failed git pull for during UpdateCommitInfo: %s", i.datasetName, err)
	}

	// Compute Git CL number for each Git hash.
	allHashes := i.git.From(time.Time(BEGINNING_OF_TIME))
	hashToNumber := map[string]int{}
	for i, h := range allHashes {
		hashToNumber[h] = i
	}
	i.hashToNumber = hashToNumber

	// Find the time of the last Commit seen.
	ts := time.Time(BEGINNING_OF_TIME)
	lastTile, err := i.tileStore.Get(0, -1)
	if err == nil && lastTile != nil {
		ts = i.lastCommitTimeInTile(lastTile)
	} else {
		// Boundary condition; just started making Tiles and none exist.
		newTile := tiling.NewTile()
		newTile.Scale = 0
		newTile.TileIndex = 0
		if err := i.tileStore.Put(0, 0, newTile); err != nil {
			return fmt.Errorf("Ingest %s: UpdateCommitInfo: Failed to write new tile: %s", i.datasetName, err)
		}
	}
	glog.Infof("Ingest %s: UpdateCommitInfo: Last commit timestamp: %s", i.datasetName, ts)

	// Find all the Git hashes that are new to us.
	newHashes := i.git.From(ts)

	glog.Infof("Ingest %s: len(newHashes): from %d", i.datasetName, len(newHashes))

	// Add Commit info to the Tiles for each new hash.
	tt := NewTileTracker(i.tileStore, i.hashToNumber)
	for _, hash := range newHashes {
		if err := tt.Move(hash); err != nil {
			glog.Errorf("UpdateCommitInfo Move(%s) failed with: %s", hash, err)
			continue
		}
		details, err := i.git.Details(hash, true)
		if err != nil {
			glog.Errorf("Failed to get details for hash: %s: %s", hash, err)
			continue
		}
		tt.Tile().Commits[tt.Offset(hash)] = &tiling.Commit{
			CommitTime: details.Timestamp.Unix(),
			Hash:       hash,
			Author:     details.Author,
		}
	}
	glog.Infof("Ingest %s: Starting to flush tile.", i.datasetName)
	tt.Flush()

	glog.Infof("Ingest %s: Finished UpdateCommitInfo", i.datasetName)
	return nil
}
Example #21
// singleStep does a single round of alerting.
func singleStep(issueTracker issues.IssueTracker) {
	latencyBegin := time.Now()
	tile := tileBuilder.GetTile()
	summary, err := clustering.CalculateClusterSummaries(tile, CLUSTER_SIZE, CLUSTER_STDDEV, skpOnly)
	if err != nil {
		glog.Errorf("Alerting: Failed to calculate clusters: %s", err)
		return
	}
	fresh := []*types.ClusterSummary{}
	for _, c := range summary.Clusters {
		if math.Abs(c.StepFit.Regression) > clustering.INTERESTING_THRESHHOLD {
			fresh = append(fresh, c)
		}
	}
	old, err := ListFrom(tile.Commits[0].CommitTime)
	if err != nil {
		glog.Errorf("Alerting: Failed to get existing clusters: %s", err)
		return
	}
	glog.Infof("Found %d old", len(old))
	glog.Infof("Found %d fresh", len(fresh))
	updated := CombineClusters(fresh, old)
	glog.Infof("Found %d to update", len(updated))

	for _, c := range updated {
		if c.Status == "" {
			c.Status = "New"
		}
		if err := Write(c); err != nil {
			glog.Errorf("Alerting: Failed to write updated cluster: %s", err)
		}
	}

	current, err := ListFrom(tile.Commits[0].CommitTime)
	if err != nil {
		glog.Errorf("Alerting: Failed to get existing clusters: %s", err)
		return
	}
	count := 0
	for _, c := range current {
		if c.Status == "New" {
			count++
		}
		if issueTracker != nil {
			if err := updateBugs(c, issueTracker); err != nil {
				glog.Errorf("Error retrieving bugs: %s", err)
				return
			}
		} else {
			glog.Infof("Skipping ClusterSummary.Bugs update because apiKey is missing.")
			return
		}
	}
	newClustersGauge.Update(int64(count))
	runsCounter.Inc(1)
	alertingLatency.UpdateSince(latencyBegin)
}
Example #22
// NewFileDiffStore initializes and returns a file based implementation of
// DiffStore. The optional http.Client is used to make HTTP requests to Google
// Storage. If nil is supplied then a default client is used. The baseDir is
// the local base directory where the DEFAULT_IMG_DIR_NAME,
// DEFAULT_DIFF_DIR_NAME and the DEFAULT_DIFFMETRICS_DIR_NAME directories
// exist. gsBucketName is the bucket images will be downloaded from.
// storageBaseDir is the directory in the bucket (if empty
// DEFAULT_GS_IMG_DIR_NAME is used).  workerPoolSize is the max number of
// simultaneous goroutines that will be created when running Get or AbsPath.
// Use RECOMMENDED_WORKER_POOL_SIZE if unsure what this value should be.
func NewFileDiffStore(client *http.Client, baseDir, gsBucketName string, storageBaseDir string, cacheFactory CacheFactory, workerPoolSize int) (diff.DiffStore, error) {
	if client == nil {
		client = util.NewTimeoutClient()
	}

	if storageBaseDir == "" {
		storageBaseDir = DEFAULT_GS_IMG_DIR_NAME
	}

	imageCache, err := lru.New(IMAGE_LRU_CACHE_SIZE)
	if err != nil {
		return nil, fmt.Errorf("Unable to alloace image LRU cache: %s", err)
	}

	diffCache := cacheFactory("di", DiffMetricsCodec(0))
	unavailableChan := make(chan *diff.DigestFailure, 10)

	statusDir := fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_STATUS_DIR_NAME)))
	failureDB, err := bolt.Open(filepath.Join(statusDir, FAILUREDB_NAME), 0600, nil)
	if err != nil {
		return nil, fmt.Errorf("Unable to open failuredb: %s", err)
	}

	fs := &FileDiffStore{
		client:              client,
		localImgDir:         fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_IMG_DIR_NAME))),
		localDiffDir:        fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_DIFF_DIR_NAME))),
		localDiffMetricsDir: fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_DIFFMETRICS_DIR_NAME))),
		localTempFileDir:    fileutil.Must(fileutil.EnsureDirExists(filepath.Join(baseDir, DEFAULT_TEMPFILE_DIR_NAME))),
		gsBucketName:        gsBucketName,
		storageBaseDir:      storageBaseDir,
		imageCache:          imageCache,
		diffCache:           diffCache,
		unavailableDigests:  map[string]*diff.DigestFailure{},
		unavailableChan:     unavailableChan,
		failureDB:           failureDB,
	}

	if err := fs.loadDigestFailures(); err != nil {
		return nil, err
	}
	go func() {
		for {
			digestFailure := <-unavailableChan
			if err := fs.addDigestFailure(digestFailure); err != nil {
				glog.Errorf("Unable to store digest failure: %s", err)
			} else if err = fs.loadDigestFailures(); err != nil {
				glog.Errorf("Unable to load failures: %s", err)
			}
		}
	}()

	fs.activateWorkers(workerPoolSize)
	return fs, nil
}
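lru.New here looks like the hashicorp/golang-lru constructor; a minimal sketch of that cache on its own, assuming that is indeed the package in use:

package main

import (
	"log"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// Capacity of 2 entries, just for the sketch; the example uses IMAGE_LRU_CACHE_SIZE.
	imageCache, err := lru.New(2)
	if err != nil {
		log.Fatalf("Unable to allocate image LRU cache: %s", err)
	}
	imageCache.Add("digest-a", []byte("image bytes a"))
	imageCache.Add("digest-b", []byte("image bytes b"))
	imageCache.Add("digest-c", []byte("image bytes c")) // evicts digest-a
	if _, ok := imageCache.Get("digest-a"); !ok {
		log.Println("digest-a was evicted, as expected")
	}
}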
Example #23
func mergeUploadCSVFiles(localOutputDir, pathToPyFiles, runID, remoteDir string, gs *util.GsUtil) error {
	// Move all results into a single directory.
	fileInfos, err := ioutil.ReadDir(localOutputDir)
	if err != nil {
		return fmt.Errorf("Unable to read %s: %s", localOutputDir, err)
	}
	for _, fileInfo := range fileInfos {
		if !fileInfo.IsDir() {
			continue
		}
		outputFile := filepath.Join(localOutputDir, fileInfo.Name(), "results-pivot-table.csv")
		newFile := filepath.Join(localOutputDir, fmt.Sprintf("%s.csv", fileInfo.Name()))
		if err := os.Rename(outputFile, newFile); err != nil {
			glog.Errorf("Could not rename %s to %s: %s", outputFile, newFile, err)
			continue
		}
		// Add the rank of the page to the CSV file.
		headers, values, err := getRowsFromCSV(newFile)
		if err != nil {
			glog.Errorf("Could not read %s: %s", newFile, err)
			continue
		}
		pageRank := strings.Split(fileInfo.Name(), "_")[1]
		for i := range headers {
			for j := range values {
				if headers[i] == "page" {
					values[j][i] = fmt.Sprintf("%s (#%s)", values[j][i], pageRank)
				}
			}
		}
		if err := writeRowsToCSV(newFile, headers, values); err != nil {
			glog.Errorf("Could not write to %s: %s", newFile, err)
			continue
		}
	}
	// Call csv_pivot_table_merger.py to merge all results into a single results CSV.
	pathToCsvMerger := filepath.Join(pathToPyFiles, "csv_pivot_table_merger.py")
	outputFileName := runID + ".output"
	args := []string{
		pathToCsvMerger,
		"--csv_dir=" + localOutputDir,
		"--output_csv_name=" + filepath.Join(localOutputDir, outputFileName),
	}
	err = util.ExecuteCmd("python", args, []string{}, util.CSV_PIVOT_TABLE_MERGER_TIMEOUT, nil,
		nil)
	if err != nil {
		return fmt.Errorf("Error running csv_pivot_table_merger.py: %s", err)
	}
	// Copy the output file to Google Storage.
	remoteOutputDir := filepath.Join(remoteDir, fmt.Sprintf("slave%d", *workerNum), "outputs")
	if err := gs.UploadFile(outputFileName, localOutputDir, remoteOutputDir); err != nil {
		return fmt.Errorf("Unable to upload %s to %s: %s", outputFileName, remoteOutputDir, err)
	}
	return nil
}
Example #24
func main() {
	defer common.LogPanic()
	// Setup flags.
	dbConf := buildbot.DBConfigFromFlags()

	// Global init.
	common.InitWithMetrics(APP_NAME, graphiteServer)

	// Parse the time period.
	period, err := human.ParseDuration(*timePeriod)
	if err != nil {
		glog.Fatal(err)
	}

	// Initialize the buildbot database.
	if !*local {
		if err := dbConf.GetPasswordFromMetadata(); err != nil {
			glog.Fatal(err)
		}
	}
	if err := dbConf.InitDB(); err != nil {
		glog.Fatal(err)
	}

	// Initialize the BuildBucket client.
	c, err := auth.NewClient(*local, path.Join(*workdir, "oauth_token_cache"), buildbucket.DEFAULT_SCOPES...)
	if err != nil {
		glog.Fatal(err)
	}
	bb := buildbucket.NewClient(c)

	// Build the queue.
	repos := gitinfo.NewRepoMap(*workdir)
	for _, r := range REPOS {
		if _, err := repos.Repo(r); err != nil {
			glog.Fatal(err)
		}
	}
	q, err := build_queue.NewBuildQueue(period, repos, *scoreThreshold, *scoreDecay24Hr, BOT_BLACKLIST)
	if err != nil {
		glog.Fatal(err)
	}

	// Start scheduling builds in a loop.
	liveness := metrics.NewLiveness(APP_NAME)
	if err := scheduleBuilds(q, bb); err != nil {
		glog.Errorf("Failed to schedule builds: %v", err)
	}
	for _ = range time.Tick(time.Minute) {
		liveness.Update()
		if err := scheduleBuilds(q, bb); err != nil {
			glog.Errorf("Failed to schedule builds: %v", err)
		}
	}
}
Example #25
// getKeyEventChannel returns a channel that sends events for key changes in
// Redis.
func (r *RedisRTC) getKeyEventChannel(channelOrPattern string, isPattern bool) (<-chan string, error) {
	// Listen for changes on the queue continuously.
	psc := redis.PubSubConn{Conn: r.redisPool.Get()}

	subscribe := func() error {
		if isPattern {
			return psc.PSubscribe(channelOrPattern)
		} else {
			return psc.Subscribe(channelOrPattern)
		}
	}

	// Subscribe to the key events
	if err := subscribe(); err != nil {
		return nil, err
	}

	readyCh := make(chan bool)
	ret := make(chan string)
	go func() {
		for {
		Loop:
			for {
				switch v := psc.Receive().(type) {
				case redis.PMessage:
					ret <- string(v.Data)
				case redis.Message:
					ret <- string(v.Data)
				case redis.Subscription:
					if readyCh != nil {
						readyCh <- true
						close(readyCh)
					}
				case error:
					glog.Errorf("Error waiting for key events: %s", v)
					glog.Infof("Reconnecting.")
					util.Close(psc)
					break Loop
				}
			}

			readyCh = nil
			psc = redis.PubSubConn{Conn: r.redisPool.Get()}
			if err := subscribe(); err != nil {
				glog.Errorf("Error re-connecting: %s", err)
				time.Sleep(time.Second)
			}
		}
	}()
	<-readyCh

	return ret, nil
}
Example #26
func doWorkerHealthCheck() {
	glog.Infof("Preparing for worker health check")
	if err := updateAndBuild(); err != nil {
		glog.Errorf("Worker health check failed: %v", err)
		return
	}
	if err := checkWorkerHealth(); err != nil {
		glog.Errorf("Worker health check failed: %v", err)
		return
	}
	glog.Infof("Completed worker health check")
}
Example #27
func (i *Ingester) getInputChannels() (<-chan []ResultFileLocation, <-chan []ResultFileLocation) {
	pollChan := make(chan []ResultFileLocation)
	eventChan := make(chan []ResultFileLocation)
	i.stopChannels = make([]chan<- bool, 0, len(i.sources))

	for idx, source := range i.sources {
		stopCh := make(chan bool)
		go func(source Source, srcMetrics *sourceMetrics, stopCh <-chan bool) {
			util.Repeat(i.runEvery, stopCh, func() {
				var startTime, endTime int64 = 0, 0
				startTime, endTime, err := i.getCommitRangeOfInterest()
				if err != nil {
					glog.Errorf("Unable to retrieve the start and end time. Got error: %s", err)
					return
				}

				// measure how long the polling takes.
				pollStart := time.Now()
				resultFiles, err := source.Poll(startTime, endTime)
				srcMetrics.pollTimer.UpdateSince(pollStart)
				if err != nil {
					// Indicate that there was an error in polling the source.
					srcMetrics.pollError.Update(1)
					glog.Errorf("Error polling data source '%s': %s", source.ID(), err)
					return
				}

				// Indicate that the polling was successful.
				srcMetrics.pollError.Update(0)
				pollChan <- resultFiles
				srcMetrics.liveness.Update()
			})
		}(source, i.srcMetrics[idx], stopCh)
		i.stopChannels = append(i.stopChannels, stopCh)

		if ch := source.EventChan(); ch != nil {
			stopCh := make(chan bool)
			go func(ch <-chan []ResultFileLocation, stopCh <-chan bool) {
			MainLoop:
				for {
					select {
					case eventChan <- (<-ch):
					case <-stopCh:
						break MainLoop
					}
				}
			}(ch, stopCh)
			i.stopChannels = append(i.stopChannels, stopCh)
		}
	}
	return pollChan, eventChan
}
Example #28
func writeToDatabase(hash string, code string, workspaceName string, source int, width, height int) {
	if db == nil {
		return
	}
	if _, err := db.Exec("INSERT INTO webtry (code, hash, width, height, source_image_id) VALUES(?, ?, ?, ?, ?)", code, hash, width, height, source); err != nil {
		glog.Errorf("Failed to insert code into database: %q\n", err)
	}
	if workspaceName != "" {
		if _, err := db.Exec("INSERT INTO workspacetry (name, hash, width, height, source_image_id) VALUES(?, ?, ?, ?, ?)", workspaceName, hash, width, height, source); err != nil {
			glog.Errorf("Failed to insert into workspacetry table: %q\n", err)
		}
	}
}
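A self-contained sketch of the same parameterized-insert pattern with database/sql, assuming a MySQL driver; the DSN, table, and columns are illustrative:

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

func main() {
	db, err := sql.Open("mysql", "user:password@/webtry")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Placeholders keep user-supplied values out of the SQL text itself.
	if _, err := db.Exec(
		"INSERT INTO webtry (code, hash, width, height) VALUES(?, ?, ?, ?)",
		"void draw() {}", "abc123", 256, 256); err != nil {
		log.Printf("Failed to insert code into database: %q", err)
	}
}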
Example #29
// getOne uses the following algorithm:
// 1. Look for the DiffMetrics of the digests in the local cache.
// If found:
//     2. Return the DiffMetrics.
// Else:
//     3. Make sure the digests exist in the local cache. Download it from
//        Google Storage if necessary.
//     4. Calculate DiffMetrics.
//     5. Write DiffMetrics to the cache and return.
func (fs *FileDiffStore) getOne(dMain, dOther string) interface{} {
	var diffMetrics *diff.DiffMetrics = nil
	var err error

	// 1. Check if the DiffMetrics exists in the memory cache.
	baseName := getDiffBasename(dMain, dOther)
	if obj, ok := fs.diffCache.Get(baseName); ok {
		diffMetrics = obj.(*diff.DiffMetrics)
	} else {
		// Check if it's in the file cache.
		diffMetrics, err = fs.getDiffMetricsFromFileCache(baseName)
		if err != nil {
			glog.Errorf("Failed to getDiffMetricsFromFileCache for digest %s and digest %s: %s", dMain, dOther, err)
			return nil
		}

		if diffMetrics != nil {
			// 2. The DiffMetrics exists locally return it.
			fs.diffCache.Add(baseName, diffMetrics)
		} else {
			// 3. Make sure the digests exist in the local cache. Download it from
			//    Google Storage if necessary.
			if err = fs.ensureDigestInCache(dOther); err != nil {
				glog.Errorf("Failed to ensureDigestInCache for digest %s: %s", dOther, err)
				return nil
			}

			// 4. Calculate DiffMetrics.
			diffMetrics, err = fs.diff(dMain, dOther)
			if err != nil {
				glog.Errorf("Failed to calculate DiffMetrics for digest %s and digest %s: %s", dMain, dOther, err)
				return nil
			}

			// 5. Write DiffMetrics to the local caches.
			fs.diffCache.Add(baseName, diffMetrics)

			// Write to disk in the background.
			writeCopy := *diffMetrics
			go func() {
				if err := fs.writeDiffMetricsToFileCache(baseName, &writeCopy); err != nil {
					glog.Errorf("Failed to write diff metrics to cache for digest %s and digest %s: %s", dMain, dOther, err)
				}
			}()
		}
	}

	// Expand the path of the diff images.
	diffMetrics.PixelDiffFilePath = filepath.Join(fs.localDiffDir, diffMetrics.PixelDiffFilePath)
	return diffMetrics
}
Example #30
func step(client *http.Client, store *storage.Service, hostname string) {
	glog.Info("About to read package list.")
	// Read the old and new packages from their respective storage locations.
	serverList, err := packages.InstalledForServer(client, store, hostname)
	if err != nil {
		glog.Errorf("Failed to retrieve remote package list: %s", err)
		return
	}
	localList, err := packages.FromLocalFile(*installedPackagesFile)
	if err != nil {
		glog.Errorf("Failed to retrieve local package list: %s", err)
		return
	}

	// Install any new or updated packages.
	newPackages, installed := differences(serverList.Names, localList)
	glog.Infof("New: %v, Installed: %v", newPackages, installed)

	for _, name := range newPackages {
		// If just an appname appears w/o a package name then that means
		// that package hasn't been selected, so just skip it for now.
		if len(strings.Split(name, "/")) == 1 {
			continue
		}
		installed = append(installed, name)
		if err := packages.ToLocalFile(installed, *installedPackagesFile); err != nil {
			glog.Errorf("Failed to write local package list: %s", err)
			continue
		}
		if err := packages.Install(client, store, name); err != nil {
			glog.Errorf("Failed to install package %s: %s", name, err)
			// Pop last name from 'installed' then rewrite the file since the
			// install failed.
			installed = installed[:len(installed)-1]
			if err := packages.ToLocalFile(installed, *installedPackagesFile); err != nil {
				glog.Errorf("Failed to rewrite local package list after install failure for %s: %s", name, err)
			}
			continue
		}

		// The pulld application is special in that it's not restarted by the
		// postinstall script of the debian package, because that might kill
		// pulld while it was updating itself. Instead pulld will just exit when
		// it notices that it has been updated and count on systemd to restart it.
		if containsPulld(newPackages) {
			glog.Info("The pulld package has been updated, exiting to allow a restart.")
			glog.Flush()
			os.Exit(0)
		}
	}
}