// getBinaryReportsFromGS pulls all files in baseFolder from the skia-fuzzer bucket and
// groups them by fuzz. It parses these groups of files into a FuzzReportBinary and returns
// the slice of all reports generated in this way.
func getBinaryReportsFromGS(storageService *storage.Service, baseFolder string) ([]fuzz.FuzzReportBinary, error) {
	// NOTE(review): "nextPageToken" is requested in Fields but never consumed, so only the
	// first page (up to 100000 items) is processed — confirm this bound is intentional.
	contents, err := storageService.Objects.List(config.Aggregator.Bucket).Prefix(baseFolder).Fields("nextPageToken", "items(name,size,timeCreated)").MaxResults(100000).Do()
	// Assumption, files are sorted alphabetically and have the structure
	// [baseFolder]/[filetype]/[fuzzname]/[fuzzname][suffix]
	// where suffix is one of _debug.dump, _debug.err, _release.dump or _release.err
	if err != nil {
		return nil, fmt.Errorf("Problem reading from Google Storage: %v", err)
	}
	glog.Infof("Loading %d files from gs://%s/%s", len(contents.Items), config.Aggregator.Bucket, baseFolder)
	reports := make([]fuzz.FuzzReportBinary, 0)
	// The four artifacts accumulated for the fuzz currently being grouped.
	var debugDump, debugErr, releaseDump, releaseErr string
	// isInitialized distinguishes "before the first fuzz" from "a fuzz is in progress",
	// so we don't emit an empty report on the very first item.
	isInitialized := false
	currFuzzFolder := "" // will be something like binary_fuzzes/bad/skp/badbeef
	currFuzzName := ""
	currFuzzType := ""
	for _, item := range contents.Items {
		name := item.Name
		// Skip entries that are not deep enough to be fuzz artifact files
		// (fewer than 4 path components under the expected layout).
		if strings.Count(name, "/") <= 3 {
			continue
		}
		// A name outside the current fuzz's folder starts a new group: flush the
		// previous group (if any) as a report, then re-key on the new folder.
		if !isInitialized || !strings.HasPrefix(name, currFuzzFolder) {
			if isInitialized {
				reports = append(reports, fuzz.ParseBinaryReport(currFuzzType, currFuzzName, debugDump, debugErr, releaseDump, releaseErr))
			} else {
				isInitialized = true
			}
			parts := strings.Split(name, "/")
			currFuzzFolder = strings.Join(parts[0:4], "/")
			currFuzzType = parts[2]
			currFuzzName = parts[3]
			// reset for next one
			debugDump, debugErr, releaseDump, releaseErr = "", "", "", ""
		}
		// Route the file's contents into the matching artifact slot; download
		// failures degrade to an empty string rather than aborting the scan.
		if strings.HasSuffix(name, "_debug.dump") {
			debugDump = emptyStringOnError(gs.FileContentsFromGS(storageService, config.Aggregator.Bucket, name))
		} else if strings.HasSuffix(name, "_debug.err") {
			debugErr = emptyStringOnError(gs.FileContentsFromGS(storageService, config.Aggregator.Bucket, name))
		} else if strings.HasSuffix(name, "_release.dump") {
			releaseDump = emptyStringOnError(gs.FileContentsFromGS(storageService, config.Aggregator.Bucket, name))
		} else if strings.HasSuffix(name, "_release.err") {
			releaseErr = emptyStringOnError(gs.FileContentsFromGS(storageService, config.Aggregator.Bucket, name))
		}
	}
	// Flush the final in-progress group; the loop only emits a report when the
	// NEXT group begins, so the last fuzz would otherwise be dropped.
	if currFuzzName != "" {
		reports = append(reports, fuzz.ParseBinaryReport(currFuzzType, currFuzzName, debugDump, debugErr, releaseDump, releaseErr))
	}
	glog.Info("Done loading")
	return reports, nil
}
// download waits for fuzzPackages to appear on the toDownload channel and then downloads // the four pieces of the package. It then parses them into a BinaryFuzzReport and sends // the binary to the passed in channel. When there is no more work to be done, this function. // returns and writes out true to the done channel. func (g *GSLoader) download(toDownload <-chan fuzzPackage, reports chan<- fuzz.BinaryFuzzReport, wg *sync.WaitGroup) { defer wg.Done() for job := range toDownload { debugDump := emptyStringOnError(gs.FileContentsFromGS(g.storageClient, config.GS.Bucket, job.DebugDumpName)) debugErr := emptyStringOnError(gs.FileContentsFromGS(g.storageClient, config.GS.Bucket, job.DebugErrName)) releaseDump := emptyStringOnError(gs.FileContentsFromGS(g.storageClient, config.GS.Bucket, job.ReleaseDumpName)) releaseErr := emptyStringOnError(gs.FileContentsFromGS(g.storageClient, config.GS.Bucket, job.ReleaseErrName)) reports <- fuzz.ParseBinaryReport(job.FuzzType, job.FuzzName, debugDump, debugErr, releaseDump, releaseErr) atomic.AddInt32(&g.completedCounter, 1) if g.completedCounter%100 == 0 { glog.Infof("%d fuzzes downloaded", g.completedCounter) } } }
// DownloadBinarySeedFiles downloads the seed skp files stored in Google // Storage to be used by afl-fuzz. It places them in // config.Generator.FuzzSamples after cleaning the folder out. // It returns an error on failure. func DownloadBinarySeedFiles(storageClient *storage.Client) error { if err := os.RemoveAll(config.Generator.FuzzSamples); err != nil && !os.IsNotExist(err) { return fmt.Errorf("Could not clean binary seed path %s: %s", config.Generator.FuzzSamples, err) } if err := os.MkdirAll(config.Generator.FuzzSamples, 0755); err != nil { return fmt.Errorf("Could not create binary seed path %s: %s", config.Generator.FuzzSamples, err) } err := gs.AllFilesInDir(storageClient, config.GS.Bucket, "skp_samples", func(item *storage.ObjectAttrs) { name := item.Name // skip the parent folder if name == "skp_samples/" { return } content, err := gs.FileContentsFromGS(storageClient, config.GS.Bucket, name) if err != nil { glog.Errorf("Problem downloading %s from Google Storage, continuing anyway", item.Name) return } fileName := filepath.Join(config.Generator.FuzzSamples, strings.SplitAfter(name, "skp_samples/")[1]) if err = ioutil.WriteFile(fileName, content, 0644); err != nil && !os.IsExist(err) { glog.Errorf("Problem creating binary seed file %s, continuing anyway", fileName) } }) return err }
// download starts a go routine that waits for files to download from Google Storage // and downloads them to downloadPath. When it is done (on error or when the channel // is closed), it signals to the WaitGroup that it is done. // It also logs the progress on downloading the fuzzes func (v *VersionUpdater) download(toDownload <-chan string, downloadPath string, wg *sync.WaitGroup) { defer wg.Done() for file := range toDownload { contents, err := gs.FileContentsFromGS(v.storageClient, config.GS.Bucket, file) if err != nil { glog.Warningf("Problem downloading fuzz %s, continuing anyway: %s", file, err) } hash := file[strings.LastIndex(file, "/")+1:] onDisk := filepath.Join(downloadPath, hash) if err = ioutil.WriteFile(onDisk, contents, 0644); err != nil && !os.IsExist(err) { glog.Warningf("Problem writing fuzz to %s, continuing anyway: %s", onDisk, err) } atomic.AddInt32(&completedCounter, 1) if completedCounter%100 == 0 { glog.Infof("%d fuzzes downloaded", completedCounter) } } }
func fuzzHandler(w http.ResponseWriter, r *http.Request) { v := mux.Vars(r) kind := v["kind"] name := v["name"] xs := strings.Split(name, ".") hash, ftype := xs[0], xs[1] contents, err := gs.FileContentsFromGS(storageClient, config.GS.Bucket, fmt.Sprintf("%s_fuzzes/%s/bad/%s/%s/%s", kind, config.FrontEnd.SkiaVersion.Hash, ftype, hash, hash)) if err != nil { util.ReportError(w, r, err, fmt.Sprintf("Fuzz with name %v not found", v["name"])) return } w.Header().Set("Content-Type", "application/octet-stream") w.Header().Set("Content-Disposition", name) n, err := w.Write(contents) if err != nil || n != len(contents) { glog.Errorf("Could only serve %d bytes of fuzz %s, not %d: %s", n, hash, len(contents), err) return } }
func metadataHandler(w http.ResponseWriter, r *http.Request) { v := mux.Vars(r) ftype := v["type"] kind := v["kind"] name := v["name"] hash := strings.Split(name, "_")[0] contents, err := gs.FileContentsFromGS(storageClient, config.GS.Bucket, fmt.Sprintf("%s_fuzzes/%s/bad/%s/%s/%s", kind, config.FrontEnd.SkiaVersion.Hash, ftype, hash, name)) if err != nil { util.ReportError(w, r, err, fmt.Sprintf("Fuzz with name %v not found", v["name"])) return } w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Disposition", name) n, err := w.Write(contents) if err != nil || n != len(contents) { glog.Errorf("Could only serve %d bytes of metadata %s, not %d: %s", n, name, len(contents), err) return } }