func readCorpus() []*prog.Prog { if *flagCorpus == "" { return nil } files, err := ioutil.ReadDir(*flagCorpus) if err != nil { failf("failed to read corpus dir: %v", err) } var progs []*prog.Prog for _, f := range files { if f.IsDir() { continue } data, err := ioutil.ReadFile(filepath.Join(*flagCorpus, f.Name())) if err != nil { failf("failed to read corpus file: %v", err) } p, err := prog.Deserialize(data) if err != nil { failf("failed to deserialize corpus program: %v", err) } progs = append(progs, p) } return progs }
func main() { if len(os.Args) != 2 { fatalf("usage: syz-upgrage corpus_dir") } files, err := ioutil.ReadDir(os.Args[1]) if err != nil { fatalf("failed to read corpus dir: %v", err) } for _, f := range files { fname := filepath.Join(os.Args[1], f.Name()) data, err := ioutil.ReadFile(fname) if err != nil { fatalf("failed to read program: %v", err) } p, err := prog.Deserialize(data) if err != nil { fatalf("failed to deserialize program: %v", err) } data1 := p.Serialize() if bytes.Equal(data, data1) { continue } fmt.Printf("upgrading:\n%s\nto:\n%s\n\n", data, data1) hash := sha1.Sum(data1) fname1 := filepath.Join(os.Args[1], hex.EncodeToString(hash[:])) if err := ioutil.WriteFile(fname1, data1, 0640); err != nil { fatalf("failed to write program: %v", err) } if err := os.Remove(fname); err != nil { fatalf("failed to remove program: %v", err) } } }
func readCorpus() []*prog.Prog { if *flagCorpus == "" { return nil } zipr, err := zip.OpenReader(*flagCorpus) if err != nil { Fatalf("failed to open bin file: %v", err) } var progs []*prog.Prog for _, zipf := range zipr.File { r, err := zipf.Open() if err != nil { Fatalf("failed to uzip file from input archive: %v", err) } data, err := ioutil.ReadAll(r) if err != nil { Fatalf("failed to read corpus file: %v", err) } p, err := prog.Deserialize(data) if err != nil { Fatalf("failed to deserialize corpus program: %v", err) } progs = append(progs, p) r.Close() } zipr.Close() return progs }
func parseFile(fn string) []*prog.Prog { logf, err := os.Open(fn) if err != nil { log.Fatalf("failed to open log file: %v", err) } log.Printf("parsing log %v", fn) s := bufio.NewScanner(logf) var cur []byte var last *prog.Prog var progs []*prog.Prog for s.Scan() { ln := s.Text() tmp := append(cur, ln...) tmp = append(tmp, '\n') p, err := prog.Deserialize(tmp) if err == nil { cur = tmp last = p continue } if last != nil { progs = append(progs, last) last = nil cur = cur[:0] } } if last != nil { progs = append(progs, last) } return progs }
func addInput(inp RpcInput) { corpusMu.Lock() defer corpusMu.Unlock() coverMu.Lock() defer coverMu.Unlock() if noCover { panic("should not be called when coverage is disabled") } p, err := prog.Deserialize(inp.Prog) if err != nil { panic(err) } if inp.CallIndex < 0 || inp.CallIndex >= len(p.Calls) { panic("bad call index") } call := p.Calls[inp.CallIndex].Meta sig := hash(inp.Prog) if _, ok := corpusHashes[sig]; ok { return } cov := cover.Canonicalize(inp.Cover) diff := cover.Difference(cov, maxCover[call.CallID]) diff = cover.Difference(diff, flakes) if len(diff) == 0 { return } corpus = append(corpus, p) corpusCover[call.CallID] = cover.Union(corpusCover[call.CallID], cov) maxCover[call.CallID] = cover.Union(maxCover[call.CallID], cov) corpusHashes[hash(inp.Prog)] = struct{}{} }
func main() { flag.Parse() if flag.NArg() != 1 { fmt.Fprintf(os.Stderr, "usage: mutate program\n") os.Exit(1) } data, err := ioutil.ReadFile(flag.Arg(0)) if err != nil { fmt.Fprintf(os.Stderr, "failed to read prog file: %v\n", err) os.Exit(1) } p, err := prog.Deserialize(data) if err != nil { fmt.Fprintf(os.Stderr, "failed to deserialize the program: %v\n", err) os.Exit(1) } prios := prog.CalculatePriorities(nil) ct := prog.BuildChoiceTable(prios, nil) seed := time.Now().UnixNano() if *flagSeed != -1 { seed = int64(*flagSeed) } rs := rand.NewSource(seed) p.Mutate(rs, len(p.Calls)+10, ct) fmt.Printf("%s\n", p.Serialize()) }
func (mgr *Manager) pollMaster() { for { a := &MasterPollArgs{mgr.cfg.Name} r := &MasterPollRes{} if err := mgr.master.Call("Master.PollInputs", a, r); err != nil { fatalf("failed to poll master: %v", err) } logf(3, "polling master, got %v inputs", len(r.Inputs)) if len(r.Inputs) == 0 { break } nextProg: for _, prg := range r.Inputs { p, err := prog.Deserialize(prg) if err != nil { logf(0, "failed to deserialize master program: %v", err) continue } if mgr.syscalls != nil { for _, c := range p.Calls { if !mgr.syscalls[c.Meta.ID] { continue nextProg } } } sig := hash(prg) if _, ok := mgr.masterHashes[sig]; ok { continue } mgr.masterHashes[sig] = struct{}{} mgr.masterCorpus = append(mgr.masterCorpus, prg) mgr.candidates = append(mgr.candidates, prg) } } }
func (mgr *Manager) httpCorpus(w http.ResponseWriter, r *http.Request) { mgr.mu.Lock() defer mgr.mu.Unlock() var data []UIInput call := r.FormValue("call") for i, inp := range mgr.corpus { if call != inp.Call { continue } p, err := prog.Deserialize(inp.Prog) if err != nil { http.Error(w, fmt.Sprintf("failed to deserialize program: %v", err), http.StatusInternalServerError) } data = append(data, UIInput{ Short: p.String(), Full: string(inp.Prog), Cover: len(inp.Cover), N: i, }) } sort.Sort(UIInputArray(data)) if err := corpusTemplate.Execute(w, data); err != nil { http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) } }
func (mgr *Manager) minimizeCorpus() { if !mgr.cfg.Nocover && len(mgr.corpus) != 0 { // First, sort corpus per call. type Call struct { inputs []RpcInput cov []cover.Cover } calls := make(map[string]Call) for _, inp := range mgr.corpus { c := calls[inp.Call] c.inputs = append(c.inputs, inp) c.cov = append(c.cov, inp.Cover) calls[inp.Call] = c } // Now minimize and build new corpus. var newCorpus []RpcInput for _, c := range calls { for _, idx := range cover.Minimize(c.cov) { newCorpus = append(newCorpus, c.inputs[idx]) } } logf(1, "minimized corpus: %v -> %v", len(mgr.corpus), len(newCorpus)) mgr.corpus = newCorpus } var corpus []*prog.Prog for _, inp := range mgr.corpus { p, err := prog.Deserialize(inp.Prog) if err != nil { panic(err) } corpus = append(corpus, p) } mgr.prios = prog.CalculatePriorities(corpus) }
func main() { flag.Parse() if len(flag.Args()) != 1 { fmt.Fprintf(os.Stderr, "usage: prog2c [-threaded [-collide]] prog_file\n") os.Exit(1) } data, err := ioutil.ReadFile(flag.Args()[0]) if err != nil { fmt.Fprintf(os.Stderr, "failed to read prog file: %v\n", err) os.Exit(1) } p, err := prog.Deserialize(data) if err != nil { fmt.Fprintf(os.Stderr, "failed to deserialize the program: %v\n", err) os.Exit(1) } opts := csource.Options{ Threaded: *flagThreaded, Collide: *flagCollide, } src := csource.Write(p, opts) if formatted, err := csource.Format(src); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) } else { src = formatted } os.Stdout.Write(src) }
func addInput(inp RpcInput) { p, err := prog.Deserialize(inp.Prog) if err != nil { panic(err) } if inp.CallIndex < 0 || inp.CallIndex >= len(p.Calls) { panic("bad call index") } call := p.Calls[inp.CallIndex].Meta sig := hash(inp.Prog) if _, ok := corpusHashes[sig]; ok { return } cov := cover.Canonicalize(inp.Cover) diff := cover.Difference(cov, maxCover[call.CallID]) diff = cover.Difference(diff, flakes) if len(diff) == 0 { return } inp1 := Input{p, inp.CallIndex, cov} corpus = append(corpus, inp1) corpusCover[call.CallID] = cover.Union(corpusCover[call.CallID], cov) maxCover[call.CallID] = cover.Union(maxCover[call.CallID], cov) corpusHashes[hash(inp.Prog)] = struct{}{} }
// main reads a serialized program from -prog, executes it once through the
// ipc executor, prints the result, and optionally dumps per-call coverage
// files in sanitizer (sancov) format.
func main() {
	flag.Parse()
	data, err := ioutil.ReadFile(*flagProg)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to read prog file: %v\n", err)
		os.Exit(1)
	}
	p, err := prog.Deserialize(data)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to deserialize the program: %v\n", err)
		os.Exit(1)
	}
	// Translate command-line flags into executor flag bits.
	var flags uint64
	if *flagThreaded {
		flags |= ipc.FlagThreaded
	}
	if *flagDebug {
		flags |= ipc.FlagDebug
	}
	if *flagStrace {
		flags |= ipc.FlagStrace
	}
	if *flagCover != "" {
		flags |= ipc.FlagCover
	}
	if *flagDedup {
		flags |= ipc.FlagDedupCover
	}
	env, err := ipc.MakeEnv(*flagExecutor, *flagTimeout, flags)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create execution environment: %v\n", err)
		os.Exit(1)
	}
	defer env.Close()
	output, strace, cov, failed, hanged, err := env.Exec(p)
	fmt.Printf("result: failed=%v hanged=%v err=%v\n\n%s", failed, hanged, err, output)
	if *flagStrace {
		fmt.Printf("strace output:\n%s", strace)
	}
	// Coverage is dumped in sanitizer format.
	// github.com/google/sanitizers/tools/sancov command can be used to dump PCs,
	// then they can be piped via addr2line to symbolize.
	for i, c := range cov {
		fmt.Printf("call #%v: coverage %v\n", i, len(c))
		if len(c) == 0 {
			continue
		}
		buf := new(bytes.Buffer)
		// Sancov 64-bit file magic, followed by the restored PCs.
		binary.Write(buf, binary.LittleEndian, uint64(0xC0BFFFFFFFFFFF64))
		for _, pc := range c {
			binary.Write(buf, binary.LittleEndian, cover.RestorePC(pc))
		}
		// One coverage file per call: <flagCover>.<call index>.
		err := ioutil.WriteFile(fmt.Sprintf("%v.%v", *flagCover, i), buf.Bytes(), 0660)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to write coverage file: %v\n", err)
			os.Exit(1)
		}
	}
}
func main() { flag.Parse() var progs []*prog.Prog for _, fn := range strings.Split(*flagLog, ",") { logf, err := os.Open(fn) if err != nil { log.Fatalf("failed to open log file: %v", err) } log.Printf("parsing log %v", fn) s := bufio.NewScanner(logf) var cur []byte var last *prog.Prog for s.Scan() { ln := s.Text() tmp := append(cur, ln...) tmp = append(tmp, '\n') p, err := prog.Deserialize(tmp) if err == nil { cur = tmp last = p continue } if last != nil { progs = append(progs, last) last = nil cur = cur[:0] } } if last != nil { progs = append(progs, last) } } log.Printf("parsed %v programs", len(progs)) if len(progs) == 0 { return } var pos uint32 for p := 0; p < 16; p++ { go func() { env, err := ipc.MakeEnv(*flagExecutor, 5*time.Second, 0) if err != nil { log.Fatalf("failed to create ipc env: %v", err) } for { idx := int(atomic.AddUint32(&pos, 1) - 1) if idx%1000 == 0 { log.Printf("executing %v\n", idx) } _, _, _, _, _, err := env.Exec(progs[idx%len(progs)]) if err != nil { log.Printf("failed to execute program: %v", err) } } }() } select {} }
// NewInput saves new interesting input on master. func (m *Master) NewInput(a *NewMasterInputArgs, r *int) error { p, err := prog.Deserialize(a.Prog) if err != nil { logf(0, "bogus new input from %v: %v\n%s\n", a.Name, err, a.Prog) return fmt.Errorf("the program is bogus: %v", err) } m.mu.Lock() defer m.mu.Unlock() if !m.corpus.add(a.Prog) { return nil } m.lastInput = time.Now() logf(1, "new input from %v: %s", a.Name, p) return nil }
// minimizeCorpus shrinks the in-memory corpus to a coverage-preserving
// subset (per syscall), recomputes call priorities, and — once all
// candidates have been triaged — minimizes the on-disk persistent corpus
// to match.
func (mgr *Manager) minimizeCorpus() {
	if mgr.cfg.Cover && len(mgr.corpus) != 0 {
		// First, sort corpus per call.
		type Call struct {
			inputs []RpcInput
			cov    []cover.Cover
		}
		calls := make(map[string]Call)
		for _, inp := range mgr.corpus {
			c := calls[inp.Call]
			c.inputs = append(c.inputs, inp)
			c.cov = append(c.cov, inp.Cover)
			calls[inp.Call] = c
		}
		// Now minimize and build new corpus.
		var newCorpus []RpcInput
		for _, c := range calls {
			for _, idx := range cover.Minimize(c.cov) {
				newCorpus = append(newCorpus, c.inputs[idx])
			}
		}
		Logf(1, "minimized corpus: %v -> %v", len(mgr.corpus), len(newCorpus))
		mgr.corpus = newCorpus
	}
	// Recompute priorities from the (possibly shrunk) corpus.
	var corpus []*prog.Prog
	for _, inp := range mgr.corpus {
		p, err := prog.Deserialize(inp.Prog)
		if err != nil {
			// Corpus entries were validated on receipt, so this is a bug.
			panic(err)
		}
		corpus = append(corpus, p)
	}
	mgr.prios = prog.CalculatePriorities(corpus)
	// Don't minimize persistent corpus until fuzzers have triaged all inputs from it.
	if len(mgr.candidates) == 0 {
		hashes := make(map[string]bool)
		for _, inp := range mgr.corpus {
			h := hash(inp.Prog)
			hashes[hex.EncodeToString(h[:])] = true
		}
		// Programs with disabled syscalls are never executed but must
		// survive minimization too.
		for _, h := range mgr.disabledHashes {
			hashes[h] = true
		}
		mgr.persistentCorpus.minimize(hashes)
	}
}
func main() { if len(os.Args) != 2 { fmt.Fprintf(os.Stderr, "usage: prog2c prog_file\n") os.Exit(1) } data, err := ioutil.ReadFile(os.Args[1]) if err != nil { fmt.Fprintf(os.Stderr, "failed to read prog file: %v\n", err) os.Exit(1) } p, err := prog.Deserialize(data) if err != nil { fmt.Fprintf(os.Stderr, "failed to deserialize the program: %v\n", err) os.Exit(1) } src := p.WriteCSource() os.Stdout.Write(src) }
// main starts the master: loads the persistent corpus and crashers (dropping
// corpus entries that no longer deserialize), serves the HTTP status pages,
// and accepts manager RPC connections until loop() returns.
func main() {
	flag.Parse()
	if *flagWorkdir == "" {
		fatalf("-workdir is not set")
	}
	if *flagAddr == "" {
		fatalf("-addr is not set")
	}
	if *flagHTTP == "" {
		fatalf("-http is not set")
	}
	ln, err := net.Listen("tcp", *flagAddr)
	if err != nil {
		fatalf("failed to listen: %v", err)
	}
	m := &Master{}
	m.managers = make(map[string]*Manager)
	m.startTime = time.Now()
	m.lastInput = time.Now()
	logf(0, "loading corpus...")
	// The filter callback deletes entries that fail to deserialize
	// (e.g. written by an older format).
	m.corpus = newPersistentSet(filepath.Join(*flagWorkdir, "corpus"), func(data []byte) bool {
		if _, err := prog.Deserialize(data); err != nil {
			logf(0, "deleting broken program: %v\n%s", err, data)
			return false
		}
		return true
	})
	m.crashers = newPersistentSet(filepath.Join(*flagWorkdir, "crashers"), nil)
	http.HandleFunc("/", m.httpInfo)
	http.HandleFunc("/minimize", m.httpMinimize)
	go func() {
		logf(0, "serving http on http://%v", *flagHTTP)
		// ListenAndServe only returns on error; crash loudly.
		panic(http.ListenAndServe(*flagHTTP, nil))
	}()
	logf(0, "serving rpc on tcp://%v", *flagAddr)
	s := rpc.NewServer()
	s.Register(m)
	go s.Accept(ln)
	m.loop()
}
func main() { flag.Parse() if *flagProg == "" { flag.PrintDefaults() os.Exit(1) } data, err := ioutil.ReadFile(*flagProg) if err != nil { fmt.Fprintf(os.Stderr, "failed to read prog file: %v\n", err) os.Exit(1) } p, err := prog.Deserialize(data) if err != nil { fmt.Fprintf(os.Stderr, "failed to deserialize the program: %v\n", err) os.Exit(1) } opts := csource.Options{ Threaded: *flagThreaded, Collide: *flagCollide, Repeat: *flagRepeat, Procs: *flagProcs, Sandbox: *flagSandbox, Repro: false, } src, err := csource.Write(p, opts) if err != nil { fmt.Fprintf(os.Stderr, "failed to generate C spurce: %v\n", err) os.Exit(1) } if formatted, err := csource.Format(src); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) } else { src = formatted } os.Stdout.Write(src) }
// RunManager boots the manager: loads and filters the persistent corpus,
// starts the HTTP and fuzzer-RPC servers, launches one goroutine per VM,
// and blocks until a SIGINT-triggered shutdown completes.
func RunManager(cfg *config.Config, syscalls map[int]bool, suppressions []*regexp.Regexp) {
	crashdir := filepath.Join(cfg.Workdir, "crashes")
	os.MkdirAll(crashdir, 0700)
	// Render the enabled syscall set as a comma-separated ID list.
	enabledSyscalls := ""
	if len(syscalls) != 0 {
		buf := new(bytes.Buffer)
		for c := range syscalls {
			fmt.Fprintf(buf, ",%v", c)
		}
		enabledSyscalls = buf.String()[1:]
		Logf(1, "enabled syscalls: %v", enabledSyscalls)
	}
	mgr := &Manager{
		cfg:             cfg,
		crashdir:        crashdir,
		startTime:       time.Now(),
		stats:           make(map[string]uint64),
		enabledSyscalls: enabledSyscalls,
		suppressions:    suppressions,
		corpusCover:     make([]cover.Cover, sys.CallCount),
		fuzzers:         make(map[string]*Fuzzer),
	}
	Logf(0, "loading corpus...")
	// Delete on-disk programs that no longer deserialize.
	mgr.persistentCorpus = newPersistentSet(filepath.Join(cfg.Workdir, "corpus"), func(data []byte) bool {
		if _, err := prog.Deserialize(data); err != nil {
			Logf(0, "deleting broken program: %v\n%s", err, data)
			return false
		}
		return true
	})
	for _, data := range mgr.persistentCorpus.a {
		p, err := prog.Deserialize(data)
		if err != nil {
			Fatalf("failed to deserialize program: %v", err)
		}
		disabled := false
		for _, c := range p.Calls {
			if !syscalls[c.Meta.ID] {
				disabled = true
				break
			}
		}
		if disabled {
			// This program contains a disabled syscall.
			// We won't execute it, but remember its hash so
			// it is not deleted during minimization.
			h := hash(data)
			mgr.disabledHashes = append(mgr.disabledHashes, hex.EncodeToString(h[:]))
			continue
		}
		mgr.candidates = append(mgr.candidates, data)
	}
	Logf(0, "loaded %v programs", len(mgr.persistentCorpus.m))
	// Create HTTP server.
	mgr.initHttp()
	// Create RPC server for fuzzers.
	ln, err := net.Listen("tcp", cfg.Rpc)
	if err != nil {
		Fatalf("failed to listen on %v: %v", cfg.Rpc, err)
	}
	Logf(0, "serving rpc on tcp://%v", ln.Addr())
	mgr.port = ln.Addr().(*net.TCPAddr).Port
	s := rpc.NewServer()
	s.Register(mgr)
	go func() {
		for {
			conn, err := ln.Accept()
			if err != nil {
				Logf(0, "failed to accept an rpc connection: %v", err)
				continue
			}
			go s.ServeCodec(jsonrpc.NewServerCodec(conn))
		}
	}()
	Logf(0, "booting test machines...")
	var shutdown uint32
	var wg sync.WaitGroup
	// One slot per VM goroutine plus one released by the signal handler.
	wg.Add(cfg.Count + 1)
	for i := 0; i < cfg.Count; i++ {
		i := i // capture loop variable for the goroutine
		go func() {
			defer wg.Done()
			for {
				vmCfg, err := config.CreateVMConfig(cfg, i)
				// Re-check shutdown around slow operations so the
				// goroutine exits promptly on SIGINT.
				if atomic.LoadUint32(&shutdown) != 0 {
					break
				}
				if err != nil {
					Fatalf("failed to create VM config: %v", err)
				}
				ok := mgr.runInstance(vmCfg, i == 0)
				if atomic.LoadUint32(&shutdown) != 0 {
					break
				}
				if !ok {
					// Back off before restarting a failed instance.
					time.Sleep(10 * time.Second)
				}
			}
		}()
	}
	// Periodic stats logging.
	go func() {
		for {
			time.Sleep(10 * time.Second)
			mgr.mu.Lock()
			executed := mgr.stats["exec total"]
			crashes := mgr.stats["crashes"]
			mgr.mu.Unlock()
			Logf(0, "executed programs: %v, crashes: %v", executed, crashes)
		}
	}()
	// First SIGINT: graceful shutdown; second SIGINT: hard exit.
	go func() {
		c := make(chan os.Signal, 2)
		signal.Notify(c, syscall.SIGINT)
		<-c
		wg.Done()
		DisableLog() // VMs will fail
		atomic.StoreUint32(&mgr.shutdown, 1)
		close(vm.Shutdown)
		Logf(-1, "shutting down...")
		atomic.StoreUint32(&shutdown, 1)
		<-c
		Fatalf("terminating")
	}()
	wg.Wait()
}
// main is the fuzzer entry point: it connects to the manager, spawns one
// work goroutine per proc that triages/executes/generates/mutates programs,
// and runs the manager poll loop on the main goroutine.
func main() {
	debug.SetGCPercent(50)
	flag.Parse()
	switch *flagOutput {
	case "none", "stdout", "dmesg", "file":
	default:
		fmt.Fprintf(os.Stderr, "-output flag must be one of none/stdout/dmesg/file\n")
		os.Exit(1)
	}
	logf(0, "started")
	corpusCover = make([]cover.Cover, sys.CallCount)
	maxCover = make([]cover.Cover, sys.CallCount)
	corpusHashes = make(map[Sig]struct{})
	logf(0, "dialing manager at %v", *flagManager)
	conn, err := jsonrpc.Dial("tcp", *flagManager)
	if err != nil {
		panic(err)
	}
	manager = conn
	a := &ConnectArgs{*flagName}
	r := &ConnectRes{}
	if err := manager.Call("Manager.Connect", a, r); err != nil {
		panic(err)
	}
	calls := buildCallList(r.EnabledCalls)
	ct := prog.BuildChoiceTable(r.Prios, calls)
	kmemleakInit()
	flags, timeout := ipc.DefaultFlags()
	noCover = flags&ipc.FlagCover == 0
	if !noCover {
		// Fail fast if the kernel lacks KCOV support.
		fd, err := syscall.Open("/sys/kernel/debug/kcov", syscall.O_RDWR, 0)
		if err != nil {
			log.Fatalf("BUG: /sys/kernel/debug/kcov is missing (%v). Enable CONFIG_KCOV and mount debugfs.", err)
		}
		syscall.Close(fd)
	}
	gate = ipc.NewGate(2 * *flagProcs)
	envs := make([]*ipc.Env, *flagProcs)
	for pid := 0; pid < *flagProcs; pid++ {
		env, err := ipc.MakeEnv(*flagExecutor, timeout, flags)
		if err != nil {
			panic(err)
		}
		envs[pid] = env
		pid := pid // capture loop variable for the goroutine
		go func() {
			rs := rand.NewSource(time.Now().UnixNano() + int64(pid)*1e12)
			rnd := rand.New(rs)
			for i := 0; ; i++ {
				// Triage and candidate work takes priority over
				// generation/mutation. Cheap RLock peek first,
				// then re-check under the write lock.
				triageMu.RLock()
				if len(triage) != 0 || len(candidates) != 0 {
					triageMu.RUnlock()
					triageMu.Lock()
					if len(triage) != 0 {
						last := len(triage) - 1
						inp := triage[last]
						triage = triage[:last]
						triageMu.Unlock()
						logf(1, "triaging : %s", inp.p)
						triageInput(pid, env, inp)
						continue
					} else if len(candidates) != 0 {
						last := len(candidates) - 1
						p := candidates[last]
						candidates = candidates[:last]
						triageMu.Unlock()
						execute(pid, env, p, &statExecCandidate)
						continue
					} else {
						triageMu.Unlock()
					}
				} else {
					triageMu.RUnlock()
				}
				corpusMu.RLock()
				if len(corpus) == 0 || i%10 == 0 {
					// Generate a fresh program (always when the
					// corpus is empty, otherwise every 10th round).
					corpusMu.RUnlock()
					p := prog.Generate(rnd, programLength, ct)
					logf(1, "#%v: generated: %s", i, p)
					execute(pid, env, p, &statExecGen)
					p.Mutate(rnd, programLength, ct)
					logf(1, "#%v: mutated: %s", i, p)
					execute(pid, env, p, &statExecFuzz)
				} else {
					// Mutate a random existing corpus program.
					p0 := corpus[rnd.Intn(len(corpus))]
					corpusMu.RUnlock()
					p := p0.Clone()
					p.Mutate(rs, programLength, ct)
					logf(1, "#%v: mutated: %s <- %s", i, p, p0)
					execute(pid, env, p, &statExecFuzz)
				}
			}
		}()
	}
	var lastPoll time.Time
	var lastPrint time.Time
	for range time.NewTicker(3 * time.Second).C {
		if *flagOutput != "stdout" && time.Since(lastPrint) > 10*time.Second {
			// Keep-alive for manager.
			logf(0, "alive")
			lastPrint = time.Now()
		}
		if time.Since(lastPoll) > 10*time.Second {
			// Don't poll while there are still unprocessed candidates.
			triageMu.RLock()
			if len(candidates) != 0 {
				triageMu.RUnlock()
				continue
			}
			triageMu.RUnlock()
			a := &PollArgs{
				Name:  *flagName,
				Stats: make(map[string]uint64),
			}
			// Swap-out stat counters so each poll reports deltas.
			for _, env := range envs {
				a.Stats["exec total"] += atomic.SwapUint64(&env.StatExecs, 0)
				a.Stats["executor restarts"] += atomic.SwapUint64(&env.StatRestarts, 0)
			}
			a.Stats["exec gen"] = atomic.SwapUint64(&statExecGen, 0)
			a.Stats["exec fuzz"] = atomic.SwapUint64(&statExecFuzz, 0)
			a.Stats["exec candidate"] = atomic.SwapUint64(&statExecCandidate, 0)
			a.Stats["exec triage"] = atomic.SwapUint64(&statExecTriage, 0)
			a.Stats["exec minimize"] = atomic.SwapUint64(&statExecMinimize, 0)
			a.Stats["fuzzer new inputs"] = atomic.SwapUint64(&statNewInput, 0)
			r := &PollRes{}
			if err := manager.Call("Manager.Poll", a, r); err != nil {
				panic(err)
			}
			for _, inp := range r.NewInputs {
				addInput(inp)
			}
			for _, data := range r.Candidates {
				p, err := prog.Deserialize(data)
				if err != nil {
					panic(err)
				}
				if noCover {
					// Without coverage there is nothing to triage.
					corpusMu.Lock()
					corpus = append(corpus, p)
					corpusMu.Unlock()
				} else {
					triageMu.Lock()
					candidates = append(candidates, p)
					triageMu.Unlock()
				}
			}
			if len(r.Candidates) == 0 {
				if atomic.LoadUint32(&allTriaged) == 0 {
					if *flagLeak {
						kmemleakScan(false)
					}
					atomic.StoreUint32(&allTriaged, 1)
				}
			}
			if len(r.NewInputs) == 0 && len(r.Candidates) == 0 {
				lastPoll = time.Now()
			}
		}
	}
}
// RunManager boots the manager: loads and filters the persistent corpus,
// starts HTTP/RPC servers and background stat/hub-sync loops, installs the
// SIGINT handler, and hands control to the VM loop.
func RunManager(cfg *config.Config, syscalls map[int]bool) {
	crashdir := filepath.Join(cfg.Workdir, "crashes")
	os.MkdirAll(crashdir, 0700)
	// Render the enabled syscall set as a comma-separated ID list.
	enabledSyscalls := ""
	if len(syscalls) != 0 {
		buf := new(bytes.Buffer)
		for c := range syscalls {
			fmt.Fprintf(buf, ",%v", c)
		}
		enabledSyscalls = buf.String()[1:]
		Logf(1, "enabled syscalls: %v", enabledSyscalls)
	}
	mgr := &Manager{
		cfg:             cfg,
		crashdir:        crashdir,
		startTime:       time.Now(),
		stats:           make(map[string]uint64),
		enabledSyscalls: enabledSyscalls,
		corpusCover:     make([]cover.Cover, sys.CallCount),
		fuzzers:         make(map[string]*Fuzzer),
		fresh:           true,
		vmStop:          make(chan bool),
	}
	Logf(0, "loading corpus...")
	// Any on-disk entry (even a broken one) means this is not a fresh run.
	// Broken entries are deleted.
	mgr.persistentCorpus = newPersistentSet(filepath.Join(cfg.Workdir, "corpus"), func(data []byte) bool {
		mgr.fresh = false
		if _, err := prog.Deserialize(data); err != nil {
			Logf(0, "deleting broken program: %v\n%s", err, data)
			return false
		}
		return true
	})
	for _, data := range mgr.persistentCorpus.a {
		p, err := prog.Deserialize(data)
		if err != nil {
			Fatalf("failed to deserialize program: %v", err)
		}
		disabled := false
		for _, c := range p.Calls {
			if !syscalls[c.Meta.ID] {
				disabled = true
				break
			}
		}
		if disabled {
			// This program contains a disabled syscall.
			// We won't execute it, but remember its hash so
			// it is not deleted during minimization.
			// TODO: use mgr.enabledCalls which accounts for missing devices, etc.
			// But it is available only after vm check.
			sig := hash.Hash(data)
			mgr.disabledHashes = append(mgr.disabledHashes, sig.String())
			continue
		}
		mgr.candidates = append(mgr.candidates, data)
	}
	Logf(0, "loaded %v programs (%v total)", len(mgr.candidates), len(mgr.persistentCorpus.m))
	// Create HTTP server.
	mgr.initHttp()
	// Create RPC server for fuzzers.
	ln, err := net.Listen("tcp", cfg.Rpc)
	if err != nil {
		Fatalf("failed to listen on %v: %v", cfg.Rpc, err)
	}
	Logf(0, "serving rpc on tcp://%v", ln.Addr())
	mgr.port = ln.Addr().(*net.TCPAddr).Port
	s := rpc.NewServer()
	s.Register(mgr)
	go func() {
		for {
			conn, err := ln.Accept()
			if err != nil {
				Logf(0, "failed to accept an rpc connection: %v", err)
				continue
			}
			// Keep-alives detect dead fuzzer connections.
			conn.(*net.TCPConn).SetKeepAlive(true)
			conn.(*net.TCPConn).SetKeepAlivePeriod(time.Minute)
			go s.ServeCodec(jsonrpc.NewServerCodec(conn))
		}
	}()
	// Periodic stats logging.
	go func() {
		for {
			time.Sleep(10 * time.Second)
			mgr.mu.Lock()
			executed := mgr.stats["exec total"]
			crashes := mgr.stats["crashes"]
			mgr.mu.Unlock()
			Logf(0, "executed programs: %v, crashes: %v", executed, crashes)
		}
	}()
	// Periodic corpus exchange with the hub, if one is configured.
	if mgr.cfg.Hub_Addr != "" {
		go func() {
			for {
				time.Sleep(time.Minute)
				mgr.hubSync()
			}
		}()
	}
	// First SIGINT: graceful shutdown; second SIGINT: hard exit.
	go func() {
		c := make(chan os.Signal, 2)
		signal.Notify(c, syscall.SIGINT)
		<-c
		close(vm.Shutdown)
		Logf(0, "shutting down...")
		<-c
		Fatalf("terminating")
	}()
	mgr.vmLoop()
}
// hubSync exchanges corpus programs with the hub: it connects lazily on
// first use, uploads local programs the hub has not seen, reports deletions,
// and enqueues valid incoming programs as candidates. No-op until the VM
// check has passed and all current candidates are consumed.
func (mgr *Manager) hubSync() {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	if !mgr.vmChecked || len(mgr.candidates) != 0 {
		return
	}
	mgr.minimizeCorpus()
	if mgr.hub == nil {
		// First sync: establish the connection and upload the whole corpus.
		conn, err := rpc.Dial("tcp", mgr.cfg.Hub_Addr)
		if err != nil {
			Logf(0, "failed to connect to hub at %v: %v", mgr.cfg.Hub_Addr, err)
			return
		}
		mgr.hub = conn
		a := &HubConnectArgs{
			Name:  mgr.cfg.Name,
			Key:   mgr.cfg.Hub_Key,
			Fresh: mgr.fresh,
			Calls: mgr.enabledCalls,
		}
		mgr.hubCorpus = make(map[hash.Sig]bool)
		for _, inp := range mgr.corpus {
			mgr.hubCorpus[hash.Hash(inp.Prog)] = true
			a.Corpus = append(a.Corpus, inp.Prog)
		}
		if err := mgr.hub.Call("Hub.Connect", a, nil); err != nil {
			Logf(0, "Hub.Connect rpc failed: %v", err)
			// Drop the connection; the next hubSync retries from scratch.
			mgr.hub.Close()
			mgr.hub = nil
			return
		}
		mgr.fresh = false
		Logf(0, "connected to hub at %v, corpus %v", mgr.cfg.Hub_Addr, len(mgr.corpus))
	}
	a := &HubSyncArgs{
		Name: mgr.cfg.Name,
		Key:  mgr.cfg.Hub_Key,
	}
	// Diff the current corpus against what the hub already has.
	corpus := make(map[hash.Sig]bool)
	for _, inp := range mgr.corpus {
		sig := hash.Hash(inp.Prog)
		corpus[sig] = true
		if mgr.hubCorpus[sig] {
			continue
		}
		mgr.hubCorpus[sig] = true
		a.Add = append(a.Add, inp.Prog)
	}
	// Programs gone from the local corpus (minimized away) are deletions.
	for sig := range mgr.hubCorpus {
		if corpus[sig] {
			continue
		}
		delete(mgr.hubCorpus, sig)
		a.Del = append(a.Del, sig.String())
	}
	r := new(HubSyncRes)
	if err := mgr.hub.Call("Hub.Sync", a, r); err != nil {
		Logf(0, "Hub.Sync rpc failed: %v", err)
		mgr.hub.Close()
		mgr.hub = nil
		return
	}
	// Enqueue incoming programs; silently drop unparseable ones.
	dropped := 0
	for _, inp := range r.Inputs {
		_, err := prog.Deserialize(inp)
		if err != nil {
			dropped++
			continue
		}
		mgr.candidates = append(mgr.candidates, inp)
	}
	mgr.stats["hub add"] += uint64(len(a.Add))
	mgr.stats["hub del"] += uint64(len(a.Del))
	mgr.stats["hub drop"] += uint64(dropped)
	mgr.stats["hub new"] += uint64(len(r.Inputs) - dropped)
	Logf(0, "hub sync: add %v, del %v, drop %v, new %v", len(a.Add), len(a.Del), dropped, len(r.Inputs)-dropped)
}
// RunManager boots the manager: loads and filters the persistent corpus,
// starts the HTTP and fuzzer-RPC servers, and launches one goroutine per VM
// that restarts instances forever; the function then blocks indefinitely.
func RunManager(cfg *config.Config, syscalls map[int]bool, suppressions []*regexp.Regexp) {
	crashdir := filepath.Join(cfg.Workdir, "crashes")
	os.MkdirAll(crashdir, 0700)
	// Render the enabled syscall set as a comma-separated ID list.
	enabledSyscalls := ""
	if len(syscalls) != 0 {
		buf := new(bytes.Buffer)
		for c := range syscalls {
			fmt.Fprintf(buf, ",%v", c)
		}
		enabledSyscalls = buf.String()[1:]
		logf(1, "enabled syscalls: %v", enabledSyscalls)
	}
	mgr := &Manager{
		cfg:             cfg,
		crashdir:        crashdir,
		startTime:       time.Now(),
		stats:           make(map[string]uint64),
		enabledSyscalls: enabledSyscalls,
		suppressions:    suppressions,
		corpusCover:     make([]cover.Cover, sys.CallCount),
		fuzzers:         make(map[string]*Fuzzer),
	}
	logf(0, "loading corpus...")
	// Delete on-disk programs that no longer deserialize.
	mgr.persistentCorpus = newPersistentSet(filepath.Join(cfg.Workdir, "corpus"), func(data []byte) bool {
		if _, err := prog.Deserialize(data); err != nil {
			logf(0, "deleting broken program: %v\n%s", err, data)
			return false
		}
		return true
	})
	for _, data := range mgr.persistentCorpus.a {
		p, err := prog.Deserialize(data)
		if err != nil {
			fatalf("failed to deserialize program: %v", err)
		}
		disabled := false
		for _, c := range p.Calls {
			if !syscalls[c.Meta.ID] {
				disabled = true
				break
			}
		}
		if disabled {
			// This program contains a disabled syscall.
			// We won't execute it, but remember its hash so
			// it is not deleted during minimization.
			h := hash(data)
			mgr.disabledHashes = append(mgr.disabledHashes, hex.EncodeToString(h[:]))
			continue
		}
		mgr.candidates = append(mgr.candidates, data)
	}
	logf(0, "loaded %v programs", len(mgr.persistentCorpus.m))
	// Create HTTP server.
	mgr.initHttp()
	// Create RPC server for fuzzers.
	ln, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		fatalf("failed to listen on localhost:0: %v", err)
	}
	logf(0, "serving rpc on tcp://%v", ln.Addr())
	mgr.port = ln.Addr().(*net.TCPAddr).Port
	s := rpc.NewServer()
	s.Register(mgr)
	go func() {
		for {
			conn, err := ln.Accept()
			if err != nil {
				logf(0, "failed to accept an rpc connection: %v", err)
				continue
			}
			go s.ServeCodec(jsonrpc.NewServerCodec(conn))
		}
	}()
	// One goroutine per VM; each restarts its instance forever,
	// backing off after failures.
	for i := 0; i < cfg.Count; i++ {
		first := i == 0 // capture: only the first instance gets special handling
		go func() {
			for {
				vmCfg, err := config.CreateVMConfig(cfg)
				if err != nil {
					fatalf("failed to create VM config: %v", err)
				}
				if !mgr.runInstance(vmCfg, first) {
					time.Sleep(10 * time.Second)
				}
			}
		}()
	}
	// Block forever; all work happens in the goroutines above.
	select {}
}
// main is the fuzzer entry point: it connects to the manager, optionally
// runs the capability check, spawns one work goroutine per proc that
// triages/executes/generates/mutates programs, and runs the manager poll
// loop (ticker- or demand-driven) on the main goroutine.
func main() {
	debug.SetGCPercent(50)
	flag.Parse()
	switch *flagOutput {
	case "none", "stdout", "dmesg", "file":
	default:
		fmt.Fprintf(os.Stderr, "-output flag must be one of none/stdout/dmesg/file\n")
		os.Exit(1)
	}
	Logf(0, "fuzzer started")
	go func() {
		// Handles graceful preemption on GCE.
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		<-c
		Logf(0, "SYZ-FUZZER: PREEMPTED")
		os.Exit(1)
	}()
	corpusCover = make([]cover.Cover, sys.CallCount)
	maxCover = make([]cover.Cover, sys.CallCount)
	corpusHashes = make(map[Sig]struct{})
	Logf(0, "dialing manager at %v", *flagManager)
	conn, err := jsonrpc.Dial("tcp", *flagManager)
	if err != nil {
		panic(err)
	}
	manager = conn
	a := &ConnectArgs{*flagName}
	r := &ConnectRes{}
	if err := manager.Call("Manager.Connect", a, r); err != nil {
		panic(err)
	}
	calls := buildCallList(r.EnabledCalls)
	ct := prog.BuildChoiceTable(r.Prios, calls)
	if r.NeedCheck {
		// Report machine capabilities (kcov, supported calls) back to
		// the manager once.
		a := &CheckArgs{Name: *flagName}
		if fd, err := syscall.Open("/sys/kernel/debug/kcov", syscall.O_RDWR, 0); err == nil {
			syscall.Close(fd)
			a.Kcov = true
		}
		for c := range calls {
			a.Calls = append(a.Calls, c.Name)
		}
		if err := manager.Call("Manager.Check", a, nil); err != nil {
			panic(err)
		}
	}
	kmemleakInit()
	flags, timeout, err := ipc.DefaultFlags()
	if err != nil {
		panic(err)
	}
	// Network programs need the TUN device in the executor.
	if _, ok := calls[sys.CallMap["syz_emit_ethernet"]]; ok {
		flags |= ipc.FlagEnableTun
	}
	noCover = flags&ipc.FlagCover == 0
	leakCallback := func() {
		if atomic.LoadUint32(&allTriaged) != 0 {
			// Scan for leaks once in a while (it is damn slow).
			kmemleakScan(true)
		}
	}
	if !*flagLeak {
		leakCallback = nil
	}
	gate = ipc.NewGate(2**flagProcs, leakCallback)
	// Buffered wake-up channel for the poll loop; pre-armed so the first
	// poll happens immediately.
	needPoll := make(chan struct{}, 1)
	needPoll <- struct{}{}
	envs := make([]*ipc.Env, *flagProcs)
	for pid := 0; pid < *flagProcs; pid++ {
		env, err := ipc.MakeEnv(*flagExecutor, timeout, flags, pid)
		if err != nil {
			panic(err)
		}
		envs[pid] = env
		pid := pid // capture loop variable for the goroutine
		go func() {
			rs := rand.NewSource(time.Now().UnixNano() + int64(pid)*1e12)
			rnd := rand.New(rs)
			for i := 0; ; i++ {
				// Triage and candidate work takes priority over
				// generation/mutation. Cheap RLock peek first,
				// then re-check under the write lock.
				triageMu.RLock()
				if len(triage) != 0 || len(candidates) != 0 {
					triageMu.RUnlock()
					triageMu.Lock()
					if len(triage) != 0 {
						last := len(triage) - 1
						inp := triage[last]
						triage = triage[:last]
						// Ask the poll loop for more work when
						// the queue runs low.
						wakePoll := len(triage) < *flagProcs
						triageMu.Unlock()
						if wakePoll {
							select {
							case needPoll <- struct{}{}:
							default:
							}
						}
						Logf(1, "triaging : %s", inp.p)
						triageInput(pid, env, inp)
						continue
					} else if len(candidates) != 0 {
						last := len(candidates) - 1
						p := candidates[last]
						candidates = candidates[:last]
						triageMu.Unlock()
						execute(pid, env, p, &statExecCandidate)
						continue
					} else {
						triageMu.Unlock()
					}
				} else {
					triageMu.RUnlock()
				}
				corpusMu.RLock()
				if len(corpus) == 0 || i%10 == 0 {
					// Generate a new prog.
					corpusMu.RUnlock()
					p := prog.Generate(rnd, programLength, ct)
					Logf(1, "#%v: generated: %s", i, p)
					execute(pid, env, p, &statExecGen)
					p.Mutate(rnd, programLength, ct, nil)
					Logf(1, "#%v: mutated: %s", i, p)
					execute(pid, env, p, &statExecFuzz)
				} else {
					// Mutate an existing prog.
					p0 := corpus[rnd.Intn(len(corpus))]
					p := p0.Clone()
					p.Mutate(rs, programLength, ct, corpus)
					corpusMu.RUnlock()
					Logf(1, "#%v: mutated: %s <- %s", i, p, p0)
					execute(pid, env, p, &statExecFuzz)
				}
			}
		}()
	}
	var lastPoll time.Time
	var lastPrint time.Time
	ticker := time.NewTicker(3 * time.Second).C
	for {
		// Poll on demand (needPoll) or on the periodic tick.
		poll := false
		select {
		case <-ticker:
		case <-needPoll:
			poll = true
		}
		if *flagOutput != "stdout" && time.Since(lastPrint) > 10*time.Second {
			// Keep-alive for manager.
			Logf(0, "alive")
			lastPrint = time.Now()
		}
		if poll || time.Since(lastPoll) > 10*time.Second {
			// Don't fetch more candidates than the procs can chew on.
			triageMu.RLock()
			if len(candidates) > *flagProcs {
				triageMu.RUnlock()
				continue
			}
			triageMu.RUnlock()
			a := &PollArgs{
				Name:  *flagName,
				Stats: make(map[string]uint64),
			}
			// Swap-out stat counters so each poll reports deltas.
			for _, env := range envs {
				a.Stats["exec total"] += atomic.SwapUint64(&env.StatExecs, 0)
				a.Stats["executor restarts"] += atomic.SwapUint64(&env.StatRestarts, 0)
			}
			a.Stats["exec gen"] = atomic.SwapUint64(&statExecGen, 0)
			a.Stats["exec fuzz"] = atomic.SwapUint64(&statExecFuzz, 0)
			a.Stats["exec candidate"] = atomic.SwapUint64(&statExecCandidate, 0)
			a.Stats["exec triage"] = atomic.SwapUint64(&statExecTriage, 0)
			a.Stats["exec minimize"] = atomic.SwapUint64(&statExecMinimize, 0)
			a.Stats["fuzzer new inputs"] = atomic.SwapUint64(&statNewInput, 0)
			r := &PollRes{}
			if err := manager.Call("Manager.Poll", a, r); err != nil {
				panic(err)
			}
			for _, inp := range r.NewInputs {
				addInput(inp)
			}
			for _, data := range r.Candidates {
				p, err := prog.Deserialize(data)
				if err != nil {
					panic(err)
				}
				if noCover {
					// Without coverage there is nothing to triage.
					corpusMu.Lock()
					corpus = append(corpus, p)
					corpusMu.Unlock()
				} else {
					triageMu.Lock()
					candidates = append(candidates, p)
					triageMu.Unlock()
				}
			}
			if len(r.Candidates) == 0 && atomic.LoadUint32(&allTriaged) == 0 {
				if *flagLeak {
					kmemleakScan(false)
				}
				atomic.StoreUint32(&allTriaged, 1)
			}
			if len(r.NewInputs) == 0 && len(r.Candidates) == 0 {
				lastPoll = time.Now()
			}
		}
	}
}
func main() { debug.SetGCPercent(50) flag.Parse() logf(0, "started") var calls []*sys.Call if *flagSyscalls != "" { for _, id := range strings.Split(*flagSyscalls, ",") { n, err := strconv.ParseUint(id, 10, 64) if err != nil || n >= uint64(len(sys.Calls)) { panic(fmt.Sprintf("invalid syscall in -calls flag: '%v", id)) } calls = append(calls, sys.Calls[n]) } } corpusCover = make([]cover.Cover, sys.CallCount) maxCover = make([]cover.Cover, sys.CallCount) corpusHashes = make(map[Sig]struct{}) conn, err := rpc.Dial("tcp", *flagManager) if err != nil { panic(err) } manager = conn a := &ManagerConnectArgs{*flagName} r := &ManagerConnectRes{} if err := manager.Call("Manager.Connect", a, r); err != nil { panic(err) } ct = prog.BuildChoiceTable(r.Prios, calls) flags := ipc.FlagCover | ipc.FlagDedupCover if *flagStrace { flags |= ipc.FlagStrace } env, err := ipc.MakeEnv(*flagExecutor, 4*time.Second, flags) if err != nil { panic(err) } rs := rand.NewSource(time.Now().UnixNano()) rnd := rand.New(rs) var lastPoll time.Time var lastPrint time.Time for i := 0; ; i++ { if !*flagSaveProg && time.Since(lastPrint) > 10*time.Second { // Keep-alive for manager. 
logf(0, "#%v: alive", i) lastPrint = time.Now() } if len(triage) != 0 { last := len(triage) - 1 inp := triage[last] triage = triage[:last] logf(1, "#%v: triaging : %s", i, inp.p) triageInput(env, inp) continue } if time.Since(lastPoll) > 10*time.Second { a := &ManagerPollArgs{*flagName} r := &ManagerPollRes{} if err := manager.Call("Manager.Poll", a, r); err != nil { panic(err) } for _, inp := range r.NewInputs { addInput(inp) } for _, data := range r.Candidates { p, err := prog.Deserialize(data) if err != nil { panic(err) } execute(env, p) } if len(r.NewInputs) == 0 && len(r.Candidates) == 0 { lastPoll = time.Now() } continue } if len(corpus) == 0 || i%10 == 0 { p := prog.Generate(rnd, programLength, ct) logf(1, "#%v: generated: %s", i, p) execute(env, p) p.Mutate(rnd, programLength, ct) logf(1, "#%v: mutated: %s", i, p) execute(env, p) } else { inp := corpus[rnd.Intn(len(corpus))] p := inp.p.Clone() p.Mutate(rs, programLength, ct) logf(1, "#%v: mutated: %s <- %s", i, p, inp.p) execute(env, p) } } }
func main() { debug.SetGCPercent(50) flag.Parse() logf(0, "started") var calls []*sys.Call if *flagSyscalls != "" { for _, id := range strings.Split(*flagSyscalls, ",") { n, err := strconv.ParseUint(id, 10, 64) if err != nil || n >= uint64(len(sys.Calls)) { panic(fmt.Sprintf("invalid syscall in -calls flag: '%v", id)) } calls = append(calls, sys.Calls[n]) } } corpusCover = make([]cover.Cover, sys.CallCount) maxCover = make([]cover.Cover, sys.CallCount) corpusHashes = make(map[Sig]struct{}) conn, err := rpc.Dial("tcp", *flagManager) if err != nil { panic(err) } manager = conn a := &ManagerConnectArgs{*flagName} r := &ManagerConnectRes{} if err := manager.Call("Manager.Connect", a, r); err != nil { panic(err) } ct = prog.BuildChoiceTable(r.Prios, calls) if *flagParallel <= 0 { *flagParallel = 1 } flags := ipc.FlagCover | ipc.FlagDedupCover if *flagStrace { flags |= ipc.FlagStrace } workerIn = make(chan *prog.Prog, *flagParallel+10) workerOut = make(chan []Input, *flagParallel) for i := 0; i < *flagParallel; i++ { env, err := ipc.MakeEnv(*flagExecutor, 4*time.Second, flags) if err != nil { panic(err) } workerId := i + 1 go func() { for p := range workerIn { workerOut <- execute(env, p, workerId) } }() } env, err := ipc.MakeEnv(*flagExecutor, 4*time.Second, flags) if err != nil { panic(err) } rs := rand.NewSource(time.Now().UnixNano()) rnd := rand.New(rs) var lastPoll time.Time var lastPrint time.Time secondTicker := time.NewTicker(100 * time.Millisecond).C for i := 0; ; i++ { if !*flagSaveProg && time.Since(lastPrint) > 10*time.Second { // Keep-alive for manager. 
logf(0, "#%v: alive", i) lastPrint = time.Now() } if len(triage) != 0 { last := len(triage) - 1 inp := triage[last] triage = triage[:last] logf(1, "#%v: triaging : %s", i, inp.p) triageInput(env, inp) continue } if time.Since(lastPoll) > 10*time.Second { a := &ManagerPollArgs{*flagName} r := &ManagerPollRes{} if err := manager.Call("Manager.Poll", a, r); err != nil { panic(err) } for _, inp := range r.NewInputs { addInput(inp) } for _, data := range r.Candidates { p, err := prog.Deserialize(data) if err != nil { panic(err) } inputs := execute(env, p, 0) for _, inp := range inputs { call := inp.p.Calls[inp.call].Meta maxCover[call.CallID] = cover.Union(maxCover[call.CallID], inp.cover) triage = append(triage, inp) } } if len(r.NewInputs) == 0 && len(r.Candidates) == 0 { lastPoll = time.Now() } continue } // Parallel part. pending := 0 for ; ; i++ { if !(!*flagSaveProg && time.Since(lastPrint) > 10*time.Second) && !(len(triage) != 0) && !(time.Since(lastPoll) > 10*time.Second) { // No need to do any work above. // Send new inputs to workers, if they need some. for len(workerIn) < *flagParallel { if len(corpus) == 0 || i%10 == 0 { p := prog.Generate(rnd, programLength, ct) logf(1, "#%v: generated: %s", i, p) workerIn <- p pending++ p = p.Clone() p.Mutate(rnd, programLength, ct) logf(1, "#%v: mutated: %s", i, p) workerIn <- p pending++ } else { inp := corpus[rnd.Intn(len(corpus))] p := inp.p.Clone() p.Mutate(rs, programLength, ct) logf(1, "#%v: mutated: %s <- %s", i, p, inp.p) workerIn <- p pending++ } } } else if pending == 0 { // Need to do some work above. // Break if collected all pending results. break } // Collect results. select { case inputs := <-workerOut: pending-- for _, inp := range inputs { triage = append(triage, inp) } case <-secondTicker: } } // Do this after the parallel section because workers access maxCover. for _, inp := range triage { call := inp.p.Calls[inp.call].Meta maxCover[call.CallID] = cover.Union(maxCover[call.CallID], inp.cover) } } }