Example no. 1
// ReadMeter returns the current count and resets the counter's value to 0. The
// returned value is normalized using the given delta to ensure that the value
// always represents a per second value.
func (counter *Counter) ReadMeter(delta time.Duration) map[string]float64 {
	result := make(map[string]float64)

	if value := atomic.SwapUint64(&counter.value, 0); value > 0 {
		result[""] = float64(value) * (float64(time.Second) / float64(delta))
	}

	return result
}
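A minimal usage sketch for a counter like this one, assuming the Counter struct wraps a single uint64 value field and gains a hypothetical Add helper (uses sync/atomic, time, and log from the standard library):

// Add is an assumed increment helper for the Counter above.
func (counter *Counter) Add(n uint64) {
	atomic.AddUint64(&counter.value, n)
}

// reportLoop reads and resets the counter once per interval; because
// ReadMeter swaps the value to zero, each window yields a per-second rate.
func reportLoop(counter *Counter, interval time.Duration) {
	for range time.Tick(interval) {
		for name, perSecond := range counter.ReadMeter(interval) {
			log.Printf("%s: %.2f/s", name, perSecond)
		}
	}
}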
Example no. 2
func (rs *RateStat) manage() {
	for range time.Tick(rs.interval) {
		bucket := atomic.LoadInt32(&rs.curBucket)
		bucket++
		if bucket >= rs.length {
			bucket = 0
		}
		old := atomic.SwapUint64(&rs.Buckets[bucket], 0)
		atomic.StoreInt32(&rs.curBucket, bucket)
		atomic.AddUint64(&rs.curValue, -old) // -old wraps around (unsigned), subtracting old from the window total
	}
}
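The manage loop above only rotates the ring of buckets; a write and read side consistent with the same fields might look like this sketch (Incr and Value are assumed names, not taken from the original project):

// Incr adds n to the current bucket and to the running window total.
func (rs *RateStat) Incr(n uint64) {
	bucket := atomic.LoadInt32(&rs.curBucket)
	atomic.AddUint64(&rs.Buckets[bucket], n)
	atomic.AddUint64(&rs.curValue, n)
}

// Value returns the count accumulated over the whole window.
func (rs *RateStat) Value() uint64 {
	return atomic.LoadUint64(&rs.curValue)
}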
Example no. 3
// Tick the moving average
func (e *EWMA) Tick() {
	// Assume Tick is never called concurrently
	count := atomic.SwapUint64(&e.uncounted, 0)
	instantRate := float64(count) / e.interval.Seconds()
	rate := e.Rate()
	if e.initialized {
		rate += e.alpha * (instantRate - rate)
	} else {
		rate = instantRate
		e.initialized = true
	}
	atomic.StoreUint64(&e.rate, math.Float64bits(rate))
}
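Tick stores the rate as raw float bits, so the read side has to decode them with math.Float64frombits. A sketch of the counterpart methods, inferred from the calls and fields above rather than taken from the original:

// Rate returns the moving average by decoding the bits stored in Tick.
func (e *EWMA) Rate() float64 {
	return math.Float64frombits(atomic.LoadUint64(&e.rate))
}

// Update records events that the next Tick folds into the average.
func (e *EWMA) Update(n uint64) {
	atomic.AddUint64(&e.uncounted, n)
}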
Example no. 4
func main() {
	count := uint64(0)
	tick := time.NewTicker(time.Second)
	http.ListenAndServe(":6060", http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			select {
			case <-tick.C:
				log.Printf("Rate: %d", atomic.SwapUint64(&count, 0))
			default:
				atomic.AddUint64(&count, 1)
			}
		},
	))
}
Example no. 5
File: download.go Project: mm3/Sia
// Download downloads a file, identified by its nickname, to the destination
// specified.
func (r *Renter) Download(nickname, destination string) error {
	lockID := r.mu.Lock()
	// Lookup the file associated with the nickname.
	file, exists := r.files[nickname]
	if !exists {
		r.mu.Unlock(lockID)
		return errors.New("no file of that nickname")
	}

	// Create the download object and spawn the download process.
	d, err := newDownload(file, destination)
	if err != nil {
		r.mu.Unlock(lockID)
		return err
	}

	// Add the download to the download queue.
	r.downloadQueue = append(r.downloadQueue, d)
	r.mu.Unlock(lockID)

	// Download the file. We only need one piece, so iterate through the hosts
	// until a download succeeds.
	for i := 0; i < downloadAttempts; i++ {
		for _, piece := range d.pieces {
			downloadErr := d.downloadPiece(piece)
			if downloadErr == nil {
				// done
				d.complete = true
				d.file.Close()
				return nil
			}
			// Reset seek, since the file may have been partially written. The
			// next attempt will overwrite these bytes.
			d.file.Seek(0, 0)
			atomic.SwapUint64(&d.received, 0)
		}

		// This iteration failed, no hosts returned the piece. Try again
		// after waiting a random amount of time.
		randSource := make([]byte, 1)
		rand.Read(randSource)
		time.Sleep(time.Second * time.Duration(i*i) * time.Duration(randSource[0]))
	}

	// File could not be downloaded; delete the copy on disk.
	d.file.Close()
	os.Remove(destination)

	return errors.New("could not download any file pieces")
}
Example no. 6
func (q *messageQueue) logMetrics() {
	for range time.Tick(time.Second) {
		// Die with the handler
		if !q.running {
			break
		}

		// If this is the metrics queue - don't log metrics
		if q.Name == metricsQueueName {
			break
		}

		// Send various metrics
		currentMessageRate := atomic.SwapUint64(&q.messagesSentLastSecond, 0)

		q.metrics <- &Metric{Name: q.Name + ".messagerate", Value: int64(currentMessageRate), Type: "counter"}
		q.doLogGuages()
	}
}
Example no. 7
func (s *Scheme) startReporter() {
	s.done.Add(1)

	go func() {
		defer s.done.Done()

		ticker := time.NewTicker(s.reportCycle)
		defer ticker.Stop()
		for {
			<-ticker.C
			if s.isClosed() {
				break
			}

			report := Report{}
			report.MessageCount = atomic.SwapUint32(&s.messageCount, 0)
			report.ByteCount = atomic.SwapUint64(&s.byteCount, 0)
			report.ErrorCount = atomic.SwapUint32(&s.errorCount, 0)
			s.reporter.Report(report)
		}
	}()
}
Example no. 8
func (p *Pool) calibrate() {
	if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
		return
	}

	a := make(callSizes, 0, steps)
	var callsSum uint64
	for i := uint64(0); i < steps; i++ {
		calls := atomic.SwapUint64(&p.calls[i], 0)
		callsSum += calls
		a = append(a, callSize{
			calls: calls,
			size:  minSize << i,
		})
	}
	sort.Sort(a)

	defaultSize := a[0].size
	maxSize := defaultSize

	maxSum := uint64(float64(callsSum) * maxPercentile)
	callsSum = 0
	for i := 0; i < steps; i++ {
		if callsSum > maxSum {
			break
		}
		callsSum += a[i].calls
		size := a[i].size
		if size > maxSize {
			maxSize = size
		}
	}

	atomic.StoreUint64(&p.defaultSize, defaultSize)
	atomic.StoreUint64(&p.maxSize, maxSize)

	atomic.StoreUint64(&p.calibrating, 0)
}
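The CompareAndSwapUint64 at the top is a single-flight guard: whichever goroutine wins the 0-to-1 swap runs the calibration, everyone else returns immediately. The same pattern in isolation, as a sketch (doExpensiveWork is a placeholder, and defer is used here instead of the explicit StoreUint64 above):

var busy uint64

func doExpensiveWork() {
	// placeholder for the guarded work, e.g. the calibration above
}

func maybeDo() {
	if !atomic.CompareAndSwapUint64(&busy, 0, 1) {
		return // another goroutine is already doing the work
	}
	defer atomic.StoreUint64(&busy, 0)
	doExpensiveWork()
}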
Example no. 9
// Limit returns true if rate was exceeded
func (rl *RateLimiter) Limit() bool {
	// Calculate the number of ns that have passed since our last call
	now := unixNano()
	passed := now - atomic.SwapUint64(&rl.lastCheck, now)

	// Add them to our allowance
	current := atomic.AddUint64(&rl.allowance, passed*rl.rate)

	// Ensure our allowance is not over maximum
	if current > rl.max {
		atomic.AddUint64(&rl.allowance, rl.max-current)
		current = rl.max
	}

	// If our allowance is less than one unit, rate-limit!
	if current < rl.unit {
		return true
	}

	// Not limited, subtract a unit
	atomic.AddUint64(&rl.allowance, -rl.unit)
	return false
}
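The field names imply a token bucket measured in nanoseconds: allowance grows by rate per elapsed nanosecond, is capped at max, and every permitted call costs unit. One constructor consistent with that arithmetic is sketched below; the struct layout and NewRateLimiter are assumptions, not the original definitions:

// RateLimiter fields inferred from the Limit method above.
type RateLimiter struct {
	rate      uint64 // allowance gained per nanosecond
	max       uint64 // ceiling on the stored allowance (burst size)
	unit      uint64 // cost of one permitted call
	lastCheck uint64 // unix nanoseconds of the previous Limit call
	allowance uint64
}

func unixNano() uint64 { return uint64(time.Now().UnixNano()) }

// NewRateLimiter allows roughly limit calls per window, with bursts up to limit.
func NewRateLimiter(limit uint64, window time.Duration) *RateLimiter {
	unit := uint64(window.Nanoseconds())
	return &RateLimiter{
		rate:      limit,
		max:       limit * unit,
		unit:      unit,
		lastCheck: unixNano(),
		allowance: limit * unit, // start with a full burst
	}
}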
Example no. 10
func (m *Monitor) monitor() {
	m.t0 = time.Now()
	for {
		select {
		case <-m.exit:
			return
		default:
			// Use default and sleep instead of
			// a time.After case because extra
			// thread switching under heavy loads
			// makes a big performance difference.
			time.Sleep(m.period)

			// In case we missed an exit command
			// while we were sleeping; this technically
			// wouldn't invalidate the semantics,
			// but it'd still be dumb to unnecessarily
			// be doing stuff hundreds of milliseconds
			// after we were told to stop.
			select {
			case <-m.exit:
				return
			default:
			}

			t1 := time.Now()
			delta := t1.Sub(m.t0)
			m.t0 = t1

			nn := atomic.SwapUint64(&m.nn, 0)
			m.n += nn

			rate := float64(nn) / delta.Seconds()
			m.f(Rate{m.n, rate})
		}
	}
}
Example no. 11
// Swap atomically swaps the wrapped uint64 and returns the old value.
func (i *Uint64) Swap(n uint64) uint64 {
	return atomic.SwapUint64(&i.v, n)
}
Example no. 12
func SwapUint64(addr *AlignedUint64, new uint64) uint64 {
	return orig.SwapUint64(&(addr.data), new)
}
Example no. 13
func (self *CoordinatorMonitor) GetWrites() uint64 {
	return atomic.SwapUint64(&self.pointsWritten, 0)
}
Example no. 14
func (mc *Conn) submitTraffic() {
	mc.m.submitTraffic(mc.ID,
		atomic.SwapUint64(&mc.BytesIn, 0),
		atomic.SwapUint64(&mc.BytesOut, 0))
}
Example no. 15
func updateTime() {
	c := time.Tick(100 * time.Millisecond)
	for now := range c {
		atomic.SwapUint64(&unixTime, uint64(now.Unix()))
	}
}
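The loop above maintains a coarse, lock-free clock: readers fetch the cached value with an atomic load instead of calling time.Now on every request. A read-side sketch (the unixTime declaration and the now helper are assumptions):

var unixTime uint64 // written by updateTime every 100ms

// now returns the cached unix time, at most about 100ms stale.
func now() uint64 {
	return atomic.LoadUint64(&unixTime)
}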
Example no. 16
func (e *eventStream) Enqueue(val uint64) {

	var ridx uint64
	var widx uint64
	var idx uint64

	// First get a slot
	widx = atomic.AddUint64(&(e.widx), 1)
	widx-- // back up to get our reserved slot
	idx = widx & bufferMask

	var try int
	// we now have widx holding the index we intend to write
	// Then write the data
	for {
		try++
		// Where's the reader? Don't overtake it.
		ridx = atomic.LoadUint64(&e.ridx)

		diff := widx - ridx // unsigned arithmetic should work
		if diff < bufferSize {
			// We have not caught up
			e.slots[idx].val = val
			// mark the slot written
			oldmark := atomic.SwapUint64(&(e.slots[idx].seq), widx)

			// test to see if someone was waiting for that mark
			if oldmark != widx-bufferSize {
				// ensure we don't signal before the waiter waits
				e.slots[idx].mu.Lock()
				e.flusher.FlushMeter(e)
				e.slots[idx].cv.Broadcast() // wake up time
				e.slots[idx].mu.Unlock()
			}
			break
		}

		if try == 1 {
			// try again
			runtime.Gosched()
			continue
		}
		// at the time we read ridx, we could not proceed. That may have changed however, so
		// we need to make an atomic operation which:
		// 1) Decides whether to go to sleep and wait for our slot to be ready.
		// 2) Informs the writer of the slot that we want to be woken.

		// The slot we are waiting for has a sequence 1 bufferSize back from ridx,
		// if it's still not ready

		oldmark := ridx - bufferSize

		idx2 := ridx & bufferMask // from here on we look at the stale read index.
		e.slots[idx2].mu.Lock()
		// Try to skew the mark to indicate we're waiting
		mustwait := atomic.CompareAndSwapUint64(&(e.slots[idx2].seq), oldmark, oldmark+1)
		if mustwait {
			// We have now at the same time determined that the slot is not ready
			// and set it to indicate that whoever writes it must signal us.

			e.slots[idx2].cv.Wait()

		} else {
			// Ok... so the slot is not just "old". It has either been updated
			// to current, or someone else has skewed the mark and a signal will
			// be sent to waiters. Find out whether to join the waiters or just try again.
			actualmark := atomic.LoadUint64(&(e.slots[idx2].seq))
			if actualmark == oldmark+1 {
				// skewed - join the waiters.
				e.slots[idx2].cv.Wait()
			} else { // stuff can happen fast
				// The slot was actually up to date - so advance the reader
				e.flusher.FlushMeter(e)
			}
		}
		e.slots[idx2].mu.Unlock()

	}
}
Example no. 17
// Swap atomically stores the new value and returns the old one.
func (a *Uint64) Swap(v uint64) uint64 {
	return atomic.SwapUint64(&a.x, v)
}
Example no. 18
func (c *Counter) Reset() uint64 {
	return atomic.SwapUint64(&c.value, 0)
}
Example no. 19
func main() {
	debug.SetGCPercent(50)
	flag.Parse()
	switch *flagOutput {
	case "none", "stdout", "dmesg", "file":
	default:
		fmt.Fprintf(os.Stderr, "-output flag must be one of none/stdout/dmesg/file\n")
		os.Exit(1)
	}
	Logf(0, "fuzzer started")

	go func() {
		// Handles graceful preemption on GCE.
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		<-c
		Logf(0, "SYZ-FUZZER: PREEMPTED")
		os.Exit(1)
	}()

	corpusCover = make([]cover.Cover, sys.CallCount)
	maxCover = make([]cover.Cover, sys.CallCount)
	corpusHashes = make(map[Sig]struct{})

	Logf(0, "dialing manager at %v", *flagManager)
	conn, err := jsonrpc.Dial("tcp", *flagManager)
	if err != nil {
		panic(err)
	}
	manager = conn
	a := &ConnectArgs{*flagName}
	r := &ConnectRes{}
	if err := manager.Call("Manager.Connect", a, r); err != nil {
		panic(err)
	}
	calls := buildCallList(r.EnabledCalls)
	ct := prog.BuildChoiceTable(r.Prios, calls)

	if r.NeedCheck {
		a := &CheckArgs{Name: *flagName}
		if fd, err := syscall.Open("/sys/kernel/debug/kcov", syscall.O_RDWR, 0); err == nil {
			syscall.Close(fd)
			a.Kcov = true
		}
		for c := range calls {
			a.Calls = append(a.Calls, c.Name)
		}
		if err := manager.Call("Manager.Check", a, nil); err != nil {
			panic(err)
		}
	}

	kmemleakInit()

	flags, timeout, err := ipc.DefaultFlags()
	if err != nil {
		panic(err)
	}
	if _, ok := calls[sys.CallMap["syz_emit_ethernet"]]; ok {
		flags |= ipc.FlagEnableTun
	}
	noCover = flags&ipc.FlagCover == 0
	leakCallback := func() {
		if atomic.LoadUint32(&allTriaged) != 0 {
			// Scan for leaks once in a while (it is damn slow).
			kmemleakScan(true)
		}
	}
	if !*flagLeak {
		leakCallback = nil
	}
	gate = ipc.NewGate(2**flagProcs, leakCallback)
	needPoll := make(chan struct{}, 1)
	needPoll <- struct{}{}
	envs := make([]*ipc.Env, *flagProcs)
	for pid := 0; pid < *flagProcs; pid++ {
		env, err := ipc.MakeEnv(*flagExecutor, timeout, flags, pid)
		if err != nil {
			panic(err)
		}
		envs[pid] = env

		pid := pid
		go func() {
			rs := rand.NewSource(time.Now().UnixNano() + int64(pid)*1e12)
			rnd := rand.New(rs)

			for i := 0; ; i++ {
				triageMu.RLock()
				if len(triage) != 0 || len(candidates) != 0 {
					triageMu.RUnlock()
					triageMu.Lock()
					if len(triage) != 0 {
						last := len(triage) - 1
						inp := triage[last]
						triage = triage[:last]
						wakePoll := len(triage) < *flagProcs
						triageMu.Unlock()
						if wakePoll {
							select {
							case needPoll <- struct{}{}:
							default:
							}
						}
						Logf(1, "triaging : %s", inp.p)
						triageInput(pid, env, inp)
						continue
					} else if len(candidates) != 0 {
						last := len(candidates) - 1
						p := candidates[last]
						candidates = candidates[:last]
						triageMu.Unlock()
						execute(pid, env, p, &statExecCandidate)
						continue
					} else {
						triageMu.Unlock()
					}
				} else {
					triageMu.RUnlock()
				}

				corpusMu.RLock()
				if len(corpus) == 0 || i%10 == 0 {
					// Generate a new prog.
					corpusMu.RUnlock()
					p := prog.Generate(rnd, programLength, ct)
					Logf(1, "#%v: generated: %s", i, p)
					execute(pid, env, p, &statExecGen)
					p.Mutate(rnd, programLength, ct, nil)
					Logf(1, "#%v: mutated: %s", i, p)
					execute(pid, env, p, &statExecFuzz)
				} else {
					// Mutate an existing prog.
					p0 := corpus[rnd.Intn(len(corpus))]
					p := p0.Clone()
					p.Mutate(rs, programLength, ct, corpus)
					corpusMu.RUnlock()
					Logf(1, "#%v: mutated: %s <- %s", i, p, p0)
					execute(pid, env, p, &statExecFuzz)
				}
			}
		}()
	}

	var lastPoll time.Time
	var lastPrint time.Time
	ticker := time.NewTicker(3 * time.Second).C
	for {
		poll := false
		select {
		case <-ticker:
		case <-needPoll:
			poll = true
		}
		if *flagOutput != "stdout" && time.Since(lastPrint) > 10*time.Second {
			// Keep-alive for manager.
			Logf(0, "alive")
			lastPrint = time.Now()
		}
		if poll || time.Since(lastPoll) > 10*time.Second {
			triageMu.RLock()
			if len(candidates) > *flagProcs {
				triageMu.RUnlock()
				continue
			}
			triageMu.RUnlock()

			a := &PollArgs{
				Name:  *flagName,
				Stats: make(map[string]uint64),
			}
			for _, env := range envs {
				a.Stats["exec total"] += atomic.SwapUint64(&env.StatExecs, 0)
				a.Stats["executor restarts"] += atomic.SwapUint64(&env.StatRestarts, 0)
			}
			a.Stats["exec gen"] = atomic.SwapUint64(&statExecGen, 0)
			a.Stats["exec fuzz"] = atomic.SwapUint64(&statExecFuzz, 0)
			a.Stats["exec candidate"] = atomic.SwapUint64(&statExecCandidate, 0)
			a.Stats["exec triage"] = atomic.SwapUint64(&statExecTriage, 0)
			a.Stats["exec minimize"] = atomic.SwapUint64(&statExecMinimize, 0)
			a.Stats["fuzzer new inputs"] = atomic.SwapUint64(&statNewInput, 0)
			r := &PollRes{}
			if err := manager.Call("Manager.Poll", a, r); err != nil {
				panic(err)
			}
			for _, inp := range r.NewInputs {
				addInput(inp)
			}
			for _, data := range r.Candidates {
				p, err := prog.Deserialize(data)
				if err != nil {
					panic(err)
				}
				if noCover {
					corpusMu.Lock()
					corpus = append(corpus, p)
					corpusMu.Unlock()
				} else {
					triageMu.Lock()
					candidates = append(candidates, p)
					triageMu.Unlock()
				}
			}
			if len(r.Candidates) == 0 && atomic.LoadUint32(&allTriaged) == 0 {
				if *flagLeak {
					kmemleakScan(false)
				}
				atomic.StoreUint32(&allTriaged, 1)
			}
			if len(r.NewInputs) == 0 && len(r.Candidates) == 0 {
				lastPoll = time.Now()
			}
		}
	}
}
Example no. 20
func Collect(backend Backend) {
	// client set
	clientSet := NewClientSet()
	defer clientSet.Close()
	clientSet.Logger = backend.LogClient

	jobsIn, jobsOut, jobsClose := NewJobsChan()
	defer close(jobsClose)
	wg := new(sync.WaitGroup)

	fgcats, err := backend.GetFgCats()
	ce(err, "get fgcats")
	wg.Add(len(fgcats))
	go func() {
		for _, cat := range fgcats {
			jobsIn <- Job{
				Cat:  cat.Cat,
				Page: 0,
			}
		}
	}()

	var jobsDone, itemsCollected, totalJobsDone uint64
	go func() {
		ticker := time.NewTicker(time.Second * 10)
		t0 := time.Now()
		for range ticker.C {
			pt("%d / %d / %d - %v\n", atomic.SwapUint64(&jobsDone, 0),
				atomic.SwapUint64(&itemsCollected, 0),
				atomic.LoadUint64(&totalJobsDone), time.Since(t0))
		}
	}()

	go func() {
		sem := make(chan struct{}, 128)
		for {
			job := <-jobsOut
			sem <- struct{}{}
			go func() {
				defer func() {
					wg.Done()
					atomic.AddUint64(&jobsDone, 1)
					atomic.AddUint64(&totalJobsDone, 1)
					<-sem
				}()
				// check
				if job.Page > 99 {
					return
				}
				if backend.IsCollected(job) {
					wg.Add(1)
					jobsIn <- Job{
						Cat:  job.Cat,
						Page: job.Page + 1,
					}
					return
				}
				// trace
				tc := jobTraceSet.NewTrace(sp("cat %d, page %d", job.Cat, job.Page))
				defer tc.SetFlag("done")
				// collect
				url := sp("http://s.taobao.com/list?cat=%d&sort=sale-desc&bcoffset=0&s=%d", job.Cat, job.Page*60)
				clientSet.Do(func(client *http.Client) ClientState {
					bs, err := getBytes(client, url)
					if err != nil {
						tc.Log(sp("get bytes error %v", err))
						return Bad
					}
					jstr, err := GetPageConfigJson(bs)
					if err != nil {
						tc.Log(sp("get page config error %v", err))
						return Bad
					}
					var config PageConfig
					if err := json.Unmarshal(jstr, &config); err != nil {
						tc.Log(sp("unmarshal page config error %v", err))
						return Bad
					}
					// get items
					if config.Mods["itemlist"].Status == "hide" { // no items
						tc.Log("no items found")
						backend.AddItems([]Item{}, Job{
							Cat:  job.Cat,
							Page: job.Page,
						})
						return Good
					}
					items, err := GetItems(config.Mods["itemlist"].Data)
					if err != nil {
						tc.Log(sp("get items error %v", err))
						return Bad
					}
					// save items
					err = backend.AddItems(items, Job{
						Cat:  job.Cat,
						Page: job.Page,
					})
					ce(err, "save items")
					atomic.AddUint64(&itemsCollected, uint64(len(items)))
					// add next pass cat
					if len(items) > 0 && job.Page < 99 {
						wg.Add(1)
						jobsIn <- Job{
							Cat:  job.Cat,
							Page: job.Page + 1,
						}
					}
					return Good
				})
			}()
		}
	}()

	wg.Wait()
}
Example no. 21
func logStatus() {
	now := time.Now()

	created := atomic.SwapUint64(&notesCreatedCount, 0)
	full := atomic.SwapUint64(&noteStorageFullRequestCount, 0)
	tooLarge := atomic.SwapUint64(&noteTooLargeRequestCount, 0)
	duplicateId := atomic.SwapUint64(&noteDuplicateIdRequestCount, 0)
	opened := atomic.SwapUint64(&notesOpenedCount, 0)
	expired := atomic.SwapUint64(&noteExpiredRequestCount, 0)
	alreadyOpened := atomic.SwapUint64(&noteAlreadyOpenedRequestCount, 0)
	notFound := atomic.SwapUint64(&noteNotFoundCount, 0)
	status := atomic.SwapUint64(&statusRequestCount, 0)
	assets := atomic.SwapUint64(&assetRequestCount, 0)
	total := atomic.SwapUint64(&totalRequestCount, 0)

	requestsPerSecond := float64(total) / now.Sub(lastStatusLogTime).Seconds()

	log.Printf("Requests: total=%d rps=%.6f assets=%d Notes: created=%d opened=%d alreadyOpened=%d expired=%d notFound=%d full=%d tooLarge=%d duplicateId=%d status=%d",
		total,
		requestsPerSecond,
		assets,
		created,
		opened,
		alreadyOpened,
		expired,
		notFound,
		full,
		tooLarge,
		duplicateId,
		status)

	lastStatusLogTime = now
}
Example no. 22
func SwapUint64(addr *AlignedUint64, new uint64) uint64 {
	return orig.SwapUint64((*uint64)(addr), new)
}
Example no. 23
func main() {
	debug.SetGCPercent(50)
	flag.Parse()
	switch *flagOutput {
	case "none", "stdout", "dmesg", "file":
	default:
		fmt.Fprintf(os.Stderr, "-output flag must be one of none/stdout/dmesg/file\n")
		os.Exit(1)
	}
	logf(0, "started")

	corpusCover = make([]cover.Cover, sys.CallCount)
	maxCover = make([]cover.Cover, sys.CallCount)
	corpusHashes = make(map[Sig]struct{})

	logf(0, "dialing manager at %v", *flagManager)
	conn, err := jsonrpc.Dial("tcp", *flagManager)
	if err != nil {
		panic(err)
	}
	manager = conn
	a := &ConnectArgs{*flagName}
	r := &ConnectRes{}
	if err := manager.Call("Manager.Connect", a, r); err != nil {
		panic(err)
	}
	calls := buildCallList(r.EnabledCalls)
	ct := prog.BuildChoiceTable(r.Prios, calls)

	kmemleakInit()

	flags, timeout := ipc.DefaultFlags()
	noCover = flags&ipc.FlagCover == 0
	if !noCover {
		fd, err := syscall.Open("/sys/kernel/debug/kcov", syscall.O_RDWR, 0)
		if err != nil {
			log.Fatalf("BUG: /sys/kernel/debug/kcov is missing (%v). Enable CONFIG_KCOV and mount debugfs.", err)
		}
		syscall.Close(fd)
	}
	gate = ipc.NewGate(2 * *flagProcs)
	envs := make([]*ipc.Env, *flagProcs)
	for pid := 0; pid < *flagProcs; pid++ {
		env, err := ipc.MakeEnv(*flagExecutor, timeout, flags)
		if err != nil {
			panic(err)
		}
		envs[pid] = env

		pid := pid
		go func() {
			rs := rand.NewSource(time.Now().UnixNano() + int64(pid)*1e12)
			rnd := rand.New(rs)

			for i := 0; ; i++ {
				triageMu.RLock()
				if len(triage) != 0 || len(candidates) != 0 {
					triageMu.RUnlock()
					triageMu.Lock()
					if len(triage) != 0 {
						last := len(triage) - 1
						inp := triage[last]
						triage = triage[:last]
						triageMu.Unlock()
						logf(1, "triaging : %s", inp.p)
						triageInput(pid, env, inp)
						continue
					} else if len(candidates) != 0 {
						last := len(candidates) - 1
						p := candidates[last]
						candidates = candidates[:last]
						triageMu.Unlock()
						execute(pid, env, p, &statExecCandidate)
						continue
					} else {
						triageMu.Unlock()
					}
				} else {
					triageMu.RUnlock()
				}

				corpusMu.RLock()
				if len(corpus) == 0 || i%10 == 0 {
					corpusMu.RUnlock()
					p := prog.Generate(rnd, programLength, ct)
					logf(1, "#%v: generated: %s", i, p)
					execute(pid, env, p, &statExecGen)
					p.Mutate(rnd, programLength, ct)
					logf(1, "#%v: mutated: %s", i, p)
					execute(pid, env, p, &statExecFuzz)
				} else {
					p0 := corpus[rnd.Intn(len(corpus))]
					corpusMu.RUnlock()
					p := p0.Clone()
					p.Mutate(rs, programLength, ct)
					logf(1, "#%v: mutated: %s <- %s", i, p, p0)
					execute(pid, env, p, &statExecFuzz)
				}
			}
		}()
	}

	var lastPoll time.Time
	var lastPrint time.Time
	for range time.NewTicker(3 * time.Second).C {
		if *flagOutput != "stdout" && time.Since(lastPrint) > 10*time.Second {
			// Keep-alive for manager.
			logf(0, "alive")
			lastPrint = time.Now()
		}
		if time.Since(lastPoll) > 10*time.Second {
			triageMu.RLock()
			if len(candidates) != 0 {
				triageMu.RUnlock()
				continue
			}
			triageMu.RUnlock()

			a := &PollArgs{
				Name:  *flagName,
				Stats: make(map[string]uint64),
			}
			for _, env := range envs {
				a.Stats["exec total"] += atomic.SwapUint64(&env.StatExecs, 0)
				a.Stats["executor restarts"] += atomic.SwapUint64(&env.StatRestarts, 0)
			}
			a.Stats["exec gen"] = atomic.SwapUint64(&statExecGen, 0)
			a.Stats["exec fuzz"] = atomic.SwapUint64(&statExecFuzz, 0)
			a.Stats["exec candidate"] = atomic.SwapUint64(&statExecCandidate, 0)
			a.Stats["exec triage"] = atomic.SwapUint64(&statExecTriage, 0)
			a.Stats["exec minimize"] = atomic.SwapUint64(&statExecMinimize, 0)
			a.Stats["fuzzer new inputs"] = atomic.SwapUint64(&statNewInput, 0)
			r := &PollRes{}
			if err := manager.Call("Manager.Poll", a, r); err != nil {
				panic(err)
			}
			for _, inp := range r.NewInputs {
				addInput(inp)
			}
			for _, data := range r.Candidates {
				p, err := prog.Deserialize(data)
				if err != nil {
					panic(err)
				}
				if noCover {
					corpusMu.Lock()
					corpus = append(corpus, p)
					corpusMu.Unlock()
				} else {
					triageMu.Lock()
					candidates = append(candidates, p)
					triageMu.Unlock()
				}
			}
			if len(r.Candidates) == 0 {
				if atomic.LoadUint32(&allTriaged) == 0 {
					if *flagLeak {
						kmemleakScan(false)
					}
					atomic.StoreUint32(&allTriaged, 1)
				}
			}
			if len(r.NewInputs) == 0 && len(r.Candidates) == 0 {
				lastPoll = time.Now()
			}
		}
	}
}
Example no. 24
func (this *AtomicUint64) Set(val uint64) uint64 {
	return atomic.SwapUint64((*uint64)(this), val)
}