func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}
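The three states (unlocked, locked, sleeping) let unlock skip the futex syscall entirely on the uncontended path. Below is a minimal user-space sketch of the same protocol, assuming a buffered channel as a stand-in for futexsleep/futexwakeup; the constant and method names are mine, and the active-spin phase and m.locks bookkeeping are omitted. It is an illustration of the state machine, not the runtime's implementation.

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

const (
	mutexUnlocked = 0
	mutexLocked   = 1
	mutexSleeping = 2
)

// futexMutex mimics the runtime's three-state mutex. The buffered
// channel plays the role of the kernel futex wait queue.
type futexMutex struct {
	key  uint32
	wake chan struct{}
}

func newFutexMutex() *futexMutex {
	return &futexMutex{wake: make(chan struct{}, 1)}
}

func (m *futexMutex) lock() {
	// Speculative grab, as in the runtime's lock().
	v := atomic.SwapUint32(&m.key, mutexLocked)
	if v == mutexUnlocked {
		return
	}
	// If the swap displaced mutexSleeping, keep publishing it so the
	// sleeping waiter is not forgotten.
	wait := v
	for {
		// Passive spin, standing in for procyield/osyield.
		for i := 0; i < 4; i++ {
			if atomic.CompareAndSwapUint32(&m.key, mutexUnlocked, wait) {
				return
			}
			runtime.Gosched()
		}
		// Publish mutexSleeping so unlock() knows to wake someone,
		// then sleep (futexsleep in the real runtime).
		if atomic.SwapUint32(&m.key, mutexSleeping) == mutexUnlocked {
			return
		}
		wait = mutexSleeping
		<-m.wake
	}
}

func (m *futexMutex) unlock() {
	v := atomic.SwapUint32(&m.key, mutexUnlocked)
	if v == mutexUnlocked {
		panic("unlock of unlocked lock")
	}
	if v == mutexSleeping {
		select {
		case m.wake <- struct{}{}: // futexwakeup(&m.key, 1)
		default: // a wakeup is already buffered
		}
	}
}

func main() {
	m := newFutexMutex()
	var wg sync.WaitGroup
	counter := 0
	for g := 0; g < 8; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				m.lock()
				counter++
				m.unlock()
			}
		}()
	}
	wg.Wait()
	fmt.Println(counter) // always 8000 if the lock is correct
}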
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//go:linkname signal_recv os/signal.signal_recv
func signal_recv() uint32 {
	for {
		// Serve any signals from local copy.
		for i := uint32(0); i < _NSIG; i++ {
			if sig.recv[i/32]&(1<<(i&31)) != 0 {
				sig.recv[i/32] &^= 1 << (i & 31)
				return i
			}
		}

		// Wait for updates to be available from signal sender.
	Receive:
		for {
			switch atomic.Load(&sig.state) {
			default:
				throw("signal_recv: inconsistent state")
			case sigIdle:
				if atomic.Cas(&sig.state, sigIdle, sigReceiving) {
					notetsleepg(&sig.note, -1)
					noteclear(&sig.note)
					break Receive
				}
			case sigSending:
				if atomic.Cas(&sig.state, sigSending, sigIdle) {
					break Receive
				}
			}
		}

		// Incorporate updates from sender into local copy.
		for i := range sig.mask {
			sig.recv[i] = atomic.Xchg(&sig.mask[i], 0)
		}
	}
}
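The scan over sig.recv packs one pending bit per signal number, 32 signals per uint32 word, so signal i lives at word i/32, bit i&31. A minimal, self-contained illustration of that indexing (the helper names are mine, not the runtime's, which manipulates the bits inline as shown above):

package main

import "fmt"

// pending mirrors sig.recv: one bit per signal number,
// packed 32 signals per uint32 word.
var pending [3]uint32 // room for 96 signals; sized like a small _NSIG

func set(i uint32)        { pending[i/32] |= 1 << (i & 31) }
func unset(i uint32)      { pending[i/32] &^= 1 << (i & 31) }
func isSet(i uint32) bool { return pending[i/32]&(1<<(i&31)) != 0 }

func main() {
	set(34)                // e.g. a real-time signal number
	fmt.Println(isSet(34)) // true
	unset(34)
	fmt.Println(isSet(34)) // false
}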
func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}
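A note is a one-shot event: after noteclear, exactly one notewakeup is permitted, and sleepers block until it arrives. A channel-based analogue, assuming close as a stand-in for futexwakeup (the runtime instead stores 1 into n.key and wakes futex sleepers on it):

package main

import "sync/atomic"

type note struct {
	key  uint32
	done chan struct{}
}

func newNote() *note { return &note{done: make(chan struct{})} }

func (n *note) wakeup() {
	if old := atomic.SwapUint32(&n.key, 1); old != 0 {
		panic("notewakeup - double wakeup") // same guard as the runtime's throw
	}
	close(n.done) // stands in for futexwakeup(&n.key, 1)
}

func (n *note) sleep() { <-n.done } // stands in for futexsleep on n.key

func main() {
	n := newNote()
	go n.wakeup()
	n.sleep() // returns once wakeup has run
}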
// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	c := atomic.Xchg(&extraMWaiters, 0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else {
		// Make sure there is at least one extra M.
		mp := lockextra(true)
		unlockextra(mp)
		if mp == nil {
			oneNewExtraM()
		}
	}
}
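The Xchg on extraMWaiters drains the whole batch of waiters in one atomic swap: concurrent waiters can keep incrementing without a lock, and no request is served twice or dropped. A minimal sketch of the same claim-by-swap pattern (names here are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
)

// waiters plays the role of extraMWaiters: callers that need an
// extra M increment it; the producer claims the whole batch at once.
var waiters uint32

func requestExtra() { atomic.AddUint32(&waiters, 1) }

// drain atomically takes ownership of every pending request; a
// concurrent requestExtra lands in the next batch instead of being
// lost or double-served.
func drain() uint32 { return atomic.SwapUint32(&waiters, 0) }

func main() {
	requestExtra()
	requestExtra()
	fmt.Println(drain()) // 2: create exactly this many
	fmt.Println(drain()) // 0: nothing pending
}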
func unlock(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt {
		// restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}
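Note the bookkeeping after the wakeup: while gp.m.locks is nonzero the scheduler treats the thread as non-preemptible, so a goroutine is never descheduled while holding a runtime lock. Preemption requests arrive by poisoning gp.stackguard0 with stackPreempt, which diverts the next function-prologue stack check into newstack; newstack declines the request while locks are held and clears the poison, so the final branch here re-arms it once the count drops back to zero.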