// unlock releases the mutex and wakes any waiter parked on its state word.
//
// It panics if the mutex is not currently locked. The decrement is done
// with an atomic add of ^uintptr(0) (i.e. -1); the non-atomic read of
// m.state above it computes the expected post-unlock value.
func (m *Mutex) unlock() {
	// Expected state after a successful unlock: current state with the
	// low (locked) bit cleared. NOTE(review): this read is non-atomic;
	// presumably only the lock holder mutates m.state here — confirm
	// against the lock/trylock implementations.
	unlocked := m.state &^ 1
	// Ensure all writes made inside the critical section are visible
	// before the lock word is released.
	barrier.Memory()
	// Atomically decrement the state (subtract 1 via two's-complement -1).
	// If the result is not the value we computed, the locked bit was not
	// set — the caller is unlocking an unlocked mutex.
	if atomic.AddUintptr(&m.state, ^uintptr(0)) != unlocked {
		panic("sync: unlock of unlocked mutex")
	}
	// Wake goroutines/tasks waiting on the event associated with the
	// unlocked state value. NOTE(review): noos.Event semantics assumed
	// from the emgo-style runtime — verify.
	noos.Event(unlocked).Send()
}
func BenchmarkPool(b *testing.B) { var p Pool var wg WaitGroup n0 := uintptr(b.N) n := n0 for i := 0; i < runtime.GOMAXPROCS(0); i++ { wg.Add(1) go func() { defer wg.Done() for atomic.AddUintptr(&n, ^uintptr(0)) < n0 { for b := 0; b < 100; b++ { p.Put(1) p.Get() } } }() } wg.Wait() }
// Signal, to be called after every operation that can un-wait a block, awakens
// all block waiters.
func (b *Block) Signal() {
	// Fast path: nobody is waiting, so there is nothing to wake.
	if atomic.LoadInt32(&b.waiters) == 0 {
		return
	}
	// We either get the lock, wait in pending state until we get the lock,
	// or return because somebody else is in a pending state.
	//
	// The logic for having only _one_ pending wait is as follows:
	//
	//   - Prime calls can observe the counter either before an increment or
	//     directly after an increment. We must keep a pending signal because
	//     a prime call observed after an active increment will return on
	//     Wait from the pending signal.
	//
	//   - We only need one pending signal because all active prime
	//     calls will, at worst, observe the actively signaling counter. That
	//     counter will be incremented by the pending signal.
	//
	//   - All future signals can be collapsed into one pending signal which
	//     will wake everything that race read the actively signaling counter.
	//
	//   - One pending signal is the same as pathologically racing all
	//     signals in front of any active Prime calls continuing to their
	//     Wait. That is, having one pending signal is the _worst case_ of
	//     processing all signals consecutively.
	//
	// In summary, anything that called Prime by _now_ cares about either
	// this signal or one pending signal. Eliding _all_ signals into one
	// pending signal is the _same_ as having all signals race finishing
	// immediately before any future Prime call, which would be the worst
	// case scenario from a signaling perspective.
	//
	// NOTE(review): TryLock here is paired with WUnlock below — presumably
	// b.lock's TryLock acquires the write side; confirm against the lock
	// type's definition.
	if !b.lock.TryLock() {
		// Another Signal holds (or is pending on) the lock; by the argument
		// above, its eventual increment covers this signal too.
		return
	}
	// Publish the signal: bump the generation counter, release the lock,
	// then wake every waiter parked on the condition variable.
	atomic.AddUintptr(&b.counter, 1)
	b.lock.WUnlock()
	b.cond.Broadcast()
}
// AddUintptr atomically adds delta to the value at addr and returns the
// resulting (new) value. It is a thin forwarding shim over the underlying
// atomic implementation.
func AddUintptr(addr *uintptr, delta uintptr) uintptr {
	updated := orig.AddUintptr(addr, delta)
	return updated
}
// nextID hands out a unique, monotonically increasing iterator ID.
// The atomic increment returns the post-add value, so subtracting one
// yields the value the counter held when this call claimed it — IDs
// therefore start at the counter's initial value.
func nextID() uintptr {
	claimed := atomic.AddUintptr(&nextIteratorID, 1)
	return claimed - 1
}