// HammerSemaphore repeatedly acquires and releases the semaphore s,
// then signals on cdone when all loops iterations have finished.
func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
	for i := 0; i < loops; i++ {
		runtime.Semacquire(s)
		runtime.Semrelease(s)
	}
	cdone <- true
}
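A minimal driver for HammerSemaphore, in the style of the surrounding tests (the test name, goroutine count, and loop count here are illustrative assumptions, not taken from the original):

// TestSemaphore starts several hammering goroutines on one shared
// semaphore, initialized to 1 so exactly one can hold it at a time,
// and waits for all of them to finish.
func TestSemaphore(t *testing.T) {
	s := new(uint32)
	*s = 1
	c := make(chan bool)
	for i := 0; i < 10; i++ {
		go HammerSemaphore(s, 1000, c)
	}
	for i := 0; i < 10; i++ {
		<-c
	}
}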
// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() {
	if xadd(&m.key, 1) == 1 {
		// changed from 0 to 1; we hold lock
		return
	}
	runtime.Semacquire(&m.sema)
}
// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() {
	if atomic.AddInt32(&m.key, 1) == 1 {
		// changed from 0 to 1; we hold lock
		return
	}
	runtime.Semacquire(&m.sema)
}
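The matching Unlock under this counter scheme would look roughly as follows (a sketch: it assumes key counts the holder plus any waiters, as the Lock above implies, and may differ in detail from the original):

// Unlock unlocks m (sketch paired with the Lock above).
func (m *Mutex) Unlock() {
	if atomic.AddInt32(&m.key, -1) == 0 {
		// changed from 1 to 0; no waiters to wake
		return
	}
	runtime.Semrelease(&m.sema)
}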
// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() {
	// Fast path: grab unlocked mutex.
	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
		return
	}

	awoke := false
	for {
		old := m.state
		new := old | mutexLocked
		if old&mutexLocked != 0 {
			new = old + 1<<mutexWaiterShift
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			new &^= mutexWoken
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&mutexLocked == 0 {
				break
			}
			runtime.Semacquire(&m.sema)
			awoke = true
		}
	}
}
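For context, here is a sketch of an Unlock consistent with this state encoding (lock bit, woken bit, waiter count above mutexWaiterShift). It is a plausible counterpart, not necessarily the original code:

// Unlock unlocks m (sketch paired with the state-machine Lock above).
func (m *Mutex) Unlock() {
	// Fast path: drop the lock bit.
	new := atomic.AddInt32(&m.state, -mutexLocked)
	if (new+mutexLocked)&mutexLocked == 0 {
		panic("sync: unlock of unlocked mutex")
	}
	for {
		old := new
		// If there are no waiters, or the mutex is already locked
		// or a waiter has already been woken, there is nothing to do.
		if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken) != 0 {
			return
		}
		// Grab the right to wake exactly one waiter.
		new = (old - 1<<mutexWaiterShift) | mutexWoken
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			runtime.Semrelease(&m.sema)
			return
		}
	}
}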
// BenchmarkSemaUncontended measures acquire/release pairs on a
// private, uncontended semaphore; the padding keeps each goroutine's
// semaphore on its own cache line.
func BenchmarkSemaUncontended(b *testing.B) {
	type PaddedSem struct {
		sem uint32
		pad [32]uint32
	}
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, procs)
	for p := 0; p < procs; p++ {
		go func() {
			sem := new(PaddedSem)
			for atomic.AddInt32(&N, -1) >= 0 {
				runtime.Gosched()
				for g := 0; g < CallsPerSched; g++ {
					runtime.Semrelease(&sem.sem)
					runtime.Semacquire(&sem.sem)
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}
// benchmarkSema measures contended semaphore operations. If block is
// true, procs/2 goroutines park on the semaphore so the benchmark
// exercises the sleeping-goroutine paths; if work is true, each
// goroutine does some local work between operations.
func benchmarkSema(b *testing.B, block, work bool) {
	const CallsPerSched = 1000
	const LocalWork = 100
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, procs)
	c2 := make(chan bool, procs/2)
	sem := uint32(0)
	if block {
		for p := 0; p < procs/2; p++ {
			go func() {
				runtime.Semacquire(&sem)
				c2 <- true
			}()
		}
	}
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				runtime.Gosched()
				for g := 0; g < CallsPerSched; g++ {
					runtime.Semrelease(&sem)
					if work {
						for i := 0; i < LocalWork; i++ {
							foo *= 2
							foo /= 2
						}
					}
					runtime.Semacquire(&sem)
				}
			}
			c <- foo == 42
			runtime.Semrelease(&sem)
		}()
	}
	if block {
		for p := 0; p < procs/2; p++ {
			<-c2
		}
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}
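This helper is presumably driven by thin wrappers covering the four block/work combinations; a sketch (the exact benchmark names are assumptions):

func BenchmarkSemaSyntNonblock(b *testing.B) {
	benchmarkSema(b, false, false)
}

func BenchmarkSemaSyntBlock(b *testing.B) {
	benchmarkSema(b, true, false)
}

func BenchmarkSemaWorkNonblock(b *testing.B) {
	benchmarkSema(b, false, true)
}

func BenchmarkSemaWorkBlock(b *testing.B) {
	benchmarkSema(b, true, true)
}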
// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
// To ensure that the lock eventually becomes available,
// a blocked Lock call excludes new readers from acquiring
// the lock.
func (rw *RWMutex) Lock() {
	// First, resolve competition with other writers.
	rw.w.Lock()
	// Announce to readers there is a pending writer.
	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
	// Wait for active readers.
	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
		runtime.Semacquire(&rw.writerSem)
	}
}
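A sketch of the writer-side Unlock that pairs with this, assuming the same readerCount/readerSem fields; details may differ from the original:

// Unlock unlocks rw for writing (sketch paired with Lock above).
func (rw *RWMutex) Unlock() {
	// Announce to readers that there is no active writer.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
	// Unblock any readers that queued up while the writer held the lock.
	for i := 0; i < int(r); i++ {
		runtime.Semrelease(&rw.readerSem)
	}
	// Allow other writers to proceed.
	rw.w.Unlock()
}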
// Wait atomically unlocks c.L and suspends execution
// of the calling goroutine. After later resuming execution,
// Wait locks c.L before returning.
//
// Because L is not locked when Wait first resumes, the caller
// typically cannot assume that the condition is true when
// Wait returns. Instead, the caller should Wait in a loop:
//
//	c.L.Lock()
//	for !condition() {
//		c.Wait()
//	}
//	... make use of condition ...
//	c.L.Unlock()
//
func (c *Cond) Wait() {
	c.m.Lock()
	if c.sema == nil {
		c.sema = new(uint32)
	}
	s := c.sema
	c.waiters++
	c.m.Unlock()
	c.L.Unlock()
	runtime.Semacquire(s)
	c.L.Lock()
}
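A plausible Signal counterpart under the same waiters/sema scheme (a sketch; the original may track generations of waiters differently):

// Signal wakes one goroutine waiting on c, if there is any
// (sketch paired with the Wait above).
func (c *Cond) Signal() {
	c.m.Lock()
	if c.waiters > 0 {
		c.waiters--
		runtime.Semrelease(c.sema)
	}
	c.m.Unlock()
}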
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
	wg.m.Lock()
	if wg.counter == 0 {
		wg.m.Unlock()
		return
	}
	wg.waiters++
	if wg.sema == nil {
		wg.sema = new(uint32)
	}
	s := wg.sema
	wg.m.Unlock()
	runtime.Semacquire(s)
}
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
	if atomic.LoadInt32(&wg.counter) == 0 {
		return
	}
	wg.m.Lock()
	atomic.AddInt32(&wg.waiters, 1)
	// This code is racing with the unlocked path in Add above.
	// The code above modifies counter and then reads waiters.
	// We must modify waiters and then read counter (the opposite order)
	// to avoid missing an Add.
	if atomic.LoadInt32(&wg.counter) == 0 {
		atomic.AddInt32(&wg.waiters, -1)
		wg.m.Unlock()
		return
	}
	if wg.sema == nil {
		wg.sema = new(uint32)
	}
	s := wg.sema
	wg.m.Unlock()
	runtime.Semacquire(s)
}
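The race comment above refers to an unlocked path in Add; a sketch of what that path plausibly looks like (the exact wake-up bookkeeping is an assumption):

// Add adds delta to the counter and wakes all waiters when it
// reaches zero (sketch of the counterpart Wait races against).
func (wg *WaitGroup) Add(delta int) {
	v := atomic.AddInt32(&wg.counter, int32(delta))
	if v < 0 {
		panic("sync: negative WaitGroup counter")
	}
	// Unlocked path: modify counter, then read waiters -- the order
	// the comment in Wait is careful to invert.
	if v > 0 || atomic.LoadInt32(&wg.waiters) == 0 {
		return
	}
	wg.m.Lock()
	for i := int32(0); i < wg.waiters; i++ {
		runtime.Semrelease(wg.sema)
	}
	wg.waiters = 0
	wg.sema = nil
	wg.m.Unlock()
}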
// P acquires the semaphore (Dijkstra's P operation).
func (s *Semaphore) P() {
	runtime.Semacquire((*uint32)(s))
}
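The matching V operation is then one line (a sketch of the obvious counterpart):

// V releases the semaphore (Dijkstra's V operation).
func (s *Semaphore) V() {
	runtime.Semrelease((*uint32)(s))
}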
// Park blocks the calling goroutine on the parker's semaphore.
func (p *Parker) Park() {
	runtime.Semacquire(&p.sema)
}
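The counterpart presumably releases the same semaphore; a minimal sketch (the Unpark name is an assumption):

// Unpark wakes the goroutine blocked in Park, if any.
func (p *Parker) Unpark() {
	runtime.Semrelease(&p.sema)
}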
// RLock locks rw for reading.
func (rw *RWMutex) RLock() {
	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
		// A writer is pending, wait for it.
		runtime.Semacquire(&rw.readerSem)
	}
}
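A sketch of the matching RUnlock, assuming the readerWait/writerSem handshake from the writer-side Lock shown earlier; details may differ from the original:

// RUnlock undoes a single RLock call (sketch paired with RLock above).
func (rw *RWMutex) RUnlock() {
	if atomic.AddInt32(&rw.readerCount, -1) < 0 {
		// A writer is pending.
		if atomic.AddInt32(&rw.readerWait, -1) == 0 {
			// The last departing reader unblocks the writer.
			runtime.Semrelease(&rw.writerSem)
		}
	}
}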