// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() {
	// Fast path: grab unlocked mutex.
	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
		if race.Enabled {
			race.Acquire(unsafe.Pointer(m))
		}
		return
	}

	awoke := false
	iter := 0
	for {
		old := m.state
		new := old | mutexLocked
		if old&mutexLocked != 0 {
			if runtime_canSpin(iter) {
				// Active spinning makes sense.
				// Try to set mutexWoken flag to inform Unlock
				// to not wake other blocked goroutines.
				if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
					atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
					awoke = true
				}
				runtime_doSpin()
				iter++
				continue
			}
			new = old + 1<<mutexWaiterShift
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			if new&mutexWoken == 0 {
				panic("sync: inconsistent mutex state")
			}
			new &^= mutexWoken
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&mutexLocked == 0 {
				break
			}
			runtime_Semacquire(&m.sema)
			awoke = true
			iter = 0
		}
	}

	if race.Enabled {
		race.Acquire(unsafe.Pointer(m))
	}
}
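A minimal usage sketch, not taken from the source above: callers never touch m.state directly; they pair Lock with Unlock, and the race.Acquire call above is what lets the race detector treat the critical section as ordered.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mu      sync.Mutex
		counter int
	)

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock() // the slow path above is taken only under contention
			counter++ // protected by mu
			mu.Unlock()
		}()
	}
	wg.Wait()
	fmt.Println(counter) // always 10
}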
// Get selects an arbitrary item from the Pool, removes it from the
// Pool, and returns it to the caller.
// Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and
// the values returned by Get.
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() interface{} {
	if race.Enabled {
		race.Disable()
	}
	l := p.pin()
	x := l.private
	l.private = nil
	runtime_procUnpin()
	if x == nil {
		l.Lock()
		last := len(l.shared) - 1
		if last >= 0 {
			x = l.shared[last]
			l.shared = l.shared[:last]
		}
		l.Unlock()
		if x == nil {
			x = p.getSlow()
		}
	}
	if race.Enabled {
		race.Enable()
		if x != nil {
			race.Acquire(poolRaceAddr(x))
		}
	}
	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}
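A hedged usage sketch: the New function and the buffer type below are illustrative choices, not anything prescribed by the Pool code above.

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	// New is called by Get when the pool has nothing to hand out.
	New: func() interface{} { return new(bytes.Buffer) },
}

func main() {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset() // a reused buffer may still hold old data
	buf.WriteString("hello")
	fmt.Println(buf.String())
	bufPool.Put(buf) // return it so later Gets can reuse it
}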
// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
// To ensure that the lock eventually becomes available,
// a blocked Lock call excludes new readers from acquiring
// the lock.
func (rw *RWMutex) Lock() {
	if race.Enabled {
		_ = rw.w.state
		race.Disable()
	}
	// First, resolve competition with other writers.
	rw.w.Lock()
	// Announce to readers there is a pending writer.
	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
	// Wait for active readers.
	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
		runtime_Semacquire(&rw.writerSem)
	}
	if race.Enabled {
		race.Enable()
		race.Acquire(unsafe.Pointer(&rw.readerSem))
		race.Acquire(unsafe.Pointer(&rw.writerSem))
	}
}
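A small usage sketch, separate from the source above: a writer takes Lock to get exclusive access, while readers use RLock and may run concurrently with one another.

package main

import (
	"fmt"
	"sync"
)

type counters struct {
	mu sync.RWMutex
	m  map[string]int
}

func (c *counters) inc(key string) {
	c.mu.Lock() // writer: excludes readers and other writers
	c.m[key]++
	c.mu.Unlock()
}

func (c *counters) get(key string) int {
	c.mu.RLock() // reader: may run alongside other readers
	defer c.mu.RUnlock()
	return c.m[key]
}

func main() {
	c := &counters{m: make(map[string]int)}
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.inc("hits") }()
	}
	wg.Wait()
	fmt.Println(c.get("hits")) // 100
}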
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
	statep := wg.state()
	if race.Enabled {
		_ = *statep // trigger nil deref early
		race.Disable()
	}
	for {
		state := atomic.LoadUint64(statep)
		v := int32(state >> 32)
		w := uint32(state)
		if v == 0 {
			// Counter is 0, no need to wait.
			if race.Enabled {
				race.Enable()
				race.Acquire(unsafe.Pointer(wg))
			}
			return
		}
		// Increment waiters count.
		if atomic.CompareAndSwapUint64(statep, state, state+1) {
			if race.Enabled && w == 0 {
				// Wait must be synchronized with the first Add.
				// Need to model this as a write to race with the read in Add.
				// As a consequence, can do the write only for the first waiter,
				// otherwise concurrent Waits will race with each other.
				race.Write(unsafe.Pointer(&wg.sema))
			}
			runtime_Semacquire(&wg.sema)
			if *statep != 0 {
				panic("sync: WaitGroup is reused before previous Wait has returned")
			}
			if race.Enabled {
				race.Enable()
				race.Acquire(unsafe.Pointer(wg))
			}
			return
		}
	}
}
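The usual pattern for the function above, as a sketch: Add runs before the goroutines start, each goroutine calls Done, and Wait blocks until the counter drains to zero.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	results := make([]int, 5)

	for i := 0; i < 5; i++ {
		wg.Add(1) // must happen before the goroutine can call Done
		go func(i int) {
			defer wg.Done()
			results[i] = i * i // each goroutine writes a distinct element
		}(i)
	}

	wg.Wait()            // blocks until the counter added above reaches zero
	fmt.Println(results) // [0 1 4 9 16]
}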
// RLock locks rw for reading.
func (rw *RWMutex) RLock() {
	if race.Enabled {
		_ = rw.w.state
		race.Disable()
	}
	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
		// A writer is pending, wait for it.
		runtime_Semacquire(&rw.readerSem)
	}
	if race.Enabled {
		race.Enable()
		race.Acquire(unsafe.Pointer(&rw.readerSem))
	}
}
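A small sketch of the readerCount arithmetic shared by Lock and RLock above; the constant mirrors the one in sync/rwmutex.go, the rest is illustrative plain-integer bookkeeping, not the real type.

package main

import "fmt"

const rwmutexMaxReaders = 1 << 30 // same constant as in sync/rwmutex.go

func main() {
	readerCount := int32(3) // three readers currently hold the lock

	// A writer announces itself, as RWMutex.Lock does.
	readerCount -= rwmutexMaxReaders
	activeReaders := readerCount + rwmutexMaxReaders
	fmt.Println(activeReaders) // 3: readers the writer must still wait for

	// A new reader arrives, as RWMutex.RLock does.
	readerCount++
	fmt.Println(readerCount < 0) // true: a writer is pending, so the reader blocks
}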
func Read(fd int, p []byte) (n int, err error) {
	n, err = read(fd, p)
	if race.Enabled {
		if n > 0 {
			race.WriteRange(unsafe.Pointer(&p[0]), n)
		}
		if err == nil {
			race.Acquire(unsafe.Pointer(&ioSync))
		}
	}
	if msanenabled && n > 0 {
		msanWrite(unsafe.Pointer(&p[0]), n)
	}
	return
}
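A hedged sketch of calling this wrapper directly on a Unix-like system; the file path is illustrative, and ordinary code would use the os package rather than raw syscalls.

//go:build unix

package main

import (
	"fmt"
	"syscall"
)

func main() {
	fd, err := syscall.Open("/etc/hosts", syscall.O_RDONLY, 0) // illustrative path
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)

	buf := make([]byte, 128)
	n, err := syscall.Read(fd, buf) // the wrapper above: read(2) plus race/msan bookkeeping
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", n)
}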
func (fd *netFD) Read(buf []byte) (int, error) {
	if err := fd.readLock(); err != nil {
		return 0, err
	}
	defer fd.readUnlock()
	o := &fd.rop
	o.InitBuf(buf)
	n, err := rsrv.ExecIO(o, "WSARecv", func(o *operation) error {
		return syscall.WSARecv(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
	})
	if race.Enabled {
		race.Acquire(unsafe.Pointer(&ioSync))
	}
	err = fd.eofError(n, err)
	if _, ok := err.(syscall.Errno); ok {
		err = os.NewSyscallError("wsarecv", err)
	}
	return n, err
}
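The function above is internal to package net on Windows; callers only reach it through net.Conn.Read. A minimal sketch, with an illustrative endpoint that assumes network access:

package main

import (
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80") // illustrative endpoint
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	fmt.Fprint(conn, "HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")

	buf := make([]byte, 512)
	n, err := conn.Read(buf) // on Windows this lands in netFD.Read above
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", n)
}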
func Read(fd Handle, p []byte) (n int, err error) {
	var done uint32
	e := ReadFile(fd, p, &done, nil)
	if e != nil {
		if e == ERROR_BROKEN_PIPE {
			// NOTE(brainman): ERROR_BROKEN_PIPE is returned on reading EOF from stdin;
			// work around it by reporting success with zero bytes read.
			return 0, nil
		}
		return 0, e
	}
	if race.Enabled {
		if done > 0 {
			race.WriteRange(unsafe.Pointer(&p[0]), int(done))
		}
		race.Acquire(unsafe.Pointer(&ioSync))
	}
	if msanenabled && done > 0 {
		msanWrite(unsafe.Pointer(&p[0]), int(done))
	}
	return int(done), nil
}
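A hedged Windows-only sketch of calling this wrapper directly; the file path is illustrative. Note that because of the ERROR_BROKEN_PIPE workaround above, EOF on a pipe surfaces here as (0, nil).

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

func main() {
	fd, err := syscall.Open(`C:\Windows\win.ini`, syscall.O_RDONLY, 0) // illustrative path
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)

	buf := make([]byte, 256)
	n, err := syscall.Read(fd, buf) // the wrapper above: ReadFile plus race/msan bookkeeping
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", n)
}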