// TryDequeue dequeues a value from our queue. If the queue is empty, this
// will return failure.
//
// This is the consumer side of a bounded lock-free MPMC ring buffer (in the
// style of Vyukov's bounded queue): each cell carries a sequence number that
// encodes which enqueue/dequeue "generation" it is ready for, and positions
// are claimed with CAS rather than locks.
//
// NOTE(review): this assumes q.mask == len(q.cells)-1 with a power-of-two
// cell count — TODO confirm against the Queue constructor (not visible here).
func (q *Queue) TryDequeue() (ptr unsafe.Pointer, dequeued bool) {
	var c *cell
	// Race load our deqPos,
	pos := atomic.LoadUintptr(&q.deqPos)
	for {
		// load the cell at that deqPos,
		c = &q.cells[pos&q.mask]
		// load the sequence number in that cell,
		seq := atomic.LoadUintptr(&c.seq)
		// and, if the sequence number is (deqPos + 1), we have an
		// enqueued value to dequeue. The int conversion makes the
		// comparison signed so a stale pos shows up as cmp > 0 rather
		// than as a huge unsigned value.
		cmp := int(seq - (pos + 1))
		if cmp == 0 {
			var swapped bool
			// Try to claim the deqPos to ourselves to dequeue,
			// updating pos to the new value. (Per this call
			// convention, primitive.CompareAndSwapUintptr returns
			// the freshest value of deqPos along with whether our
			// swap won; on success pos becomes pos+1.)
			if pos, swapped = primitive.CompareAndSwapUintptr(&q.deqPos, pos, pos+1); swapped {
				dequeued = true
				break
			}
			// CAS lost the race; pos was refreshed, retry.
			continue
		}
		if cmp < 0 {
			// If the sequence number was less than deqPos + 1,
			// the queue is empty.
			return
		}
		// If the sequence number was larger than (deqPos+1),
		// somebody else just updated the sequence number and
		// our loaded deqPos is out of date.
		pos = atomic.LoadUintptr(&q.deqPos)
	}
	// We have won the race and can dequeue - grab the pointer.
	ptr = c.ptr
	// Clear the cell so we don't pin the dequeued value in memory.
	c.ptr = primitive.Null
	// Update the cell's sequence number for the next enqueue. Since pos
	// was already advanced to (claimed position + 1) by the CAS above,
	// pos+q.mask is (claimed position + len(cells)), i.e. this cell is
	// next usable by the enqueuer one full lap ahead.
	atomic.StoreUintptr(&c.seq, pos+q.mask)
	return
}
// TryEnqueue adds a value to our queue. TryEnqueue takes an unsafe.Pointer to
// avoid the necessity of wrapping a heap allocated value in an interface,
// which also goes on the heap. If the queue is full, this will return failure.
//
// This is the producer side of the lock-free MPMC ring buffer: a cell whose
// sequence number equals the enqueue position is free for this generation,
// and the position itself is claimed via CAS. Mirrors TryDequeue.
func (q *Queue) TryEnqueue(ptr unsafe.Pointer) (enqueued bool) {
	var c *cell
	// Race load our enqPos,
	pos := atomic.LoadUintptr(&q.enqPos)
	for {
		// load the cell at that enqPos,
		c = &q.cells[pos&q.mask]
		// load the sequence number in that cell,
		seq := atomic.LoadUintptr(&c.seq)
		// and, if the sequence number is (enqPos), we have a spot to
		// enqueue into. The int conversion makes the comparison
		// signed so "behind" vs "ahead" is distinguishable.
		cmp := int(seq - pos)
		if cmp == 0 {
			var swapped bool
			// Try to claim the enqPos to ourselves to enqueue,
			// updating pos to the new value. (On success the CAS
			// helper hands back the advanced position, pos+1.)
			if pos, swapped = primitive.CompareAndSwapUintptr(&q.enqPos, pos, pos+1); swapped {
				enqueued = true
				break
			}
			// CAS lost the race; pos was refreshed, retry.
			continue
		}
		if cmp < 0 {
			// If the sequence number was less than enqPos, the
			// queue is full.
			return
		}
		// If the sequence number was larger than enqPos,
		// somebody else just updated the sequence number and
		// our loaded enqPos is out of date.
		pos = atomic.LoadUintptr(&q.enqPos)
	}
	// We have won the race and can enqueue - set the pointer.
	c.ptr = ptr
	// Update the cell's sequence number for dequeueing. pos was already
	// advanced to (claimed position + 1) by the CAS, which is exactly the
	// value TryDequeue expects to see for a filled cell.
	atomic.StoreUintptr(&c.seq, pos)
	return
}
// completeTurn unblocks a thread running waitFor(turn + 1).
//
// It publishes (turn+1) as the current turn in the shared futex word and,
// if anyone was recorded as waiting, wakes every waiter on the channel for
// turn+1. The turn/wait-count pair is packed into a single uintptr by
// encodeTurn and unpacked by getTurnNumber/getTurnWait.
func (t turnBroker) completeTurn(turn uintptr) {
	state := atomic.LoadUintptr(&t.f.State)
	for {
		// Decrement the recorded waiter count, saturating at zero.
		curWaitingFor := getTurnWait(state)
		var oneLess uintptr
		if curWaitingFor > 0 {
			oneLess = curWaitingFor - 1
		}
		newState := encodeTurn(turn+1, oneLess)
		var swapped bool
		// Publish the new turn; on failure the CAS helper refreshes
		// state so the loop retries against the latest value.
		if state, swapped = primitive.CompareAndSwapUintptr(&t.f.State, state, newState); swapped {
			if curWaitingFor != 0 {
				// We need to wake all waiters. If there is a
				// waiter for turn 0, and a wraparound waiter
				// for turn 32, and we wake only turn 32, it
				// will go back to waiting while turn 0 still
				// needs to be awoken.
				t.f.Wake(math.MaxUint32, futexChannel(turn+1))
			}
			break
		}
	}
}
// waitFor blocks until the broker's current turn equals the given turn.
//
// It first spins (bounded by *spinCutoff, or maxSpins when updating the
// cutoff or when the cutoff is still zero/unset), then falls back to a futex
// wait on the channel for this turn. On exit, when asked to (or on first
// use), it feeds the observed number of tries back into *spinCutoff so
// future callers spin an adaptive amount.
//
// Panics if the requested turn is already in the past, since that turn can
// never arrive again.
func (t turnBroker) waitFor(turn uintptr, spinCutoff *uint32, updateSpinCutoff bool) {
	givenSpinCount := atomic.LoadUint32(spinCutoff)
	spinCount := givenSpinCount
	if updateSpinCutoff || givenSpinCount == 0 {
		// No calibrated cutoff yet (or we're recalibrating): spin the
		// maximum so we can measure how long success actually takes.
		spinCount = maxSpins
	}

	var tries uint32
	state := atomic.LoadUintptr(&t.f.State)
	for ; ; tries++ {
		curTurn := getTurnNumber(state)
		if curTurn == turn {
			// Our turn has arrived.
			break
		}

		// How many turns away we are, modulo the turn counter width.
		waitingFor := turn - curTurn
		if waitingFor >= math.MaxUint32>>(turnShift+1) {
			// The unsigned subtraction wrapped: turn < curTurn.
			panic("turn is in the past")
		}

		if tries < spinCount {
			// Still in the spin phase: pause briefly and re-read.
			primitive.Pause()
			state = atomic.LoadUintptr(&t.f.State)
			continue
		}

		curWaitingFor := getTurnWait(state)

		var newState uintptr
		if waitingFor <= curWaitingFor {
			// A later turn is already being waited for - we will
			// hop on that bandwagon and wait with it.
			newState = state
		} else {
			// Record that we are waiting farther out than anyone
			// else, so completers keep waking until our turn.
			newState = encodeTurn(curTurn, waitingFor)
			if state != newState {
				var swapped bool
				// On CAS failure state is refreshed; re-evaluate
				// from the top rather than sleeping on stale state.
				if state, swapped = primitive.CompareAndSwapUintptr(&t.f.State, state, newState); !swapped {
					continue
				}
			}
		}

		// Sleep until a completer wakes this turn's channel, then
		// re-read the state and re-check.
		t.f.Wait(newState, futexChannel(turn))
		state = atomic.LoadUintptr(&t.f.State)
	}

	if updateSpinCutoff || givenSpinCount == 0 {
		var spinUpdate uint32
		if tries >= maxSpins {
			// If we hit maxSpins, then spinning is pointless, so
			// the right spinCutoff is the minimum possible.
			spinUpdate = minSpins
		} else {
			// To account for variations, we allow ourself to spin
			// 2*N when we think that N is actually required in
			// order to succeed.
			spinUpdate = minSpins
			dubTries := tries << 1
			if dubTries > spinUpdate {
				spinUpdate = dubTries
			}
			if maxSpins < spinUpdate {
				spinUpdate = maxSpins
			}
		}

		if givenSpinCount == 0 {
			// First calibration: just store our measurement.
			atomic.StoreUint32(spinCutoff, spinUpdate)
		} else {
			// Per Facebook, "Exponential moving average with alpha
			// of 7/8"... k. (new = old + (sample-old)/8, in ints.)
			spinUpdate = uint32(int(givenSpinCount) + (int(spinUpdate)-int(givenSpinCount))>>3)

			// Try once but keep moving if somebody else updated.
			atomic.CompareAndSwapUint32(spinCutoff, givenSpinCount, spinUpdate)
		}
	}
}