func resetcpuprofiler(hz int32) {
	lock(&cpuprofilerlock)
	if profiletimer == 0 {
		// First call: create the shared waitable timer and the
		// high-priority thread that waits on it.
		timer := stdcall3(_CreateWaitableTimerA, 0, 0, 0)
		atomic.Storeuintptr(&profiletimer, timer)
		thread := stdcall6(_CreateThread, 0, 0, funcPC(profileloop), 0, 0, 0)
		stdcall2(_SetThreadPriority, thread, _THREAD_PRIORITY_HIGHEST)
		stdcall1(_CloseHandle, thread)
	}
	unlock(&cpuprofilerlock)

	ms := int32(0)
	due := ^int64(^uint64(1 << 63)) // most negative int64: effectively "never fire"
	if hz > 0 {
		ms = 1000 / hz
		if ms == 0 {
			ms = 1
		}
		// A negative due time is relative, in 100-nanosecond units.
		due = int64(ms) * -10000
	}
	stdcall6(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0)
	atomic.Store((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz))
}
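For readers without the Win32 background: SetWaitableTimer treats a negative due time as a relative delay in 100-nanosecond units, which is why the timer is armed with int64(ms) * -10000, and ^int64(^uint64(1<<63)) is simply the most negative int64, a relative delay so far in the future that a disabled profiler's timer never fires. The portable sketch below mirrors only the overall shape, a lazily started sampling loop plus an atomically published rate, using a time.Ticker in place of the waitable timer; setProfileRate, sampler, and period are illustrative names, not anything from the runtime.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// profileHz is published atomically so the sampling loop can read the
// current rate without a lock, like the store into m.profilehz above.
var profileHz atomic.Int32

var (
	startSampler sync.Once
	period       = make(chan time.Duration, 1) // latest sampling period
)

// setProfileRate lazily starts a single sampling goroutine (the analogue of
// the high-priority timer thread) and then tells it how often to fire.
// hz <= 0 disables sampling.
func setProfileRate(hz int32) {
	startSampler.Do(func() { go sampler() })

	d := time.Duration(0)
	if hz > 0 {
		ms := 1000 / hz // same rounding as above: at least 1ms per sample
		if ms == 0 {
			ms = 1
		}
		d = time.Duration(ms) * time.Millisecond
	}
	profileHz.Store(hz)

	// Replace any pending period so the sampler sees the latest value.
	select {
	case <-period:
	default:
	}
	period <- d
}

func sampler() {
	var tick *time.Ticker
	var c <-chan time.Time // nil (never ready) while sampling is disabled
	for {
		select {
		case d := <-period:
			if tick != nil {
				tick.Stop()
				tick, c = nil, nil
			}
			if d > 0 {
				tick = time.NewTicker(d)
				c = tick.C
			}
		case <-c:
			fmt.Println("take a sample at", profileHz.Load(), "Hz")
		}
	}
}

func main() {
	setProfileRate(100) // ~10ms between samples
	time.Sleep(35 * time.Millisecond)
	setProfileRate(0) // disarm
	time.Sleep(20 * time.Millisecond)
}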
// push adds span s to buffer b. push is safe to call concurrently
// with other push operations, but NOT to call concurrently with pop.
func (b *gcSweepBuf) push(s *mspan) {
	// Obtain our slot.
	cursor := uintptr(atomic.Xadd(&b.index, +1) - 1)
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries

	// Do we need to add a block?
	spineLen := atomic.Loaduintptr(&b.spineLen)
	var block *gcSweepBlock
retry:
	if top < spineLen {
		spine := atomic.Loadp(unsafe.Pointer(&b.spine))
		blockp := add(spine, sys.PtrSize*top)
		block = (*gcSweepBlock)(atomic.Loadp(blockp))
	} else {
		// Add a new block to the spine, potentially growing
		// the spine.
		lock(&b.spineLock)
		// spineLen cannot change until we release the lock,
		// but may have changed while we were waiting.
		spineLen = atomic.Loaduintptr(&b.spineLen)
		if top < spineLen {
			unlock(&b.spineLock)
			goto retry
		}

		if spineLen == b.spineCap {
			// Grow the spine.
			newCap := b.spineCap * 2
			if newCap == 0 {
				newCap = gcSweepBufInitSpineCap
			}
			newSpine := persistentalloc(newCap*sys.PtrSize, sys.CacheLineSize, &memstats.gc_sys)
			if b.spineCap != 0 {
				// Blocks are allocated off-heap, so
				// no write barriers.
				memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
			}
			// Spine is allocated off-heap, so no write barrier.
			atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
			b.spineCap = newCap
			// We can't immediately free the old spine
			// since a concurrent push with a lower index
			// could still be reading from it. We let it
			// leak because even a 1TB heap would waste
			// less than 2MB of memory on old spines. If
			// this is a problem, we could free old spines
			// during STW.
		}

		// Allocate a new block and add it to the spine.
		block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), sys.CacheLineSize, &memstats.gc_sys))
		blockp := add(b.spine, sys.PtrSize*top)
		// Blocks are allocated off-heap, so no write barrier.
		atomic.StorepNoWB(blockp, unsafe.Pointer(block))
		atomic.Storeuintptr(&b.spineLen, spineLen+1)
		unlock(&b.spineLock)
	}

	// We have a block. Insert the span.
	block.spans[bottom] = s
}
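push is the producer half of a two-level, append-only structure: an atomically bumped index reserves a slot, a lock-free fast path finds the block that slot lives in, and only spine growth and block allocation take spineLock, with spineLen re-checked after acquiring it. Because the runtime version depends on off-heap allocation helpers (persistentalloc, StorepNoWB), the following is a self-contained sketch of the same protocol on the ordinary heap; pushBuf, block, and blockEntries are illustrative names and the constants are arbitrary. As in the original, push may race with other pushes but not with a consumer.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const blockEntries = 512 // arbitrary; the runtime derives this from its block size

type span struct{ id int } // stand-in for *mspan

type block struct {
	entries [blockEntries]*span
}

// pushBuf is a heap-allocated analogue of gcSweepBuf: a growable spine of
// pointers to fixed-size blocks, indexed by an atomically reserved cursor.
type pushBuf struct {
	spine     atomic.Value // []*block, replaced wholesale when grown
	spineLen  atomic.Uintptr
	spineLock sync.Mutex
	spineCap  uintptr // guarded by spineLock
	index     atomic.Uint32
}

// push is safe to call concurrently with other pushes, but not with a consumer.
func (b *pushBuf) push(s *span) {
	// Reserve our slot.
	cursor := uintptr(b.index.Add(1) - 1)
	top, bottom := cursor/blockEntries, cursor%blockEntries

	var blk *block
	for {
		// Fast path: the block holding our slot is already published.
		if top < b.spineLen.Load() {
			spine := b.spine.Load().([]*block)
			blk = spine[top]
			break
		}

		// Slow path: publish the missing block(s), growing the spine if needed.
		b.spineLock.Lock()
		spineLen := b.spineLen.Load()
		if top < spineLen {
			// Another push added our block while we waited for the lock.
			b.spineLock.Unlock()
			continue
		}
		var spine []*block
		if v := b.spine.Load(); v != nil {
			spine = v.([]*block)
		}
		for n := spineLen; n <= top; n++ {
			if n == b.spineCap {
				// Grow the spine. Old slices stay valid for concurrent
				// fast-path readers, so nothing is freed early.
				newCap := 2 * b.spineCap
				if newCap == 0 {
					newCap = 8
				}
				newSpine := make([]*block, newCap)
				copy(newSpine, spine)
				b.spine.Store(newSpine)
				b.spineCap = newCap
				spine = newSpine
			}
			spine[n] = new(block)
		}
		blk = spine[top]
		// Publishing spineLen after the element writes is what lets the
		// fast path read spine[top] without further synchronization.
		b.spineLen.Store(top + 1)
		b.spineLock.Unlock()
		break
	}

	// We have a block; fill in our slot.
	blk.entries[bottom] = s
}

func main() {
	var b pushBuf
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(base int) {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				b.push(&span{id: base*1000 + j})
			}
		}(i)
	}
	wg.Wait()
	fmt.Println("pushed", b.index.Load(), "spans")
}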
//go:nosplit
func unlockextra(mp *m) {
	atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
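unlockextra is the release half of the runtime's protocol for the extram list of extra Ms: extram holds either the head of a singly linked list or a sentinel "locked" value, the matching lockextra spins with compare-and-swap until it captures the head, and releasing the list is nothing more than this single atomic store of the new head. Below is a self-contained sketch of that acquire/release pattern using a sentinel pointer instead of the runtime's uintptr encoding; node, lockList, and unlockList are illustrative names.

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// node plays the role of an extra M on the runtime's extram list.
type node struct {
	next *node
	id   int
}

var (
	// head holds the current list head. The sentinel pointer means
	// "some goroutine holds the list"; storing a real head (or nil)
	// releases it, which is all the unlock step does.
	head     atomic.Pointer[node]
	sentinel = new(node)
)

// lockList spins until it can swap the head for the sentinel and returns
// the head it captured (possibly nil).
func lockList() *node {
	for {
		old := head.Load()
		if old == sentinel {
			runtime.Gosched() // another goroutine holds the list
			continue
		}
		if head.CompareAndSwap(old, sentinel) {
			return old
		}
	}
}

// unlockList publishes a new head, releasing the list: one atomic store,
// no separate lock word.
func unlockList(n *node) {
	head.Store(n)
}

func main() {
	// Push two nodes, then pop one.
	for i := 1; i <= 2; i++ {
		h := lockList()
		unlockList(&node{next: h, id: i})
	}
	h := lockList()
	unlockList(h.next)
	fmt.Println("popped node", h.id)
}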
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	var thandle uintptr
	// Duplicate the current thread's pseudo-handle into a real handle that
	// other threads (e.g. the profiler) can use, then publish it so they
	// can read it without a lock.
	stdcall7(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS)
	atomic.Storeuintptr(&getg().m.thread, thandle)
}