Example #1
// block returns the spans in the i'th block of buffer b. block is
// safe to call concurrently with push.
func (b *gcSweepBuf) block(i int) []*mspan {
	// Perform bounds check before loading spine address since
	// push ensures the allocated length is at least spineLen.
	if i < 0 || uintptr(i) >= atomic.Loaduintptr(&b.spineLen) {
		throw("block index out of range")
	}

	// Get block i.
	spine := atomic.Loadp(unsafe.Pointer(&b.spine))
	blockp := add(spine, sys.PtrSize*uintptr(i))
	block := (*gcSweepBlock)(atomic.Loadp(blockp))

	// Slice the block if necessary.
	cursor := uintptr(atomic.Load(&b.index))
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	var spans []*mspan
	if uintptr(i) < top {
		spans = block.spans[:]
	} else {
		spans = block.spans[:bottom]
	}

	// push may have reserved a slot but not filled it yet, so
	// trim away unused entries.
	for len(spans) > 0 && spans[len(spans)-1] == nil {
		spans = spans[:len(spans)-1]
	}
	return spans
}
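
The pattern to note here is the ordering: push publishes a block pointer before it bumps spineLen, so any index that passes the bounds check refers to a block that is already visible. Below is a minimal, self-contained sketch of the same reader/writer discipline using the public sync/atomic package instead of the runtime-internal atomic; the list type, its fixed-capacity spine, and the single-writer append are assumptions made for the sketch, not part of the runtime code.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

const (
	blockEntries = 4  // entries per block, like gcSweepBlockEntries
	maxBlocks    = 16 // fixed spine capacity keeps the sketch simple
)

type block struct {
	vals [blockEntries]int
}

// list is a hypothetical stand-in for gcSweepBuf: a spine of atomically
// published block pointers plus a count of the entries that are safe to read.
type list struct {
	spine    [maxBlocks]unsafe.Pointer // each entry is a *block
	spineLen uintptr                   // number of published spine entries
}

// block mirrors gcSweepBuf.block: bounds-check against spineLen first, then
// load the block pointer, which the writer published before bumping spineLen.
func (l *list) block(i int) *block {
	if i < 0 || uintptr(i) >= atomic.LoadUintptr(&l.spineLen) {
		panic("block index out of range")
	}
	return (*block)(atomic.LoadPointer(&l.spine[i]))
}

// append publishes a new block (single writer assumed): store the pointer
// first, then make it visible to readers by bumping spineLen.
func (l *list) append(b *block) {
	n := atomic.LoadUintptr(&l.spineLen)
	atomic.StorePointer(&l.spine[n], unsafe.Pointer(b))
	atomic.StoreUintptr(&l.spineLen, n+1)
}

func main() {
	var l list
	l.append(&block{vals: [blockEntries]int{1, 2, 3, 4}})
	fmt.Println(l.block(0).vals) // [1 2 3 4]
}
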
Example #2
// mapaccess2 is the runtime entry point for the two-result form of a map
// read (v, ok := m[key]): it returns a pointer to the value and a boolean
// reporting whether the key was present. On a miss it returns a pointer to
// the shared zero buffer rather than nil.
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
		}
	}
}
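
What mapaccess2 returns on a miss is worth calling out: instead of nil it hands back atomic.Loadp(&zeroptr), a pointer into a shared block of zero memory, so the compiled code for v, ok := m[k] can always dereference the result. A rough sketch of that idea with the public sync/atomic package follows; zeroBuf and lookup are hypothetical names for illustration, and an ordinary Go map stands in for the runtime's bucket probing.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// zeroBuf plays the role of the runtime's zeroptr: a shared block of zero
// memory that lookups return a pointer into when the key is absent, so the
// caller always gets a valid pointer to a zero value, never nil.
var zeroBuf unsafe.Pointer // *[8]int64 of zeros, published atomically

func init() {
	atomic.StorePointer(&zeroBuf, unsafe.Pointer(new([8]int64)))
}

// lookup is a hypothetical stand-in for mapaccess2: a pointer to the value
// plus a presence flag, with the shared zero block (loaded atomically, as
// with atomic.Loadp(&zeroptr) above) returned on a miss.
func lookup(m map[string]*int64, key string) (unsafe.Pointer, bool) {
	if v, ok := m[key]; ok {
		return unsafe.Pointer(v), true
	}
	return atomic.LoadPointer(&zeroBuf), false
}

func main() {
	x := int64(42)
	m := map[string]*int64{"a": &x}
	p, ok := lookup(m, "missing")
	fmt.Println(ok, *(*int64)(p)) // false 0: a missing key reads as zero
	p, ok = lookup(m, "a")
	fmt.Println(ok, *(*int64)(p)) // true 42
}
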
Example #3
// NumCgoCall returns the number of cgo calls made by the current process.
func NumCgoCall() int64 {
	var n int64
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		n += int64(mp.ncgocall)
	}
	return n
}
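
NumCgoCall shows the cheapest use of the pattern: allm is the head of an intrusive linked list that only ever grows at the front, so a single atomic load of the head is enough to walk a consistent snapshot without locking. Here is a small self-contained sketch of that list shape, using sync/atomic in place of the runtime's internal atomic package (node, allNodes, and total are made-up names).

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// node mimics the runtime's m: an intrusive list link plus a counter.
// The list is only ever prepended to, and the head is published with an
// atomic store, so readers can walk it without a lock.
type node struct {
	count int64
	next  *node
}

var allNodes unsafe.Pointer // *node head, published atomically

func push(n *node) {
	for {
		old := atomic.LoadPointer(&allNodes)
		n.next = (*node)(old)
		if atomic.CompareAndSwapPointer(&allNodes, old, unsafe.Pointer(n)) {
			return
		}
	}
}

// total mirrors NumCgoCall: load the head once, then follow next links.
func total() int64 {
	var sum int64
	for n := (*node)(atomic.LoadPointer(&allNodes)); n != nil; n = n.next {
		sum += n.count
	}
	return sum
}

func main() {
	push(&node{count: 3})
	push(&node{count: 4})
	fmt.Println(total()) // 7
}
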
Example #4
// activeModules returns a slice of active modules.
//
// A module is active once its gcdatamask and gcbssmask have been
// assembled and it is usable by the GC.
func activeModules() []*moduledata {
	p := (*[]*moduledata)(atomic.Loadp(unsafe.Pointer(&modulesSlice)))
	if p == nil {
		return nil
	}
	return *p
}
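
activeModules is the classic read-copy-update reader: modulesSlice points at an immutable slice, writers build a new slice and swap the pointer in, and readers need only one atomic load plus a nil check. The sketch below shows the same shape with sync/atomic; the moduleInfo type and the addModule helper are assumptions, and the sketch assumes a single writer (or writers serialized by a lock), since addModule itself performs no locking.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type moduleInfo struct{ name string }

// modules points to an immutable []*moduleInfo. Updaters build a fresh
// slice and swap the pointer in; readers load the pointer and, like
// activeModules, treat nil as "no modules yet".
var modules unsafe.Pointer // *[]*moduleInfo

func activeModules() []*moduleInfo {
	p := (*[]*moduleInfo)(atomic.LoadPointer(&modules))
	if p == nil {
		return nil
	}
	return *p
}

// addModule publishes a new slice containing the old contents plus m.
// The old slice is never modified, so concurrent readers stay safe.
func addModule(m *moduleInfo) {
	cur := activeModules()
	next := make([]*moduleInfo, len(cur), len(cur)+1)
	copy(next, cur)
	next = append(next, m)
	atomic.StorePointer(&modules, unsafe.Pointer(&next))
}

func main() {
	fmt.Println(len(activeModules())) // 0
	addModule(&moduleInfo{name: "main"})
	fmt.Println(len(activeModules())) // 1
}
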
Example #5
// mapaccess2_fast32 is the specialization of mapaccess2 for maps with
// 32-bit keys: keys are compared directly instead of through the key
// type's equality function.
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table.  No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
		}
	}
}
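
The loop above is easier to follow with the bucket layout in mind: eight slots per bucket, one byte of hash per slot acting as a cheap filter, then the keys packed together, then the values, then an overflow pointer. The sketch below models that layout in plain Go (no unsafe offsets) for a uint32-keyed, string-valued map; the bucket type, the get helper, and the field names are illustrative only.

package main

import "fmt"

const bucketCnt = 8
const empty = 0 // topbits value for an unused slot, as in the runtime

// bucket is a simplified picture of the runtime's bmap for a uint32-keyed
// map: one filter byte per slot, then the keys, then the values, then the
// overflow pointer.
type bucket struct {
	topbits  [bucketCnt]uint8
	keys     [bucketCnt]uint32
	vals     [bucketCnt]string
	overflow *bucket
}

// get probes one bucket chain the way mapaccess2_fast32 does: compare the
// key, skip slots whose topbits mark them empty, follow overflow buckets.
func get(b *bucket, key uint32) (string, bool) {
	for ; b != nil; b = b.overflow {
		for i := 0; i < bucketCnt; i++ {
			if b.keys[i] != key || b.topbits[i] == empty {
				continue
			}
			return b.vals[i], true
		}
	}
	return "", false
}

func main() {
	b := &bucket{}
	b.topbits[2] = 5 // any non-empty marker
	b.keys[2] = 42
	b.vals[2] = "answer"
	fmt.Println(get(b, 42)) // answer true
	fmt.Println(get(b, 7))  //  false
}
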
Example #6
// getitab returns the itab for the pair (inter, typ), building and caching
// it on first use. If canfail is true, a type that does not implement the
// interface yields nil instead of a panic.
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
	if len(inter.mhdr) == 0 {
		throw("internal error - misuse of itab")
	}

	// easy case
	if typ.tflag&tflagUncommon == 0 {
		if canfail {
			return nil
		}
		name := inter.typ.nameOff(inter.mhdr[0].name)
		panic(&TypeAssertionError{"", typ.string(), inter.typ.string(), name.name()})
	}

	h := itabhash(inter, typ)

	// look twice - once without lock, once with.
	// common case will be no lock contention.
	var m *itab
	var locked int
	for locked = 0; locked < 2; locked++ {
		if locked != 0 {
			lock(&ifaceLock)
		}
		for m = (*itab)(atomic.Loadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
			if m.inter == inter && m._type == typ {
				if m.bad != 0 {
					if !canfail {
						// this can only happen if the conversion
						// was already done once using the , ok form
						// and we have a cached negative result.
						// the cached result doesn't record which
						// interface function was missing, so try
						// adding the itab again, which will throw an error.
						additab(m, locked != 0, false)
					}
					m = nil
				}
				if locked != 0 {
					unlock(&ifaceLock)
				}
				return m
			}
		}
	}

	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
	m.inter = inter
	m._type = typ
	additab(m, true, canfail)
	unlock(&ifaceLock)
	if m.bad != 0 {
		return nil
	}
	return m
}
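
The structure of getitab is a double-checked cache: one pass over the hash chain without the lock, and only if that misses a second pass under ifaceLock before allocating and publishing a new itab. Below is a stripped-down sketch of that two-pass lookup for a single hash chain, written with sync and sync/atomic; entry, bucketHead, and getOrAdd are invented names, and the real code additionally handles the cached-negative-result case shown above.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"unsafe"
)

// entry is one node of a hash chain. link is set before the node is
// published, so lock-free readers may follow it safely.
type entry struct {
	key  string
	val  int
	link *entry
}

var (
	bucketHead unsafe.Pointer // *entry chain head, published atomically
	bucketMu   sync.Mutex
)

// getOrAdd mirrors getitab's "look twice - once without lock, once with":
// the common hit never touches the lock; a miss re-checks under the lock
// and only then builds and publishes a new entry.
func getOrAdd(key string, build func() int) *entry {
	for pass := 0; pass < 2; pass++ {
		if pass == 1 {
			bucketMu.Lock()
		}
		for e := (*entry)(atomic.LoadPointer(&bucketHead)); e != nil; e = e.link {
			if e.key == key {
				if pass == 1 {
					bucketMu.Unlock()
				}
				return e
			}
		}
	}
	// Still missing while holding the lock: build, link, publish.
	e := &entry{key: key, val: build(), link: (*entry)(atomic.LoadPointer(&bucketHead))}
	atomic.StorePointer(&bucketHead, unsafe.Pointer(e))
	bucketMu.Unlock()
	return e
}

func main() {
	a := getOrAdd("x", func() int { return 7 })
	b := getOrAdd("x", func() int { return 0 })
	fmt.Println(a.val, a == b) // 7 true: the second call hits the cache
}
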
Example #7
// goexitsall posts the exit status as a note to every other runtime
// thread, so that all OS threads of the process terminate (Plan 9 port).
func goexitsall(status *byte) {
	var buf [_ERRMAX]byte
	n := copy(buf[:], goexits)
	n = copy(buf[n:], gostringnocopy(status))
	pid := getpid()
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		if mp.procid != pid {
			postnote(mp.procid, buf[:])
		}
	}
}
Example #8
// mapzero ensures that zeroptr points to a buffer large enough to
// serve as the zero value for t.
func mapzero(t *_type) {
	// Is the type small enough for existing buffer?
	cursize := uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
	if t.size <= cursize {
		return
	}

	// Allocate a new buffer.
	lock(&zerolock)
	cursize = uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
	if cursize < t.size {
		for cursize < t.size {
			cursize *= 2
			if cursize == 0 {
				// need >2GB zero on 32-bit machine
				throw("map element too large")
			}
		}
		atomic.Storep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
		atomic.Storep1(unsafe.Pointer(&zerosize), unsafe.Pointer(cursize))
	}
	unlock(&zerolock)
}
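
mapzero is double-checked growth: an unlocked size check for the common case, then a re-check under zerolock, grow by doubling, and publish. The order of the two stores matters: the new buffer must be visible before the size that advertises it. A sketch of that discipline with sync/atomic and an atomic.Value holding the buffer follows; ensureZero and the 64-byte starting size are assumptions of the sketch.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// zeroSize/zeroBuf mirror zerosize/zeroptr: a shared, grow-only buffer of
// zero bytes. The buffer is published before the size, so any reader that
// observes a size also sees a buffer at least that large.
var (
	zeroLock sync.Mutex
	zeroSize uintptr      // current buffer size, read atomically
	zeroBuf  atomic.Value // holds a []byte
)

func init() {
	zeroBuf.Store(make([]byte, 64))
	atomic.StoreUintptr(&zeroSize, 64)
}

func ensureZero(n uintptr) []byte {
	// Fast path: the existing buffer is already big enough.
	if n <= atomic.LoadUintptr(&zeroSize) {
		return zeroBuf.Load().([]byte)
	}
	// Slow path: re-check under the lock, then grow by doubling.
	zeroLock.Lock()
	defer zeroLock.Unlock()
	cur := atomic.LoadUintptr(&zeroSize)
	if cur < n {
		for cur < n {
			cur *= 2
		}
		zeroBuf.Store(make([]byte, cur))    // publish the buffer first...
		atomic.StoreUintptr(&zeroSize, cur) // ...then the size that promises it
	}
	return zeroBuf.Load().([]byte)
}

func main() {
	fmt.Println(len(ensureZero(100))) // 128: grown by doubling
	fmt.Println(len(ensureZero(50)))  // 128: fast path, no lock taken
}
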
Example #9
// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
//
// Most clients should use the runtime/pprof package instead
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
	first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
	for mp := first; mp != nil; mp = mp.alllink {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for mp := first; mp != nil; mp = mp.alllink {
			p[i].Stack0 = mp.createstack
			i++
		}
	}
	return
}
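
ThreadCreateProfile uses a count-then-copy protocol: one walk of allm to compute n, then a copy only if the caller's slice is big enough, with ok telling the caller whether to retry with a larger slice. Loading the list head once and reusing it for both walks keeps the two passes over the same snapshot even if new Ms are prepended concurrently. Here is a sketch of the same protocol over a hypothetical record list; rec, allRecs, and snapshot are invented names.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type rec struct {
	stack [4]uintptr
	next  *rec
}

var allRecs unsafe.Pointer // *rec list head, published atomically

// snapshot mirrors ThreadCreateProfile: walk the list once to count, copy
// into p only if it is big enough, and report the required length either
// way so the caller can allocate and retry.
func snapshot(p [][4]uintptr) (n int, ok bool) {
	first := (*rec)(atomic.LoadPointer(&allRecs)) // pin the snapshot
	for r := first; r != nil; r = r.next {
		n++
	}
	if n <= len(p) {
		ok = true
		i := 0
		for r := first; r != nil; r = r.next {
			p[i] = r.stack
			i++
		}
	}
	return
}

func main() {
	r := &rec{stack: [4]uintptr{1, 2, 3, 4}}
	atomic.StorePointer(&allRecs, unsafe.Pointer(r))
	n, ok := snapshot(nil)
	fmt.Println(n, ok) // 1 false: caller should allocate n records and retry
	buf := make([][4]uintptr, n)
	n, ok = snapshot(buf)
	fmt.Println(n, ok, buf[0]) // 1 true [1 2 3 4]
}
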
Example #10
func goexitsall(status *byte) {
	var buf [_ERRMAX]byte
	if !atomic.Cas(&exiting, 0, 1) {
		return
	}
	getg().m.locks++
	n := copy(buf[:], goexits)
	n = copy(buf[n:], gostringnocopy(status))
	pid := getpid()
	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
		if mp.procid != 0 && mp.procid != pid {
			postnote(mp.procid, buf[:])
		}
	}
	getg().m.locks--
}
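
Compared with the first goexitsall above, this version adds two guards: a compare-and-swap on exiting so only one thread broadcasts the notes, and an m.locks bump to keep the current thread from being preempted while it does so. The CAS guard is a generally useful "exactly once, never block" idiom; a minimal sketch with sync/atomic follows (shutdownOnce is an invented helper; sync.Once is the usual alternative when it is acceptable for latecomers to block until the first caller finishes).

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// exiting plays the role of the runtime's exiting flag: the first caller
// to win the compare-and-swap performs the shutdown work, every later or
// concurrent caller returns immediately without waiting.
var exiting uint32

func shutdownOnce(work func()) {
	if !atomic.CompareAndSwapUint32(&exiting, 0, 1) {
		return
	}
	work()
}

func main() {
	var wg sync.WaitGroup
	var runs int32
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			shutdownOnce(func() { atomic.AddInt32(&runs, 1) })
		}()
	}
	wg.Wait()
	fmt.Println(runs) // 1: the CAS admits exactly one caller
}
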
Example #11
// profileloop1 is the body of the Windows profiling helper thread: each
// time the profiling timer fires it walks allm, suspends every eligible
// running thread, samples it, and resumes it. The eligibility checks are
// repeated after _SuspendThread because they may have changed in between.
func profileloop1(param uintptr) uint32 {
	stdcall2(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)

	for {
		stdcall2(_WaitForSingleObject, profiletimer, _INFINITE)
		first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
		for mp := first; mp != nil; mp = mp.alllink {
			thread := atomic.Loaduintptr(&mp.thread)
			// Do not profile threads blocked on Notes,
			// this includes idle worker threads,
			// idle timer thread, idle heap scavenger, etc.
			if thread == 0 || mp.profilehz == 0 || mp.blocked {
				continue
			}
			stdcall1(_SuspendThread, thread)
			if mp.profilehz != 0 && !mp.blocked {
				profilem(mp)
			}
			stdcall1(_ResumeThread, thread)
		}
	}
}
Example #12
// push adds span s to buffer b. push is safe to call concurrently
// with other push operations, but NOT to call concurrently with pop.
func (b *gcSweepBuf) push(s *mspan) {
	// Obtain our slot.
	cursor := uintptr(atomic.Xadd(&b.index, +1) - 1)
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries

	// Do we need to add a block?
	spineLen := atomic.Loaduintptr(&b.spineLen)
	var block *gcSweepBlock
retry:
	if top < spineLen {
		spine := atomic.Loadp(unsafe.Pointer(&b.spine))
		blockp := add(spine, sys.PtrSize*top)
		block = (*gcSweepBlock)(atomic.Loadp(blockp))
	} else {
		// Add a new block to the spine, potentially growing
		// the spine.
		lock(&b.spineLock)
		// spineLen cannot change until we release the lock,
		// but may have changed while we were waiting.
		spineLen = atomic.Loaduintptr(&b.spineLen)
		if top < spineLen {
			unlock(&b.spineLock)
			goto retry
		}

		if spineLen == b.spineCap {
			// Grow the spine.
			newCap := b.spineCap * 2
			if newCap == 0 {
				newCap = gcSweepBufInitSpineCap
			}
			newSpine := persistentalloc(newCap*sys.PtrSize, sys.CacheLineSize, &memstats.gc_sys)
			if b.spineCap != 0 {
				// Blocks are allocated off-heap, so
				// no write barriers.
				memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
			}
			// Spine is allocated off-heap, so no write barrier.
			atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
			b.spineCap = newCap
			// We can't immediately free the old spine
			// since a concurrent push with a lower index
			// could still be reading from it. We let it
			// leak because even a 1TB heap would waste
			// less than 2MB of memory on old spines. If
			// this is a problem, we could free old spines
			// during STW.
		}

		// Allocate a new block and add it to the spine.
		block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), sys.CacheLineSize, &memstats.gc_sys))
		blockp := add(b.spine, sys.PtrSize*top)
		// Blocks are allocated off-heap, so no write barrier.
		atomic.StorepNoWB(blockp, unsafe.Pointer(block))
		atomic.Storeuintptr(&b.spineLen, spineLen+1)
		unlock(&b.spineLock)
	}

	// We have a block. Insert the span.
	block.spans[bottom] = s
}
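
push splits the work into a lock-free part and a locked part: every caller reserves a unique slot with an atomic add and derives (block, offset) from it; only a caller whose block does not exist yet takes spineLock, re-checks spineLen, and publishes the new block. The sketch below reproduces that shape with sync/atomic over a fixed-capacity spine; buf, blockEntries, and maxBlocks are assumptions of the sketch, whereas the runtime grows the spine with persistentalloc and deliberately leaks old spines, as its comments explain.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"unsafe"
)

const (
	blockEntries = 4
	maxBlocks    = 64 // fixed spine capacity keeps the sketch simple
)

type block struct {
	vals [blockEntries]int64
}

// buf is a simplified gcSweepBuf: index hands out slots, spine holds
// atomically published *block pointers, spineLen says how many are usable.
type buf struct {
	index     uint64
	spineLock sync.Mutex
	spine     [maxBlocks]unsafe.Pointer
	spineLen  uint64
}

func (b *buf) push(v int64) {
	// Reserve a slot without any lock, then locate its block and offset.
	cursor := atomic.AddUint64(&b.index, 1) - 1
	top, bottom := cursor/blockEntries, cursor%blockEntries

	// Slow path: the block for this slot may not exist yet. Take the lock,
	// re-check, and publish blocks until it does (pointer before length).
	if atomic.LoadUint64(&b.spineLen) <= top {
		b.spineLock.Lock()
		for atomic.LoadUint64(&b.spineLen) <= top {
			n := atomic.LoadUint64(&b.spineLen)
			atomic.StorePointer(&b.spine[n], unsafe.Pointer(new(block)))
			atomic.StoreUint64(&b.spineLen, n+1)
		}
		b.spineLock.Unlock()
	}

	blk := (*block)(atomic.LoadPointer(&b.spine[top]))
	blk.vals[bottom] = v
}

func main() {
	var b buf
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(v int64) {
			defer wg.Done()
			b.push(v)
		}(int64(i))
	}
	wg.Wait()
	fmt.Println(atomic.LoadUint64(&b.index)) // 10 slots used across 3 blocks
}
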
Example #13
File: iface.go Project: sreis/go
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
	if len(inter.mhdr) == 0 {
		throw("internal error - misuse of itab")
	}

	// easy case
	x := typ.x
	if x == nil {
		if canfail {
			return nil
		}
		panic(&TypeAssertionError{"", typ._string, inter.typ._string, *inter.mhdr[0].name})
	}

	// compiler has provided some good hash codes for us.
	h := inter.typ.hash
	h += 17 * typ.hash
	// TODO(rsc): h += 23 * x.mhash ?
	h %= hashSize

	// look twice - once without lock, once with.
	// common case will be no lock contention.
	var m *itab
	var locked int
	for locked = 0; locked < 2; locked++ {
		if locked != 0 {
			lock(&ifaceLock)
		}
		for m = (*itab)(atomic.Loadp(unsafe.Pointer(&hash[h]))); m != nil; m = m.link {
			if m.inter == inter && m._type == typ {
				if m.bad != 0 {
					m = nil
					if !canfail {
						// this can only happen if the conversion
						// was already done once using the , ok form
						// and we have a cached negative result.
						// the cached result doesn't record which
						// interface function was missing, so jump
						// down to the interface check, which will
						// do more work but give a better error.
						goto search
					}
				}
				if locked != 0 {
					unlock(&ifaceLock)
				}
				return m
			}
		}
	}

	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
	m.inter = inter
	m._type = typ

search:
	// both inter and typ have method sorted by name,
	// and interface names are unique,
	// so can iterate over both in lock step;
	// the loop is O(ni+nt) not O(ni*nt).
	ni := len(inter.mhdr)
	nt := len(x.mhdr)
	j := 0
	for k := 0; k < ni; k++ {
		i := &inter.mhdr[k]
		iname := i.name
		ipkgpath := i.pkgpath
		itype := i._type
		for ; j < nt; j++ {
			t := &x.mhdr[j]
			if t.mtyp == itype && (t.name == iname || *t.name == *iname) && t.pkgpath == ipkgpath {
				if m != nil {
					*(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = t.ifn
				}
				goto nextimethod
			}
		}
		// didn't find method
		if !canfail {
			if locked != 0 {
				unlock(&ifaceLock)
			}
			panic(&TypeAssertionError{"", typ._string, inter.typ._string, *iname})
		}
		m.bad = 1
		break
	nextimethod:
	}
	if locked == 0 {
		throw("invalid itab locking")
	}
	m.link = hash[h]
	atomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m))
	unlock(&ifaceLock)
	if m.bad != 0 {
		return nil
	}
	return m
}

func typ2Itab(t *_type, inter *interfacetype, cache **itab) *itab {
	tab := getitab(inter, t, false)
	atomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
	return tab
}

func convT2E(t *_type, elem unsafe.Pointer, x unsafe.Pointer) (e eface) {
	if raceenabled {
		raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2E))
	}
	if msanenabled {
		msanread(elem, t.size)
	}
	if isDirectIface(t) {
		e._type = t
		typedmemmove(t, unsafe.Pointer(&e.data), elem)
	} else {
		if x == nil {
			x = newobject(t)
		}
		// TODO: We allocate a zeroed object only to overwrite it with
		// actual data. Figure out how to avoid zeroing. Also below in convT2I.
		typedmemmove(t, x, elem)
		e._type = t
		e.data = x
	}
	return
}

func convT2I(t *_type, inter *interfacetype, cache **itab, elem unsafe.Pointer, x unsafe.Pointer) (i iface) {
	if raceenabled {
		raceReadObjectPC(t, elem, getcallerpc(unsafe.Pointer(&t)), funcPC(convT2I))
	}
	if msanenabled {
		msanread(elem, t.size)
	}
	tab := (*itab)(atomic.Loadp(unsafe.Pointer(cache)))
	if tab == nil {
		tab = getitab(inter, t, false)
		atomicstorep(unsafe.Pointer(cache), unsafe.Pointer(tab))
	}
	if isDirectIface(t) {
		i.tab = tab
		typedmemmove(t, unsafe.Pointer(&i.data), elem)
	} else {
		if x == nil {
			x = newobject(t)
		}
		typedmemmove(t, x, elem)
		i.tab = tab
		i.data = x
	}
	return
}

func panicdottype(have, want, iface *_type) {
	haveString := ""
	if have != nil {
		haveString = have._string
	}
	panic(&TypeAssertionError{iface._string, haveString, want._string, ""})
}

func assertI2T(t *_type, i iface, r unsafe.Pointer) {
	tab := i.tab
	if tab == nil {
		panic(&TypeAssertionError{"", "", t._string, ""})
	}
	if tab._type != t {
		panic(&TypeAssertionError{tab.inter.typ._string, tab._type._string, t._string, ""})
	}
	if r != nil {
		if isDirectIface(t) {
			writebarrierptr((*uintptr)(r), uintptr(i.data))
		} else {
			typedmemmove(t, r, i.data)
		}
	}
}

func assertI2T2(t *_type, i iface, r unsafe.Pointer) bool {
	tab := i.tab
	if tab == nil || tab._type != t {
		if r != nil {
			memclr(r, uintptr(t.size))
		}
		return false
	}
	if r != nil {
		if isDirectIface(t) {
			writebarrierptr((*uintptr)(r), uintptr(i.data))
		} else {
			typedmemmove(t, r, i.data)
		}
	}
	return true
}

func assertE2T(t *_type, e eface, r unsafe.Pointer) {
	if e._type == nil {
		panic(&TypeAssertionError{"", "", t._string, ""})
	}
	if e._type != t {
		panic(&TypeAssertionError{"", e._type._string, t._string, ""})
	}
	if r != nil {
		if isDirectIface(t) {
			writebarrierptr((*uintptr)(r), uintptr(e.data))
		} else {
			typedmemmove(t, r, e.data)
		}
	}
}

var testingAssertE2T2GC bool

// The compiler ensures that r is non-nil.
func assertE2T2(t *_type, e eface, r unsafe.Pointer) bool {
	if testingAssertE2T2GC {
		GC()
	}
	if e._type != t {
		memclr(r, uintptr(t.size))
		return false
	}
	if isDirectIface(t) {
		writebarrierptr((*uintptr)(r), uintptr(e.data))
	} else {
		typedmemmove(t, r, e.data)
	}
	return true
}

func convI2E(i iface) (r eface) {
	tab := i.tab
	if tab == nil {
		return
	}
	r._type = tab._type
	r.data = i.data
	return
}

func assertI2E(inter *interfacetype, i iface, r *eface) {
	tab := i.tab
	if tab == nil {
		// explicit conversions require non-nil interface value.
		panic(&TypeAssertionError{"", "", inter.typ._string, ""})
	}
	r._type = tab._type
	r.data = i.data
	return
}

// The compiler ensures that r is non-nil.
func assertI2E2(inter *interfacetype, i iface, r *eface) bool {
	tab := i.tab
	if tab == nil {
		return false
	}
	r._type = tab._type
	r.data = i.data
	return true
}

func convI2I(inter *interfacetype, i iface) (r iface) {
	tab := i.tab
	if tab == nil {
		return
	}
	if tab.inter == inter {
		r.tab = tab
		r.data = i.data
		return
	}
	r.tab = getitab(inter, tab._type, false)
	r.data = i.data
	return
}

func assertI2I(inter *interfacetype, i iface, r *iface) {
	tab := i.tab
	if tab == nil {
		// explicit conversions require non-nil interface value.
		panic(&TypeAssertionError{"", "", inter.typ._string, ""})
	}
	if tab.inter == inter {
		r.tab = tab
		r.data = i.data
		return
	}
	r.tab = getitab(inter, tab._type, false)
	r.data = i.data
}

func assertI2I2(inter *interfacetype, i iface, r *iface) bool {
	tab := i.tab
	if tab == nil {
		if r != nil {
			*r = iface{}
		}
		return false
	}
	if tab.inter != inter {
		tab = getitab(inter, tab._type, true)
		if tab == nil {
			if r != nil {
				*r = iface{}
			}
			return false
		}
	}
	if r != nil {
		r.tab = tab
		r.data = i.data
	}
	return true
}

func assertE2I(inter *interfacetype, e eface, r *iface) {
	t := e._type
	if t == nil {
		// explicit conversions require non-nil interface value.
		panic(&TypeAssertionError{"", "", inter.typ._string, ""})
	}
	r.tab = getitab(inter, t, false)
	r.data = e.data
}

var testingAssertE2I2GC bool

func assertE2I2(inter *interfacetype, e eface, r *iface) bool {
	if testingAssertE2I2GC {
		GC()
	}
	t := e._type
	if t == nil {
		if r != nil {
			*r = iface{}
		}
		return false
	}
	tab := getitab(inter, t, true)
	if tab == nil {
		if r != nil {
			*r = iface{}
		}
		return false
	}
	if r != nil {
		r.tab = tab
		r.data = e.data
	}
	return true
}

//go:linkname reflect_ifaceE2I reflect.ifaceE2I
func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	assertE2I(inter, e, dst)
}

func assertE2E(inter *interfacetype, e eface, r *eface) {
	if e._type == nil {
		// explicit conversions require non-nil interface value.
		panic(&TypeAssertionError{"", "", inter.typ._string, ""})
	}
	*r = e
}

// The compiler ensures that r is non-nil.
func assertE2E2(inter *interfacetype, e eface, r *eface) bool {
	if e._type == nil {
		*r = eface{}
		return false
	}
	*r = e
	return true
}

func iterate_itabs(fn func(*itab)) {
	for _, h := range &hash {
		for ; h != nil; h = h.link {
			fn(h)
		}
	}
}
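
Beyond the cached lookup, the interesting part of this older getitab is the search loop: both inter.mhdr and x.mhdr are sorted by name, so one forward pass over the concrete type's methods serves all interface methods, making the match O(ni+nt) rather than O(ni*nt). Here is a reduced sketch of that lock-step scan over plain sorted string slices; matchMethods is an invented name, and the real loop also compares method types and package paths.

package main

import "fmt"

// matchMethods mirrors the search loop in getitab: because both method
// lists are sorted by name, the index j into the type's methods only ever
// moves forward, so every interface method is found (or reported missing)
// in a single combined pass.
func matchMethods(iface, typ []string) (missing string, ok bool) {
	j := 0
	for _, want := range iface {
		for ; j < len(typ); j++ {
			if typ[j] == want {
				break
			}
		}
		if j == len(typ) {
			return want, false // ran off the end: method not implemented
		}
	}
	return "", true
}

func main() {
	typeMethods := []string{"Close", "Read", "Seek", "Write"}
	fmt.Println(matchMethods([]string{"Read", "Write"}, typeMethods)) // (empty) true: all found
	fmt.Println(matchMethods([]string{"Flush", "Read"}, typeMethods)) // Flush false
}
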
Example #14
// mapaccess2_faststr is the specialization of mapaccess2 for string keys.
// It tries several cheap comparisons (length, pointer equality, first and
// last bytes) before falling back to the hashed lookup at dohash.
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
				}
			}
			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches.  Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
			if memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
		}
	}
}
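
The string specialization is mostly about avoiding full comparisons: equal length first, then pointer equality, and for long keys the first and last four bytes, with a single surviving candidate checked in full and a fall-back to the normal hashed path when two candidates survive. The sketch below shows those filters in plain Go, without the bucket layout; likelyEqual and findKey are invented names, and the sketch simply compares every surviving candidate in full instead of tracking a single keymaybe slot.

package main

import "fmt"

// likelyEqual applies the cheap filters mapaccess2_faststr uses before a
// full comparison: equal lengths, then (for keys of at least four bytes)
// the first and last four bytes.
func likelyEqual(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	if len(a) >= 4 && (a[:4] != b[:4] || a[len(a)-4:] != b[len(b)-4:]) {
		return false
	}
	return true
}

// findKey scans a slice of keys, paying for a byte-for-byte comparison only
// when a key survives the filters (the runtime additionally remembers one
// "maybe" slot and switches to hashing if a second candidate appears).
func findKey(keys []string, key string) (int, bool) {
	for i, k := range keys {
		if !likelyEqual(k, key) {
			continue
		}
		if k == key {
			return i, true
		}
	}
	return -1, false
}

func main() {
	keys := []string{"alpha-key-0001", "alpha-key-0002", "beta"}
	fmt.Println(findKey(keys, "alpha-key-0002")) // 1 true
	fmt.Println(findKey(keys, "gamma"))          // -1 false
}
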