Example #1
func regenPowChallenges() {
	c := time.Tick(100 * time.Millisecond)
	var deltaT float64
	for range c {
		barrier := unixTime >> powRegenIntervalBits
		if curPowCollection.barrier < barrier {
			logMessage("Activating ProofOfWork challenges (barrier: %d)", barrier)
			atomic.StoreUintptr(
				(*uintptr)(unsafe.Pointer(&prevPowCollection)),
				(uintptr)(unsafe.Pointer(curPowCollection)),
			)
			atomic.StoreUintptr(
				(*uintptr)(unsafe.Pointer(&curPowCollection)),
				(uintptr)(unsafe.Pointer(nextPowCollection)),
			)

			timeToNextRun := float64(((barrier + 1) << powRegenIntervalBits) - unixTime)
			if (deltaT * 1.05) > timeToNextRun {
				logMessage(`WARNING: Last run (%.2fs) we were out of sync. `+
					`Sleeping remainder of cycle (%.2fs), hoping to get back in sync`,
					deltaT, timeToNextRun*1.05)
				time.Sleep(time.Duration(timeToNextRun*1.05) * time.Second)
				continue
			}
		}

		if nextPowCollection.barrier <= barrier {
			deltaT = updateNextPowCollection(barrier)
		}
	}
}
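The two inner StoreUintptr calls publish *powCollection pointers by smuggling them through uintptr, a pattern go vet flags because the pointers become invisible to the garbage collector. A minimal sketch of the same rotation using Go 1.19's atomic.Pointer (the powCollection shape here is a hypothetical stand-in, and sync/atomic is assumed imported):

// powCollection is a hypothetical stand-in for the excerpt's challenge set.
type powCollection struct{ barrier uint64 }

var (
	prevPow atomic.Pointer[powCollection]
	curPow  atomic.Pointer[powCollection]
	nextPow atomic.Pointer[powCollection]
)

// rotate publishes the pre-built next collection as current, keeping the
// old current collection reachable for clients still holding challenges.
func rotate() {
	prevPow.Store(curPow.Load())
	curPow.Store(nextPow.Load())
}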
Example #2
//export hookGoValuePaint
func hookGoValuePaint(enginep, foldp unsafe.Pointer, reflectIndex C.intptr_t) {
	fold := ensureEngine(enginep, foldp)
	v := reflect.ValueOf(fold.gvalue)

	// The main GUI thread is mutex-locked while paint methods are called,
	// so no two paintings should be happening at the same time.
	atomic.StoreUintptr(&guiPaintRef, tref.Ref())

	painter := &Painter{fold.engine, &Common{fold.cvalue, fold.engine}}

	method := v.Method(int(reflectIndex))
	method.Call([]reflect.Value{reflect.ValueOf(painter)})

	atomic.StoreUintptr(&guiPaintRef, 0)
}
Example #3
func BenchmarkAtomicUintPtr(b *testing.B) {
	b.StopTimer()
	pointers := make([]uintptr, 1000)
	b.Log(unsafe.Sizeof(pointers[0]))
	b.StartTimer()

	for j := 0; j < b.N; j++ {
		atomic.StoreUintptr(&pointers[j%1000], uintptr(j))
	}
}
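Run with go test -bench=UintPtr (the excerpt assumes testing, sync/atomic, and unsafe are imported). To put the numbers in context, a hypothetical non-atomic baseline in the same file would isolate the overhead of the atomic store:

// BenchmarkPlainUintPtr is a hypothetical companion baseline: the same
// slice store without the atomic, so the difference between the two
// results is the cost of atomic.StoreUintptr itself.
func BenchmarkPlainUintPtr(b *testing.B) {
	pointers := make([]uintptr, 1000)
	for j := 0; j < b.N; j++ {
		pointers[j%1000] = uintptr(j)
	}
}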
Example #4
//export hookGoValuePaint
func hookGoValuePaint(enginep, foldp unsafe.Pointer, reflectIndex C.intptr_t) {
	// Besides a convenience this is a workaround for http://golang.org/issue/8588
	defer printPaintPanic()
	defer atomic.StoreUintptr(&guiPaintRef, 0)

	// The main GUI thread is mutex-locked while paint methods are called,
	// so no two paintings should be happening at the same time.
	atomic.StoreUintptr(&guiPaintRef, cdata.Ref())

	fold := ensureEngine(enginep, foldp)
	if fold.init.IsValid() {
		return
	}

	painter := &Painter{engine: fold.engine, obj: &Common{fold.cvalue, fold.engine, newConnections()}}
	v := reflect.ValueOf(fold.gvalue)
	method := v.Method(int(reflectIndex))
	method.Call([]reflect.Value{reflect.ValueOf(painter)})
}
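In both versions guiPaintRef doubles as an "is a paint in flight" flag: non-zero while a paint method runs, zero otherwise. A minimal sketch of how other code in the same package could consult it (insidePaint is a hypothetical helper, not part of the excerpted package):

// insidePaint reports whether a paint method is currently executing, by
// checking the reference published in guiPaintRef. Hypothetical helper.
func insidePaint() bool {
	return atomic.LoadUintptr(&guiPaintRef) != 0
}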
Example #5
func sysTickHandler() {
	// Ticks live in a two-slot buffer indexed by the low bit of ticksABA:
	// read the current slot, write the incremented count into the other
	// slot, then publish the new index after a memory barrier so a reader
	// can never observe a half-written value.
	aba := atomic.LoadUintptr(&ticksABA)
	t := ticks[aba&1]
	aba++
	ticks[aba&1] = t + 1
	barrier.Memory()
	atomic.StoreUintptr(&ticksABA, aba)
	tickEvent.Send()

	// Request a PendSV exception so the scheduler runs if it is driven by
	// the system tick.
	if tasker.onSysTick {
		exce.PendSV.SetPending()
	}
}
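The handler is the writer side of a seqlock-like protocol: read the current slot, write the bumped count into the other slot, fence, publish the new index. A reader must mirror that, retrying when the index moves underneath it; a sketch under the assumption that ticks is a two-element uint64 array:

// loadTicks is a hypothetical reader for the two-slot tick buffer: it
// rereads ticksABA after reading the slot and retries if the writer
// published a new index in between.
func loadTicks() uint64 {
	for {
		aba := atomic.LoadUintptr(&ticksABA)
		barrier.Memory()
		t := ticks[aba&1]
		barrier.Memory()
		if atomic.LoadUintptr(&ticksABA) == aba {
			return t
		}
	}
}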
Example #6
func (q *StreamQueue) Push(data []byte) {
	// q.tail is stored as a uintptr, so convert it back to a *Node. Note
	// that go vet flags this round-trip: a uintptr does not keep the node
	// alive for the garbage collector.
	tail := (*Node)(unsafe.Pointer(q.tail))
	tail.data = data
	defer tail.Unlock()

	if data != nil {
		// Append a new, locked tail node; it stays locked until the next
		// Push fills it.
		n := &Node{}
		n.Lock()

		atomic.StoreUintptr(&q.tail, uintptr(unsafe.Pointer(n)))
	}
}
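Because q.tail lives in a uintptr, nothing in Push keeps the tail node visible to the garbage collector. A sketch of the same hand-off using Go 1.19's atomic.Pointer, which drops the unsafe round-trip entirely (the Node and queue shapes are assumptions carried over from the excerpt; sync and sync/atomic are assumed imported):

// Node and StreamQueue mirror the excerpt's shapes, but the tail is a
// GC-visible atomic pointer instead of a uintptr.
type Node struct {
	sync.Mutex
	data []byte
}

type StreamQueue struct {
	tail atomic.Pointer[Node]
}

func (q *StreamQueue) Push(data []byte) {
	tail := q.tail.Load()
	tail.data = data
	defer tail.Unlock()

	if data != nil {
		n := &Node{}
		n.Lock()
		q.tail.Store(n)
	}
}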
Example #7
func (p *Process) release() error {
	handle := atomic.LoadUintptr(&p.handle)
	if handle == uintptr(syscall.InvalidHandle) {
		return syscall.EINVAL
	}
	e := syscall.CloseHandle(syscall.Handle(handle))
	if e != nil {
		return NewSyscallError("CloseHandle", e)
	}
	atomic.StoreUintptr(&p.handle, uintptr(syscall.InvalidHandle))
	// no need for a finalizer anymore
	runtime.SetFinalizer(p, nil)
	return nil
}
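As written, two goroutines calling release concurrently can both read a still-valid handle and close it twice. A hedged sketch, not the os package's actual code, that claims the handle with a compare-and-swap before closing:

func (p *Process) release() error {
	for {
		handle := atomic.LoadUintptr(&p.handle)
		if handle == uintptr(syscall.InvalidHandle) {
			return syscall.EINVAL
		}
		// Only one caller wins the swap; losers retry and then observe
		// InvalidHandle above.
		if atomic.CompareAndSwapUintptr(&p.handle, handle, uintptr(syscall.InvalidHandle)) {
			runtime.SetFinalizer(p, nil) // no need for a finalizer anymore
			if e := syscall.CloseHandle(syscall.Handle(handle)); e != nil {
				return NewSyscallError("CloseHandle", e)
			}
			return nil
		}
	}
}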
Example #8
func updateNextPowCollection(barrier uint64) (deltaT float64) {
	t := time.Now()
	atomic.StoreUintptr(
		(*uintptr)(unsafe.Pointer(&nextPowCollection)),
		(uintptr)(unsafe.Pointer(newPowCollection(barrier+1))),
	)
	deltaT = time.Since(t).Seconds()
	logMessage("Created next set of ProofOfWork challenges in %.2fs", deltaT)
	intervalSeconds := math.Pow(2, float64(powRegenIntervalBits))
	if deltaT >= intervalSeconds {
		logMessage(
			"WARNING: Generating new Proof of Work challenges took longer (%.2fs) than the set interval (%.2fs)",
			deltaT,
			intervalSeconds)
	}

	return
}
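powRegenIntervalBits does double duty here: a shift amount for deriving barriers from unixTime, and an exponent for the interval length. With an illustrative value of 6 (not taken from the excerpt), a barrier lasts 2^6 = 64 seconds, and an integer shift gives the same interval without math.Pow:

const powRegenIntervalBits = 6 // illustrative value only

// intervalSeconds computes the barrier length without going through
// math.Pow: 1<<6 == 2^6 == 64 seconds.
func intervalSeconds() float64 {
	return float64(uint64(1) << powRegenIntervalBits)
}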
Example #9
// TryDequeue dequeues a value from our queue. If the queue is empty, this
// will return failure.
func (q *Queue) TryDequeue() (ptr unsafe.Pointer, dequeued bool) {
	var c *cell
	// Race load our deqPos,
	pos := atomic.LoadUintptr(&q.deqPos)
	for {
		// load the cell at that deqPos,
		c = &q.cells[pos&q.mask]
		// load the sequence number in that cell,
		seq := atomic.LoadUintptr(&c.seq)
		// and, if the sequence number is (deqPos + 1), we have an
		// enqueued value to dequeue.
		cmp := int(seq - (pos + 1))
		if cmp == 0 {
			var swapped bool
			// Try to claim the deqPos to ourselves to dequeue,
			// updating pos to the new value.
			if pos, swapped = primitive.CompareAndSwapUintptr(&q.deqPos, pos, pos+1); swapped {
				dequeued = true
				break
			}
			continue
		}
		if cmp < 0 {
			// If the sequence number was less than deqPos + 1,
			// the queue is empty.
			return
		}
		// If the sequence number was larger than (deqPos+1),
		// somebody else just updated the sequence number and
		// our loaded deqPos is out of date.
		pos = atomic.LoadUintptr(&q.deqPos)
	}
	// We have won the race and can dequeue - grab the pointer.
	ptr = c.ptr
	c.ptr = primitive.Null
	// Update the cell's sequence number for the next enqueue.
	atomic.StoreUintptr(&c.seq, pos+q.mask)
	return
}
Example #10
func (p *Pool) pinSlow() *poolLocal {
	// Retry under the mutex.
	runtime_procUnpin()
	p.mu.Lock()
	defer p.mu.Unlock()
	pid := runtime_procPin()
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	if p.local == nil {
		p.globalOffset = unsafe.Offsetof(p.global)
		runtime_registerPool(p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.local)), unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))                                            // store-release
	return &local[pid]
}
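pinSlow defers the per-P addressing to indexLocal, which the excerpt does not show; in Go sources of this vintage it was roughly a cast through an oversized array type:

// indexLocal returns the i'th poolLocal in the array at l. The huge array
// type exists only to give the cast a typed view for indexing; no such
// array is ever allocated.
func indexLocal(l unsafe.Pointer, i int) *poolLocal {
	return &(*[1000000]poolLocal)(l)[i]
}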
Example #11
// TryEnqueue adds a value to our queue. TryEnqueue takes an unsafe.Pointer to
// avoid the necessity of wrapping a heap allocated value in an interface,
// which also goes on the heap. If the queue is full, this will return failure.
func (q *Queue) TryEnqueue(ptr unsafe.Pointer) (enqueued bool) {
	var c *cell
	// Race load our enqPos,
	pos := atomic.LoadUintptr(&q.enqPos)
	for {
		// load the cell at that enqPos,
		c = &q.cells[pos&q.mask]
		// load the sequence number in that cell,
		seq := atomic.LoadUintptr(&c.seq)
		// and, if the sequence number is (enqPos), we have a spot to
		// enqueue into.
		cmp := int(seq - pos)
		if cmp == 0 {
			var swapped bool
			// Try to claim the enqPos to ourselves to enqueue,
			// updating pos to the new value.
			if pos, swapped = primitive.CompareAndSwapUintptr(&q.enqPos, pos, pos+1); swapped {
				enqueued = true
				break
			}
			continue
		}
		if cmp < 0 {
			// If the sequence number was less than enqPos, the
			// queue is full.
			return
		}
		// If the sequence number was larger than enqPos,
		// somebody else just updated the sequence number and
		// our loaded enqPos is out of date.
		pos = atomic.LoadUintptr(&q.enqPos)
	}
	// We have won the race and can enqueue - set the pointer.
	c.ptr = ptr
	// Update the cell's sequence number for dequeueing.
	atomic.StoreUintptr(&c.seq, pos)
	return
}
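A usage sketch for the TryEnqueue/TryDequeue pair from examples #9 and #11. The NewQueue constructor and its power-of-two capacity are assumptions, since the excerpts do not show how cells and mask are initialized:

func main() {
	// Assumed: NewQueue allocates the cells and sets mask to capacity-1,
	// with capacity a power of two.
	q := NewQueue(1024)

	v := 42
	if q.TryEnqueue(unsafe.Pointer(&v)) {
		if ptr, dequeued := q.TryDequeue(); dequeued {
			fmt.Println(*(*int)(ptr)) // prints 42
		}
	}
}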
Example #12
func (p *Pools) pinSlow() *poolsLocal {
	// Retry under the mutex.
	// Can not lock the mutex while pinned.
	runtime_procUnpin()
	allPoolxsMu.Lock()
	defer allPoolxsMu.Unlock()
	pid := runtime_procPin()
	// poolsCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		return indexLocals(l, pid)
	}
	if p.local == nil {
		allPoolxs = append(allPoolxs, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolsLocal, size)
	atomic.StorePointer((*unsafe.Pointer)(&p.local), unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))                            // store-release
	return &local[pid]
}
Example #13
func (p *Pool) pinSlow() *poolLocal {
	// Retry under the mutex.
	// Can not lock the mutex while pinned.
	runtime_procUnpin() // must unpin before taking allPoolsMu
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock() // everything below runs under allPoolsMu
	pid := runtime_procPin()  // re-acquire this P's id
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s { // try to fetch this P's poolLocal
		return indexLocal(l, pid)
	}
	if p.local == nil { // first use: register this Pool in allPools
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)                                               // the number of Ps changed; drop the old poolLocal slots
	local := make([]poolLocal, size)                                            // allocate one poolLocal per P
	atomic.StorePointer((*unsafe.Pointer)(&p.local), unsafe.Pointer(&local[0])) // store-release: publish the array
	atomic.StoreUintptr(&p.localSize, uintptr(size))                            // store-release: publish the size
	return &local[pid]                                                          // return this P's poolLocal
}
Example #14
func (p *Pool) pinSlow() *poolLocal {
	// Retry under the mutex.
	// Can not lock the mutex while pinned.
	runtime_procUnpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	// Pin the current goroutine to its P and disable preemption, returning the P's ID.
	// This guards against the P changing if the goroutine is descheduled and resumed.
	pid := runtime_procPin()
	// poolCleanup won't be called while we are pinned.
	// p.localSize is the number of per-P slots; p.local points at them.
	s := p.localSize
	l := p.local
	// If pid < s, a slot already exists for this P; index into it.
	// Otherwise the local array is missing or smaller than the P count.
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	// If p.local is nil the pool has never been used: register it in allPools.
	if p.local == nil {
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	// Get the current number of Ps.
	size := runtime.GOMAXPROCS(0)
	// Allocate one poolLocal per P.
	local := make([]poolLocal, size)
	// Publish the poolLocal array and its size to the Pool.
	atomic.StorePointer((*unsafe.Pointer)(&p.local), unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))                            // store-release
	// Finally, return this P's slot by its id.
	return &local[pid]
}
Example #15
// StoreUintptr is a pass-through wrapper that delegates to the underlying
// sync/atomic implementation.
func StoreUintptr(addr *uintptr, val uintptr) {
	orig.StoreUintptr(addr, val)
}
Example #16
// Enable checking of invariants when locking and unlocking InvariantMutex.
func EnableInvariantChecking() {
	atomic.StoreUintptr(&gEnable, 1)
}
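The flag is consulted on the locking side. A minimal sketch of an invariant-checking mutex built around gEnable (this shape is illustrative, not the package's actual definition; sync and sync/atomic are assumed imported):

// InvariantMutex is an illustrative shape: a mutex paired with a check
// function that runs on Lock and Unlock only while gEnable is non-zero.
type InvariantMutex struct {
	mu    sync.Mutex
	check func()
}

func (i *InvariantMutex) Lock() {
	i.mu.Lock()
	if atomic.LoadUintptr(&gEnable) != 0 {
		i.check()
	}
}

func (i *InvariantMutex) Unlock() {
	if atomic.LoadUintptr(&gEnable) != 0 {
		i.check()
	}
	i.mu.Unlock()
}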