Example #1
func loadTicks() uint64 {
	aba := atomic.LoadUintptr(&ticksABA)
	for {
		barrier.Compiler()
		t := ticks[aba&1]
		barrier.Compiler()
		aba1 := atomic.LoadUintptr(&ticksABA)
		if aba == aba1 {
			return t
		}
		aba = aba1
	}
}
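
The reader above pairs with the single writer in Example #14: the writer fills the buffer slot readers are not using, then flips ticksABA to publish it. A minimal portable sketch of the same double-buffered, seqlock-style pattern (names are illustrative, and atomics stand in for the compiler barriers, which only suffice on the uniprocessor targets the original runs on):

package main

import (
	"fmt"
	"sync/atomic"
)

// A double-buffered value published by one writer and read by many readers.
var (
	seq  uintptr   // counter; its low bit selects the active buffer slot
	bufs [2]uint64 // two slots, so the writer never overwrites the slot being read
)

// write fills the inactive slot, then flips seq so readers switch to it.
// Only one goroutine may call write.
func write(v uint64) {
	s := atomic.LoadUintptr(&seq)
	atomic.StoreUint64(&bufs[(s+1)&1], v)
	atomic.StoreUintptr(&seq, s+1) // publish: the slot write happens-before this store
}

// read retries until seq is unchanged across the slot load, exactly like
// loadTicks above.
func read() uint64 {
	s := atomic.LoadUintptr(&seq)
	for {
		v := atomic.LoadUint64(&bufs[s&1])
		s2 := atomic.LoadUintptr(&seq)
		if s == s2 {
			return v
		}
		s = s2
	}
}

func main() {
	write(42)
	fmt.Println(read()) // prints 42
}
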
Example #2
File: block.go Project: twmb/dash
// Prime, called before an operation that may fail, returns the value you will
// call Wait with. If you do not call Wait, and this call successfully primed
// the block, you must call Cancel.
func (b *Block) Prime(last uintptr) (primer uintptr, primed bool) {
	primer = atomic.LoadUintptr(&b.counter)
	if primer != last {
		return
	}
	runtime.Gosched()
	primer = atomic.LoadUintptr(&b.counter)
	if primer != last || atomic.LoadUint32(&b.lock.write) != 0 {
		return
	}
	primed = true
	atomic.AddInt32(&b.waiters, 1)
	return
}
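
A sketch of the intended call pattern, pieced together from the doc comment (try is a hypothetical operation that may fail; Cancel is assumed to undo the waiter registration made by a successful Prime):

func acquire(b *Block, try func() (uintptr, bool)) uintptr {
	var last uintptr
	for {
		if v, ok := try(); ok {
			return v
		}
		primer, primed := b.Prime(last)
		if !primed {
			last = primer // state moved on; re-check the operation first
			continue
		}
		// Re-check after priming: if the operation succeeds now, we must
		// Cancel rather than Wait, since Prime incremented the waiter count.
		if v, ok := try(); ok {
			b.Cancel()
			return v
		}
		b.Wait(primer)
		last = primer
	}
}
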
Example #3
// getSlow tries to steal an element from another P's poolsLocal shared list,
// falling back to p.New when every list is empty.
func (p *Pools) getSlow() (x interface{}) {
	// See the comment in pin regarding ordering of the loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	local := p.local                         // load-consume
	// Try to steal one element from other procs.
	pid := runtime_procPin()
	runtime_procUnpin()
	for i := 0; i < int(size); i++ {
		l := indexLocals(local, (pid+i+1)%int(size))
		l.Lock()
		last := len(l.shared) - 1
		if last >= 0 {
			x = l.shared[last]
			l.shared = l.shared[:last]
			l.Unlock()
			break
		}
		l.Unlock()
	}

	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}
Example #4
// lazyinit creates an I/O completion port and starts the main event processing
// loop. This method uses Double-Checked Locking optimization.
func (r *readdcw) lazyinit() (err error) {
	invalid := uintptr(syscall.InvalidHandle)
	if atomic.LoadUintptr((*uintptr)(&r.cph)) == invalid {
		r.Lock()
		defer r.Unlock()
		if atomic.LoadUintptr((*uintptr)(&r.cph)) == invalid {
			cph := syscall.InvalidHandle
			if cph, err = syscall.CreateIoCompletionPort(cph, 0, 0, 0); err != nil {
				return
			}
			// Note: this plain store pairs with the atomic fast-path load
			// above only because it is a single word-sized write made under
			// the lock; an atomic store would be strictly correct.
			r.cph, r.start = cph, true
			go r.loop()
		}
	}
	return
}
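
In portable Go, sync.Once expresses the same double-checked locking shape without hand-rolled atomics; internally it takes an atomic fast path and falls back to a mutex. A sketch of the equivalent structure (illustrative only, not the project's code):

import (
	"sync"
	"syscall"
)

type lazyPort struct {
	once sync.Once
	cph  syscall.Handle
	err  error
}

// get creates the completion port on first call; later calls only pay for
// an atomic load inside once.Do.
func (l *lazyPort) get() (syscall.Handle, error) {
	l.once.Do(func() {
		l.cph, l.err = syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
	})
	return l.cph, l.err
}
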
Example #5
func (output *ForwardOutput) sendBuffer(buf []byte) error {
	for len(buf) > 0 {
		if atomic.LoadUintptr(&output.isShuttingDown) != 0 {
			break
		}
		err := output.ensureConnected()
		if err != nil {
			output.logger.Info("Will be retried in %s", output.retryInterval.String())
			time.Sleep(output.retryInterval)
			continue
		}
		startTime := time.Now()
		if output.writeTimeout == 0 {
			output.conn.SetWriteDeadline(time.Time{})
		} else {
			output.conn.SetWriteDeadline(startTime.Add(output.writeTimeout))
		}
		n, err := output.conn.Write(buf)
		buf = buf[n:]
		if err != nil {
			output.logger.Error("Failed to flush buffer (reason: %s, left: %d bytes)", err.Error(), len(buf))
			netErr, ok := err.(net.Error)
			if !ok || (!netErr.Timeout() && !netErr.Temporary()) {
				output.conn.Close()
				output.conn = nil
				continue
			}
		}
		if n > 0 {
			elapsed := time.Since(startTime)
			output.logger.Info("Forwarded %d bytes in %f seconds (%d bytes left)\n", n, elapsed.Seconds(), len(buf))
		}
	}
	return nil
}
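
isShuttingDown is a uintptr used as an atomic boolean: sendBuffer stops at the next iteration once it becomes non-zero. The matching store is not shown; it presumably looks like this (the method name is an assumption):

func (output *ForwardOutput) requestShutdown() {
	atomic.StoreUintptr(&output.isShuttingDown, 1)
}
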
Example #6
func (p *Process) wait() (ps *ProcessState, err error) {
	handle := atomic.LoadUintptr(&p.handle)
	s, e := syscall.WaitForSingleObject(syscall.Handle(handle), syscall.INFINITE)
	switch s {
	case syscall.WAIT_OBJECT_0:
		break
	case syscall.WAIT_FAILED:
		return nil, NewSyscallError("WaitForSingleObject", e)
	default:
		return nil, errors.New("os: unexpected result from WaitForSingleObject")
	}
	var ec uint32
	e = syscall.GetExitCodeProcess(syscall.Handle(handle), &ec)
	if e != nil {
		return nil, NewSyscallError("GetExitCodeProcess", e)
	}
	var u syscall.Rusage
	e = syscall.GetProcessTimes(syscall.Handle(handle), &u.CreationTime, &u.ExitTime, &u.KernelTime, &u.UserTime)
	if e != nil {
		return nil, NewSyscallError("GetProcessTimes", e)
	}
	p.setDone()
	// NOTE(brainman): It seems that sometimes process is not dead
	// when WaitForSingleObject returns. But we do not know any
	// other way to wait for it. Sleeping for a while seems to do
	// the trick sometimes. So we will sleep and smell the roses.
	defer time.Sleep(5 * time.Millisecond)
	defer p.Release()
	return &ProcessState{p.Pid, syscall.WaitStatus{ExitCode: ec}, &u}, nil
}
Example #7
// initGoType initializes the Go type for fold. When called from the render
// (paint) thread it hands the work off to the main GUI thread via RunMain;
// otherwise it runs inline.
func initGoType(fold *valueFold) {
	if cdata.Ref() == atomic.LoadUintptr(&guiPaintRef) {
		go RunMain(func() { _initGoType(fold, true) })
	} else {
		_initGoType(fold, false)
	}
}
Example #8
/*
Try to steal an element from another P's poolLocal shared list.
*/
func (p *Pool) getSlow() (x interface{}) {
	// See the comment in pin regarding ordering of the loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	local := p.local                         // load-consume
	// Try to steal one element from other procs.
	pid := runtime_procPin()
	runtime_procUnpin()
	for i := 0; i < int(size); i++ {
		// Iterate over the other Ps' poolLocals.
		l := indexLocal(local, (pid+i+1)%int(size))
		l.Lock()
		last := len(l.shared) - 1
		// If this P's poolLocal has a non-empty shared list,
		// take the last element and stop.
		if last >= 0 {
			x = l.shared[last]
			l.shared = l.shared[:last]
			l.Unlock()
			break
		}
		l.Unlock()
	}

	// If no P's poolLocal had anything to steal,
	// create a new element.
	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}
Example #9
// Peek returns the data of the tail node without dequeuing it.
// Note: round-tripping a pointer through uintptr hides it from the garbage
// collector; an unsafe.Pointer field read with atomic.LoadPointer is safer.
func (q *StreamQueue) Peek() []byte {
	tail := (*Node)(unsafe.Pointer(atomic.LoadUintptr(&q.tail)))

	tail.RLock()
	defer tail.RUnlock()

	return tail.data
}
Example #10
// wrapGoValue creates a new GoValue object in C++ land wrapping
// the Go value contained in the given interface.
//
// This must be run from the main GUI thread.
func wrapGoValue(engine *Engine, gvalue interface{}, owner valueOwner) (cvalue unsafe.Pointer) {
	gvaluev := reflect.ValueOf(gvalue)
	gvaluek := gvaluev.Kind()
	if gvaluek == reflect.Struct && !hashable(gvalue) {
		name := gvaluev.Type().Name()
		if name != "" {
			name = " (" + name + ")"
		}
		panic("cannot hand an unhashable struct value" + name + " to QML logic; use its address instead")
	}
	if gvaluek == reflect.Ptr && gvaluev.Elem().Kind() == reflect.Ptr {
		panic("cannot hand pointer of pointer to QML logic; use a simple pointer instead")
	}

	painting := cdata.Ref() == atomic.LoadUintptr(&guiPaintRef)

	// Cannot reuse a jsOwner because the QML runtime may choose to destroy
	// the value _after_ we hand it a new reference to the same value.
	// See issue #68 for details.
	prev, ok := engine.values[gvalue]
	if ok && (prev.owner == cppOwner || painting) {
		return prev.cvalue
	}

	if painting {
		panic("cannot allocate new objects while painting")
	}

	parent := nilPtr
	if owner == cppOwner {
		parent = engine.addr
	}
	fold := &valueFold{
		engine: engine,
		gvalue: gvalue,
		owner:  owner,
	}
	fold.cvalue = C.newGoValue(unsafe.Pointer(fold), typeInfo(gvalue), parent)
	if prev != nil {
		// Put new fold first so the single cppOwner, if any, is always the first entry.
		fold.next = prev
		prev.prev = fold
	}
	engine.values[gvalue] = fold

	//fmt.Printf("[DEBUG] value alive (wrapped): cvalue=%x gvalue=%x/%#v\n", fold.cvalue, addrOf(fold.gvalue), fold.gvalue)
	stats.valuesAlive(+1)
	C.engineSetContextForObject(engine.addr, fold.cvalue)
	switch owner {
	case cppOwner:
		C.engineSetOwnershipCPP(engine.addr, fold.cvalue)
	case jsOwner:
		C.engineSetOwnershipJS(engine.addr, fold.cvalue)
	}
	return fold.cvalue
}
Example #11
// wrapGoValue creates a new GoValue object in C++ land wrapping
// the Go value contained in the given interface.
//
// This must be run from the main GUI thread.
func wrapGoValue(engine *Engine, gvalue interface{}, owner valueOwner) (cvalue unsafe.Pointer) {
	gvaluev := reflect.ValueOf(gvalue)
	gvaluek := gvaluev.Kind()
	if gvaluek == reflect.Struct && !hashable(gvalue) {
		name := gvaluev.Type().Name()
		if name != "" {
			name = " (" + name + ")"
		}
		panic("cannot hand an unhashable struct value" + name + " to QML logic; use its address instead")
	}
	if gvaluek == reflect.Ptr && gvaluev.Elem().Kind() == reflect.Ptr {
		panic("cannot hand pointer of pointer to QML logic; use a simple pointer instead")
	}

	painting := tref.Ref() == atomic.LoadUintptr(&guiPaintRef)

	prev, ok := engine.values[gvalue]
	if ok && (prev.owner == owner || owner != cppOwner || painting) {
		return prev.cvalue
	}

	if painting {
		panic("cannot allocate new objects while painting")
	}

	parent := nilPtr
	if owner == cppOwner {
		parent = engine.addr
	}
	fold := &valueFold{
		engine: engine,
		gvalue: gvalue,
		owner:  owner,
	}
	fold.cvalue = C.newGoValue(unsafe.Pointer(fold), typeInfo(gvalue), parent)
	if prev != nil {
		prev.next = fold
		fold.prev = prev
	} else {
		engine.values[gvalue] = fold
	}
	//fmt.Printf("[DEBUG] value alive (wrapped): cvalue=%x gvalue=%x/%#v\n", fold.cvalue, addrOf(fold.gvalue), fold.gvalue)
	stats.valuesAlive(+1)
	C.engineSetContextForObject(engine.addr, fold.cvalue)
	switch owner {
	case cppOwner:
		C.engineSetOwnershipCPP(engine.addr, fold.cvalue)
	case jsOwner:
		C.engineSetOwnershipJS(engine.addr, fold.cvalue)
	}
	return fold.cvalue
}
Example #12
// pin pins the current goroutine to P, disables preemption and returns poolLocal pool for the P.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pool) pin() *poolLocal { // fetch the poolLocal specific to the current P
	pid := runtime_procPin() // id of the current P
	// In pinSlow we store to localSize and then to local, here we load in opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe local at least as large as localSize.
	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire: current size of the local array
	l := p.local                          // load-consume
	if uintptr(pid) < s {                 // pid < localSize: the set of Ps has not grown; use the fast path
		return indexLocal(l, pid) // return the poolLocal for this pid
	}
	return p.pinSlow() // pid >= localSize: more Ps now exist; take the slow path
}
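
indexLocal is not shown here. In the sync.Pool versions this code derives from, it is plain pointer arithmetic over the contiguous local array; a sketch under that assumption:

// indexLocal returns the i-th poolLocal in the array local points to. The
// oversized array type is the usual Go idiom for unchecked indexing through
// an unsafe.Pointer; i is always bounded by localSize.
func indexLocal(l unsafe.Pointer, i int) *poolLocal {
	return &(*[1 << 20]poolLocal)(l)[i]
}
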
Example #13
// TryDequeue dequeues a value from our queue. If the queue is empty, this
// will return failure.
func (q *Queue) TryDequeue() (ptr unsafe.Pointer, dequeued bool) {
	var c *cell
	// Race load our deqPos,
	pos := atomic.LoadUintptr(&q.deqPos)
	for {
		// load the cell at that deqPos,
		c = &q.cells[pos&q.mask]
		// load the sequence number in that cell,
		seq := atomic.LoadUintptr(&c.seq)
		// and, if the sequence number is (deqPos + 1), we have an
		// enqueued value to dequeue.
		cmp := int(seq - (pos + 1))
		if cmp == 0 {
			var swapped bool
			// Try to claim the deqPos to ourselves to dequeue,
			// updating pos to the new value.
			if pos, swapped = primitive.CompareAndSwapUintptr(&q.deqPos, pos, pos+1); swapped {
				dequeued = true
				break
			}
			continue
		}
		if cmp < 0 {
			// If the sequence number was less than deqPos + 1,
			// the queue is empty.
			return
		}
		// If the sequence number was larger than (deqPos+1),
		// somebody else just updated the sequence number and
		// our loaded deqPos is out of date.
		pos = atomic.LoadUintptr(&q.deqPos)
	}
	// We have won the race and can dequeue - grab the pointer.
	ptr = c.ptr
	c.ptr = primitive.Null
	// Update the cell's sequence number for the next enqueue.
	atomic.StoreUintptr(&c.seq, pos+q.mask)
	return
}
Example #14
// sysTickHandler is the system timer interrupt handler and the single writer
// for ticks: it writes the incremented count into the inactive buffer slot,
// then publishes that slot by storing the bumped ABA counter (the reader side
// is loadTicks in Example #1).
func sysTickHandler() {
	aba := atomic.LoadUintptr(&ticksABA)
	t := ticks[aba&1]
	aba++
	ticks[aba&1] = t + 1 // write into the slot readers are not using
	barrier.Memory()     // make the new tick visible before publishing it
	atomic.StoreUintptr(&ticksABA, aba)
	tickEvent.Send()

	if tasker.onSysTick {
		exce.PendSV.SetPending()
	}
}
Example #15
// pin pins the current goroutine to P, disables preemption and returns poolsLocal pool for the P.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pools) pin() *poolsLocal {
	pid := runtime_procPin()
	// In pinSlow we store to localSize and then to local, here we load in opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe local at least as large as localSize.
	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire
	l := p.local                          // load-consume
	if uintptr(pid) < s {
		return indexLocals(l, pid)
	}
	return p.pinSlow()
}
Example #16
// TryEnqueue adds a value to our queue. TryEnqueue takes an unsafe.Pointer to
// avoid the necessity of wrapping a heap allocated value in an interface,
// which also goes on the heap. If the queue is full, this will return failure.
func (q *Queue) TryEnqueue(ptr unsafe.Pointer) (enqueued bool) {
	var c *cell
	// Race load our enqPos,
	pos := atomic.LoadUintptr(&q.enqPos)
	for {
		// load the cell at that enqPos,
		c = &q.cells[pos&q.mask]
		// load the sequence number in that cell,
		seq := atomic.LoadUintptr(&c.seq)
		// and, if the sequence number is (enqPos), we have a spot to
		// enqueue into.
		cmp := int(seq - pos)
		if cmp == 0 {
			var swapped bool
			// Try to claim the enqPos to ourselves to enqueue,
			// updating pos to the new value.
			if pos, swapped = primitive.CompareAndSwapUintptr(&q.enqPos, pos, pos+1); swapped {
				enqueued = true
				break
			}
			continue
		}
		if cmp < 0 {
			// If the sequence number was less than enqPos, the
			// queue is full.
			return
		}
		// If the sequence number was larger than enqPos,
		// somebody else just updated the sequence number and
		// our loaded enqPos is out of date.
		pos = atomic.LoadUintptr(&q.enqPos)
	}
	// We have won the race and can enqueue - set the pointer.
	c.ptr = ptr
	// Update the cell's sequence number for dequeueing.
	atomic.StoreUintptr(&c.seq, pos)
	return
}
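
Cell sequence numbers drive both operations: a cell is free for enqueue position p exactly when seq == p, and holds a value for dequeue position p exactly when seq == p+1 (this is Dmitry Vyukov's bounded MPMC queue design). For the invariant to hold initially, every cell's seq must start at its own index; a constructor sketch (the exact field layout of Queue is an assumption):

// newQueue builds a queue with the given capacity, which must be a power of
// two so that pos&mask cycles through every cell.
func newQueue(capacity uintptr) *Queue {
	q := &Queue{
		cells: make([]cell, capacity),
		mask:  capacity - 1,
	}
	for i := range q.cells {
		q.cells[i].seq = uintptr(i) // cell i starts free for enqueue position i
	}
	return q
}
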
Example #17
func (p *Process) release() error {
	handle := atomic.LoadUintptr(&p.handle)
	if handle == uintptr(syscall.InvalidHandle) {
		return syscall.EINVAL
	}
	e := syscall.CloseHandle(syscall.Handle(handle))
	if e != nil {
		return NewSyscallError("CloseHandle", e)
	}
	atomic.StoreUintptr(&p.handle, uintptr(syscall.InvalidHandle))
	// no need for a finalizer anymore
	runtime.SetFinalizer(p, nil)
	return nil
}
Example #18
// NewInvariantMutex creates a lock which, when EnableInvariantChecking has
// been called, will call
// the supplied function at moments when invariants protected by the lock
// should hold (e.g. just after acquiring the lock). The function should crash
// if an invariant is violated. It should not have side effects, as there are
// no guarantees that it will run.
//
// The invariants must hold at the time that NewInvariantMutex is called.
func NewInvariantMutex(check func()) InvariantMutex {
	if check == nil {
		panic("check must be non-nil.")
	}

	// Check now, if enabled.
	if atomic.LoadUintptr(&gEnable) != 0 {
		check()
	}

	return InvariantMutex{
		check: check,
	}
}
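
A usage sketch (the account type is hypothetical; Lock is assumed to invoke the check via checkIfEnabled, shown in Example #28):

type account struct {
	mu      InvariantMutex
	balance int // protected by mu; invariant: balance >= 0
}

func newAccount() *account {
	a := &account{}
	a.mu = NewInvariantMutex(func() {
		if a.balance < 0 {
			panic("invariant violated: negative balance")
		}
	})
	return a
}
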
Example #19
func (p *Process) signal(sig Signal) error {
	handle := atomic.LoadUintptr(&p.handle)
	if handle == uintptr(syscall.InvalidHandle) {
		return syscall.EINVAL
	}
	if p.done() {
		return errors.New("os: process already finished")
	}
	if sig == Kill {
		return terminateProcess(p.Pid, 1)
	}
	// TODO(rsc): Handle Interrupt too?
	return syscall.Errno(syscall.EWINDOWS)
}
Example #20
// lock acquires m. On first use it lazily assigns an event flag to the mutex
// state; the low bit of state is the locked flag, the rest identifies the
// event that waiters sleep on.
func (m *Mutex) lock() {
	state := atomic.LoadUintptr(&m.state)
	if state == 0 {
		// First use: assign an event for sleeping waiters.
		state = uintptr(noos.AssignEventFlag())
		if !atomic.CompareAndSwapUintptr(&m.state, 0, state) {
			state = m.state // someone else assigned the event first
		}
	}
	unlocked, locked := state&^1, state|1
	for {
		if atomic.CompareAndSwapUintptr(&m.state, unlocked, locked) {
			return
		}
		noos.Event(unlocked).Wait()
	}
}
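
The unlock side is not shown. Given that the locked state is unlocked|1 and waiters sleep on noos.Event(unlocked), a plausible counterpart (a sketch, not the project's actual code) clears the low bit and signals the event:

func (m *Mutex) unlock() {
	state := atomic.LoadUintptr(&m.state)
	unlocked := state &^ 1
	atomic.StoreUintptr(&m.state, unlocked)
	noos.Event(unlocked).Send() // wake goroutines blocked in lock()
}
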
Example #21
// gui runs f in the main GUI thread and waits for f to return.
func gui(f func()) {
	ref := tref.Ref()
	if ref == guiLoopRef || ref == atomic.LoadUintptr(&guiPaintRef) {
		// Already within the GUI or render threads. Attempting to wait would deadlock.
		f()
		return
	}

	// Tell Qt we're waiting for the idle hook to be called.
	if atomic.AddInt32((*int32)(unsafe.Pointer(&hookWaiting)), 1) == 1 {
		C.idleTimerStart()
	}

	// Send f to be executed by the idle hook in the main GUI thread.
	guiFunc <- f

	// Wait until f is done executing.
	<-guiDone
}
Example #22
// RunMain runs f in the main QML thread and waits for f to return.
//
// This is meant to be used by extensions that integrate directly with the
// underlying QML logic.
func RunMain(f func()) {
	ref := cdata.Ref()
	if ref == guiMainRef || ref == atomic.LoadUintptr(&guiPaintRef) {
		// Already within the GUI or render threads. Attempting to wait would deadlock.
		f()
		return
	}

	// Tell Qt we're waiting for the idle hook to be called.
	if atomic.AddInt32(&guiIdleRun, 1) == 1 {
		C.idleTimerStart()
	}

	// Send f to be executed by the idle hook in the main GUI thread.
	guiFunc <- f

	// Wait until f is done executing.
	<-guiDone
}
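
A minimal usage sketch: code on an arbitrary goroutine wraps any QML access in RunMain (the window object, its Set call, and compute are hypothetical):

go func() {
	result := compute() // expensive work off the GUI thread
	RunMain(func() {
		window.Set("status", result) // safe: runs on the main QML thread
	})
}()
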
Example #23
func (p *Pools) getSlows(xs []interface{}) int {
	xsl := len(xs)
	gxs := xs[:0]
	gxsl := 0
	// See the comment in pin regarding ordering of the loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	local := p.local                         // load-consume
	// Try to steal elements from other procs.
	pid := runtime_procPin()
	runtime_procUnpin()
	for i := 0; i < int(size); i++ {
		l := indexLocals(local, (pid+i+1)%int(size))
		l.Lock()

		if n, lack := len(l.shared), xsl-len(gxs); n >= lack {
			gxs = append(gxs, l.shared[n-lack:]...)
			l.shared = l.shared[:n-lack]
		} else if n > 0 {
			gxs = append(gxs, l.shared...)
			l.shared = l.shared[:0]
		}

		gxsl = len(gxs)
		if gxsl == xsl {
			l.Unlock()
			break
		}
		l.Unlock()
	}

	if gxsl < xsl && p.New != nil {
		for i := gxsl; i < xsl; i++ {
			gxs = append(gxs, p.New())
		}
		return xsl
	}
	return gxsl
}
Example #24
File: block.go Project: twmb/dash
// Wait blocks until the block has been signaled to continue. This may
// spuriously return early. The assumption is that re-checking an operation
// that may fail is cheaper than blocking.
func (b *Block) Wait(primer uintptr) {
	for {
		for {
			runtime.Gosched()
			if primer != atomic.LoadUintptr(&b.counter) {
				atomic.AddInt32(&b.waiters, -1)
				return
			}
			if b.lock.TryRLock() {
				break
			}
		}
		if primer != b.counter {
			atomic.AddInt32(&b.waiters, -1)
			b.lock.Unlock()
			return
		}
		b.cond.Wait()
		// Waking up does not grab any lock.
	}
}
Example #25
File: turn.go Project: twmb/dash
// completeTurn unblocks a thread running waitFor(turn + 1).
func (t turnBroker) completeTurn(turn uintptr) {
	state := atomic.LoadUintptr(&t.f.State)
	for {
		curWaitingFor := getTurnWait(state)
		var oneLess uintptr
		if curWaitingFor > 0 {
			oneLess = curWaitingFor - 1
		}
		newState := encodeTurn(turn+1, oneLess)
		var swapped bool
		if state, swapped = primitive.CompareAndSwapUintptr(&t.f.State, state, newState); swapped {
			if curWaitingFor != 0 {
				// We need to wake all waiters. If there is a
				// waiter for turn 0, and a wraparound waiter
				// for turn 32, and we wake only turn 32, it
				// will go back to waiting while turn 0 still
				// needs to be awoken.
				t.f.Wake(math.MaxUint32, futexChannel(turn+1))
			}
			break
		}
	}
}
Example #26
File: turn.go Project: twmb/dash
func (t turnBroker) waitFor(turn uintptr, spinCutoff *uint32, updateSpinCutoff bool) {
	givenSpinCount := atomic.LoadUint32(spinCutoff)
	spinCount := givenSpinCount
	if updateSpinCutoff || givenSpinCount == 0 {
		spinCount = maxSpins
	}

	var tries uint32
	state := atomic.LoadUintptr(&t.f.State)
	for ; ; tries++ {
		curTurn := getTurnNumber(state)
		if curTurn == turn {
			break
		}

		waitingFor := turn - curTurn
		if waitingFor >= math.MaxUint32>>(turnShift+1) {
			panic("turn is in the past")
		}

		if tries < spinCount {
			primitive.Pause()
			state = atomic.LoadUintptr(&t.f.State)
			continue
		}

		curWaitingFor := getTurnWait(state)

		var newState uintptr
		if waitingFor <= curWaitingFor {
			// A later turn is already being waited for - we will
			// hop on that bandwagon and wait with it.
			newState = state
		} else {
			newState = encodeTurn(curTurn, waitingFor)

			if state != newState {
				var swapped bool
				if state, swapped = primitive.CompareAndSwapUintptr(&t.f.State, state, newState); !swapped {
					continue
				}
			}
		}

		t.f.Wait(newState, futexChannel(turn))
		state = atomic.LoadUintptr(&t.f.State)
	}

	if updateSpinCutoff || givenSpinCount == 0 {
		var spinUpdate uint32
		if tries >= maxSpins {
			// If we hit maxSpins, then spinning is pointless, so
			// the right spinCutoff is the minimum possible.
			spinUpdate = minSpins
		} else {
			// To account for variations, we allow ourself to spin
			// 2*N when we think that N is actually required in
			// order to succeed.
			spinUpdate = minSpins
			dubTries := tries << 1
			if dubTries > spinUpdate {
				spinUpdate = dubTries
			}
			if maxSpins < spinUpdate {
				spinUpdate = maxSpins
			}
		}
		if givenSpinCount == 0 {
			atomic.StoreUint32(spinCutoff, spinUpdate)
		} else {
			// Exponential moving average with alpha of 7/8,
			// following Facebook's folly implementation.
			spinUpdate = uint32(int(givenSpinCount) + (int(spinUpdate)-int(givenSpinCount))>>3)
			// Try once but keep moving if somebody else updated.
			atomic.CompareAndSwapUint32(spinCutoff, givenSpinCount,
				spinUpdate)
		}
	}
}
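
Taken together, waitFor and completeTurn serialize concurrent participants into a strict turn order, as in folly's TurnSequencer. A usage sketch (worker and the shared spinCutoff are illustrative):

// Each participant blocks until its turn comes up, performs its work, then
// hands the baton to whoever is waiting for turn+1.
func worker(t turnBroker, turn uintptr, spinCutoff *uint32) {
	t.waitFor(turn, spinCutoff, false)
	// ... work that must happen strictly in turn order ...
	t.completeTurn(turn)
}
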
Example #27
func init() {
	// Ensure that this import is not removed by the linker. This is used to
	// ensure that shell32.dll is loaded by the system loader, preventing
	// go#15286 from triggering on Nano Server TP5.
	atomic.LoadUintptr(&dummy)
}
Example #28
func (i *InvariantMutex) checkIfEnabled() {
	if atomic.LoadUintptr(&gEnable) != 0 {
		i.check()
	}
}
Example #29
// LoadUintptr delegates to the wrapped atomic implementation.
func LoadUintptr(addr *uintptr) uintptr {
	return orig.LoadUintptr(addr)
}
Example #30
func (spooler *tdOutputSpooler) handle() {
	defer spooler.cleanup()
	spooler.daemon.output.logger.Notice("Spooler started")
outer:
	for {
		select {
		case <-spooler.ticker.C:
			spooler.daemon.output.logger.Notice("Flushing...")
			err := spooler.journal.Flush(func(chunk JournalChunk) interface{} {
				defer chunk.Dispose()
				if atomic.LoadUintptr(&spooler.isShuttingDown) != 0 {
					return errors.New("Flush aborted")
				}
				spooler.daemon.output.logger.Info("Flushing chunk %s", chunk.String())
				size, err := chunk.Size()
				if err != nil {
					return err
				}
				if size == 0 {
					return nil
				}
				futureErr := make(chan error, 1)
				sem := spooler.daemon.output.sem
				sem <- struct{}{}
				go func(size int64, chunk JournalChunk, futureErr chan error) {
					var err error
					defer func() {
						if err != nil {
							spooler.daemon.output.logger.Info("Failed to flush chunk %s (reason: %s)", chunk.String(), err.Error())
						} else {
							spooler.daemon.output.logger.Info("Completed flushing chunk %s", chunk.String())
						}
						<-sem
						// disposal must be done before notifying the initiator
						chunk.Dispose()
						futureErr <- err
					}()
					err = func() error {
						compressingBlob := NewCompressingBlob(
							chunk,
							maxInt(4096, int(size/4)),
							gzip.BestSpeed,
							&spooler.daemon.tempFactory,
						)
						defer compressingBlob.Dispose()
						_, err := spooler.client.Import(
							spooler.databaseName,
							spooler.tableName,
							"msgpack.gz",
							td_client.NewBufferingBlobSize(
								compressingBlob,
								maxInt(4096, int(size/16)),
							),
							chunk.Id(),
						)
						return err
					}()
				}(size, chunk.Dup(), futureErr)
				return (<-chan error)(futureErr)
			})
			if err != nil {
				spooler.daemon.output.logger.Error("Error during reading from the journal: %s", err.Error())
			}
		case <-spooler.shutdownChan:
			break outer
		}
	}
	spooler.daemon.output.logger.Notice("Spooler ended")
}