Example #1
1
// RLock takes a read lock on the resource for the given thread.
// It double-checks against the writer lock.
func (t *TControl) RLock(threadId uint16, resursId string) {
	var wlock *string
	for {
		wlock = (*string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&t.writer))))
		if wlock == nil || *wlock != resursId {
			atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&t.readers[threadId])), unsafe.Pointer(&resursId))
			wlock = (*string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&t.writer))))
			if wlock == nil || *wlock != resursId {
				return
			}
			atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&t.readers[threadId])), nil)
		}
		t.sleep()
	}
}
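This example and Example #24 below operate on a TControl whose definition is not included. A minimal sketch of the layout the code appears to assume (the concrete field types and the sleep back-off are assumptions, not taken from the source; sleep uses the standard time package):

type TControl struct {
	writer  *string   // id of the resource currently locked for writing; accessed via atomic pointer operations
	readers []*string // per-thread id of the resource locked for reading, indexed by threadId
}

// sleep backs off between retries of the lock loops.
func (t *TControl) sleep() { time.Sleep(time.Microsecond) }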
Example #2
0
func (c *container) getOrInsert(k uint64, v unsafe.Pointer) unsafe.Pointer {
	bi := k & (c.sz - 1)
	b := c.list[bi]
	for i := range b.elems {
		e := &b.elems[i]
		// Once a slot has been allocated a valid key, that key never changes. So
		// first try to allocate the slot with k (the CAS below). If the slot was
		// already allocated, check whether its key is k; if it is, set the value
		// (only if it is still nil). Otherwise continue to the next slot.
		// This sequence could be problematic, if this happens:
		// Main thread runs Step 1. Check
		if atomic.CompareAndSwapUint64(&e.k, 0, k) { // Step 1.
			atomic.AddUint32(&c.numElems, 1)
			if atomic.CompareAndSwapPointer(&e.v, nil, v) {
				return v
			}
			return atomic.LoadPointer(&e.v)
		}

		if atomic.LoadUint64(&e.k) == k {
			// Swap if previous pointer is nil.
			if atomic.CompareAndSwapPointer(&e.v, nil, v) {
				return v
			}
			return atomic.LoadPointer(&e.v)
		}
	}
	return nil
}
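Examples #2 and #3 build on a container type that is not shown. A hypothetical sketch of the layout implied by the accesses above (the slot count per bucket and the exact field types are assumptions):

type elem struct {
	k uint64         // key; 0 means the slot is unallocated
	v unsafe.Pointer // value; nil until set
}

type bucket struct {
	elems [8]elem // fixed number of slots per bucket
}

type container struct {
	sz       uint64    // number of buckets, a power of two
	numElems uint32    // count of allocated keys, updated atomically
	list     []*bucket // sz buckets
}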
Example #3
0
func (m *Map) GetOrInsert(k uint64, v unsafe.Pointer) unsafe.Pointer {
	if v == nil {
		log.Fatal("GetOrInsert doesn't allow setting nil pointers.")
		return nil
	}

	// Check immutable first.
	cval := atomic.LoadPointer(&m.cs[IMMUTABLE])
	if cval != nil {
		c := (*container)(cval)
		if pv := c.get(k); pv != nil {
			return pv
		}
	}

	// Okay, deal with mutable container now.
	cval = atomic.LoadPointer(&m.cs[MUTABLE])
	if cval == nil {
		log.Fatal("This is disruptive in a bad way.")
	}
	c := (*container)(cval)
	if pv := c.getOrInsert(k, v); pv != nil {
		return pv
	}

	// We still couldn't insert the key. Time to grow.
	// TODO: Handle this case.
	return nil
}
Example #4
0
func (db *DB) Preparex(query string) (stmt Stmt, err error) {
	var m stmtCache

	if p := (*stmtCache)(atomic.LoadPointer(&db.stmtCachePtr)); p != nil {
		m = *p
		if stmt = m[query]; stmt.Stmt != nil {
			return
		}
	}

	db.stmtCachePtrMutex.Lock()
	defer db.stmtCachePtrMutex.Unlock()

	if p := (*stmtCache)(atomic.LoadPointer(&db.stmtCachePtr)); p != nil {
		m = *p
		if stmt = m[query]; stmt.Stmt != nil {
			return
		}
	}

	stmtx, err := db.DB.Preparex(query)
	if err != nil {
		return
	}
	stmt = Stmt{Stmt: stmtx}

	m2 := make(stmtCache, len(m)+1)
	for k, v := range m {
		m2[k] = v
	}
	m2[query] = stmt

	atomic.StorePointer(&db.stmtCachePtr, unsafe.Pointer(&m2))
	return
}
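The receiver appears to wrap jmoiron/sqlx (db.DB.Preparex). A hypothetical sketch of the surrounding types, with field names taken from the code above and everything else assumed:

type stmtCache map[string]Stmt

type Stmt struct {
	*sqlx.Stmt
}

type DB struct {
	*sqlx.DB
	stmtCachePtr      unsafe.Pointer // *stmtCache, read atomically on the fast path
	stmtCachePtrMutex sync.Mutex     // serializes cache rebuilds on the slow path
}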
Example #5
0
func (wstore *WStore) checkPingPong() {
	ncping := (*map[int64]Node)(atomic.LoadPointer(&wstore.ncping))
	ncpong := (*map[int64]Node)(atomic.LoadPointer(&wstore.ncpong))
	if len(*ncping) != len(*ncpong) {
		panic("Mismatch in nc ping-pong lengths")
	}
	for fpos := range *ncping {
		if (*ncpong)[fpos] == nil {
			panic("fpos not found in nc ping-pong")
		}
	}

	//lcping := (*map[int64]Node)(atomic.LoadPointer(&wstore.lcping))
	//lcpong := (*map[int64]Node)(atomic.LoadPointer(&wstore.lcpong))
	//if len(*lcping) != len(*lcpong) {
	//    panic("Mismatch in lc ping-pong lengths")
	//}
	//for fpos := range *lcping {
	//    if (*lcpong)[fpos] == nil {
	//        panic("fpos not found in lc ping-pong")
	//    }
	//}

	kdping := (*map[int64][]byte)(atomic.LoadPointer(&wstore.kdping))
	kdpong := (*map[int64][]byte)(atomic.LoadPointer(&wstore.kdpong))
	if len(*kdping) != len(*kdpong) {
		panic("Mismatch in kd ping-pong lengths")
	}
	for fpos := range *kdping {
		if (*kdpong)[fpos] == nil {
			panic("fpos not found in kd ping-pong")
		}
	}
}
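Examples #5, #17, #18, #22 and #23 all load ping/pong maps from a WStore that is not defined here. A hypothetical sketch of the assumed layout (the Node interface and the exact field set are assumptions):

type Node interface {
	isLeaf() bool
}

type WStore struct {
	sync.Mutex
	Debug bool
	// Each field holds an unsafe.Pointer to a map. The ping and pong sides are
	// swapped atomically elsewhere, so readers always load the pointer first.
	ncping, ncpong unsafe.Pointer // *map[int64]Node
	lcping, lcpong unsafe.Pointer // *map[int64]Node
	kdping, kdpong unsafe.Pointer // *map[int64][]byte
}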
Example #6
0
func (p *partitionstore) mutate(cb func(keys, changes *gkvlite.Collection)) {
	p.lock.Lock()
	defer p.lock.Unlock()

	cb((*gkvlite.Collection)(atomic.LoadPointer(&p.keys)),
		(*gkvlite.Collection)(atomic.LoadPointer(&p.changes)))
}
Example #7
0
func (cache *DCache) cacheEvict(fpos int64) Node {
	var node Node
	idx := cache.indexFor(fpos)
	for {
		var retry bool
		hash := (*[]unsafe.Pointer)(atomic.LoadPointer(&(cache.hash)))
		addr := &((*hash)[idx])
		hd := (*DCacheItem)(atomic.LoadPointer(addr))
		for hd != nil {
			nx := atomic.LoadPointer(&hd.next)
			if hd.fpos == fpos {
				if !atomic.CompareAndSwapPointer(addr, unsafe.Pointer(hd), nx) {
					retry = true
				} else {
					node = hd.node
				}
				break
			}
			addr = &hd.next
			hd = (*DCacheItem)(nx)
		}
		if retry {
			continue
		}
		break
	}
	return node
}
Example #8
0
func (self *element) next() *element {
	next := atomic.LoadPointer(&self.Pointer)
	for next != nil {
		/*
		 If the pointer of the next element is marked as deleted, that means the next element is supposed to be GONE
		*/
		if nextPointer := atomic.LoadPointer(&(*element)(normal(next)).Pointer); isDeleted(nextPointer) {
			/*
			 If OUR pointer is marked as deleted, that means WE are supposed to be gone
			*/
			if isDeleted(next) {
				/*
				 .. which means that we can steal the pointer of the next element right away,
				 it points to the right place AND it is marked as deleted.
				*/
				atomic.CompareAndSwapPointer(&self.Pointer, next, nextPointer)
			} else {
				/*
				 .. if not, we have to remove the marking on the pointer before we steal it.
				*/
				atomic.CompareAndSwapPointer(&self.Pointer, next, normal(nextPointer))
			}
			next = atomic.LoadPointer(&self.Pointer)
		} else {
			/*
			 If the next element is NOT deleted, then we simply return a pointer to it, and make
			 damn sure that the pointer is a working one even if we are deleted (and, therefore,
			 our pointer is marked as deleted).
			*/
			return (*element)(normal(next))
		}
	}
	return nil
}
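The normal and isDeleted helpers used above are not shown. A common way to carry a deletion mark is to steal the pointer's lowest bit; a hypothetical sketch under that assumption:

// isDeleted reports whether the (assumed) low-bit deletion mark is set.
func isDeleted(p unsafe.Pointer) bool {
	return uintptr(p)&1 == 1
}

// normal returns p with the deletion mark cleared, yielding a usable *element.
func normal(p unsafe.Pointer) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) &^ 1)
}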
Example #9
0
func (self *element) next() *element {
	next := atomic.LoadPointer(&self.Pointer)
	for next != nil {
		nextElement := (*element)(next)
		/*
		 If our next element contains &deletedElement that means WE are deleted, and
		 we can just return the next-next element. It will make it impossible to add
		 stuff to us, since we will always lie about our next(), but then again, deleted
		 elements shouldn't get new children anyway.
		*/
		if sp, ok := nextElement.value.(*string); ok && sp == &deletedElement {
			return nextElement.next()
		}
		/*
		 If our next element is itself deleted (by the same criteria) then we will just replace
		 it with its next() (which should be the first thing behind it that isn't itself deleted
		 (the power of recursion compels you) and then check again.
		*/
		if nextElement.isDeleted() {
			atomic.CompareAndSwapPointer(&self.Pointer, next, unsafe.Pointer(nextElement.next()))
			next = atomic.LoadPointer(&self.Pointer)
		} else {
			/*
			 If it isn't deleted then we just return it.
			*/
			return nextElement
		}
	}
	/*
	 And if our next is nil, then we are at the end of the list and can just return nil for next()
	*/
	return nil
}
Example #10
0
func (p *partitionstore) visitItems(start []byte, withValue bool,
	visitor func(*item) bool) (err error) {
	keys, changes := p.colls()
	var vErr error
	v := func(kItem *gkvlite.Item) bool {
		i := (*item)(atomic.LoadPointer(&kItem.Transient))
		if i != nil {
			return visitor(i)
		}
		var cItem *gkvlite.Item
		cItem, vErr = changes.GetItem(kItem.Val, true)
		if vErr != nil {
			return false
		}
		if cItem == nil {
			return true // TODO: track this case; might have been compacted away.
		}
		i = (*item)(atomic.LoadPointer(&cItem.Transient))
		if i != nil {
			atomic.StorePointer(&kItem.Transient, unsafe.Pointer(i))
			return visitor(i)
		}
		i = &item{key: kItem.Key}
		if vErr = i.fromValueBytes(cItem.Val); vErr != nil {
			return false
		}
		atomic.StorePointer(&cItem.Transient, unsafe.Pointer(i))
		atomic.StorePointer(&kItem.Transient, unsafe.Pointer(i))
		return visitor(i)
	}
	if err := p.visit(keys, start, true, v); err != nil {
		return err
	}
	return vErr
}
Example #11
0
func (this *MapIterator) advance() {
	if this.nextE != nil {
		this.nextE = this.nextE.next
		if this.nextE != nil {
			return
		}
	}

	for this.nextTableIndex >= 0 {
		this.nextE = (*Entry)(atomic.LoadPointer(&this.currentTable[this.nextTableIndex]))
		this.nextTableIndex--
		if this.nextE != nil {
			return
		}
	}

	for this.nextSegmentIndex >= 0 {
		seg := this.cm.segments[this.nextSegmentIndex]
		this.nextSegmentIndex--
		if atomic.LoadInt32(&seg.count) != 0 {
			this.currentTable = seg.loadTable()
			for j := len(this.currentTable) - 1; j >= 0; j-- {
				this.nextE = (*Entry)(atomic.LoadPointer(&this.currentTable[j]))
				if this.nextE != nil {
					this.nextTableIndex = j - 1
					return
				}
			}
		}
	}
}
Example #12
0
// Dequeue returns the value at the head of the queue and true, or if the queue is empty, it returns a nil value and false
func (q *ZFifo) Dequeue() (value interface{}, ok bool) {
	for {
		head := atomic.LoadPointer(&q.head)               // Read head pointer
		tail := atomic.LoadPointer(&q.tail)               // Read tail pointer
		next := atomic.LoadPointer(&(*lfNode)(head).next) // Read head.next
		if head != q.head {                               // Check head, tail, and next consistency
			continue // Not consistent. Try again
		}

		if head == tail { // Is the queue empty, or is the tail falling behind?
			if next == unsafe.Pointer(q) { // Is queue empty?
				return
			}
			// Try to swing tail to the next node as the tail was not pointing to the last node
			atomic.CompareAndSwapPointer(&q.tail, tail, next)
		} else {
			// Read value before CAS
			// Otherwise, another dequeue might free the next node
			value = (*lfNode)(next).value
			// Try to swing Head to the next node
			if atomic.CompareAndSwapPointer(&q.head, head, next) {
				ok = true
				return
			}
			value = nil
		}
	}
	return // Dummy return
}
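The queue types are not shown. Judging from the empty-queue check next == unsafe.Pointer(q), this is a Michael-Scott style queue whose dummy node uses the queue's own address as the end-of-list sentinel; a hypothetical sketch:

type lfNode struct {
	value interface{}
	next  unsafe.Pointer // *lfNode, or unsafe.Pointer(q) when this is the last node
}

type ZFifo struct {
	head unsafe.Pointer // *lfNode; always points at a dummy node
	tail unsafe.Pointer // *lfNode; may briefly lag behind the real tail
}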
Example #13
0
// Seek seeks the handle.
func (t *BoundedTable) Seek(ctx context.Context, handle int64) (int64, bool, error) {
	result := (*boundedItem)(nil)
	if handle < invalidRecordID {
		// this is the first seek call.
		result = (*boundedItem)(atomic.LoadPointer(&t.records[0]))
	} else {
		for i := int64(0); i < t.capacity; i++ {
			record := (*boundedItem)(atomic.LoadPointer(&t.records[i]))
			if record == nil {
				break
			}
			if handle == record.handle {
				result = record
				break
			}
		}
	}
	if result == nil {
		// handle not found.
		return invalidRecordID, false, nil
	}
	if result.handle != invalidRecordID {
		// this record is valid.
		return result.handle, true, nil
	}
	// this record is invalid.
	return invalidRecordID, false, nil
}
Example #14
0
// gcasRead performs a GCAS-linearizable read of the I-node's main node.
func gcasRead(in *iNode, ctrie *ctrie) *mainNode {
	m := (*mainNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&in.main))))
	prev := (*mainNode)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&m.prev))))
	if prev == nil {
		return m
	}
	return gcasComplete(in, m, ctrie)
}
Example #15
0
// Insert inserts v into the list in order. An error is returned if v is already present.
func (l *partitionList) Insert(v partition.Partition) error {
	n := &partitionListNode{
		val:  v,
		next: nil,
	}

HEAD:
	headPtr := atomic.LoadPointer(&l.head)

	if headPtr == nil {
		if !atomic.CompareAndSwapPointer(&l.head, headPtr, unsafe.Pointer(n)) {
			goto HEAD
		}

		atomic.AddInt32(&l.size, 1)
		return nil
	}

	headNode := (*partitionListNode)(headPtr)
	if comparePartitions(headNode.val, n.val) > 0 {
		n.next = headPtr
		if !atomic.CompareAndSwapPointer(&l.head, headPtr, unsafe.Pointer(n)) {
			goto HEAD
		}

		atomic.AddInt32(&l.size, 1)
		return nil
	}

NEXT:
	nextPtr := atomic.LoadPointer(&headNode.next)
	if nextPtr == nil {
		if !atomic.CompareAndSwapPointer(&headNode.next, nextPtr, unsafe.Pointer(n)) {
			goto NEXT
		}

		atomic.AddInt32(&l.size, 1)
		return nil
	}

	nextNode := (*partitionListNode)(nextPtr)
	if comparePartitions(nextNode.val, n.val) > 0 {
		n.next = nextPtr
		if !atomic.CompareAndSwapPointer(&headNode.next, nextPtr, unsafe.Pointer(n)) {
			goto NEXT
		}

		atomic.AddInt32(&l.size, 1)
		return nil
	}

	if comparePartitions(nextNode.val, n.val) == 0 {
		return errors.New("catena/partition_list: partition exists")
	}

	headNode = nextNode
	goto NEXT
}
Example #16
0
func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
	h := (*mNode)(atomic.LoadPointer(&r.mHead))
	i := hash & h.mask
	b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
	if b == nil {
		b = h.initBucket(i)
	}
	return h, b
}
Example #17
0
func (wstore *WStore) _pingCache(fpos int64, node Node) {
	nc := (*map[int64]Node)(atomic.LoadPointer(&wstore.ncping))
	lc := (*map[int64]Node)(atomic.LoadPointer(&wstore.lcping))
	if node.isLeaf() {
		(*lc)[fpos] = node
	} else {
		(*nc)[fpos] = node
	}
}
Example #18
0
func (wstore *WStore) ncacheEvict(fposs []int64) {
	wstore.Lock()
	defer wstore.Unlock()

	nc := (*map[int64]Node)(atomic.LoadPointer(&wstore.ncpong))
	lc := (*map[int64]Node)(atomic.LoadPointer(&wstore.lcpong))
	for _, fpos := range fposs {
		delete(*nc, fpos)
		delete(*lc, fpos)
	}
}
Example #19
0
func (cache *DCache) cacheLookup(fpos int64) Node {
	idx := cache.indexFor(fpos)
	hash := (*[]unsafe.Pointer)(atomic.LoadPointer(&(cache.hash)))
	head := (*DCacheItem)(atomic.LoadPointer(&((*hash)[idx])))
	for head != nil {
		if head.fpos == fpos {
			return head.node
		}
		head = (*DCacheItem)(atomic.LoadPointer(&head.next))
	}
	return nil
}
Example #20
0
// Contains returns true if, and only if, the list contains an element with that
// key. This method is wait-free, so it will always return in a finite number
// of steps, independent of any contention with other threads.
func (l *List) Contains(key string) bool {
	curr := (*node)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&l.head))))
	for curr != nil && curr.key < key {
		curr_m := (*markAndRef)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&curr.m))))
		curr = curr_m.next
	}
	if curr != nil && curr.key == key {
		curr_m := (*markAndRef)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&curr.m))))
		return !curr_m.marked
	}
	return false
}
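This example and Example #21 below share list types that are not shown. A hypothetical sketch of the assumed layout, in which the deletion mark and the next pointer travel together as one markAndRef value that is always replaced as a whole:

type markAndRef struct {
	marked bool  // true once the owning node is logically deleted
	next   *node // the node that follows the owning node
}

type node struct {
	key string
	m   *markAndRef // replaced atomically on every update
}

type List struct {
	head *node
}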
Example #21
0
// find returns the nodes on either side of a specific key. It will physically
// delete all nodes marked for deletion while traversing the list.
func (l *List) find(key string) (pred *node, pred_m *markAndRef, curr *node, curr_m *markAndRef) {
retry:
	for {
		curr = (*node)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&l.head))))
		for curr != nil {
			curr_m = (*markAndRef)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&curr.m))))

			if curr_m.marked {
				// curr is marked as deleted. Try to remove it physically by
				// unlinking the node from the list.

				if pred == nil {
					if !atomic.CompareAndSwapPointer(
						(*unsafe.Pointer)(unsafe.Pointer(&l.head)),
						unsafe.Pointer(curr),
						unsafe.Pointer(curr_m.next)) {

						// Another thread has modified the head pointer of our
						// list. The other thread progressed, but we need to
						// restart the list traversal.
						continue retry
					}
				} else {
					m := &markAndRef{false, curr_m.next}
					if !atomic.CompareAndSwapPointer(
						(*unsafe.Pointer)(unsafe.Pointer(&pred.m)),
						unsafe.Pointer(pred_m),
						unsafe.Pointer(m)) {

						// Another thread has progressed by modifying the next
						// pointer of our predecessor. We need to traverse the
						// list again.
						continue retry
					}
					pred_m = m
				}
				curr = curr_m.next
				continue
			}

			if curr.key >= key {
				return
			}

			pred = curr
			pred_m = curr_m
			curr = curr_m.next
		}
		return
	}
	panic("not reachable")
}
Example #22
0
func (wstore *WStore) assertNotMemberCache(offsets []int64) {
	if wstore.Debug {
		nc := (*map[int64]Node)(atomic.LoadPointer(&wstore.ncping))
		lc := (*map[int64]Node)(atomic.LoadPointer(&wstore.lcping))
		for _, fpos := range offsets {
			if (*nc)[fpos] != nil {
				log.Panicln("to be freed fpos is in ncping-cache", fpos)
			} else if (*lc)[fpos] != nil {
				log.Panicln("to be freed fpos is in ncping-cache", fpos)
			}
		}
	}
}
Example #23
0
func (wstore *WStore) displayPing() {
	ncping := (*map[int64]Node)(atomic.LoadPointer(&wstore.ncping))
	fposs := make([]int64, 0, 100)
	for fpos := range *ncping {
		fposs = append(fposs, fpos)
	}

	lcping := (*map[int64]Node)(atomic.LoadPointer(&wstore.lcping))
	fposs = make([]int64, 0, 100)
	for fpos := range *lcping {
		fposs = append(fposs, fpos)
	}
}
Example #24
0
// Lock acquires the write lock on the resource and waits for its readers to finish.
func (t *TControl) Lock(resursId string) {
	// lock writer
	for !atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&t.writer)), nil, unsafe.Pointer(&resursId)) {
		t.sleep()
	}
	// wait readers
	var rlock *string
	for i := range t.readers {
		rlock = (*string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&t.readers[i]))))
		for rlock != nil && *rlock == resursId {
			t.sleep()
			rlock = (*string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&t.readers[i]))))
		}
	}
}
Example #25
0
func (l *partitionList) Swap(old, new partition.Partition) error {
	n := &partitionListNode{
		val:  new,
		next: nil,
	}

HEAD:
	headPtr := atomic.LoadPointer(&l.head)

	if headPtr == nil {
		return errors.New("catena/partition_list: partition not found")
	}

	headNode := (*partitionListNode)(headPtr)
	if comparePartitions(headNode.val, n.val) == 0 {
		n.next = headNode.next

		if !atomic.CompareAndSwapPointer(&l.head, headPtr, unsafe.Pointer(n)) {
			goto HEAD
		}

		return nil
	}

NEXT:
	nextPtr := atomic.LoadPointer(&headNode.next)
	if nextPtr == nil {
		return errors.New("catena/partition_list: partition not found")
	}

	nextNode := (*partitionListNode)(nextPtr)
	if comparePartitions(nextNode.val, n.val) == 0 {
		n.next = nextNode.next

		if !atomic.CompareAndSwapPointer(&headNode.next, nextPtr, unsafe.Pointer(n)) {
			goto NEXT
		}

		return nil
	}

	if comparePartitions(nextNode.val, n.val) > 0 {
		return errors.New("catena/partition_list: partition not found")
	}

	headNode = nextNode
	goto NEXT
}
Example #26
0
func (q *LockfreeQuadtree) Insert(p *Point) bool {
	// we don't need to check the boundary within the CAS loop, because it can't change.
	// if the quadtree were changed to allow changing the boundary, this would no longer be threadsafe.
	if !q.boundary.Contains(p) {
		//		fmt.Println("insert outside boundary")
		return false
	}

	for {
		// the value we start working with
		oldPoints := (*PointList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.Points))))
		// if at any point in our attempts to add the point, the length becomes the capacity, break so we can subdivide if necessary and add to a subtree
		if oldPoints == nil || oldPoints.Length >= oldPoints.Capacity {
			break
		}

		newPoints := *oldPoints
		newPoints.First = NewPointListNode(p, newPoints.First)
		newPoints.Length++
		// if the working value is the same, set the new slice with our point
		ok := atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.Points)), unsafe.Pointer(oldPoints), unsafe.Pointer(&newPoints))
		if ok {
			// the CAS succeeded, our point was added, return success
			return true
		}
		// debug
		//		fmt.Println("CAS Insert failed: len(points): " + strconv.Itoa(newPoints.Length))
		// if the working value changed underneath us, loop and try again
	}

	// If we get here, we broke the loop because the length exceeds the capacity.
	// We must now Subdivide if necessary, and add the point to the proper subtree

	// at this point, because of the CAS above, even if we simply protect Subdivide() with a mutex, insertion is still lock-free in an amortized sense.

	// subdivide is threadsafe. The function itself does CAS
	points := (*PointList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.Points))))
	if points != nil {
		q.subdivide()
	}

	// These inserts are themselves threadsafe. Therefore, we don't need to do any special CAS work here.
	ok := q.Nw.Insert(p) || q.Ne.Insert(p) || q.Sw.Insert(p) || q.Se.Insert(p)
	if !ok {
		fmt.Println("insert failed")
	}
	return ok
}
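The point-list types used above are not part of the example. A hypothetical sketch consistent with the copy-and-CAS pattern (field names follow the code, everything else is assumed):

type PointListNode struct {
	Point *Point
	Next  *PointListNode
}

// NewPointListNode prepends p to the existing chain without mutating it.
func NewPointListNode(p *Point, next *PointListNode) *PointListNode {
	return &PointListNode{Point: p, Next: next}
}

type PointList struct {
	First    *PointListNode
	Length   int
	Capacity int
}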
Example #27
0
// Pick a compaction based on the current state; needs external synchronization.
func (s *session) pickCompaction() *compaction {
	v := s.version()

	var level int
	var t0 tFiles
	if v.cScore >= 1 {
		level = v.cLevel
		cptr := s.stCompPtrs[level]
		tables := v.tables[level]
		for _, t := range tables {
			if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
				t0 = append(t0, t)
				break
			}
		}
		if len(t0) == 0 {
			t0 = append(t0, tables[0])
		}
	} else {
		if p := atomic.LoadPointer(&v.cSeek); p != nil {
			ts := (*tSet)(p)
			level = ts.level
			t0 = append(t0, ts.table)
		} else {
			v.release()
			return nil
		}
	}

	return newCompaction(s, v, level, t0)
}
Example #28
0
func (pool *bufferPool) GetOutBuffer() (out *OutBuffer) {
	var ptr unsafe.Pointer
	for {
		ptr = atomic.LoadPointer(&pool.out)
		if ptr == nil {
			break
		}
		if atomic.CompareAndSwapPointer(&pool.out, ptr, ((*OutBuffer)(ptr)).next) {
			break
		}
	}

	atomic.AddUint64(&pool.outGet, 1)
	if ptr == nil {
		atomic.AddUint64(&pool.outNew, 1)
		out = &OutBuffer{Data: make([]byte, 0, pool.bufferInitSize), pool: pool}
	} else {
		out = (*OutBuffer)(ptr)
		atomic.AddInt64(&pool.size, -int64(cap(out.Data)))
	}

	out.isFreed = false
	out.isBroadcast = false
	out.refCount = 0
	return out
}
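The pool types are not shown; the loop above pops a buffer from what looks like a Treiber-stack free list. A hypothetical sketch of the assumed layout:

type OutBuffer struct {
	Data        []byte
	pool        *bufferPool
	next        unsafe.Pointer // *OutBuffer; link in the pool's free list
	isFreed     bool
	isBroadcast bool
	refCount    int32
}

type bufferPool struct {
	out            unsafe.Pointer // *OutBuffer; head of the free list
	outGet, outNew uint64         // counters, updated atomically
	size           int64          // total bytes currently held in the pool
	bufferInitSize int            // initial capacity of newly allocated buffers
}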
Example #29
0
// RowWithCols implements table.Table RowWithCols interface.
func (t *BoundedTable) RowWithCols(ctx context.Context, h int64, cols []*table.Column) ([]types.Datum, error) {
	row := []types.Datum(nil)
	for i := int64(0); i < t.capacity; i++ {
		record := (*boundedItem)(atomic.LoadPointer(&t.records[i]))
		if record == nil {
			// A nil record means the rest of the records are nil as well.
			break
		}
		if record.handle == h {
			row = record.data
			break
		}
	}
	if row == nil {
		return nil, table.ErrRowNotFound
	}
	v := make([]types.Datum, len(cols))
	for i, col := range cols {
		if col == nil {
			continue
		}
		v[i] = row[col.Offset]
	}
	return v, nil
}
Example #30
0
// Load loads the DLL file d.Name into memory. It returns an error if it fails.
// Load will not try to load the DLL if it is already loaded into memory.
func (d *LazyDLL) Load() error {
	// Non-racy version of:
	// if d.dll != nil {
	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil {
		return nil
	}
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.dll != nil {
		return nil
	}

	// kernel32.dll is special, since it's where LoadLibraryEx comes from.
	// The kernel already special-cases its name, so it's always
	// loaded from system32.
	var dll *DLL
	var err error
	if d.Name == "kernel32.dll" {
		dll, err = LoadDLL(d.Name)
	} else {
		dll, err = loadLibraryEx(d.Name, d.System)
	}
	if err != nil {
		return err
	}

	// Non-racy version of:
	// d.dll = dll
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll))
	return nil
}