Example #1
func (s *memdQueue) QueueRequest(req *memdQRequest) bool {
	s.lock.RLock()
	if s.isDrained {
		s.lock.RUnlock()
		return false
	}

	if !atomic.CompareAndSwapPointer(&req.queuedWith, nil, unsafe.Pointer(s)) {
		panic("Request was dispatched while already queued somewhere.")
	}

	logSchedf("Writing request to queue!")

	// Try to write the request to the queue; if the queue is full,
	//   we immediately fail the request with a queue-overflow error.
	select {
	case s.reqsCh <- req:
		s.lock.RUnlock()
		return true

	default:
		s.lock.RUnlock()
		// As long as we have not lost ownership, dispatch a queue overflow error.
		if atomic.CompareAndSwapPointer(&req.queuedWith, unsafe.Pointer(s), nil) {
			req.Callback(nil, ErrOverload)
		}
		return true
	}
}
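
The interesting CAS here is the ownership handoff on req.queuedWith: queuing claims the request, and the overflow path only invokes the callback if it can first reclaim ownership. A minimal sketch of the fields the method appears to assume, inferred from the snippet alone (the real gocbcore types carry more state, and the Callback signature here is a placeholder):

type memdQRequest struct {
	// queuedWith holds the *memdQueue that currently owns the request:
	// CAS nil -> queue claims it, CAS queue -> nil reclaims it.
	queuedWith unsafe.Pointer
	Callback   func(resp interface{}, err error)
}

type memdQueue struct {
	lock      sync.RWMutex
	isDrained bool
	reqsCh    chan *memdQRequest
}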
Example #2
func (q *queue) dequeue() (interface{}, bool) {
	var temp interface{}
	var oldDummy, oldHead *node

	removed := false

	for !removed {
		// Snapshot the sentinel (dummy) node, the first real node, and the tail.
		oldDummy = q.dummy
		oldHead = oldDummy.next
		oldTail := q.tail

		// Another dequeuer advanced the dummy in the meantime; retry.
		if q.dummy != oldDummy {
			continue
		}

		// Nothing behind the sentinel: the queue is empty.
		if oldHead == nil {
			return nil, false
		}

		// The tail still points at the sentinel; help advance it, then retry.
		if oldTail == oldDummy {
			atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), unsafe.Pointer(oldTail), unsafe.Pointer(oldHead))
			continue
		}

		// Read the value first; a successful CAS makes oldHead the new sentinel.
		temp = oldHead.value
		removed = atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.dummy)), unsafe.Pointer(oldDummy), unsafe.Pointer(oldHead))
	}

	return temp, true
}
Example #3
func (q *queue) enqueue(v interface{}) {
	var oldTail, oldTailNext *node

	newNode := new(node)
	newNode.value = v

	newNodeAdded := false

	for !newNodeAdded {
		// Snapshot the tail and its next pointer.
		oldTail = q.tail
		oldTailNext = oldTail.next

		// The tail moved under us; retry with a fresh snapshot.
		if q.tail != oldTail {
			continue
		}

		// The tail is lagging behind; help swing it to the true last node.
		if oldTailNext != nil {
			atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), unsafe.Pointer(oldTail), unsafe.Pointer(oldTailNext))
			continue
		}

		// Link the new node after the last node; success exits the loop.
		newNodeAdded = atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&oldTail.next)), unsafe.Pointer(oldTailNext), unsafe.Pointer(newNode))
	}

	// Best effort: swing the tail to the new node. If this CAS fails,
	// another enqueuer has already helped the tail past oldTail.
	atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.tail)), unsafe.Pointer(oldTail), unsafe.Pointer(newNode))
}
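
Examples #2 and #3 operate on the same queue. Below is a plausible reconstruction of the types and constructor they assume (names taken from the snippets; newQueue is hypothetical). The sentinel dummy node guarantees that dequeue always has a predecessor for the real head:

type node struct {
	value interface{}
	next  *node
}

type queue struct {
	dummy *node // sentinel; dummy.next is the real head
	tail  *node
}

func newQueue() *queue {
	sentinel := new(node)
	return &queue{dummy: sentinel, tail: sentinel}
}

// Usage sketch:
//	q := newQueue()
//	q.enqueue("a")
//	q.enqueue("b")
//	v, ok := q.dequeue() // v == "a", ok == true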
Example #4
func (c *container) getOrInsert(k uint64, v unsafe.Pointer) unsafe.Pointer {
	bi := k & (c.sz - 1)
	b := c.list[bi]
	for i := range b.elems {
		e := &b.elems[i]
		// Once a slot is allocated a valid key, the key never changes. So,
		// first try to claim the slot by CAS-ing the key from 0 to k (Step 1).
		// If that fails, check whether the slot already holds k; if so, try to
		// install the value. Otherwise move on to the next slot. Even after
		// winning Step 1, the value CAS can lose to another goroutine that
		// matched on k and installed its value first, which is why a failed
		// value CAS falls back to returning LoadPointer(&e.v).
		if atomic.CompareAndSwapUint64(&e.k, 0, k) { // Step 1.
			atomic.AddUint32(&c.numElems, 1)
			if atomic.CompareAndSwapPointer(&e.v, nil, v) {
				return v
			}
			return atomic.LoadPointer(&e.v)
		}

		if atomic.LoadUint64(&e.k) == k {
			// Swap if previous pointer is nil.
			if atomic.CompareAndSwapPointer(&e.v, nil, v) {
				return v
			}
			return atomic.LoadPointer(&e.v)
		}
	}
	return nil
}
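
A hypothetical layout for the container this method runs against, inferred from its field accesses: the table is a fixed, power-of-two-sized array of buckets, a key of 0 marks a free slot, and the key and value are claimed with separate CAS steps:

type elem struct {
	k uint64         // 0 means the slot is free; claimed once via CAS, never changed
	v unsafe.Pointer // installed with a nil -> v CAS
}

type bucket struct {
	elems [8]elem // width is an assumption
}

type container struct {
	sz       uint64 // power of two, so k & (sz-1) selects a bucket
	numElems uint32
	list     []*bucket
}

Note that because 0 marks a free slot, this layout cannot store a key of 0.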
Example #5
// Add adds a new element to the list, returning true if, and only if the
// element wasn't already there. This method is lock-free.
func (l *List) Add(key string) bool {
	for {
		pred, pred_m, curr, _ := l.find(key)
		if curr != nil && curr.key == key {
			return false
		}

		node := &node{key, &markAndRef{false, curr}}

		// Insert the new node after the pred node or modify the head of the
		// list if there is no predecessor.
		if pred == nil {
			if atomic.CompareAndSwapPointer(
				(*unsafe.Pointer)(unsafe.Pointer(&l.head)),
				unsafe.Pointer(curr),
				unsafe.Pointer(node)) {
				return true
			}
		} else {
			m := &markAndRef{false, node}
			if atomic.CompareAndSwapPointer(
				(*unsafe.Pointer)(unsafe.Pointer(&pred.m)),
				unsafe.Pointer(pred_m),
				unsafe.Pointer(m)) {
				return true
			}
		}

		// Another thread has modified the pred node, by either marking it as
		// deleted or by inserting another node directly after it. The other
		// thread progressed, but we need to retry our insert.
	}
	panic("not reachable")
}
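
Add pairs with find from Example #9 further down; both presumably share types along these lines (a sketch inferred from the two snippets). Packing the deletion mark and the successor into a single markAndRef is what lets one CAS on node.m update both atomically:

type node struct {
	key string
	m   *markAndRef
}

type markAndRef struct {
	marked bool  // true: the node is logically deleted
	next   *node
}

type List struct {
	head *node
}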
Example #6
func (self *element) next() *element {
	next := atomic.LoadPointer(&self.Pointer)
	for next != nil {
		/*
		 If the pointer of the next element is marked as deleted, that means the next element is supposed to be GONE
		*/
		if nextPointer := atomic.LoadPointer(&(*element)(normal(next)).Pointer); isDeleted(nextPointer) {
			/*
			 If OUR pointer is marked as deleted, that means WE are supposed to be gone
			*/
			if isDeleted(next) {
				/*
				 .. which means that we can steal the pointer of the next element right away,
				 it points to the right place AND it is marked as deleted.
				*/
				atomic.CompareAndSwapPointer(&self.Pointer, next, nextPointer)
			} else {
				/*
				 .. if not, we have to remove the marking on the pointer before we steal it.
				*/
				atomic.CompareAndSwapPointer(&self.Pointer, next, normal(nextPointer))
			}
			next = atomic.LoadPointer(&self.Pointer)
		} else {
			/*
			 If the next element is NOT deleted, then we simply return a pointer to it, and make
			 damn sure that the pointer is a working one even if we are deleted (and, therefore,
			 our pointer is marked as deleted).
			*/
			return (*element)(normal(next))
		}
	}
	return nil
}
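
This snippet depends on two helpers that are not shown, isDeleted and normal. A common implementation, and only an assumption about this particular library, is to tag the pointer's least-significant bit, which is always zero for aligned heap objects:

// isDeleted reports whether the deletion mark (the low bit) is set on p.
func isDeleted(p unsafe.Pointer) bool {
	return uintptr(p)&1 == 1
}

// normal clears the deletion mark, yielding a dereferenceable pointer.
func normal(p unsafe.Pointer) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) &^ 1)
}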
Example #7
// Dequeue returns the value at the head of the queue and true, or if the queue is empty, it returns a nil value and false
func (q *ZFifo) Dequeue() (value interface{}, ok bool) {
	for {
		head := atomic.LoadPointer(&q.head)               // Read head pointer
		tail := atomic.LoadPointer(&q.tail)               // Read tail pointer
		next := atomic.LoadPointer(&(*lfNode)(head).next) // Read head.next
		if head != q.head {                               // Check head, tail, and next consistency
			continue // Not consistent. Try again
		}

		if head == tail { // Is the queue empty, or is the tail falling behind?
			if next == unsafe.Pointer(q) { // Is queue empty?
				return
			}
			// Try to swing tail to the next node as the tail was not pointing to the last node
			atomic.CompareAndSwapPointer(&q.tail, tail, next)
		} else {
			// Read value before CAS
			// Otherwise, another dequeue might free the next node
			value = (*lfNode)(next).value
			// Try to swing Head to the next node
			if atomic.CompareAndSwapPointer(&q.head, head, next) {
				ok = true
				return
			}
			value = nil
		}
	}
	return // Dummy return
}
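
The distinctive trick in this queue is its end-of-list sentinel: instead of nil, the last node's next points back at the queue itself, so the empty check (head == tail and head.next == unsafe.Pointer(q)) cannot be confused with an uninitialized next pointer. A plausible reconstruction of the type and constructor (NewZFifo is hypothetical):

type lfNode struct {
	value interface{}
	next  unsafe.Pointer // *lfNode, or unsafe.Pointer(q) meaning end of queue
}

type ZFifo struct {
	head  unsafe.Pointer // *lfNode; initially a dummy node
	tail  unsafe.Pointer // *lfNode
	dummy lfNode
}

func NewZFifo() *ZFifo {
	q := new(ZFifo)
	q.dummy.next = unsafe.Pointer(q)
	q.head = unsafe.Pointer(&q.dummy)
	q.tail = unsafe.Pointer(&q.dummy)
	return q
}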
Example #8
// Insert inserts v into the list in order. An error is returned if v is already present.
func (l *partitionList) Insert(v partition.Partition) error {
	n := &partitionListNode{
		val:  v,
		next: nil,
	}

HEAD:
	headPtr := atomic.LoadPointer(&l.head)

	if headPtr == nil {
		if !atomic.CompareAndSwapPointer(&l.head, headPtr, unsafe.Pointer(n)) {
			goto HEAD
		}

		atomic.AddInt32(&l.size, 1)
		return nil
	}

	headNode := (*partitionListNode)(headPtr)
	if comparePartitions(headNode.val, n.val) > 0 {
		n.next = headPtr
		if !atomic.CompareAndSwapPointer(&l.head, headPtr, unsafe.Pointer(n)) {
			goto HEAD
		}

		atomic.AddInt32(&l.size, 1)
		return nil
	}

NEXT:
	nextPtr := atomic.LoadPointer(&headNode.next)
	if nextPtr == nil {
		if !atomic.CompareAndSwapPointer(&headNode.next, nextPtr, unsafe.Pointer(n)) {
			goto NEXT
		}

		atomic.AddInt32(&l.size, 1)
		return nil
	}

	nextNode := (*partitionListNode)(nextPtr)
	if comparePartitions(nextNode.val, n.val) > 0 {
		n.next = nextPtr
		if !atomic.CompareAndSwapPointer(&headNode.next, nextPtr, unsafe.Pointer(n)) {
			goto NEXT
		}

		atomic.AddInt32(&l.size, 1)
		return nil
	}

	if comparePartitions(nextNode.val, n.val) == 0 {
		return errors.New("catena/partition_list: partition exists")
	}

	headNode = nextNode
	goto NEXT
}
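
Examples #8 and #14 traverse the same singly linked structure; the shapes below are inferred from their field accesses. Insert prepares n.next before each CAS, so a successful swap links the node in a single atomic step:

type partitionListNode struct {
	val  partition.Partition // element payload, ordered by comparePartitions
	next unsafe.Pointer      // *partitionListNode
}

type partitionList struct {
	head unsafe.Pointer // *partitionListNode
	size int32          // maintained with atomic.AddInt32
}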
Example #9
// find returns the nodes of either side of a specific key. It will physically
// delete all nodes marked for deletion while traversing the list.
func (l *List) find(key string) (pred *node, pred_m *markAndRef, curr *node, curr_m *markAndRef) {
retry:
	for {
		curr = (*node)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&l.head))))
		for curr != nil {
			curr_m = (*markAndRef)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&curr.m))))

			if curr_m.marked {
				// curr is marked as deleted. Try to remove it physically by
				// unlinking the node from the list.

				if pred == nil {
					if !atomic.CompareAndSwapPointer(
						(*unsafe.Pointer)(unsafe.Pointer(&l.head)),
						unsafe.Pointer(curr),
						unsafe.Pointer(curr_m.next)) {

						// Another thread has modified the head pointer of our
						// list. The other thread progressed, but we need to
						// restart the list traversal.
						continue retry
					}
				} else {
					m := &markAndRef{false, curr_m.next}
					if !atomic.CompareAndSwapPointer(
						(*unsafe.Pointer)(unsafe.Pointer(&pred.m)),
						unsafe.Pointer(pred_m),
						unsafe.Pointer(m)) {

						// Another thread has progressed by modifying the next
						// pointer of our predecessor. We need to traverse the
						// list again.
						continue retry
					}
					pred_m = m
				}
				curr = curr_m.next
				continue
			}

			if curr.key >= key {
				return
			}

			pred = curr
			pred_m = curr_m
			curr = curr_m.next
		}
		return
	}
	panic("not reachable")
}
Example #10
func (q *Queue) enqueue1(x string) {
	newValue := unsafe.Pointer(&Node{body: x, next: nil})
	var tail, next unsafe.Pointer
	for {
		tail = q.tail
		next = ((*Node)(tail)).next
		if next != nil {
			atomic.CompareAndSwapPointer(&(q.tail), tail, next)
		} else if atomic.CompareAndSwapPointer(&((*Node)(tail).next), nil, newValue) {
			break
		}
		runtime.Gosched()
	}
}
Example #11
// Log implements the Log method required by Backend.
func (b *MemoryBackend) Log(level Level, calldepth int, rec *Record) error {
	var size int32

	n := &node{Record: rec}
	np := unsafe.Pointer(n)

	// Add the record to the tail. If there's no records available, tail and
	// head will both be nil. When we successfully set the tail and the previous
	// value was nil, it's safe to set the head to the current value too.
	for {
		tailp := b.tail
		swapped := atomic.CompareAndSwapPointer(
			&b.tail,
			tailp,
			np,
		)
		if swapped {
			if tailp == nil {
				b.head = np
			} else {
				(*node)(tailp).next = n
			}
			size = atomic.AddInt32(&b.size, 1)
			break
		}
	}

	// Since one record was added, we might have overflowed the list. Remove
	// a record if that is the case. The size will fluctuate a bit, but it is
	// eventually consistent.
	if b.maxSize > 0 && size > b.maxSize {
		for {
			headp := b.head
			head := (*node)(b.head)
			if head.next == nil {
				break
			}
			swapped := atomic.CompareAndSwapPointer(
				&b.head,
				headp,
				unsafe.Pointer(head.next),
			)
			if swapped {
				atomic.AddInt32(&b.size, -1)
				break
			}
		}
	}
	return nil
}
Example #12
func (q *Q) enq(val interface{}) {
	var t, n unsafe.Pointer
	n = unsafe.Pointer(&node{val: val, nxt: nil})
	for {
		t = q.tail
		nxt := ((*node)(t)).nxt
		if nxt != nil {
			atomic.CompareAndSwapPointer(&q.tail, t, nxt)
		} else if atomic.CompareAndSwapPointer(&((*node)(t)).nxt, nil, n) {
			break
		}
	}
	atomic.CompareAndSwapPointer(&q.tail, t, n)
}
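
Examples #10, #12, #13, #16, and #27 are variants of the same tail-chasing enqueue. If tail.next is non-nil, another enqueuer has linked a node but not yet advanced the tail, so the loop helps swing the tail forward and retries; once the nil -> node CAS on next succeeds, a final best-effort CAS moves the tail (Example #10 omits that step and relies on the helping path, while Example #13 walks next pointers to the true end instead). Note that #10 and #12 read q.tail with a plain load where #16 and #27 use atomic.LoadPointer. All of the variants presuppose a queue primed with a dummy node, roughly (NewQ is hypothetical):

type node struct {
	val interface{}
	nxt unsafe.Pointer // *node
}

type Q struct {
	head unsafe.Pointer // *node; the dummy, assumed for a matching dequeue
	tail unsafe.Pointer // *node; never more than one link behind the true end
}

func NewQ() *Q {
	dummy := unsafe.Pointer(new(node))
	return &Q{head: dummy, tail: dummy}
}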
Example #13
func (q *Queue) enqueue2(x string) {
	node := unsafe.Pointer(&Node{body: x, next: nil})
	p := atomic.LoadPointer(&q.tail)
	oldp := p
	for {
		for ((*Node)(p)).next != nil {
			p = ((*Node)(p).next)
		}
		if atomic.CompareAndSwapPointer(&(((*Node)(p)).next), nil, node) {
			break
		}
	}
	atomic.CompareAndSwapPointer(&(q.tail), oldp, node)
}
Example #14
func (l *partitionList) Swap(old, new partition.Partition) error {
	n := &partitionListNode{
		val:  new,
		next: nil,
	}

HEAD:
	headPtr := atomic.LoadPointer(&l.head)

	if headPtr == nil {
		return errors.New("catena/partition_list: partition not found")
	}

	headNode := (*partitionListNode)(headPtr)
	if comparePartitions(headNode.val, n.val) == 0 {
		n.next = headNode.next

		if !atomic.CompareAndSwapPointer(&l.head, headPtr, unsafe.Pointer(n)) {
			goto HEAD
		}

		return nil
	}

NEXT:
	nextPtr := atomic.LoadPointer(&headNode.next)
	if nextPtr == nil {
		return errors.New("catena/partition_list: partition not found")
	}

	nextNode := (*partitionListNode)(nextPtr)
	if comparePartitions(nextNode.val, n.val) == 0 {
		n.next = nextNode.next

		if !atomic.CompareAndSwapPointer(&headNode.next, nextPtr, unsafe.Pointer(n)) {
			goto NEXT
		}

		return nil
	}

	if comparePartitions(nextNode.val, n.val) > 0 {
		return errors.New("catena/partition_list: partition not found")
	}

	headNode = nextNode
	goto NEXT
}
Example #15
// helper function for subdivide()
//
// places all points in the tree in the appropriate quadrant,
// and clears the points of this tree.
func (q *LockfreeQuadtree) disperse2() {
	for {
		oldPoints := (*PointList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.Points))))
		if oldPoints == nil || oldPoints.Length == 0 {
			break
		}
		newPoints := *oldPoints
		p := *newPoints.First.Point
		newPoints.First = newPoints.First.Next
		newPoints.Length--
		ok := atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.Points)), unsafe.Pointer(oldPoints), unsafe.Pointer(&newPoints))
		if !ok {
			continue
		}

		ok = q.Nw.Insert(&p) || q.Ne.Insert(&p) || q.Sw.Insert(&p) || q.Se.Insert(&p)
		// debug
		if !ok {
			panic("quadtree contained a point outside boundary")
		}
	}
	// We don't need a CAS here: the points must be set to nil now, and if
	// someone else stored nil first, storing it again does no harm. The store
	// does need to be atomic, however; otherwise Query() might read a pointer
	// that was only half-set to nil.
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&q.Points)), nil)
}
Example #16
func (pq *packetQueue) push(val *Packet) {
	node := unsafe.Pointer(&packetNode{val: val})
	for {
		t := atomic.LoadPointer(&pq.tail)
		rt := (*packetNode)(t)
		if atomic.CompareAndSwapPointer(&rt.next, nil, node) {
			// Using atomic.StorePointer() here leads to an endless loop.
			// Don't know why.
			// atomic.StorePointer(&pq.tail, node)
			atomic.CompareAndSwapPointer(&pq.tail, t, node)
			return
		} else {
			continue
		}
	}
}
Example #17
// getPostingList tries to get posting list from l.pbuffer. If it is nil, then
// we query RocksDB. There is no need for lock acquisition here.
func (l *List) getPostingList(loop int) *types.PostingList {
	if loop >= 10 {
		x.Fatalf("This is over the 10th loop: %v", loop)
	}
	l.AssertRLock()
	// Wait for any previous commits to happen before retrieving posting list again.
	l.Wait()

	pb := atomic.LoadPointer(&l.pbuffer)
	plist := (*types.PostingList)(pb)

	if plist == nil {
		x.AssertTrue(l.pstore != nil)
		plist = new(types.PostingList)

		if slice, err := l.pstore.Get(l.key); err == nil && slice != nil {
			x.Checkf(plist.Unmarshal(slice.Data()), "Unable to Unmarshal PostingList from store")
			slice.Free()
		}
		if atomic.CompareAndSwapPointer(&l.pbuffer, pb, unsafe.Pointer(plist)) {
			return plist
		}
		// Someone else replaced the pointer in the meantime. Retry recursively.
		return l.getPostingList(loop + 1)
	}
	return plist
}
Example #18
func (self *element) next() *element {
	next := atomic.LoadPointer(&self.Pointer)
	for next != nil {
		nextElement := (*element)(next)
		/*
		 If our next element contains &deletedElement that means WE are deleted, and
		 we can just return the next-next element. It will make it impossible to add
		 stuff to us, since we will always lie about our next(), but then again, deleted
		 elements shouldn't get new children anyway.
		*/
		if sp, ok := nextElement.value.(*string); ok && sp == &deletedElement {
			return nextElement.next()
		}
		/*
		 If our next element is itself deleted (by the same criteria) then we will just replace
		 it with its next() (which should be the first thing behind it that isn't itself deleted
		 (the power of recursion compels you) and then check again.
		*/
		if nextElement.isDeleted() {
			atomic.CompareAndSwapPointer(&self.Pointer, next, unsafe.Pointer(nextElement.next()))
			next = atomic.LoadPointer(&self.Pointer)
		} else {
			/*
			 If it isn't deleted then we just return it.
			*/
			return nextElement
		}
	}
	/*
	 And if our next is nil, then we are at the end of the list and can just return nil for next()
	*/
	return nil
}
Example #19
func ReadFirst(b []byte, v interface{}) (read bool, total int, err error) {
	var t unsafe.Pointer
	var r *[2]marshal.Reader
	if t = readerCache; t != nil {
		if atomic.CompareAndSwapPointer(&readerCache, t, nil) {
			r = (*[2]marshal.Reader)(t)
			*r = [2]marshal.Reader{{Body: b}, {}}
			goto Got
		}
	}
	r = &[2]marshal.Reader{{Body: b}, {}}
Got:
	total = r[0].IntUint32()
	if total > 0 {
		sz := r[0].IntUint32()
		if r[0].Err == nil {
			r[1].Body = r[0].Slice(sz + 4)
			err = ReadRawTuple(&r[1], v)
			read = true
		} else {
			err = r[0].Err
		}
	}
	atomic.StorePointer(&readerCache, unsafe.Pointer(r))
	return
}
Example #20
func (h *Handle) Release() {
	nPtr := atomic.LoadPointer(&h.n)
	if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
		n := (*Node)(nPtr)
		n.unrefLocked()
	}
}
Example #21
// Evict some clean items found by randomly walking a tree branch.
// For concurrent users, only the single mutator thread should call
// EvictSomeItems(), making it serialized with mutations.
func (t *Collection) EvictSomeItems() (numEvicted uint64) {
	if t.store.readOnly {
		return 0
	}
	i, err := t.store.walk(t, false, func(n *node) (*nodeLoc, bool) {
		if !n.item.Loc().isEmpty() {
			i := n.item.Item()
			if i != nil && atomic.CompareAndSwapPointer(&n.item.item,
				unsafe.Pointer(i), unsafe.Pointer(nil)) {
				t.store.ItemDecRef(t, i)
				numEvicted++
			}
		}
		next := &n.left
		if (rand.Int() & 0x01) == 0x01 {
			next = &n.right
		}
		if next.isEmpty() {
			return nil, false
		}
		return next, true
	})
	if i != nil && err != nil {
		t.store.ItemDecRef(t, i)
	}
	return numEvicted
}
Example #22
func (node *Node) addChar(c *byte, w *wrk) *Node {
	*w.i = int64(*c - 'a')
	if *w.i < 0 || *w.i > 25 {
		return node
	}
	if w.tmp = (atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&(node.ptrs[*w.i]))))); w.tmp == nil {
		atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer((&node.ptrs[*w.i]))), w.tmp, unsafe.Pointer(newNode()))
		w.mn, w.mx = atomic.LoadInt64(&(node.minIdx)), atomic.LoadInt64(&(node.maxIdx))
		for {
			switch {
			case w.mn > *w.i:
				if !atomic.CompareAndSwapInt64(&(node.minIdx), w.mn, *w.i) {
					w.mn = atomic.LoadInt64(&(node.minIdx))
				} else {
					w.mn = *w.i
				}
			case w.mx < *w.i:
				if !atomic.CompareAndSwapInt64(&(node.maxIdx), w.mx, *w.i) {
					w.mx = atomic.LoadInt64(&(node.maxIdx))
				} else {
					w.mx = *w.i
				}
			default:
				return node.ptrs[*w.i]
			}
		}
	}
	return node.ptrs[*w.i]
}
Example #23
// @todo calculate miss or hit statistics
func (mp *MemPool) popFromList(index int) *ElasticBuf {
	bufList := &mp.bufLists[index]
	// fmt.Printf("pop bufList %p mp.bufList[index] %p \n", bufList, &mp.bufLists[index])
	var ptr unsafe.Pointer
	for {
		ptr = atomic.LoadPointer(bufList)
		if ptr == nil {
			// fmt.Println("pop ptr is nil index is ", index)
			goto final
		}
		// fmt.Printf("pop load bufList ptr %p", ptr)
		if atomic.CompareAndSwapPointer(bufList, ptr, ((*ElasticBuf)(ptr)).next) {
			// fmt.Println("pop malloc from pool success")
			mp.bufStat[index].decre()
			goto final
		}
	}
final:
	if ptr == nil {
		p := NewElasticBuf(index, mp)
		// fmt.Printf("pop ptr==nil  make a new buf pointer address %p, buf address %p\n", &p, p)
		return p
	}
	return (*ElasticBuf)(ptr)
}
Example #24
func (cache *DCache) cacheEvict(fpos int64) Node {
	var node Node
	idx := cache.indexFor(fpos)
	for {
		var retry bool
		hash := (*[]unsafe.Pointer)(atomic.LoadPointer(&(cache.hash)))
		addr := &((*hash)[idx])
		hd := (*DCacheItem)(atomic.LoadPointer(addr))
		// Walk the chain for this bucket, unlinking the matching item via a
		// CAS on whichever pointer (bucket head or predecessor's next) holds it.
		for hd != nil {
			nx := atomic.LoadPointer(&hd.next)
			if hd.fpos == fpos {
				if !atomic.CompareAndSwapPointer(addr, unsafe.Pointer(hd), nx) {
					retry = true
				} else {
					node = hd.node
				}
				break
			}
			addr = &hd.next
			hd = (*DCacheItem)(nx)
		}
		if retry {
			continue
		}
		break
	}
	return node
}
Example #25
// Replace or insert an item of a given key.
// A random item Priority (e.g., rand.Int()) will usually work well,
// but advanced users may consider using non-random item priorities
// at the risk of unbalancing the lookup tree.
func (t *Collection) SetItem(item *Item) (err error) {
	if item.Key == nil || len(item.Key) > 0xffff || len(item.Key) == 0 ||
		item.Val == nil {
		return errors.New("Item.Key/Val missing or too long")
	}
	if item.Priority < 0 {
		return errors.New("Item.Priority must be non-negative")
	}
	root := atomic.LoadPointer(&t.root)
	atomic.AddUint64(&t.store.nodeAllocs, 1)
	r, err := t.store.union(t, (*nodeLoc)(root),
		&nodeLoc{node: unsafe.Pointer(&node{item: itemLoc{item: unsafe.Pointer(&Item{
			Key:      item.Key,
			Val:      item.Val,
			Priority: item.Priority,
		})},
			numNodes: 1,
			numBytes: uint64(len(item.Key)) + uint64(item.NumValBytes(t.store)),
		})})
	if err != nil {
		return err
	}
	if !atomic.CompareAndSwapPointer(&t.root, root, unsafe.Pointer(r)) {
		return errors.New("concurrent mutation attempted")
	}
	return nil
}
Example #26
func (pool *bufferPool) GetOutBuffer() (out *OutBuffer) {
	var ptr unsafe.Pointer
	for {
		ptr = atomic.LoadPointer(&pool.out)
		if ptr == nil {
			break
		}
		if atomic.CompareAndSwapPointer(&pool.out, ptr, ((*OutBuffer)(ptr)).next) {
			break
		}
	}

	atomic.AddUint64(&pool.outGet, 1)
	if ptr == nil {
		atomic.AddUint64(&pool.outNew, 1)
		out = &OutBuffer{Data: make([]byte, 0, pool.bufferInitSize), pool: pool}
	} else {
		out = (*OutBuffer)(ptr)
		atomic.AddInt64(&pool.size, -int64(cap(out.Data)))
	}

	out.isFreed = false
	out.isBroadcast = false
	out.refCount = 0
	return out
}
Example #27
// Push inserts an element to the back of the queue.
// It performs exactly the same as list.List.PushBack() with sync.Mutex.
func (lfq *LockfreeQueue) Push(val interface{}) {
	node := unsafe.Pointer(&lfqNode{val: val})
	for {
		t := atomic.LoadPointer(&lfq.tail)
		rt := (*lfqNode)(t)
		if atomic.CompareAndSwapPointer(&rt.next, nil, node) {
			// Using atomic.StorePointer() here leads to an endless loop.
			// Don't know why.
			// atomic.StorePointer(&lfq.tail, node)
			atomic.CompareAndSwapPointer(&lfq.tail, t, node)
			return
		} else {
			continue
		}
	}
}
Example #28
func (golf *Golfhash) insert2alloc(key unsafe.Pointer, val unsafe.Pointer, hash hashVal, st *hashSubTable) bool {
	idx := int((uint64(hash) >> uint64(st.shift)) & uint64(hashIdxMask))
	for i := idx; i < idx+defMaxProbes; i++ {
		e := st.entry[i]
		if e != nil && (e.hash&hashMask) == hashSubHash {
			if golf.insert2alloc(key, val, hash, (*hashSubTable)(e.val)) {
				return true
			}
		} else {
			nst := newHashSubTable(st.used + defPower)
			if e != nil {
				golf.insert2nil(e.key, e.val, e.hash, nst) // no possibility of failure
			}
			he := &hashEntry{
				hash: hashSubHash,
				key:  nil,
				val:  unsafe.Pointer(nst),
			}
			if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&st.entry[i])), unsafe.Pointer(e), unsafe.Pointer(he)) {
				if golf.insert2nil(key, val, hash, nst) {
					return true
				}
			}
		}
	}
	return false
}
Example #29
// clean replaces an I-node's C-node with a copy that has any tombed I-nodes
// resurrected.
func clean(i *iNode) {
	mainPtr := (*unsafe.Pointer)(unsafe.Pointer(&i.main))
	main := (*mainNode)(atomic.LoadPointer(mainPtr))
	if main.cNode != nil {
		atomic.CompareAndSwapPointer(mainPtr,
			unsafe.Pointer(main), unsafe.Pointer(toCompressed(main.cNode)))
	}
}
Example #30
func (self *Hash) grow() {
	oldExponent := atomic.LoadUint32(&self.exponent)
	newExponent := oldExponent + 1
	newBuckets := make([]unsafe.Pointer, 1<<oldExponent)
	if atomic.CompareAndSwapPointer(&self.buckets[newExponent], nil, unsafe.Pointer(&newBuckets)) {
		atomic.CompareAndSwapUint32(&self.exponent, oldExponent, newExponent)
	}
}
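
For context, a guess at the fields grow manipulates, based on the indexing alone: buckets is an array of segment pointers in which segment e holds 1<<(e-1) entries, so publishing one new segment of 1<<oldExponent buckets doubles capacity without copying, and the trailing CAS on exponent makes each doubling happen at most once:

type Hash struct {
	exponent uint32           // log2 of the current bucket count
	buckets  []unsafe.Pointer // buckets[e] is a *[]unsafe.Pointer segment
}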