Example #1
// Ensure that command parallelism constraints are respected
func TestCommandParallelismConstraint(t *testing.T) {
	assert := assert.New(t)
	sConfig, cConfig := getConfigs()
	cConfig.MaxConcurrentCommands = 1
	service := NewService(sConfig)

	counter := uint64(0)

	wg := &sync.WaitGroup{}
	wg.Add(10)
	for i := 0; i < 10; i++ {
		go func() {
			for j := 0; j < 10; j++ {
				err := service.Run(testCommand, func() (error, error) {
					if !atomic.CompareAndSwapUint64(&counter, 0, 1) {
						t.FailNow()
					}

					time.Sleep(time.Millisecond)

					if !atomic.CompareAndSwapUint64(&counter, 1, 0) {
						t.FailNow()
					}

					return nil, nil
				}, nil)
				assert.Nil(err)
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
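The test treats the CompareAndSwapUint64 pair as an overlap detector rather than a lock: the first CAS fails if another command is already inside the critical section, and the second fails if another command entered before this one left. A minimal self-contained sketch of the same detector, with illustrative names only (a plain mutex stands in for the service's concurrency limit of 1):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var inFlight uint64 // 0 = nobody inside the section, 1 = one goroutine inside
	var overlaps uint64 // counts detected violations
	var mu sync.Mutex   // stands in for MaxConcurrentCommands = 1
	var wg sync.WaitGroup

	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			if !atomic.CompareAndSwapUint64(&inFlight, 0, 1) {
				atomic.AddUint64(&overlaps, 1) // someone was already inside
			}
			// ... the guarded work would run here ...
			if !atomic.CompareAndSwapUint64(&inFlight, 1, 0) {
				atomic.AddUint64(&overlaps, 1) // someone entered before we left
			}
			mu.Unlock()
		}()
	}
	wg.Wait()
	fmt.Println("overlaps detected:", overlaps) // expected: 0
}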
Example #2
func TestNoRaceAtomicCASCASUInt64(t *testing.T) {
	var x int64
	var s uint64
	go func() {
		x = 2
		if !atomic.CompareAndSwapUint64(&s, 0, 1) {
			panic("")
		}
	}()
	for !atomic.CompareAndSwapUint64(&s, 1, 0) {
		runtime.Gosched()
	}
	x = 1
}
Example #3
func TestPageCache(t *testing.T) {
	c1 := newPageCache()

	changeFirst := func(p Pages) {
		p[0].Description = "changed"
	}

	var o1 uint64 = 0
	var o2 uint64 = 0

	var wg sync.WaitGroup

	var l1 sync.Mutex
	var l2 sync.Mutex

	var testPageSets []Pages

	for j := 0; j < 50; j++ {
		testPageSets = append(testPageSets, createSortTestPages(j+1))
	}

	for i := 0; i < 100; i++ {
		wg.Add(1)
		// Pass i by value so reading it inside the goroutine does not race
		// with the outer loop incrementing it.
		go func(i int) {
			defer wg.Done()
			for j, pages := range testPageSets {
				msg := fmt.Sprintf("Go %d %d %d %d", i, j, o1, o2)
				l1.Lock()
				p, c := c1.get("k1", pages, nil)
				assert.Equal(t, !atomic.CompareAndSwapUint64(&o1, uint64(j), uint64(j+1)), c, "c1: "+msg)
				l1.Unlock()
				p2, c2 := c1.get("k1", p, nil)
				assert.True(t, c2)
				assert.True(t, probablyEqualPages(p, p2))
				assert.True(t, probablyEqualPages(p, pages))
				assert.NotNil(t, p, msg)

				l2.Lock()
				p3, c3 := c1.get("k2", pages, changeFirst)
				assert.Equal(t, !atomic.CompareAndSwapUint64(&o2, uint64(j), uint64(j+1)), c3, "c3: "+msg)
				l2.Unlock()
				assert.NotNil(t, p3, msg)
				assert.Equal(t, p3[0].Description, "changed", msg)
			}
		}(i)
	}

	wg.Wait()

}
Example #4
// ObserveHist adds an observation to the given histogram. The id parameter is a handle
// returned by the AddHistogram method. Using numbers not returned by AddHistogram is
// undefined behavior and may cause a panic.
func ObserveHist(id uint32, value uint64) {
	h := &hists[id]

	// We lock here to ensure that the min and max values are true to this time
	// period, meaning extractAndReset won't pull the data out from under us
	// while the current observation is being compared. Otherwise, min and max
	// could come from the previous period on the next read. Same with average.
	h.lock.RLock()

	// Keep a running total for average
	atomic.AddUint64(&h.dat.total, value)

	// Set max and min (if needed) in an atomic fashion
	for {
		max := atomic.LoadUint64(&h.dat.max)
		if value < max || atomic.CompareAndSwapUint64(&h.dat.max, max, value) {
			break
		}
	}
	for {
		min := atomic.LoadUint64(&h.dat.min)
		if value > min || atomic.CompareAndSwapUint64(&h.dat.min, min, value) {
			break
		}
	}

	// Record the bucketized histograms
	bucket := getBucket(value)
	atomic.AddUint64(&bhists[id].buckets[bucket], 1)

	// Count and possibly return for sampling
	c := atomic.AddUint64(&h.dat.count, 1)
	if hSampled[id] {
		// Sample, keep every 4th observation
		if (c & 0x3) > 0 {
			h.lock.RUnlock()
			return
		}
	}

	// Get the current index as the count % buflen
	idx := atomic.AddUint64(&h.dat.kept, 1) & buflen

	// Add observation
	h.dat.buf[idx] = value

	// No longer "reading"
	h.lock.RUnlock()
}
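The two CAS loops above are a lock-free atomic max and atomic min: keep retrying until either the stored extreme already covers the observation or the CAS installs it. Pulled out on its own (helper name is illustrative), the max half of the pattern is:

// atomicStoreMax raises *addr to value unless *addr is already at least as
// large, retrying if a concurrent writer changes *addr between load and CAS.
func atomicStoreMax(addr *uint64, value uint64) {
	for {
		cur := atomic.LoadUint64(addr)
		if value <= cur || atomic.CompareAndSwapUint64(addr, cur, value) {
			return
		}
	}
}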
Example #5
func NewScampDebugger(conn *tls.Conn, clientType string) (handle *ScampDebugger, err error) {
	var worked bool = false
	var thisDebuggerId uint64 = 0
	for i := 0; i < 10; i++ {
		loadedVal := atomic.LoadUint64(&scampDebuggerId)
		thisDebuggerId = loadedVal + 1
		worked = atomic.CompareAndSwapUint64(&scampDebuggerId, loadedVal, thisDebuggerId)
		if worked {
			break
		}
	}
	if !worked {
		panic("never should happen...")
	}

	handle = new(ScampDebugger)

	var path = fmt.Sprintf("%s.%s.%s.%d", writeTeeTargetPath, randomDebuggerString, clientType, thisDebuggerId)

	handle.file, err = os.Create(path)
	if err != nil {
		return
	}

	return
}
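The bounded retry loop above hand-rolls an increment that sync/atomic already provides. Unless giving up after ten attempts is a deliberate requirement, the load/CAS loop and the panic path collapse into a single call (a sketch reusing the package's scampDebuggerId counter):

// Atomically increment the shared counter and take the new value as our ID.
thisDebuggerId := atomic.AddUint64(&scampDebuggerId, 1)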
Example #6
// Put adds the provided item to the queue.  If the queue is full, this
// call will block until an item is added to the queue or Dispose is called
// on the queue.  An error will be returned if the queue is disposed.
func (rb *RingBuffer) Put(item interface{}) error {
	var n *node
	pos := atomic.LoadUint64(&rb.queue)
	i := 0
L:
	for {
		if atomic.LoadUint64(&rb.disposed) == 1 {
			return disposedError
		}

		n = rb.nodes[pos&rb.mask]
		seq := atomic.LoadUint64(&n.position)
		switch dif := seq - pos; {
		case dif == 0:
			if atomic.CompareAndSwapUint64(&rb.queue, pos, pos+1) {
				break L
			}
		case dif < 0:
			// Unreachable as written: dif is uint64, so a still-occupied slot
			// (full buffer) wraps to a huge value and falls through to default,
			// which is what makes Put spin until space frees up.
			panic(`Ring buffer in a compromised state during a put operation.`)
		default:
			pos = atomic.LoadUint64(&rb.queue)
		}

		if i == 10000 {
			runtime.Gosched() // free up the cpu before the next iteration
			i = 0
		} else {
			i++
		}
	}

	n.data = item
	atomic.StoreUint64(&n.position, pos+1)
	return nil
}
Example #7
func (c *container) getOrInsert(k uint64, v unsafe.Pointer) unsafe.Pointer {
	bi := k & (c.sz - 1)
	b := c.list[bi]
	for i := range b.elems {
		e := &b.elems[i]
		// Once allocated a valid key, it would never change. So, first check if
		// it's allocated. If not, then allocate it. If can't, or not allocated,
		// then check if it's k. If it is, then replace value. Otherwise continue.
		// This sequence could be problematic, if this happens:
		// Main thread runs Step 1. Check
		if atomic.CompareAndSwapUint64(&e.k, 0, k) { // Step 1.
			atomic.AddUint32(&c.numElems, 1)
			if atomic.CompareAndSwapPointer(&e.v, nil, v) {
				return v
			}
			return atomic.LoadPointer(&e.v)
		}

		if atomic.LoadUint64(&e.k) == k {
			// Swap if previous pointer is nil.
			if atomic.CompareAndSwapPointer(&e.v, nil, v) {
				return v
			}
			return atomic.LoadPointer(&e.v)
		}
	}
	return nil
}
Example #8
// Enqueue adds a new element to the tail of the ring buffer
// It returns true if the operation is successful, false otherwise
// It blocks on a full queue
func (rb *RingBuffer) Enqueue(data interface{}) bool {
	var cell *ring_cell
	pos := atomic.LoadUint64(&rb.enqueue_pos_)
	i := 0
Loop:
	for {
		cell = rb.buffer_[pos&rb.buffer_mask_]
		seq := atomic.LoadUint64(&cell.sequence_)
		switch dif := seq - pos; {
		case dif == 0:
			if atomic.CompareAndSwapUint64(&rb.enqueue_pos_, pos, pos+1) {
				break Loop
			}
		case dif < 0:
			// Unreachable as written: dif is uint64, so a full buffer wraps to a
			// huge value and falls through to default, making Enqueue block as
			// documented rather than return false.
			return false
		default:
			pos = atomic.LoadUint64(&rb.enqueue_pos_)
		}
		// freeup the cpu
		if i >= freeup_threshold {
			runtime.Gosched()
			i = 0
		} else {
			i++
		}
	}

	cell.data_ = data
	atomic.StoreUint64(&cell.sequence_, pos+1)
	return true
}
Example #9
// rwunlock removes a reference from mu and unlocks mu.
// It reports whether there is no remaining reference.
func (mu *fdMutex) rwunlock(read bool) bool {
	var mutexBit, mutexWait, mutexMask uint64
	var mutexSema *uint32
	if read {
		mutexBit = mutexRLock
		mutexWait = mutexRWait
		mutexMask = mutexRMask
		mutexSema = &mu.rsema
	} else {
		mutexBit = mutexWLock
		mutexWait = mutexWWait
		mutexMask = mutexWMask
		mutexSema = &mu.wsema
	}
	for {
		old := atomic.LoadUint64(&mu.state)
		if old&mutexBit == 0 || old&mutexRefMask == 0 {
			panic("net: inconsistent fdMutex")
		}
		// Drop lock, drop reference and wake read waiter if present.
		new := (old &^ mutexBit) - mutexRef
		if old&mutexMask != 0 {
			new -= mutexWait
		}
		if atomic.CompareAndSwapUint64(&mu.state, old, new) {
			if old&mutexMask != 0 {
				runtime_Semrelease(mutexSema)
			}
			return new&(mutexClosed|mutexRefMask) == mutexClosed
		}
	}
}
Example #10
// Get will return the next item in the queue.  This call will block
// if the queue is empty.  This call will unblock when an item is added
// to the queue or Dispose is called on the queue.  An error will be returned
// if the queue is disposed.
func (rb *RingBuffer) Get() (interface{}, error) {
	var n *node
	pos := atomic.LoadUint64(&rb.dequeue)
	i := 0
L:
	for {
		if atomic.LoadUint64(&rb.disposed) == 1 {
			return nil, ErrDisposed
		}

		n = rb.nodes[pos&rb.mask]
		seq := atomic.LoadUint64(&n.position)
		switch dif := seq - (pos + 1); {
		case dif == 0:
			if atomic.CompareAndSwapUint64(&rb.dequeue, pos, pos+1) {
				break L
			}
		case dif < 0:
			// Unreachable as written: dif is uint64, so an empty buffer wraps to a
			// huge value and falls through to default, which is what makes Get
			// spin until an item arrives.
			panic(`Ring buffer in compromised state during a get operation.`)
		default:
			pos = atomic.LoadUint64(&rb.dequeue)
		}

		if i == 10000 {
			runtime.Gosched() // free up the cpu before the next iteration
			i = 0
		} else {
			i++
		}
	}
	data := n.data
	n.data = nil
	atomic.StoreUint64(&n.position, pos+rb.mask+1)
	return data, nil
}
Example #11
// Dequeue removes and returns the `oldest` element from the ring buffer
// It also returns true if the operation is successful, false otherwise
// It blocks on an empty queue
func (rb *RingBuffer) Dequeue() (data interface{}, b bool) {
	var cell *ring_cell
	pos := atomic.LoadUint64(&rb.dequeue_pos_)
	i := 0
Loop:
	for {
		cell = rb.buffer_[pos&rb.buffer_mask_]
		seq := atomic.LoadUint64(&cell.sequence_)
		switch dif := seq - pos - 1; {
		case dif == 0:
			if atomic.CompareAndSwapUint64(&rb.dequeue_pos_, pos, pos+1) {
				break Loop
			}
		case dif < 0:
			// Unreachable as written: dif is uint64, so an empty buffer wraps to a
			// huge value and falls through to default, making Dequeue block as
			// documented rather than return false.
			return nil, false
		default:
			pos = atomic.LoadUint64(&rb.dequeue_pos_)
		}
		// freeup the cpu
		if i >= freeup_threshold {
			runtime.Gosched()
			i = 0
		} else {
			i++
		}
	}
	data = cell.data_
	atomic.StoreUint64(&cell.sequence_, pos+rb.buffer_mask_+1)
	b = true
	return data, b
}
Example #12
// atomically adds incr to val, returns new val
func incrementAndGet(val *uint64, incr uint64) uint64 {
	currVal := atomic.LoadUint64(val)
	for !atomic.CompareAndSwapUint64(val, currVal, currVal+incr) {
		currVal = atomic.LoadUint64(val)
	}
	return currVal + incr
}
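Because the new value depends on the old one only through addition, this loop is behaviorally equivalent to a single atomic.AddUint64 call; the CAS form is only needed when the update cannot be expressed as an add (clamping, bit manipulation, or the float arithmetic in later examples). The equivalent as a sketch:

// incrementAndGet expressed with the built-in atomic add.
func incrementAndGet(val *uint64, incr uint64) uint64 {
	return atomic.AddUint64(val, incr)
}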
Example #13
// increfAndClose sets the state of mu to closed.
// It returns false if mu is already closed.
func (mu *fdMutex) increfAndClose() bool {
	for {
		old := atomic.LoadUint64(&mu.state)
		if old&mutexClosed != 0 {
			return false
		}
		// Mark as closed and acquire a reference.
		new := (old | mutexClosed) + mutexRef
		if new&mutexRefMask == 0 {
			panic("net: inconsistent fdMutex")
		}
		// Remove all read and write waiters.
		new &^= mutexRMask | mutexWMask
		if atomic.CompareAndSwapUint64(&mu.state, old, new) {
			// Wake all read and write waiters,
			// they will observe closed flag after wakeup.
			for old&mutexRMask != 0 {
				old -= mutexRWait
				runtime_Semrelease(&mu.rsema)
			}
			for old&mutexWMask != 0 {
				old -= mutexWWait
				runtime_Semrelease(&mu.wsema)
			}
			return true
		}
	}
}
Example #14
func (s *IDGenerator) GetStream() (int, bool) {
	// based closely on the java-driver stream ID generator
	// avoid false sharing with subsequent requests.
	offset := atomic.LoadUint32(&s.offset)
	for !atomic.CompareAndSwapUint32(&s.offset, offset, (offset+1)%s.numBuckets) {
		offset = atomic.LoadUint32(&s.offset)
	}
	offset = (offset + 1) % s.numBuckets

	for i := uint32(0); i < s.numBuckets; i++ {
		pos := int((i + offset) % s.numBuckets)

		bucket := atomic.LoadUint64(&s.streams[pos])
		if bucket == math.MaxUint64 {
			// all streams in use
			continue
		}

		for j := 0; j < bucketBits; j++ {
			mask := uint64(1 << streamOffset(j))
			if bucket&mask == 0 {
				if atomic.CompareAndSwapUint64(&s.streams[pos], bucket, bucket|mask) {
					atomic.AddInt32(&s.inuseStreams, 1)
					return streamFromBucket(int(pos), j), true
				}
				bucket = atomic.LoadUint64(&s.streams[pos]) // reload the bucket we just failed to update
			}
		}
	}

	return 0, false
}
Example #15
func (s *IDGenerator) Clear(stream int) (inuse bool) {
	offset := bucketOffset(stream)
	bucket := atomic.LoadUint64(&s.streams[offset])

	mask := uint64(1) << streamOffset(stream)
	if bucket&mask != mask {
		// already cleared
		return false
	}

	for !atomic.CompareAndSwapUint64(&s.streams[offset], bucket, bucket & ^mask) {
		bucket = atomic.LoadUint64(&s.streams[offset])
		if bucket&mask != mask {
			// already cleared
			return false
		}
	}

	// TODO: make this account for 0 stream being reserved
	if atomic.AddInt32(&s.inuseStreams, -1) < 0 {
		// TODO(zariel): remove this
		panic("negative streams inuse")
	}

	return true
}
Example #16
func (ptree *ptree) checkAndRun(action action) {
	if ptree.actions.Len() > 0 {
		if action != nil {
			ptree.actions.Put(action)
		}
		if atomic.CompareAndSwapUint64(&ptree.running, 0, 1) {
			var a interface{}
			var err error
			for ptree.actions.Len() > 0 {
				a, err = ptree.actions.Get()
				if err != nil {
					return
				}
				ptree.cache = append(ptree.cache, a)
				if uint64(len(ptree.cache)) >= ptree.bufferSize {
					break
				}
			}

			go ptree.operationRunner(ptree.cache, true)
		}
	} else if action != nil {
		if atomic.CompareAndSwapUint64(&ptree.running, 0, 1) {
			switch action.operation() {
			case get:
				ptree.read(action)
				action.complete()
				ptree.reset()
			case add, remove:
				if len(action.keys()) > multiThreadAt {
					ptree.operationRunner(interfaces{action}, true)
				} else {
					ptree.operationRunner(interfaces{action}, false)
				}
			case apply:
				q := action.(*applyAction)
				n := getParent(ptree.root, q.start)
				ptree.apply(n, q)
				q.complete()
				ptree.reset()
			}
		} else {
			ptree.actions.Put(action)
			ptree.checkAndRun(nil)
		}
	}
}
Example #17
func (c *Counter) ReadAndReset() uint64 {
	for {
		oldCount := atomic.LoadUint64(&c.value) // atomic load: value is written concurrently via CAS
		if atomic.CompareAndSwapUint64(&c.value, oldCount, 0) {
			return oldCount
		}
	}
}
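Storing zero and returning the previous value is exactly what atomic.SwapUint64 does, so the retry loop can be dropped entirely (a sketch, assuming nothing else has to happen between the read and the reset):

func (c *Counter) ReadAndReset() uint64 {
	// SwapUint64 stores 0 and returns the old value in one atomic step.
	return atomic.SwapUint64(&c.value, 0)
}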
Example #18
func (g *gauge) Add(delta float64) {
	for {
		old := atomic.LoadUint64(&g.value)
		new := math.Float64bits(math.Float64frombits(old) + delta)
		if atomic.CompareAndSwapUint64(&g.value, old, new) {
			return
		}
	}
}
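Examples #18, #20, #21, #25, #29 and #30 all use the same idiom: sync/atomic has no float64 operations, so the float is kept as its IEEE-754 bit pattern in a uint64 and updated with a load, compute, CAS loop. The pattern in isolation (helper name is illustrative):

// addFloat64 atomically adds delta to the float64 whose bits are stored in *bits.
func addFloat64(bits *uint64, delta float64) {
	for {
		old := atomic.LoadUint64(bits)
		new := math.Float64bits(math.Float64frombits(old) + delta)
		if atomic.CompareAndSwapUint64(bits, old, new) {
			return
		}
	}
}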
Example #19
// StartFrom raises the generator's current ID to at least id.
func (g *SeqIDGen) StartFrom(id uint64) {
	val := atomic.LoadUint64(&g.id)
	for val < id {
		if atomic.CompareAndSwapUint64(&g.id, val, id) {
			break
		}
		val = atomic.LoadUint64(&g.id)
	}
}
Example #20
// Add ignores the non-integer part of delta.
func (c *AtomicCounter) Add(delta float64) {
	for {
		oldBits := atomic.LoadUint64((*uint64)(c))
		newBits := math.Float64bits(math.Float64frombits(oldBits) + delta)
		if atomic.CompareAndSwapUint64((*uint64)(c), oldBits, newBits) {
			return
		}
	}
}
Example #21
func (v *value) Add(val float64) {
	for {
		oldBits := atomic.LoadUint64(&v.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
		if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
			return
		}
	}
}
Example #22
// nonce returns a unique string.
func nonce() string {
	n := atomic.AddUint64(&nonceCounter, 1)
	if n == 1 {
		binary.Read(rand.Reader, binary.BigEndian, &n)
		n ^= uint64(time.Now().UnixNano())
		atomic.CompareAndSwapUint64(&nonceCounter, 1, n)
	}
	return strconv.FormatUint(n, 16)
}
Example #23
func (tree *tree) checkAndRun(action action) {
	if tree.actions.Len() > 0 {
		if action != nil {
			tree.actions.Put(action)
		}
		if atomic.CompareAndSwapUint64(&tree.running, 0, 1) {
			var a interface{}
			var err error
			for tree.actions.Len() > 0 {
				a, err = tree.actions.Get()
				if err != nil {
					return
				}
				tree.cache = append(tree.cache, a)
				if uint64(len(tree.cache)) >= tree.bufferSize {
					break
				}
			}

			go tree.operationRunner(tree.cache, true)
		}
	} else if action != nil {
		if atomic.CompareAndSwapUint64(&tree.running, 0, 1) {
			switch action.operation() {
			case get:
				ga := action.(*getAction)
				result := tree.search(ga.lookup)
				ga.result = result
				action.complete()
				tree.reset()
			case add, remove:
				if len(action.keys()) > multiThreadAt {
					tree.operationRunner(interfaces{action}, true)
				} else {
					tree.operationRunner(interfaces{action}, false)
				}
			}
		} else {
			tree.actions.Put(action)
			tree.checkAndRun(nil)
		}
	}
}
Example #24
// Lock locks the resource identified by resursId for writing.
func (t *TControl) Lock(resursId uint64) {
	for !atomic.CompareAndSwapUint64(&t.writer, unlocked, resursId) {
		t.sleep()
	}
	for i := range t.readers {
		for atomic.LoadUint64(&t.readers[i]) == resursId {
			t.sleep()
		}
	}
}
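The CAS on t.writer is a spinning try-lock keyed by the resource ID; only after winning it does Lock wait for readers of that resource to drain. A non-blocking variant of just the writer step would return the CAS result instead of sleeping and retrying (hypothetical method, not part of the original type, and it omits the reader check):

// TryLock attempts to take the writer slot for resursId without blocking.
func (t *TControl) TryLock(resursId uint64) bool {
	return atomic.CompareAndSwapUint64(&t.writer, unlocked, resursId)
}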
Example #25
// Add adds delta to v.
func (v *Float) Add(delta float64) { // add the value delta to v
	for {
		cur := atomic.LoadUint64(&v.f)
		curVal := math.Float64frombits(cur)
		nxtVal := curVal + delta
		nxt := math.Float64bits(nxtVal)
		if atomic.CompareAndSwapUint64(&v.f, cur, nxt) {
			return
		}
	}
}
Example #26
// Reuse given file number.
func (s *session) reuseFileNum(num uint64) {
	for {
		old, x := atomic.LoadUint64(&s.stNextFileNum), num // read atomically; the field is also updated via CAS
		if old != x+1 {
			x = old
		}
		if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
			break
		}
	}
}
Example #27
func (o *VisibleObject) NetworkID() uint64 {
	if id := atomic.LoadUint64(&o.networkID); id != 0 {
		// simple case: we already have a network ID; return it
		return id
	}
	// set our network ID to the next available ID, but do nothing if
	// we already have an ID set.
	atomic.CompareAndSwapUint64(&o.networkID, 0, atomic.AddUint64(&nextNetworkID, 1))
	// we definitely have a network ID at this point; return it.
	return atomic.LoadUint64(&o.networkID)
}
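The load, CAS, load sequence is a lazy one-time assignment: whichever goroutine wins the CAS installs the ID, every loser just reads the winner's value, and an ID drawn from the counter by a loser is simply discarded. The same idiom with standalone variables (names are illustrative):

var nextID uint64 // shared ID source

// lazyID assigns *slot an ID from nextID at most once and returns it.
func lazyID(slot *uint64) uint64 {
	if id := atomic.LoadUint64(slot); id != 0 {
		return id // already assigned
	}
	atomic.CompareAndSwapUint64(slot, 0, atomic.AddUint64(&nextID, 1))
	return atomic.LoadUint64(slot) // whichever CAS won
}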
Example #28
// Mark file number as used.
func (s *session) markFileNum(num uint64) {
	nextFileNum := num + 1
	for {
		old, x := atomic.LoadUint64(&s.stNextFileNum), nextFileNum // read atomically; the field is also updated via CAS
		if old > x {
			x = old
		}
		if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
			break
		}
	}
}
Example #29
// ValueReset returns the current value of the counter, and resets it to zero.
// This is useful for metrics backends whose counter aggregations expect deltas,
// like Graphite.
func (c *Counter) ValueReset() float64 {
	for {
		var (
			old  = atomic.LoadUint64(&c.bits)
			newf = 0.0
			new  = math.Float64bits(newf)
		)
		if atomic.CompareAndSwapUint64(&c.bits, old, new) {
			return math.Float64frombits(old)
		}
	}
}
Example #30
// Add implements Counter.
func (c *Counter) Add(delta float64) {
	for {
		var (
			old  = atomic.LoadUint64(&c.bits)
			newf = math.Float64frombits(old) + delta
			new  = math.Float64bits(newf)
		)
		if atomic.CompareAndSwapUint64(&c.bits, old, new) {
			break
		}
	}
}