Example #1
func SlightlyConcurrentMergeSort(data []int) {
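	// activeWorkers bounds concurrency: a successful CAS reserves a worker slot
	// so one half can be sorted in a new goroutine; otherwise both halves are
	// sorted sequentially with MergeSort.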
	var activeWorkers int32
	var aux func([]int)
	aux = func(data []int) {
		if len(data) <= 1 {
			return
		}
		pivot := len(data) / 2

		left := append([]int{}, data[:pivot]...)
		right := append([]int{}, data[pivot:]...)

		curActiveWorkers := atomic.LoadInt32(&activeWorkers)

		if curActiveWorkers < int32(runtime.NumCPU()) && atomic.CompareAndSwapInt32(&activeWorkers, curActiveWorkers, curActiveWorkers+1) {

			var wg sync.WaitGroup
			wg.Add(1)
			go func() {
				aux(left)
				wg.Done()
			}()
			aux(right)
			wg.Wait()
			// Release the worker slot reserved by the successful CAS above.
			atomic.AddInt32(&activeWorkers, -1)
		} else {
			MergeSort(left)
			MergeSort(right)
		}
		Merge(data, left, right)
	}
	aux(data)
}
Example #2
func (self *supervisorWithPidfile) stop() {
	if 1 != atomic.LoadInt32(&self.owner) {
		atomic.CompareAndSwapInt32(&self.srv_status, SRV_RUNNING, SRV_INIT)
		self.logString("[sys] ignore process\r\n")
		self.logString("[sys] ====================  srv  end  ====================\r\n")
		return
	}
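	// From here on this call owns the shutdown: any panic from interrupt() is
	// captured into last_error, and srv_status is moved from RUNNING or
	// STARTING to STOPPING before the process is interrupted.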

	defer func() {
		if o := recover(); nil != o {
			self.last_error = errors.New(fmt.Sprint(o))
		} else {
			self.last_error = nil
		}
	}()

	if !atomic.CompareAndSwapInt32(&self.srv_status, SRV_RUNNING, SRV_STOPPING) &&
		!atomic.CompareAndSwapInt32(&self.srv_status, SRV_STARTING, SRV_STOPPING) {
		return
	}

	self.interrupt()
	atomic.CompareAndSwapInt32(&self.srv_status, SRV_STOPPING, PROC_INIT)
	atomic.StoreInt32(&self.owner, 0)
	self.logString("[sys] ====================  srv  end  ====================\r\n")
}
Example #3
func (r *RunnaStyle) Run(f Runnable) {
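	// r.sem is a tri-state flag: 0 idle, 1 running, -1 stop requested. The CAS
	// ensures at most one polling goroutine is started per RunnaStyle.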
	if atomic.CompareAndSwapInt32(&r.sem, 0, 1) {
		go func() {
			ticker := time.NewTicker(r.RunInterval)
			defer ticker.Stop()

			fmt.Println("let's run")
			var idle int32 = 0
			for {
				if atomic.LoadInt32(&r.sem) == -1 {
					return
				}
				<-ticker.C

				if f() {
					idle = 0
				} else {
					idle++
				}

				if idle >= r.maxidle {
					fmt.Println("idle for", idle)
					break
				}
			}

			atomic.CompareAndSwapInt32(&r.sem, 1, 0)
		}()
	} else {
		fmt.Println("already running")
	}
}
Example #4
func prodConsSwap() {
	wg := new(sync.WaitGroup)
	n := 1000000
	var lock int32 // Global.
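	// lock is used as a spinlock: each goroutine busy-waits on CAS(0 -> 1) to
	// acquire it, mutates the shared counter, then releases it with an atomic store of 0.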
	atomic.StoreInt32(&lock, 0)

	wg.Add(1)
	go func() {
		for i := 0; i < n; i++ {
			for !atomic.CompareAndSwapInt32(&lock, 0, 1) {
			}

			counter++

			atomic.StoreInt32(&lock, 0)
		}
		wg.Done()
	}()

	wg.Add(1)
	go func() {
		for i := 0; i < n; i++ {
			for !atomic.CompareAndSwapInt32(&lock, 0, 1) {
			}

			counter--

			atomic.StoreInt32(&lock, 0)
		}
		wg.Done()
	}()

	wg.Wait()
}
Example #5
// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() {
	// Fast path: grab unlocked mutex.
	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
		return
	}

	awoke := false
	for {
		old := m.state
		new := old | mutexLocked
		if old&mutexLocked != 0 {
			new = old + 1<<mutexWaiterShift
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			new &^= mutexWoken
		}
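		// Publish the new state; if m.state changed since old was read, the CAS
		// fails and the loop retries with a fresh snapshot.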
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&mutexLocked == 0 {
				break
			}
			runtime.Semacquire(&m.sema)
			awoke = true
		}
	}
}
Example #6
// Creates direct connection to caller to speed response time.
func (s *session) peerConnect(peer string) error {
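	// peerLock is a CAS try-lock: if another peerConnect is already dialling,
	// give up immediately instead of racing to create a second connection.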

	s.connMapLock.RLock()
	if _, ok := s.connMap[peer]; ok {
		s.connMapLock.RUnlock()
		return nil
	}
	s.connMapLock.RUnlock()

	if !atomic.CompareAndSwapInt32(&s.peerLock, 0, 1) {
		return nil
	}
	defer atomic.CompareAndSwapInt32(&s.peerLock, 1, 0)

	socketf := fmt.Sprintf("%s.%s", s.socketf, peer)

	conn, err := net.Dial("unix", socketf)
	if err != nil {
		return err
	}
	c := s.addconnection(conn)

	// Forgo logging on peer connection.
	go func() {
		if err := c.reciever(); err != nil && err != ErrClosed {
			s.log.Println(err)
		}
	}()

	return nil
}
Example #7
func (b *Buffer) clean() {
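	// Single-flight guard: the plain read of cleanLock is a cheap fast path and
	// the CAS is the authoritative check that makes this goroutine the only cleaner.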
	if b.cleanLock == 1 || !atomic.CompareAndSwapInt32(&b.cleanLock, 0, 1) {
		return
	}

	if b.voteCount >= b.peerCount {
		for {
			if atomic.CompareAndSwapInt32(&b.voteLock, 0, 1) {
				b.peerTail = b.nextPeerTail
				b.voteCount = 0
				b.voteRound++

				b.voteLock = 0
				break
			}
		}
	}

	for b.tail != b.peerTail {
		b.values[b.tail] = nil

		b.tail++
		if b.tail > b.maxIdx {
			b.tail = 0
		}
	}

	b.cleanLock = 0

}
Example #8
func (a *HotActor) updateQueryCache(t time.Time) error {
	if !atomic.CompareAndSwapInt32(&a.queryCacheUpdated, 0, 1) {
		return nil
	}
	// Clear the in-flight flag on return; the defer is registered only after
	// this call has actually acquired the flag.
	defer atomic.CompareAndSwapInt32(&a.queryCacheUpdated, 1, 0)
	if (t.Unix() - a.lastQueryCacheUpdate.Unix()) < GOAT_UPDATE_SPAN_SEC {
		return nil
	}
	qcsize := len(a.queryDurations)
	a.wg = utils.NewWaitGroup(2 * qcsize) //hot and flame
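	// Fan out one goroutine per duration bucket for both Hot and Flame, then
	// wait for all 2*qcsize tasks on the custom wait group.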
	var from time.Time
	for i := 0; i < qcsize; i++ {
		if a.queryDurations[i] > 0 {
			from = t.Add(-1 * a.queryDurations[i])
		} else {
			from = time.Time{}
		}
		go a.updateQueryCacheTask(from, t, proto.TopicListRequest_Hot, &a.hotQueryCache[i], a.wg)
		go a.updateQueryCacheTask(from, t, proto.TopicListRequest_Flame, &a.flameQueryCache[i], a.wg)
	}
	if err := a.wg.Wait(); err != nil {
		log.Printf("update fails with %v", err)
		a.available = false
		return err
	} else {
		log.Printf("update success")
		a.available = true
		a.lastQueryCacheUpdate = t
	}
	return nil
}
Example #9
// GetMsgNum returns the number of offline messages stored for uuid.
func GetMsgNum(uuid string) int {
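	// Four readMarkN flags guard four Redis connections; the CAS chain claims
	// the first idle one, and an atomic store releases it after the LLEN call.
	// If all four connections are busy, n stays 0 and 0 is returned.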

	var (
		n   int
		err error
	)

	if atomic.CompareAndSwapInt32(&offlinemsg_readMark1, 0, 1) {
		n, err = redis.Int(offlinemsg_readCon1.Do("LLEN", uuid))
		atomic.StoreInt32(&offlinemsg_readMark1, 0)

	} else if atomic.CompareAndSwapInt32(&offlinemsg_readMark2, 0, 1) {
		n, err = redis.Int(offlinemsg_readCon2.Do("LLEN", uuid))
		atomic.StoreInt32(&offlinemsg_readMark2, 0)

	} else if atomic.CompareAndSwapInt32(&offlinemsg_readMark3, 0, 1) {
		n, err = redis.Int(offlinemsg_readCon3.Do("LLEN", uuid))
		atomic.StoreInt32(&offlinemsg_readMark3, 0)

	} else if atomic.CompareAndSwapInt32(&offlinemsg_readMark4, 0, 1) {
		n, err = redis.Int(offlinemsg_readCon4.Do("LLEN", uuid))
		atomic.StoreInt32(&offlinemsg_readMark4, 0)
	}

	if err != nil {
		return 0
	}
	return n
}
Example #10
func (bl *BufferListener) Put(value []byte) {
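	// Lock-free put: a CAS on putReserve claims a slot, the value is written
	// into it, and head is then advanced by CAS so readers only ever observe
	// fully written slots.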
	b := bl.buffer

	tempSlice := make([]byte, len(value))
	copy(tempSlice, value)

	for {
		putIdx := b.putReserve

		newIdx := putIdx + 1
		if int(newIdx) > b.maxIdx {
			newIdx = 0
		}

		for int(newIdx) == b.tail {
			b.clean()
		}

		if atomic.CompareAndSwapInt32(&b.putReserve, putIdx, newIdx) {
			b.values[putIdx] = tempSlice

			for {
				temp := b.head

				if atomic.CompareAndSwapInt32(&b.head, temp, newIdx) {
					break
				}
			}

			break
		}
	}

	b.clean()
}
Example #11
// start first negotiation
// start n-1 data tun
func (c *Client) restart() (tun *Conn, rn int32) {
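	// restarting is a single-flight guard: only the goroutine that wins the CAS
	// tears down the old multiplexer and renegotiates the tunnel.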
	if atomic.CompareAndSwapInt32(&c.restarting, 0, 1) {
		// discard old conn retrying
		c.pendingConn.clearAll()
		// discard requests are waiting for tokens
		c.pendingTK.clearAll()
		// release mux
		if c.mux != nil {
			c.mux.destroy()
		}
		c.mux = newClientMultiplexer()
		// try negotiating connection infinitely until success
		for i := 0; tun == nil; i++ {
			if i > 0 {
				time.Sleep(RETRY_INTERVAL)
			}
			tun = c.initialNegotiation()
		}
		atomic.CompareAndSwapInt32(&c.restarting, 1, 0)
		atomic.CompareAndSwapInt32(&c.State, CLT_PENDING, CLT_WORKING)
		rn = atomic.AddInt32(&c.round, 1)
		for j := c.tp.parallels; j > 1; j-- {
			go c.StartTun(false)
		}
	}
	return
}
Example #12
//gc dupnodelist
func (t *Btree) gc() {
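	// Only one sweep runs at a time: the CAS flips the tree from StateNormal to
	// StateGc, and flips it back once a node id has been moved to FreeList.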
	for {
		t.Lock()
		if atomic.CompareAndSwapInt32(&t.state, StateNormal, StateGc) {
			if len(t.dupnodelist) > 0 {
				id := t.dupnodelist[len(t.dupnodelist)-1]
				switch t.nodes[id].(type) {
				case *Node:
					*t.NodeCount--
				case *Leaf:
					*t.LeafCount--
				default:
					atomic.CompareAndSwapInt32(&t.state, StateGc, StateNormal)
					t.Unlock() // release the tree lock before continue, otherwise the next iteration deadlocks on t.Lock()
					continue
				}
				t.FreeList = append(t.FreeList, id)
				t.dupnodelist = t.dupnodelist[:len(t.dupnodelist)-1]
				atomic.CompareAndSwapInt32(&t.state, StateGc, StateNormal)
			}
		} else {
			time.Sleep(time.Second)
		}
		t.Unlock()
	}
}
Example #13
// GetLength returns the length of the list named key.
func (this *RedisListHelper) GetLength(key string) int {
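	// Same connection-picking pattern as GetMsgNum above: the CAS claims the
	// first idle readConN and an atomic store releases it after the LLEN call.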
	var (
		n   int
		err error
	)

	if atomic.CompareAndSwapInt32(&this.readMark1, 0, 1) {
		n, err = redis.Int(this.readCon1.Do("LLEN", key))
		atomic.StoreInt32(&this.readMark1, 0)

	} else if atomic.CompareAndSwapInt32(&this.readMark2, 0, 1) {
		n, err = redis.Int(this.readCon2.Do("LLEN", key))
		atomic.StoreInt32(&this.readMark2, 0)

	} else if atomic.CompareAndSwapInt32(&this.readMark3, 0, 1) {
		n, err = redis.Int(this.readCon3.Do("LLEN", key))
		atomic.StoreInt32(&this.readMark3, 0)

	} else if atomic.CompareAndSwapInt32(&this.readMark4, 0, 1) {
		n, err = redis.Int(this.readCon4.Do("LLEN", key))
		atomic.StoreInt32(&this.readMark4, 0)
	}

	if err != nil {
		return 0
	}
	return n
}
Example #14
func (pc *SocketClient) writerRoutine() {
writerloop:
	for {
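		// The writer is a state machine driven by pc.state: a failed Write CASes
		// the socket into SocketReconnecting, and a successful redial CASes it
		// back to SocketOpen.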
		switch atomic.LoadInt32(&pc.state) {
		case SocketClosed:
			pc.changes.Lock()
			for _, v := range pc.queue {
				v.responseChan <- ErrShutdown
			}
			pc.changes.Unlock()
			break writerloop
		case SocketOpen:
			//log.Printf("[SocketClient] Writer state: Open")
			request := pc.writeQueueHead
			if request == nil {
				select {
				case request = <-pc.writeQueue:
					pc.writeQueueHead = request
					if request == nil {
						continue
					}
				case <-time.After(1 * time.Second):
					continue
				}
			}
			_, err := pc.conn.Write(request.request)
			if err != nil {
				log.Printf("[SocketClient] Write failed: %s", err.Error())
				atomic.CompareAndSwapInt32(&pc.state, SocketOpen, SocketReconnecting)
			} else {
				//log.Printf("[SocketClient] Wrote: %s", request.request)
				pc.writeQueueHead = nil
				request.sent = true
			}
		case SocketReconnecting:
			//log.Printf("[SocketClient] Writer state: Recon")
			pc.changes.Lock()
			conn, err := net.DialTCP(pc.address.Network(), nil, pc.address)
			if err == nil {
				pc.reader = bufio.NewReader(conn)
				pc.conn = conn
				sentshit := make([]int64, 0, 32)
				for k, v := range pc.queue {
					if v.sent {
						v.responseChan <- ErrReconnect
						sentshit = append(sentshit, k)
					}
				}
				for _, k := range sentshit {
					delete(pc.queue, k)
				}
				atomic.CompareAndSwapInt32(&pc.state, SocketReconnecting, SocketOpen)
			} else {
				log.Printf("[SocketClient] Reconnect failed: %s", err.Error())
				time.Sleep(5 * time.Second)
			}
			pc.changes.Unlock()
		}
	}
}
Example #15
func (this *Writer) Stop() {
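	// stopFlag makes Stop idempotent: only the first caller gets past the CAS
	// and is allowed to close exitChan.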
	if !atomic.CompareAndSwapInt32(&this.stopFlag, 0, 1) {
		return
	}
	if atomic.CompareAndSwapInt32(&this.state, StateConnected, StateDisconnected) {
		this.Close()
	}
	close(this.exitChan)
}
Example #16
// ListenAndServe listens for connections on the URI requested, and handles any
// incoming MQTT client sessions. It should not return until Close() is called
// or some critical error stops the server from running. The URI
// supplied should be of the form "protocol://host:port" that can be parsed by
// url.Parse(). For example, a URI could be "tcp://0.0.0.0:1883".
func (this *Server) ListenAndServe(uri string) error {
	if !atomic.CompareAndSwapInt32(&this.running, 0, 1) {
		return fmt.Errorf("server/ListenAndServe: Server is already running")
	}
	// Clear the running flag on return; the defer is registered only after the
	// CAS above has succeeded, so a rejected second call cannot clear it.
	defer atomic.CompareAndSwapInt32(&this.running, 1, 0)

	this.quit = make(chan struct{})
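	// quit is closed when the server shuts down; the accept loop below uses it
	// to tell a graceful shutdown apart from a real Accept error.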

	u, err := url.Parse(uri)
	if err != nil {
		return err
	}

	this.ln, err = net.Listen(u.Scheme, u.Host)
	if err != nil {
		return err
	}
	defer this.ln.Close()

	glog.Infof("server/ListenAndServe: server is ready...")

	var tempDelay time.Duration // how long to sleep on accept failure

	for {
		conn, err := this.ln.Accept()

		if err != nil {
			// http://zhen.org/blog/graceful-shutdown-of-go-net-dot-listeners/
			select {
			case <-this.quit:
				return nil

			default:
			}

			// Borrowed from go1.3.3/src/pkg/net/http/server.go:1699
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				glog.Errorf("server/ListenAndServe: Accept error: %v; retrying in %v", err, tempDelay)
				time.Sleep(tempDelay)
				continue
			}
			return err
		}

		go this.handleConnection(conn)
	}
}
Example #17
//	Enqueue command
func (o *Object) SendCommand(c Command, incseq bool) bool {
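	// enlargingQue doubles as a busy flag: CAS(0, 0) is a pure "is it zero" test,
	// and a failed test means another goroutine is growing the queue, so briefly
	// take the lock to wait for it to finish.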
	if !atomic.CompareAndSwapInt32(&o.enlargingQue, 0, 0) {
		o.Lock()
		o.Unlock()
	}

	if incseq {
		o.incSeqnum()
	}

	defer func() {
		if err := recover(); err != nil {
			//the queue may be in the middle of enlarging and already closed, so retry the send
			o.SendCommand(c, false)
		}
	}()

	//If the queue is full, enlarge it to twice its current size.
redo:
	select {
	case o.que <- c:
	default:
		//Lock contention can be heavy while an enlargement is just starting: the
		//enlarging goroutine may fail to grab the lock first. That is unfortunate, but tolerable.
		o.Lock()
		if len(o.que) < cap(o.que) {
			o.Unlock()
			goto redo
		} else {
			if atomic.CompareAndSwapInt32(&o.enlargingQue, 0, 1) {
				defer func() {
					atomic.StoreInt32(&o.enlargingQue, 0)
					o.Unlock()
				}()
				oldCap := cap(o.que)
				newCap := oldCap * 2
				newQue := make(chan Command, newCap)
				//Closing the old queue tells other sending goroutines to back off and retry later.
				close(o.que)
				for cc := range o.que {
					newQue <- cc
				}
				newQue <- c
				o.que = newQue
				return true
			} else {
				o.Unlock()
				runtime.Gosched()
				goto redo
			}
		}
	}

	return true
}
Example #18
// Lock locks m.
// If the lock is already in use, the calling goroutine blocks until the mutex is available.
func (m *Mutex) Lock() {
	// Fast path: grab unlocked mutex.
	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
		if race.Enabled {
			race.Acquire(unsafe.Pointer(m))
		}
		return
	}

	awoke := false
	iter := 0
	for {
		old := m.state
		new := old | mutexLocked
		if old&mutexLocked != 0 {
			if runtime_canSpin(iter) {
				// Active spinning makes sense.
				// Try to set mutexWoken flag to inform Unlock
				// to not wake other blocked goroutines.
				if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
					atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
					awoke = true
				}
				runtime_doSpin()
				iter++
				continue
			}
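			// Spinning is no longer worthwhile: account for this goroutine as one
			// more waiter in the upper bits of m.state.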
			new = old + 1<<mutexWaiterShift
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			if new&mutexWoken == 0 {
				panic("sync: inconsistent mutex state")
			}
			new &^= mutexWoken
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&mutexLocked == 0 {
				break
			}
			runtime_Semacquire(&m.sema)
			awoke = true
			iter = 0
		}
	}

	if race.Enabled {
		race.Acquire(unsafe.Pointer(m))
	}
}
Example #19
// DelRangeValues deletes every value whose index lies in [start, end] from the list named key; total is the list's length.
// In the vast majority of cases start is at most 3, so the list is first trimmed to [end+1, total-1] and then [0, start-1] is pushed back onto the head.
func (this *RedisListHelper) DelRangeValues(key string, start, end, total int) bool {
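	// Same four-connection CAS pattern as GetLength: claim the first idle
	// delConN, run the LRANGE/LTRIM/LPUSH sequence on it, then clear its mark.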
	if key == "" || total < 1 || start < 0 || end < start || total-1 < end {
		return false
	}

	values := make([]string, 0)

	if atomic.CompareAndSwapInt32(&this.delMark1, 0, 1) {
		if start >= 1 {
			values, _ = redis.Strings(this.delCon1.Do("LRANGE", key, 0, start-1))
		}
		this.delCon1.Do("LTRIM", key, end+1, total-1)
		for i := len(values) - 1; i >= 0; i-- {
			this.delCon1.Do("LPUSH", key, values[i])
		}
		atomic.StoreInt32(&this.delMark1, 0)
		return true

	} else if atomic.CompareAndSwapInt32(&this.delMark2, 0, 1) {
		if start >= 1 {
			values, _ = redis.Strings(this.delCon2.Do("LRANGE", key, 0, start-1))
		}
		this.delCon2.Do("LTRIM", key, end+1, total-1)
		for i := len(values) - 1; i >= 0; i-- {
			this.delCon2.Do("LPUSH", key, values[i])
		}
		atomic.StoreInt32(&this.delMark2, 0)
		return true

	} else if atomic.CompareAndSwapInt32(&this.delMark3, 0, 1) {
		if start >= 1 {
			values, _ = redis.Strings(this.delCon3.Do("LRANGE", key, 0, start-1))
		}
		this.delCon3.Do("LTRIM", key, end+1, total-1)
		for i := len(values) - 1; i >= 0; i-- {
			this.delCon3.Do("LPUSH", key, values[i])
		}
		atomic.StoreInt32(&this.delMark3, 0)
		return true

	} else if atomic.CompareAndSwapInt32(&this.delMark4, 0, 1) {
		if start >= 1 {
			values, _ = redis.Strings(this.delCon4.Do("LRANGE", key, 0, start-1))
		}
		this.delCon4.Do("LTRIM", key, end+1, total-1)
		for i := len(values) - 1; i >= 0; i-- {
			this.delCon4.Do("LPUSH", key, values[i])
		}
		atomic.StoreInt32(&this.delMark4, 0)
		return true
	}

	return false
}
Example #20
func (batch *scribeMessageBatch) waitForFlush(timeout time.Duration) {
	flushed := int32(0)
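	// The timeout callback and the post-Lock path race on flushed; whichever
	// wins the CAS performs the single balancing Unlock, so the wait is bounded
	// by timeout.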
	time.AfterFunc(timeout, func() {
		if atomic.CompareAndSwapInt32(&flushed, 0, 1) {
			batch.flushing.Unlock()
		}
	})

	batch.flushing.Lock()
	if atomic.CompareAndSwapInt32(&flushed, 0, 1) {
		batch.flushing.Unlock()
	}
}
Example #21
// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() { // acquire the exclusive lock
	// Fast path: grab unlocked mutex.
	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) { // CAS acquire: if state was 0 it becomes 1, and the lock is ours
		if raceenabled {
			raceAcquire(unsafe.Pointer(m))
		}
		return // lock acquired
	}
	// Slow path: the fast CAS failed.
	awoke := false // this goroutine has not yet been woken from sleep
	iter := 0
	for {
		old := m.state
		new := old | mutexLocked
		if old&mutexLocked != 0 {
			if runtime_canSpin(iter) {
				// Active spinning makes sense.
				// Try to set mutexWoken flag to inform Unlock
				// to not wake other blocked goroutines.
				if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
					atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
					awoke = true
				}
				runtime_doSpin()
				iter++
				continue
			}
			new = old + 1<<mutexWaiterShift
		}
		if awoke { // this goroutine was woken from sleep
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			if new&mutexWoken == 0 {
				panic("sync: inconsistent mutex state")
			}
			new &^= mutexWoken // clear the Woken bit
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) { // state is unchanged since we read it: publish the new state (one more waiter); otherwise loop again
			if old&mutexLocked == 0 { // the lock was free, so we now own it
				break
			}
			runtime_Semacquire(&m.sema) // block until Unlock wakes this goroutine
			awoke = true                // woken up: run the loop once more
			iter = 0
		}
	}

	if raceenabled { // race detector bookkeeping
		raceAcquire(unsafe.Pointer(m))
	}
}
Example #22
func TestNoRaceAtomicCASCASInt32(t *testing.T) {
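	// The CAS pair creates a happens-before edge: x = 2 precedes CAS(0, 1),
	// which the main goroutine observes via CAS(1, 0) before writing x = 1,
	// so there is no data race on x.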
	var x int64
	var s int32
	go func() {
		x = 2
		if !atomic.CompareAndSwapInt32(&s, 0, 1) {
			panic("")
		}
	}()
	for !atomic.CompareAndSwapInt32(&s, 1, 0) {
		runtime.Gosched()
	}
	x = 1
}
Example #23
//WhenAll receives a slice of Futures and returns a single Future.
//If all Futures are resolved, this Future will be resolved with a slice of their results.
//If any Future is cancelled, this Future will be cancelled.
//Otherwise it will be rejected with an aggregate error built from the failing results.
//Legit types of act are the same as for the Start function.
func whenAllFuture(fs ...*Future) *Future {
	wf := NewPromise()
	rs := make([]interface{}, len(fs))

	if len(fs) == 0 {
		wf.Resolve([]interface{}{})
	} else {
		n := int32(len(fs))
		cancelOthers := func(j int) {
			for k, f1 := range fs {
				if k != j {
					f1.Cancel()
				}
			}
		}

		go func() {
			isCancelled := int32(0)
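			// isCancelled ensures that only the first failure or cancellation
			// cancels the remaining futures and settles wf exactly once.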
			for i, f := range fs {
				j := i

				f.OnSuccess(func(v interface{}) {
					rs[j] = v
					if atomic.AddInt32(&n, -1) == 0 {
						wf.Resolve(rs)
					}
				}).OnFailure(func(v interface{}) {
					if atomic.CompareAndSwapInt32(&isCancelled, 0, 1) {
						//try to cancel all futures
						cancelOthers(j)

						//errs := make([]error, 0, 1)
						//errs = append(errs, v.(error))
						e := newAggregateError1("Error appears in WhenAll:", v)
						wf.Reject(e)
					}
				}).OnCancel(func() {
					if atomic.CompareAndSwapInt32(&isCancelled, 0, 1) {
						//try to cancel all futures
						cancelOthers(j)

						wf.Cancel()
					}
				})
			}
		}()
	}

	return wf.Future
}
Example #24
func (c *Channel) exit(deleted bool) error {
	c.exitMutex.Lock()
	defer c.exitMutex.Unlock()

	if !atomic.CompareAndSwapInt32(&c.exitFlag, 0, 1) {
		return errors.New("exiting")
	}
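	// Past this point this goroutine is the sole closer of the channel.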

	if deleted {
		c.ctx.nsqd.logf("CHANNEL(%s): deleting", c.name)

		// since we are explicitly deleting a channel (not just at system exit time)
		// de-register this from the lookupd
		c.ctx.nsqd.Notify(c)
	} else {
		c.ctx.nsqd.logf("CHANNEL(%s): closing", c.name)
	}

	// this forcibly closes client connections
	c.RLock()
	for _, client := range c.clients {
		client.Close()
	}
	c.RUnlock()

	if deleted {
		// empty the queue (deletes the backend files, too)
		c.Empty()
		return c.backend.Delete()
	}

	// write anything leftover to disk
	c.flush()
	return c.backend.Close()
}
Example #25
// CompareAndToggle atomically toggles the boolean value if the current value equals expect.
func (ab *AtomicBool) CompareAndToggle(expect bool) bool {
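	// expect == true  -> attempt CAS 1 -> 0 (true to false)
	// expect == false -> attempt CAS 0 -> 1 (false to true)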
	updated := 1
	if expect {
		updated = 0
	}
	return atomic.CompareAndSwapInt32(&ab.val, int32(1-updated), int32(updated))
}
Example #26
// Unlock unlocks m.
// It is a run-time error if m is not locked on entry to Unlock.
//
// A locked Mutex is not associated with a particular goroutine.
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
	if race.Enabled {
		_ = m.state
		race.Release(unsafe.Pointer(m))
	}

	// Fast path: drop lock bit.
	new := atomic.AddInt32(&m.state, -mutexLocked)
	if (new+mutexLocked)&mutexLocked == 0 {
		panic("sync: unlock of unlocked mutex")
	}

	old := new
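	// Wake at most one waiter: the CAS both removes it from the waiter count and
	// sets mutexWoken so lockers know a wakeup is already in flight.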
	for {
		// If there are no waiters or a goroutine has already
		// been woken or grabbed the lock, no need to wake anyone.
		if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken) != 0 {
			return
		}
		// Grab the right to wake someone.
		new = (old - 1<<mutexWaiterShift) | mutexWoken
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			runtime_Semrelease(&m.sema)
			return
		}
		old = m.state
	}
}
Example #27
func (this *parseState) Start() (ok bool) {
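	// Only the first caller flips state from 0 to 1 and registers with Status.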
	if atomic.CompareAndSwapInt32(&this.state, 0, 1) {
		this.Status.Add(1)
		ok = true
	}
	return
}
Example #28
func (w *Producer) connect() error {
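	// The producer moves StateInit -> StateConnected via CAS; a concurrent
	// connect is rejected with ErrNotConnected, and a failed dial rolls the
	// state back to StateInit below.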
	w.guard.Lock()
	defer w.guard.Unlock()

	if atomic.LoadInt32(&w.stopFlag) == 1 {
		return ErrStopped
	}

	if !atomic.CompareAndSwapInt32(&w.state, StateInit, StateConnected) {
		return ErrNotConnected
	}

	w.log(LogLevelInfo, "(%s) connecting to nsqd", w.addr)

	conn := NewConn(w.addr, &w.config, &producerConnDelegate{w})
	conn.SetLogger(w.logger, w.logLvl, fmt.Sprintf("%3d (%%s)", w.id))

	_, err := conn.Connect()
	if err != nil {
		conn.Close()
		w.log(LogLevelError, "(%s) error connecting to nsqd - %s", w.addr, err)
		atomic.StoreInt32(&w.state, StateInit)
		return err
	}
	w.conn = conn

	w.wg.Add(1)
	go w.router()

	return nil
}
Example #29
// Stop will gracefully stop the Reader
func (q *Reader) Stop() {
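	// stopFlag makes Stop idempotent: only the first call proceeds past the CAS.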
	var buf bytes.Buffer

	if !atomic.CompareAndSwapInt32(&q.stopFlag, 0, 1) {
		return
	}

	log.Printf("Stopping reader")

	q.RLock()
	l := len(q.nsqConnections)
	q.RUnlock()

	if l == 0 {
		q.stopHandlers()
	} else {
		q.RLock()
		for _, c := range q.nsqConnections {
			err := c.sendCommand(&buf, StartClose())
			if err != nil {
				log.Printf("[%s] failed to start close - %s", c, err.Error())
			}
		}
		q.RUnlock()

		go func() {
			<-time.After(time.Duration(30) * time.Second)
			q.stopHandlers()
		}()
	}

	if len(q.lookupdHTTPAddrs) != 0 {
		q.lookupdExitChan <- 1
	}
}
Example #30
func (c *Conn) closeWithError(err error) {
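	// The CAS on c.closed guarantees the teardown below runs exactly once.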
	if !atomic.CompareAndSwapInt32(&c.closed, 0, 1) {
		return
	}

	// we should attempt to deliver the error back to the caller if it
	// exists
	if err != nil {
		c.mu.RLock()
		for _, req := range c.calls {
			// send the error to all waiting queries and mark this conn as not
			// active so that it cannot execute any further queries.
			select {
			case req.resp <- err:
			case <-req.timeout:
			}
		}
		c.mu.RUnlock()
	}

	// if error was nil then unblock the quit channel
	close(c.quit)
	c.conn.Close()

	if err != nil {
		c.errorHandler.HandleError(c, err, true)
	}
}