Example #1
File: client.go Project: gsrpc/gorpc
func (client *_Client) doconnect() {
	if !atomic.CompareAndSwapUint32(&client.state, uint32(StateDisconnect), uint32(StateConnecting)) {
		return
	}

	go func() {
		conn, err := client.f()

		if err != nil {

			if !atomic.CompareAndSwapUint32(&client.state, uint32(StateConnecting), uint32(StateDisconnect)) {
				return
			}

			client.E("%s connect server error:%s", client.name, gserrors.New(err))

			if client.reconnectTimeout != 0 {

				time.AfterFunc(client.reconnectTimeout, func() {
					client.doconnect()
				})
			}

			return
		}

		client.connected(conn)

	}()
}
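
Note: the CAS from StateDisconnect to StateConnecting guarantees that at most one goroutine enters the connect path no matter how many callers race on doconnect, and on failure the state is swapped back so a later attempt can win again. A minimal self-contained sketch of the same guard (state names and values here are hypothetical, not taken from gorpc):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	stateDisconnected uint32 = iota
	stateConnecting
)

var state uint32 // zero value == stateDisconnected

// tryConnect returns true only for the single caller that wins the CAS.
func tryConnect() bool {
	return atomic.CompareAndSwapUint32(&state, stateDisconnected, stateConnecting)
}

func main() {
	var wg sync.WaitGroup
	var winners int32
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if tryConnect() {
				atomic.AddInt32(&winners, 1)
			}
		}()
	}
	wg.Wait()
	fmt.Println("winners:", winners) // always prints 1
}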
Example #2
// ListenAndServe starts listening for connections, recording them and proxying
// to the target URL.
func (p *Proxy) ListenAndServe() (err error) {
	if atomic.CompareAndSwapUint32(&p.isrun, 0, 1) {
		defer func() {
			// Ignore "use of closed network connection" coming from the closed
			// net.Listener when p was explicitly stopped.
			if !atomic.CompareAndSwapUint32(&p.isrun, 1, 0) {
				err = nil
			}
		}()
		p.m.Lock()
		var l net.Listener
		if l, err = net.Listen("tcp", p.addr); err != nil {
			p.m.Unlock()
			return
		}
		var src *net.TCPAddr
		src, err = urltotcpaddr(p.targ)
		if err != nil {
			p.m.Unlock()
			return
		}
		if p.rl, err = newRecListener(l, src, p.Record); err != nil {
			p.m.Unlock()
			return
		}
		p.wgr.Done()
		p.m.Unlock()
		err = p.srv.Serve(p.rl)
		return
	}
	return ErrAlreadyRunning
}
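
The deferred CAS above is what tells an explicit stop apart from a genuine Serve failure: a Stop method is expected to flip isrun from 1 to 0 itself before closing the listener, so when Serve returns its "use of closed network connection" error, the deferred CAS(1, 0) loses and err is cleared. A sketch of that assumed counterpart (the method and ErrNotRunning are hypothetical, not shown in the source):

// Stop clears isrun before closing the listener, so the deferred CAS in
// ListenAndServe fails and the resulting Serve error is suppressed.
func (p *Proxy) Stop() error {
	if !atomic.CompareAndSwapUint32(&p.isrun, 1, 0) {
		return ErrNotRunning // hypothetical sentinel error
	}
	return p.rl.Close()
}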
Example #3
// Internal access interface
// Error handling
func (chn *Channel) handleError(err error) {

	defer func() {
		if r := recover(); r != nil {
			mylog.GetErrorLogger().Println("Channel handleError Panic")
		}
	}()

	if err != nil {
		mylog.GetErrorLogger().Println("handleError", err.Error())
	}

	if atomic.CompareAndSwapUint32(&chn.valid, 0, 0) {
		return
	}

	atomic.CompareAndSwapUint32(&chn.valid, 1, 0)

	stat.GetLocalStatistInst().Off()

	func() {
		chn.resMutex.Lock()
		defer chn.resMutex.Unlock()
		for _, r := range chn.openRes {
			stat.GetLocalStatistInst().CloseRes()
			mylog.GetErrorLogger().Println(" release chn res ", r.GetID())
			clientDataID := new(ResourceClient)
			clientDataID.ClientInf = chn
			r.Close(clientDataID, "", true)
			ReleaseResourcer(r)
		}

		for _, v := range chn.registerRes {

			v.Unregister()
			ReleaseResourcer(v)
		}
	}()

	func() {

		fmt.Println("Chn Close")
		chn.chnMutex.Lock()
		defer chn.chnMutex.Unlock()
		chn.connSocket.Close()
		close(chn.rwChn)
		close(chn.notifyChn)
	}()

}
Example #4
func (pu *PUDevice) handleError(err error) {
	mylog.GetErrorLogger().Println(err)
	if atomic.CompareAndSwapUint32(&pu.Valid, 0, 0) {
		return
	}

	atomic.CompareAndSwapUint32(&pu.Valid, 1, 0)
	pu.connSocket.Close()
	stat.GetLocalStatistInst().Off()
	if atomic.CompareAndSwapUint32(&pu.SendFlag, 1, 1) {
		stat.GetLocalStatistInst().CloseRes()
	}
	close(pu.rwChan)
	go pu.ReRun(pu.SN)
}
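
Examples #3 and #4 (and #22 below) use CompareAndSwapUint32(&v, 0, 0) as an atomic "is it already zero?" test: the CAS succeeds only when the value is 0, and swapping 0 for 0 changes nothing. Note that the separate check-then-swap pair (CAS(0, 0) followed by CAS(1, 0)) leaves a window in which two goroutines can both pass the check and both run the teardown; a single CAS tests and claims the flag in one step, as Example #7 below does. A sketch of the race-free form, assuming the same Valid flag:

// handleErrorOnce sketches the single-CAS form: only the goroutine that
// flips Valid from 1 to 0 proceeds, so the cleanup runs exactly once.
func (pu *PUDevice) handleErrorOnce(err error) {
	if !atomic.CompareAndSwapUint32(&pu.Valid, 1, 0) {
		return // already invalidated by another goroutine
	}
	// ... teardown exactly once ...
}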
Example #5
// Implements Service
func (bs *BaseService) Reset() (bool, error) {
	if atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) {
		// whether or not we've started, we can reset
		atomic.CompareAndSwapUint32(&bs.started, 1, 0)

		return true, bs.impl.OnReset()
	} else {
		if bs.log != nil {
			bs.log.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl)
		}
		return false, nil
	}
	// never happens
	return false, nil
}
Example #6
func (a *AddrBook) Stop() {
	if atomic.CompareAndSwapUint32(&a.stopped, 0, 1) {
		log.Info("Stopping AddrBook")
		close(a.quit)
		a.wg.Wait()
	}
}
Example #7
func (s *Stream) resetWith(errorCode frame.ErrorCode, resetErr error) {
	// only ever send one reset
	if !atomic.CompareAndSwapUint32(&s.sentRst, 0, 1) {
		return
	}

	// close the stream
	s.closeWithAndRemoveLater(resetErr)

	// make the reset frame
	rst := frame.NewWStreamRst()
	if err := rst.Set(s.id, errorCode); err != nil {
		s.die(frame.InternalError, err)
	}

	// need write lock to make sure no data frames get sent after we send the reset
	s.writer.Lock()

	// send it
	if err := s.session.writeFrame(rst, zeroTime); err != nil {
		s.writer.Unlock()
		s.die(frame.InternalError, err)
	}

	s.writer.Unlock()
}
Example #8
// OpenStream is used to create a new stream
func (s *Session) OpenStream() (*Stream, error) {
	if s.IsClosed() {
		return nil, ErrSessionShutdown
	}
	if atomic.LoadInt32(&s.remoteGoAway) == 1 {
		return nil, ErrRemoteGoAway
	}

GET_ID:
	// Get an ID, and check for stream exhaustion
	id := atomic.LoadUint32(&s.nextStreamID)
	if id >= math.MaxUint32-1 {
		return nil, ErrStreamsExhausted
	}
	if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
		goto GET_ID
	}

	// Register the stream
	stream := newStream(s, id, streamInit)
	s.streamLock.Lock()
	s.streams[id] = stream
	s.streamLock.Unlock()

	// Send the window update to create
	return stream, stream.sendWindowUpdate()
}
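
The goto-based GET_ID retry above is a classic CAS allocation loop: load the counter, check for exhaustion, try to publish id+2, and retry if another goroutine won the race. The same loop is often written with for instead of goto; a sketch under the same assumptions (stream IDs advance by 2 so each side keeps one parity):

// nextID atomically reserves the current stream ID and advances the
// counter by 2, retrying whenever a concurrent OpenStream wins the CAS.
func (s *Session) nextID() (uint32, error) {
	for {
		id := atomic.LoadUint32(&s.nextStreamID)
		if id >= math.MaxUint32-1 {
			return 0, ErrStreamsExhausted
		}
		if atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
			return id, nil
		}
	}
}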
Example #9
func (c *ConcurrentSolver) attrHelper(G *graphs.Graph, removed []bool, tmpMap []int32, flags []uint32, ch chan int, i int, node int, wg *sync.WaitGroup) {
	for _, v0 := range G.Nodes[node].Inc {
		if !removed[v0] {
			flag := G.Nodes[v0].Player == i
			if atomic.CompareAndSwapUint32(&flags[v0], 0, 1) {
				if flag {
					ch <- v0
					atomic.AddInt32(&tmpMap[v0], 1)
				} else {
					adj_counter := 0
					for _, x := range G.Nodes[v0].Adj {
						if !removed[x] {
							adj_counter += 1
						}
					}
					atomic.AddInt32(&tmpMap[v0], int32(adj_counter))
					if adj_counter == 1 {
						ch <- v0
					}
				}
			} else if !flag {
				if atomic.AddInt32(&tmpMap[v0], -1) == 1 {
					ch <- v0
				}
			}
		}
	}
	wg.Done()
}
Example #10
func (o *emptyObject) Release() {
	if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
		return
	}
	o.node.evict()
	o.node = nil
}
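
Examples #7 and #10 above (and #27 below) all use CompareAndSwapUint32(&flag, 0, 1) as a lightweight, allocation-free stand-in for sync.Once: only the first caller wins the swap and performs the work. Unlike sync.Once, callers that lose the race return immediately instead of waiting for the work to finish. A standalone sketch of the idiom:

// onceFlag runs fn at most once, however many goroutines call Do.
// A uint32 CAS is a common alternative to sync.Once when the guard is
// already embedded in a struct.
type onceFlag struct {
	done uint32
}

func (o *onceFlag) Do(fn func()) {
	if !atomic.CompareAndSwapUint32(&o.done, 0, 1) {
		return
	}
	fn()
}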
Example #11
// Implements Reactor
func (bcR *BlockchainReactor) Stop() {
	if atomic.CompareAndSwapUint32(&bcR.running, 1, 0) {
		log.Info("Stopping BlockchainReactor")
		close(bcR.quit)
		bcR.pool.Stop()
	}
}
Example #12
// Resume resumes a TBucket in a paused state and begins adding new tokens to
// the bucket again.
//
// Resume returns true if the TBucket has been resumed, or false if the TBucket
// is not in a paused state.
func (tb *TBucket) Resume() bool {
	if tb.IsClosed() || !atomic.CompareAndSwapUint32(&tb.paused, 1, 0) {
		return false
	}
	tb.prch <- struct{}{}
	return true
}
Example #13
// Close stops the internal ticker that adds tokens. The TBucket instance is
// permanently closed and cannot be reopened. Once the TBucket is no longer
// used, this function must be called to stop the internal timer from
// continuing to fire.
//
// It returns true if the TBucket has been closed, or false if the TBucket has
// already been closed.
func (tb *TBucket) Close() bool {
	if !atomic.CompareAndSwapUint32(&tb.closed, 0, 1) {
		return false
	}
	tb.cch <- struct{}{}
	return true
}
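
Both TBucket methods pair a CAS guard with a channel send: the CAS picks exactly one winner, and the channel notifies the internal ticker goroutine. A hedged usage sketch (NewTBucket is a hypothetical constructor, not shown in the source):

tb := NewTBucket()
if tb.Resume() {
	// we flipped paused 1 -> 0; the ticker is adding tokens again
}
if !tb.Close() {
	// another goroutine already closed it; cch was signalled exactly once
}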
Example #14
File: tlsproxy.go Project: koding/koding
func (p *Proxy) Close() error {
	if atomic.CompareAndSwapUint32(&p.closed, 0, 1) {
		return p.listener.Close()
	}

	return nil
}
Example #15
func (dc *DatabaseContext) TakeDbOffline(reason string) error {
	base.LogTo("CRUD", "Taking Database : %v, offline", dc.Name)
	dbState := atomic.LoadUint32(&dc.State)
	//If the DB is already offline, or transitioning to offline (stopping or resyncing), silently return
	if dbState == DBOffline || dbState == DBResyncing || dbState == DBStopping {
		return nil
	}

	if atomic.CompareAndSwapUint32(&dc.State, DBOnline, DBStopping) {

		//notify all active _changes feeds to close
		close(dc.ExitChanges)

		base.LogTo("CRUD", "Waiting for all active calls to complete on Database : %v", dc.Name)
		//Block until all current calls have returned, including _changes feeds
		dc.AccessLock.Lock()
		defer dc.AccessLock.Unlock()

		base.LogTo("CRUD", "Database : %v, is offline", dc.Name)
		//set DB state to Offline
		atomic.StoreUint32(&dc.State, DBOffline)

		if dc.EventMgr.HasHandlerForEvent(DBStateChange) {
			dc.EventMgr.RaiseDBStateChangeEvent(dc.Name, "offline", reason, *dc.Options.AdminInterface)
		}

		return nil
	} else {
		base.LogTo("CRUD", "Unable to take Database offline, database must be in Online state")
		return base.HTTPErrorf(http.StatusServiceUnavailable, "Unable to take Database offline, database must be in Online state")
	}
}
Example #16
func atomics() {
	_ = atomic.LoadUint32(&x)             // ERROR "intrinsic substitution for LoadUint32"
	atomic.StoreUint32(&x, 1)             // ERROR "intrinsic substitution for StoreUint32"
	atomic.AddUint32(&x, 1)               // ERROR "intrinsic substitution for AddUint32"
	atomic.SwapUint32(&x, 1)              // ERROR "intrinsic substitution for SwapUint32"
	atomic.CompareAndSwapUint32(&x, 1, 2) // ERROR "intrinsic substitution for CompareAndSwapUint32"
}
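
Note: this snippet appears to come from the Go compiler's own test suite; each // ERROR "..." directive is a test assertion that the compiler replaces the sync/atomic call with a machine intrinsic, not a reported failure.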
Example #17
// RecursiveRewatch implements RecursiveWatcher interface. It fails:
//
//   * with errNotWatched when the given path is not being watched
//   * with errInvalidEventSet when oldevent does not match the current event set
//   * with errAlreadyWatched when watch-point given by the oldpath was meant to
//     be relocated to newpath, but the newpath is already watched
//   * a non-nil error when setting the watch-point with FSEvents fails
//
// TODO(rjeczalik): Improve handling of watch-point relocation? See the two
// TODOs that follow.
func (fse *fsevents) RecursiveRewatch(oldpath, newpath string, oldevent, newevent Event) error {
	switch [2]bool{oldpath == newpath, oldevent == newevent} {
	case [2]bool{true, true}:
		w, ok := fse.watches[oldpath]
		if !ok {
			return errNotWatched
		}
		atomic.StoreInt32(&w.isrec, 1)
		return nil
	case [2]bool{true, false}:
		w, ok := fse.watches[oldpath]
		if !ok {
			return errNotWatched
		}
		if !atomic.CompareAndSwapUint32(&w.events, uint32(oldevent), uint32(newevent)) {
			return errors.New("invalid event state diff")
		}
		atomic.StoreInt32(&w.isrec, 1)
		return nil
	default:
		// TODO(rjeczalik): rewatch newpath only if exists?
		// TODO(rjeczalik): migrate w.prev to new watch?
		if _, ok := fse.watches[newpath]; ok {
			return errAlreadyWatched
		}
		if err := fse.Unwatch(oldpath); err != nil {
			return err
		}
		// TODO(rjeczalik): revert unwatch if watch fails?
		return fse.watch(newpath, newevent, 1)
	}
}
Example #18
func (d *spawnDispatch) asyncKill() {
	// There are 3 cases:
	// * If the process has been spawned - kill it
	// * If the process has not been spawned yet - cancel it
	//		It's not our responsibility to clean up resources and kill anything
	// * If ctx has been cancelled - exit
	select {
	case pr, ok := <-d.process:
		if !ok {
			// we will not receive the process
			// initialDispatch has closed the channel
			return
		}

		if atomic.CompareAndSwapUint32(d.killed, 0, 1) {
			killMeter.Mark(1)
			if err := pr.Kill(); err != nil {
				d.stream.Error(d.ctx, replyKillError, errKillError, err.Error())
				return
			}

			d.stream.Close(d.ctx, replyKillOk)
		}
	case <-d.ctx.Done():
		// NOTE: should we kill anything here?
	default:
		// cancel spawning process
		spawnCancelMeter.Mark(1)
		d.cancelSpawn()
	}
}
Example #19
// Flush the log channels. Concurrent users of the logger should quit before
// Flush() is called to ensure it completes.
func Flush() {
	if atomic.CompareAndSwapUint32(&running, 1, 0) {
		flush(writeCh, writer)
		flush(errorCh, errWriter)
		quitCh <- struct{}{}
	}
}
Example #20
File: agentops.go Project: nullbus/gocb
func (c *Agent) getAnyReplica(key []byte, cb GetCallback) (PendingOp, error) {
	opRes := &multiPendingOp{}

	var cbCalled uint32
	handler := func(value []byte, flags uint32, cas Cas, err error) {
		if atomic.CompareAndSwapUint32(&cbCalled, 0, 1) {
			// Cancel all other commands if possible.
			opRes.Cancel()
			// Dispatch Callback
			cb(value, flags, cas, err)
		}
	}

	// Dispatch a getReplica for each replica server
	numReplicas := c.NumReplicas()
	for repIdx := 1; repIdx <= numReplicas; repIdx++ {
		op, err := c.getOneReplica(key, repIdx, handler)
		if err == nil {
			opRes.ops = append(opRes.ops, op)
		}
	}

	// If we have no pending ops, no requests were successful
	if len(opRes.ops) == 0 {
		return nil, &agentError{"No replicas available"}
	}

	return opRes, nil
}
Example #21
// read and check the mask bit; if it is not set, try once to set it with a CAS
func (e *edgeConn) bitwiseCompareAndSet(mask uint32) bool {
	c := atomic.LoadUint32(&e.closed)
	if c&mask == 0 {
		return atomic.CompareAndSwapUint32(&e.closed, c, c|mask)
	}
	return false
}
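
As written, bitwiseCompareAndSet makes a single attempt: if another goroutine flips an unrelated bit between the Load and the CAS, the CAS fails and the function returns false even though the caller's bit was still clear. When "bit already set" must be distinguished from "lost a race", the load-and-CAS is usually wrapped in a retry loop; a sketch under the same closed/mask layout (setBit is a hypothetical name):

// setBit retries until the bit is either observed set (false) or set by
// this goroutine (true), so a CAS failure caused by an unrelated bit
// does not produce a false negative.
func (e *edgeConn) setBit(mask uint32) bool {
	for {
		c := atomic.LoadUint32(&e.closed)
		if c&mask != 0 {
			return false // bit already set by someone else
		}
		if atomic.CompareAndSwapUint32(&e.closed, c, c|mask) {
			return true
		}
	}
}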
Example #22
func (cu *CUClient) heartTask() {
	defer func() {
		if r := recover(); r != nil {
			cu.handleError(base.DTerror{"heartTask Panic"})
		}
	}()

	baseHD := new(base.BaseHeader)
	baseHD.CommandId = base.HEART_CMD
	msg := baseHD.Encode()

	var failCount int = 0

	for {

		if atomic.CompareAndSwapUint32(&cu.Valid, 0, 0) {
			break
		}
		cu.connSocket.Write(msg.Bytes())
		select {
		case <-cu.heartChn:
			failCount = 0
		case <-time.After(10 * time.Second):
			failCount++
		}
		if failCount >= 3 {
			cu.handleError(base.DTerror{"CU Heart"})
			break
		}
		time.Sleep(10 * time.Second)
	}
}
Example #23
func (c *Cond) signalImpl(all bool) { // the actual notification logic; all indicates whether to wake every waiter
	c.checker.check() // make sure the Cond has not been copied
	if raceenabled {
		raceDisable()
	}
	for {
		old := atomic.LoadUint32(&c.waiters) // how many goroutines are waiting on the condition
		if old == 0 {                        // no waiters, return immediately
			if raceenabled {
				raceEnable()
			}
			return
		}
		new := old - 1
		if all {
			new = 0
		}
		if atomic.CompareAndSwapUint32(&c.waiters, old, new) {
			if raceenabled {
				raceEnable()
			}
			runtime_Syncsemrelease(&c.sema, old-new) // wake old-new waiters
			return
		}
	}
}
Example #24
func (s *IDGenerator) GetStream() (int, bool) {
	// based closely on the java-driver stream ID generator
	// avoid false sharing between subsequent requests.
	offset := atomic.LoadUint32(&s.offset)
	for !atomic.CompareAndSwapUint32(&s.offset, offset, (offset+1)%s.numBuckets) {
		offset = atomic.LoadUint32(&s.offset)
	}
	offset = (offset + 1) % s.numBuckets

	for i := uint32(0); i < s.numBuckets; i++ {
		pos := int((i + offset) % s.numBuckets)

		bucket := atomic.LoadUint64(&s.streams[pos])
		if bucket == math.MaxUint64 {
			// all streams in use
			continue
		}

		for j := 0; j < bucketBits; j++ {
			mask := uint64(1 << streamOffset(j))
			if bucket&mask == 0 {
				if atomic.CompareAndSwapUint64(&s.streams[pos], bucket, bucket|mask) {
					atomic.AddInt32(&s.inuseStreams, 1)
					return streamFromBucket(int(pos), j), true
				}
				// reload the bucket we just failed to claim (index pos, not offset)
				bucket = atomic.LoadUint64(&s.streams[pos])
			}
		}
	}

	return 0, false
}
Example #25
// .Start() begins multiplexing packets to and from "channels".
func (c *MConnection) Start() {
	if atomic.CompareAndSwapUint32(&c.started, 0, 1) {
		log.Debug("Starting MConnection", "connection", c)
		go c.sendRoutine()
		go c.recvRoutine()
	}
}
Example #26
File: cond.go Project: sreis/go
func (c *Cond) signalImpl(all bool) {
	c.checker.check()
	if race.Enabled {
		race.Disable()
	}
	for {
		old := atomic.LoadUint32(&c.waiters)
		if old == 0 {
			if race.Enabled {
				race.Enable()
			}
			return
		}
		new := old - 1
		if all {
			new = 0
		}
		if atomic.CompareAndSwapUint32(&c.waiters, old, new) {
			if race.Enabled {
				race.Enable()
			}
			runtime_Syncsemrelease(&c.sema, old-new)
			return
		}
	}
}
Example #27
func (h *lruHandle) Release() {
	if !atomic.CompareAndSwapUint32(&h.once, 0, 1) {
		return
	}
	h.node.deref()
	h.node = nil
}
Example #28
// Implements Reactor
func (pexR *PEXReactor) Start(sw *Switch) {
	if atomic.CompareAndSwapUint32(&pexR.started, 0, 1) {
		log.Info("Starting PEXReactor")
		pexR.sw = sw
		go pexR.ensurePeersRoutine()
	}
}
Example #29
// Implements Reactor
func (conR *ConsensusReactor) Stop() {
	if atomic.CompareAndSwapUint32(&conR.running, 1, 0) {
		log.Info("Stopping ConsensusReactor")
		conR.conS.Stop()
		close(conR.quit)
	}
}
Example #30
// RenewWhenExpires renews the token before it expires.
func (t *TokenRenewer) RenewWhenExpires() {
	if atomic.CompareAndSwapUint32(&t.active, 0, 1) {
		t.client.OnConnect(t.startRenewLoop)
		t.client.OnTokenExpire(t.sendRenewTokenSignal)
		t.client.OnDisconnect(t.sendDisconnectSignal)
	}
}