Example #1
func TestRefreshNoChange(t *testing.T) {
	var (
		addr      = &net.SRV{Target: "my-target", Port: 5678}
		addrs     = []*net.SRV{addr}
		name      = "my-name"
		ticker    = time.NewTicker(time.Second)
		lookups   = uint32(0)
		lookupSRV = func(string, string, string) (string, []*net.SRV, error) {
			atomic.AddUint32(&lookups, 1)
			return "", addrs, nil
		}
		e       = func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil }
		factory = func(string) (endpoint.Endpoint, io.Closer, error) { return e, nil, nil }
		logger  = log.NewNopLogger()
	)

	ticker.Stop()
	tickc := make(chan time.Time)
	ticker.C = tickc // swap in a channel the test controls, so ticks can be fired on demand

	p := NewPublisherDetailed(name, ticker, lookupSRV, factory, logger)
	defer p.Stop()

	if want, have := uint32(1), atomic.LoadUint32(&lookups); want != have {
		t.Errorf("want %d, have %d", want, have)
	}

	tickc <- time.Now()

	if want, have := uint32(2), atomic.LoadUint32(&lookups); want != have {
		t.Errorf("want %d, have %d", want, have)
	}
}
Example #2
func testThrottling(t *testing.T, protocol int) {
	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheLimit
	hashes, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester()
	tester.newPeer("peer", protocol, hashes, blocks)

	// Wrap the importer to allow stepping
	blocked, proceed := uint32(0), make(chan struct{})
	tester.downloader.chainInsertHook = func(blocks []*Block) {
		atomic.StoreUint32(&blocked, uint32(len(blocks)))
		<-proceed
	}
	// Start a synchronisation concurrently
	errc := make(chan error)
	go func() {
		errc <- tester.sync("peer", nil)
	}()
	// Iteratively take some blocks, always checking the retrieval count
	for {
		// Check the retrieval count synchronously (the reason for this ugly block)
		tester.lock.RLock()
		retrieved := len(tester.ownBlocks)
		tester.lock.RUnlock()
		if retrieved >= targetBlocks+1 {
			break
		}
		// Wait a bit for sync to throttle itself
		var cached int
		for start := time.Now(); time.Since(start) < time.Second; {
			time.Sleep(25 * time.Millisecond)

			tester.downloader.queue.lock.RLock()
			cached = len(tester.downloader.queue.blockPool)
			tester.downloader.queue.lock.RUnlock()

			if cached == blockCacheLimit || len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) == targetBlocks+1 {
				break
			}
		}
		// Make sure we filled up the cache, then exhaust it
		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
		if cached != blockCacheLimit && len(tester.ownBlocks)+cached+int(atomic.LoadUint32(&blocked)) != targetBlocks+1 {
			t.Fatalf("block count mismatch: have %v, want %v (owned %v, target %v)", cached, blockCacheLimit, len(tester.ownBlocks), targetBlocks+1)
		}
		// Permit the blocked blocks to import
		if atomic.LoadUint32(&blocked) > 0 {
			atomic.StoreUint32(&blocked, uint32(0))
			proceed <- struct{}{}
		}
	}
	// Check that we haven't pulled more blocks than available
	if len(tester.ownBlocks) > targetBlocks+1 {
		t.Fatalf("target block count mismatch: have %v, want %v", len(tester.ownBlocks), targetBlocks+1)
	}
	if err := <-errc; err != nil {
		t.Fatalf("block synchronization failed: %v", err)
	}
}
Example #3
// Release releases the peer connection, signalling that we are done with it.
func (p *PeerResponse) Release() {
	ref := atomic.LoadUint32(&p.peer.refCount)
	// Decrement ownership
	for ref > 0 && !atomic.CompareAndSwapUint32(&p.peer.refCount, ref, ref-1) {
		ref = atomic.LoadUint32(&p.peer.refCount)
	}
}
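A note on this pattern: the `ref > 0` guard exists because a plain atomic.AddUint32(&refCount, ^uint32(0)) on a counter that is already zero would wrap around to math.MaxUint32. A minimal, self-contained sketch of the same guarded decrement (the names are illustrative, not from the example's package):

package main

import (
	"fmt"
	"sync/atomic"
)

// tryRelease decrements refCount unless it is already zero.
// It returns true if a reference was actually released.
func tryRelease(refCount *uint32) bool {
	for {
		ref := atomic.LoadUint32(refCount)
		if ref == 0 {
			return false // nothing to release; avoid wrapping to MaxUint32
		}
		if atomic.CompareAndSwapUint32(refCount, ref, ref-1) {
			return true
		}
		// CAS failed: another goroutine changed the count; reload and retry.
	}
}

func main() {
	var refs uint32 = 1
	fmt.Println(tryRelease(&refs)) // true, refs is now 0
	fmt.Println(tryRelease(&refs)) // false, stays at 0
}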
Example #4
// Test that Pool does not hold pointers to previously cached
// resources
func TestPoolGC(t *testing.T) {
	var p Pool
	var fin uint32
	const N = 100
	for i := 0; i < N; i++ {
		v := new(int)
		runtime.SetFinalizer(v, func(vv *int) {
			atomic.AddUint32(&fin, 1)
		})
		p.Put(v)
	}
	for i := 0; i < N; i++ {
		p.Get()
	}
	for i := 0; i < 5; i++ {
		runtime.GC()
		time.Sleep(time.Millisecond)
		// 2 pointers can remain on stack or elsewhere
		if atomic.LoadUint32(&fin) >= N-2 {
			return
		}
	}
	t.Fatalf("only %v out of %v resources are finalized",
		atomic.LoadUint32(&fin), N)
}
Example #5
// doCheckpoint reads and resets the accumulated counters, then reports them as stats.
func (p *Whisper) doCheckpoint() {
	updateOperations := atomic.LoadUint32(&p.updateOperations)
	commitedPoints := atomic.LoadUint32(&p.commitedPoints)
	atomic.AddUint32(&p.updateOperations, -updateOperations)
	atomic.AddUint32(&p.commitedPoints, -commitedPoints)

	created := atomic.LoadUint32(&p.created)
	atomic.AddUint32(&p.created, -created)

	logrus.WithFields(logrus.Fields{
		"updateOperations": int(updateOperations),
		"commitedPoints":   int(commitedPoints),
		"created":          int(created),
	}).Info("[persister] doCheckpoint()")

	p.Stat("updateOperations", float64(updateOperations))
	p.Stat("commitedPoints", float64(commitedPoints))
	if updateOperations > 0 {
		p.Stat("pointsPerUpdate", float64(commitedPoints)/float64(updateOperations))
	} else {
		p.Stat("pointsPerUpdate", 0.0)
	}

	p.Stat("created", float64(created))

}
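The atomic.AddUint32(&x, -delta) calls above subtract by exploiting unsigned wraparound: negating a uint32 variable is legal Go and equals ^delta + 1. The sync/atomic documentation writes the same idiom as AddUint32(&x, ^uint32(delta-1)). A small sketch showing the two forms agree:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var a, b uint32 = 100, 100
	delta := uint32(30)

	// Form used in the example above: negate an unsigned variable.
	atomic.AddUint32(&a, -delta)

	// Equivalent form spelled out in the sync/atomic docs.
	atomic.AddUint32(&b, ^uint32(delta-1))

	fmt.Println(a, b) // 70 70
}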
Example #6
func (c *LRUCache) gc() {
	time.Sleep(30 * time.Second)
	ms := new(runtime.MemStats)
	var gcFactor uint32 = 0
	for {
		runtime.ReadMemStats(ms)

		// update gc factor
		if ms.HeapAlloc < c.configuration.size {
			// Reset the gc factor only once the heap is below 90% of the limit (hysteresis).
			if gcFactor != 0 && ms.HeapAlloc < uint64(0.9*float64(c.configuration.size)) {
				c.setGcFactor(0)
				gcFactor = 0
			}
		} else {
			if gcFactor == 0 || gcFactor != atomic.LoadUint32(&c.gcFactorCfg) {
				gcFactor = atomic.LoadUint32(&c.gcFactorCfg)
				c.setGcFactor(gcFactor)
			}
		}

		// notify that gc pressure is active
		if gcFactor != 0 && c.configuration.callback != nil {
			c.configuration.callback()
		}

		time.Sleep(10 * time.Second)
	}
}
Example #7
func (s *IDGenerator) GetStream() (int, bool) {
	// based closely on the java-driver stream ID generator
	// avoid false sharing between subsequent requests.
	offset := atomic.LoadUint32(&s.offset)
	for !atomic.CompareAndSwapUint32(&s.offset, offset, (offset+1)%s.numBuckets) {
		offset = atomic.LoadUint32(&s.offset)
	}
	offset = (offset + 1) % s.numBuckets

	for i := uint32(0); i < s.numBuckets; i++ {
		pos := int((i + offset) % s.numBuckets)

		bucket := atomic.LoadUint64(&s.streams[pos])
		if bucket == math.MaxUint64 {
			// all streams in use
			continue
		}

		for j := 0; j < bucketBits; j++ {
			mask := uint64(1 << streamOffset(j))
			if bucket&mask == 0 {
				if atomic.CompareAndSwapUint64(&s.streams[pos], bucket, bucket|mask) {
					atomic.AddInt32(&s.inuseStreams, 1)
					return streamFromBucket(int(pos), j), true
				}
				bucket = atomic.LoadUint64(&s.streams[offset])
			}
		}
	}

	return 0, false
}
Example #8
func (t *lockstepBusySim) loop(e *entity) {
	lastnotify := t.notify
	t.wg.Done()
	runtime.Gosched()
	for {
		notify := atomic.LoadUint32(&t.notify)
		backoff := 0
		for notify == lastnotify {
			if backoff < 5 {
				runtime.Gosched()
			} else {
				time.Sleep(1000) // bare integer duration: 1000ns = 1µs
			}
			backoff++
			notify = atomic.LoadUint32(&t.notify)
		}
		lastnotify = notify
		fn := t.state
		if fn == nil {
			t.wg.Done()
			break
		}
		fn(e)
		t.wg.Done()
	}
}
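This loop is a spin-wait with escalating backoff: a few rounds of runtime.Gosched(), then short sleeps, re-checking an atomic generation counter each time. A minimal sketch of the same wait-for-change pattern (waitForChange and gen are illustrative names, not from the example's package):

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"
)

// waitForChange spins until *gen differs from last, yielding first and
// sleeping once the cheap yields have not helped.
func waitForChange(gen *uint32, last uint32) uint32 {
	backoff := 0
	for {
		if g := atomic.LoadUint32(gen); g != last {
			return g
		}
		if backoff < 5 {
			runtime.Gosched() // cheap: let another goroutine run
		} else {
			time.Sleep(time.Microsecond) // escalate to a real sleep
		}
		backoff++
	}
}

func main() {
	var gen uint32
	go func() {
		time.Sleep(10 * time.Millisecond)
		atomic.AddUint32(&gen, 1)
	}()
	fmt.Println(waitForChange(&gen, 0)) // 1
}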
Example #9
// Establishes a new socket connection to the 9P server and creates
// a client object for it. Negotiates the dialect and msize for the
// connection. Returns a Clnt object, or an error.
func Connect(c net.Conn, msize uint32, dotu bool) (*Clnt, error) {
	clnt := NewClnt(c, msize, dotu)
	ver := "9P2000"
	if clnt.Dotu {
		ver = "9P2000.u"
	}

	clntmsize := atomic.LoadUint32(&clnt.Msize)
	tc := ninep.NewFcall(clntmsize)
	err := ninep.PackTversion(tc, clntmsize, ver)
	if err != nil {
		return nil, err
	}

	rc, err := clnt.Rpc(tc)
	if err != nil {
		return nil, err
	}

	if rc.Msize < atomic.LoadUint32(&clnt.Msize) {
		atomic.StoreUint32(&clnt.Msize, rc.Msize)
	}

	clnt.Dotu = rc.Version == "9P2000.u" && clnt.Dotu
	return clnt, nil
}
Example #10
// Test that Pool does not hold pointers to previously cached
// resources
func TestPoolGC(t *testing.T) {
	var p Pool
	var fin uint32
	const N = 100
	for i := 0; i < N; i++ {
		v := new(string)
		runtime.SetFinalizer(v, func(vv *string) {
			atomic.AddUint32(&fin, 1)
		})
		p.Put(v)
	}
	for i := 0; i < N; i++ {
		p.Get()
	}
	for i := 0; i < 5; i++ {
		runtime.GC()
		time.Sleep(time.Millisecond)
		// 1 pointer can remain on stack or elsewhere
		if atomic.LoadUint32(&fin) >= N-1 {
			return
		}

		// gccgo has a less precise heap.
		if runtime.Compiler == "gccgo" && atomic.LoadUint32(&fin) >= N-5 {
			return
		}
	}
	t.Fatalf("only %v out of %v resources are finalized",
		atomic.LoadUint32(&fin), N)
}
Example #11
// TestV2NoRetryEOF tests destructive api calls won't retry on a disconnection.
func TestV2NoRetryEOF(t *testing.T) {
	defer testutil.AfterTest(t)
	// generate an EOF response; specify the address so it appears first in the sorted endpoint list
	lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("eof:123.%d.sock", os.Getpid()))
	defer lEOF.Close()
	tries := uint32(0)
	go func() {
		for {
			conn, err := lEOF.Accept()
			if err != nil {
				return
			}
			atomic.AddUint32(&tries, 1)
			conn.Close()
		}
	}()
	eofURL := integration.UrlScheme + "://" + lEOF.Addr().String()
	cli := integration.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil)
	kapi := client.NewKeysAPI(cli)
	for i, f := range noRetryList(kapi) {
		startTries := atomic.LoadUint32(&tries)
		if err := f(); err == nil {
			t.Errorf("#%d: expected EOF error, got nil", i)
		}
		endTries := atomic.LoadUint32(&tries)
		if startTries+1 != endTries {
			t.Errorf("#%d: expected 1 try, got %d", i, endTries-startTries)
		}
	}
}
Example #12
func getAllCallbackGauges() ([]IntMetric, []FloatMetric) {
	numIDs := int(atomic.LoadUint32(curIntCbID))
	retint := make([]IntMetric, numIDs)

	for i := 0; i < numIDs; i++ {
		retint[i] = IntMetric{
			Name: intcbnames[i],
			Val:  intcallbacks[i](),
			Tgs:  intcbtags[i],
		}
	}

	numIDs = int(atomic.LoadUint32(curFloatCbID))
	retfloat := make([]FloatMetric, numIDs)

	for i := 0; i < numIDs; i++ {
		retfloat[i] = FloatMetric{
			Name: floatcbnames[i],
			Val:  floatcallbacks[i](),
			Tgs:  floatcbtags[i],
		}
	}

	return retint, retfloat
}
Example #13
// Tests that fast sync gets disabled as soon as a real block is successfully
// imported into the blockchain.
func TestFastSyncDisabling(t *testing.T) {
	// Create a pristine protocol manager, check that fast sync is left enabled
	pmEmpty := newTestProtocolManagerMust(t, true, 0, nil, nil)
	if atomic.LoadUint32(&pmEmpty.fastSync) == 0 {
		t.Fatalf("fast sync disabled on pristine blockchain")
	}
	// Create a full protocol manager, check that fast sync gets disabled
	pmFull := newTestProtocolManagerMust(t, true, 1024, nil, nil)
	if atomic.LoadUint32(&pmFull.fastSync) == 1 {
		t.Fatalf("fast sync not disabled on non-empty blockchain")
	}
	// Sync up the two peers
	io1, io2 := p2p.MsgPipe()

	go pmFull.handle(pmFull.newPeer(63, p2p.NewPeer(discover.NodeID{}, "empty", nil), io2))
	go pmEmpty.handle(pmEmpty.newPeer(63, p2p.NewPeer(discover.NodeID{}, "full", nil), io1))

	time.Sleep(250 * time.Millisecond)
	pmEmpty.synchronise(pmEmpty.peers.BestPeer())

	// Check that fast sync was disabled
	if atomic.LoadUint32(&pmEmpty.fastSync) == 1 {
		t.Fatalf("fast sync not disabled after successful synchronisation")
	}
}
Example #14
// synchronise tries to sync up our local block chain with a remote peer.
func (pm *ProtocolManager) synchronise(peer *peer) {
	// Short circuit if no peers are available
	if peer == nil {
		return
	}
	// Make sure the peer's TD is higher than our own
	currentBlock := pm.blockchain.CurrentBlock()
	td := pm.blockchain.GetTd(currentBlock.Hash())

	pHead, pTd := peer.Head()
	if pTd.Cmp(td) <= 0 {
		return
	}
	// Otherwise try to sync with the downloader
	mode := downloader.FullSync
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		mode = downloader.FastSync
	}
	if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
		return
	}
	atomic.StoreUint32(&pm.synced, 1) // Mark initial sync done

	// If fast sync was enabled, and we synced up, disable it
	if atomic.LoadUint32(&pm.fastSync) == 1 {
		// Disable fast sync if we indeed have something in our chain
		if pm.blockchain.CurrentBlock().NumberU64() > 0 {
			glog.V(logger.Info).Infof("fast sync complete, auto disabling")
			atomic.StoreUint32(&pm.fastSync, 0)
		}
	}
}
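pm.fastSync and pm.synced are uint32 fields used as atomic booleans, the standard pattern before sync/atomic.Bool existed (Go 1.19). A minimal sketch of that pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync/atomic"
)

// flag is a boolean usable from multiple goroutines, stored in a uint32.
type flag struct{ v uint32 }

func (f *flag) Set(on bool) {
	var u uint32
	if on {
		u = 1
	}
	atomic.StoreUint32(&f.v, u)
}

func (f *flag) IsSet() bool { return atomic.LoadUint32(&f.v) == 1 }

func main() {
	var fastSync flag
	fastSync.Set(true)
	fmt.Println(fastSync.IsSet()) // true
	fastSync.Set(false)
	fmt.Println(fastSync.IsSet()) // false
}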
Example #15
// Test that Pool does not hold pointers to previously cached
// resources
func TestPoolGC(t *testing.T) {
	var p Pool
	var fin uint32
	const N = 100
	for i := 0; i < N; i++ {
		v := new(string)
		runtime.SetFinalizer(v, func(vv *string) {
			atomic.AddUint32(&fin, 1)
		})
		p.Put(uint32(i), v)
	}
	for i := 0; i < N; i++ {
		p.Get(uint32(i))
	}
	for i := 0; i < 5; i++ {
		runtime.GC()
		time.Sleep(time.Duration(i*100+10) * time.Millisecond)
		// 1 pointer can remain on stack or elsewhere
		if atomic.LoadUint32(&fin) >= N-1 {
			return
		}
	}
	t.Fatalf("only %v out of %v resources are finalized",
		atomic.LoadUint32(&fin), N)
}
Example #16
// Trap sets up a simplified signal "trap", appropriate for common
// behavior expected from a vanilla unix command-line tool in general
// (and the Docker engine in particular).
//
// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
// * If SIGINT or SIGTERM are repeated 3 times before cleanup is complete, then cleanup is
// skipped and the process is terminated directly.
// * If "DEBUG" is set in the environment, SIGQUIT causes an exit without cleanup.
//
func Trap(cleanup func()) {
	c := make(chan os.Signal, 1)
	signals := []os.Signal{os.Interrupt, syscall.SIGTERM}
	if os.Getenv("DEBUG") == "" {
		signals = append(signals, syscall.SIGQUIT)
	}
	gosignal.Notify(c, signals...)
	go func() {
		interruptCount := uint32(0)
		for sig := range c {
			go func(sig os.Signal) {
				log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
				switch sig {
				case os.Interrupt, syscall.SIGTERM:
					// If the user really wants to interrupt, let him do so.
					if atomic.LoadUint32(&interruptCount) < 3 {
						atomic.AddUint32(&interruptCount, 1)
						// Initiate the cleanup only once
						if atomic.LoadUint32(&interruptCount) == 1 {
							// Call cleanup handler
							cleanup()
							os.Exit(0)
						} else {
							return
						}
					} else {
						log.Printf("Force shutdown of docker, interrupting cleanup\n")
					}
				case syscall.SIGQUIT:
				}
				os.Exit(128 + int(sig.(syscall.Signal)))
			}(sig)
		}
	}()
}
Example #17
func (rcv *UDP) statWorker(exit chan bool) {
	ticker := time.NewTicker(rcv.metricInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			metricsReceived := atomic.LoadUint32(&rcv.metricsReceived)
			atomic.AddUint32(&rcv.metricsReceived, -metricsReceived)
			rcv.Stat("udp.metricsReceived", float64(metricsReceived))

			incompleteReceived := atomic.LoadUint32(&rcv.incompleteReceived)
			atomic.AddUint32(&rcv.incompleteReceived, -incompleteReceived)
			rcv.Stat("udp.incompleteReceived", float64(incompleteReceived))

			errors := atomic.LoadUint32(&rcv.errors)
			atomic.AddUint32(&rcv.errors, -errors)
			rcv.Stat("udp.errors", float64(errors))

			logrus.WithFields(logrus.Fields{
				"metricsReceived":    int(metricsReceived),
				"incompleteReceived": int(incompleteReceived),
				"errors":             int(errors),
			}).Info("[udp] doCheckpoint()")

		case <-exit:
			rcv.conn.Close()
			return
		}
	}
}
Example #18
func (wrapper *workerWrapper) Loop() {
	tout := time.Duration(5) // multiplied by time.Millisecond below: a 5ms poll interval
	// We can't simply check whether jobChan is closed here.
	for !wrapper.worker.TunnyReady() {
		if atomic.LoadUint32(&wrapper.poolOpen) == 0 {
			break
		}
		time.Sleep(tout * time.Millisecond)
	}

	wrapper.readyChan <- 1

	for data := range wrapper.jobChan {
		wrapper.outputChan <- wrapper.worker.TunnyJob(data)
		for !wrapper.worker.TunnyReady() {
			if atomic.LoadUint32(&wrapper.poolOpen) == 0 {
				break
			}
			time.Sleep(tout * time.Millisecond)
		}
		wrapper.readyChan <- 1
	}

	close(wrapper.readyChan)
	close(wrapper.outputChan)
}
Example #19
func (wrapper *workerWrapper) Loop() {

	// TODO: Configure?
	tout := time.Duration(5)

	for !wrapper.worker.TunnyReady() {
		// It's sad that we can't simply check if jobChan is closed here.
		if atomic.LoadUint32(&wrapper.poolOpen) == 0 {
			break
		}
		time.Sleep(tout * time.Millisecond)
	}

	wrapper.readyChan <- 1

	for data := range wrapper.jobChan {
		wrapper.outputChan <- wrapper.worker.TunnyJob(data)
		for !wrapper.worker.TunnyReady() {
			if atomic.LoadUint32(&wrapper.poolOpen) == 0 {
				break
			}
			time.Sleep(tout * time.Millisecond)
		}
		wrapper.readyChan <- 1
	}

	close(wrapper.readyChan)
	close(wrapper.outputChan)

}
Example #20
// TestNetworkFailure tests that the connection manager handles a network
// failure gracefully.
func TestNetworkFailure(t *testing.T) {
	var dials uint32
	errDialer := func(network, address string) (net.Conn, error) {
		atomic.AddUint32(&dials, 1)
		return nil, errors.New("network down")
	}
	cmgr, err := New(&Config{
		TargetOutbound: 5,
		RetryDuration:  5 * time.Millisecond,
		Dial:           errDialer,
		GetNewAddress:  func() (string, error) { return "127.0.0.1:18555", nil },
		OnConnection: func(c *ConnReq, conn net.Conn) {
			t.Fatalf("network failure: got unexpected connection - %v", c.Addr)
		},
	})
	if err != nil {
		t.Fatalf("New error: %v", err)
	}
	cmgr.Start()
	time.AfterFunc(10*time.Millisecond, cmgr.Stop)
	cmgr.Wait()
	wantMaxDials := uint32(75)
	if atomic.LoadUint32(&dials) > wantMaxDials {
		t.Fatalf("network failure: unexpected number of dials - got %v, want < %v",
			atomic.LoadUint32(&dials), wantMaxDials)
	}
}
Example #21
func getAllGauges() ([]IntMetric, []FloatMetric) {
	numIDs := int(atomic.LoadUint32(curIntGaugeID))
	retint := make([]IntMetric, numIDs)

	for i := 0; i < numIDs; i++ {
		retint[i] = IntMetric{
			Name: intgnames[i],
			Val:  atomic.LoadUint64(&intgauges[i]),
			Tgs:  intgtags[i],
		}
	}

	numIDs = int(atomic.LoadUint32(curFloatGaugeID))
	retfloat := make([]FloatMetric, numIDs)

	for i := 0; i < numIDs; i++ {
		// The uint64 bit pattern of the float value needs to be converted back
		// into a float64 here. This is a literal reinterpretation of the same
		// exact bits.
		intval := atomic.LoadUint64(&floatgauges[i])

		retfloat[i] = FloatMetric{
			Name: floatgnames[i],
			Val:  math.Float64frombits(intval),
			Tgs:  floatgtags[i],
		}
	}

	return retint, retfloat
}
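Storing float64 gauges in uint64 slots works because math.Float64bits and math.Float64frombits reinterpret the same eight bytes losslessly, and the uint64 side can be read and written with sync/atomic. A sketch of the write/read pair (storeFloat and loadFloat are illustrative names):

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// storeFloat publishes v atomically into a uint64 slot.
func storeFloat(slot *uint64, v float64) {
	atomic.StoreUint64(slot, math.Float64bits(v))
}

// loadFloat reads the float64 back out of the slot.
func loadFloat(slot *uint64) float64 {
	return math.Float64frombits(atomic.LoadUint64(slot))
}

func main() {
	var gauge uint64
	storeFloat(&gauge, 3.14)
	fmt.Println(loadFloat(&gauge)) // 3.14
}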
Example #22
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (c *BoundContract) Call(opts *CallOpts, result interface{}, method string, params ...interface{}) error {
	// Don't crash on a lazy user
	if opts == nil {
		opts = new(CallOpts)
	}
	// Make sure we have a contract to operate on, and bail out otherwise
	if (opts.Pending && atomic.LoadUint32(&c.pendingHasCode) == 0) || (!opts.Pending && atomic.LoadUint32(&c.latestHasCode) == 0) {
		if code, err := c.caller.HasCode(c.address, opts.Pending); err != nil {
			return err
		} else if !code {
			return ErrNoCode
		}
		if opts.Pending {
			atomic.StoreUint32(&c.pendingHasCode, 1)
		} else {
			atomic.StoreUint32(&c.latestHasCode, 1)
		}
	}
	// Pack the input, call and unpack the results
	input, err := c.abi.Pack(method, params...)
	if err != nil {
		return err
	}
	output, err := c.caller.ContractCall(c.address, input, opts.Pending)
	if err != nil {
		return err
	}
	return c.abi.Unpack(result, method, output)
}
Example #23
func (this *Connection) flush() {
	current := this.Elapsed()

	if this.State() == StateTerminated {
		return
	}
	if this.State() == StateActive && current-atomic.LoadUint32(&this.lastIncomingTime) >= 30000 {
		this.Close()
	}
	if this.State() == StateReadyToClose && this.sendingWorker.IsEmpty() {
		this.SetState(StateTerminating)
	}

	if this.State() == StateTerminating {
		log.Debug("KCP|Connection: #", this.conv, " sending terminating cmd.")
		seg := NewCmdOnlySegment()
		defer seg.Release()

		seg.Conv = this.conv
		seg.Command = CommandTerminate
		this.output.Write(seg)
		this.output.Flush()

		if current-atomic.LoadUint32(&this.stateBeginTime) > 8000 {
			this.SetState(StateTerminated)
		}
		return
	}
	if this.State() == StatePeerTerminating && current-atomic.LoadUint32(&this.stateBeginTime) > 4000 {
		this.SetState(StateTerminating)
	}

	if this.State() == StateReadyToClose && current-atomic.LoadUint32(&this.stateBeginTime) > 15000 {
		this.SetState(StateTerminating)
	}

	// flush acknowledges
	this.receivingWorker.Flush(current)
	this.sendingWorker.Flush(current)

	if this.sendingWorker.PingNecessary() || this.receivingWorker.PingNecessary() || current-atomic.LoadUint32(&this.lastPingTime) >= 5000 {
		seg := NewCmdOnlySegment()
		seg.Conv = this.conv
		seg.Command = CommandPing
		seg.ReceivinNext = this.receivingWorker.nextNumber
		seg.SendingNext = this.sendingWorker.firstUnacknowledged
		if this.State() == StateReadyToClose {
			seg.Option = SegmentOptionClose
		}
		this.output.Write(seg)
		atomic.StoreUint32(&this.lastPingTime, current) // store atomically: read via LoadUint32 above
		this.sendingWorker.MarkPingNecessary(false)
		this.receivingWorker.MarkPingNecessary(false)
		seg.Release()
	}

	// flush remaining segments
	this.output.Flush()
}
Example #24
func (d *dbStat) MarshalJSON() ([]byte, error) {
	m := map[string]interface{}{}
	m["written"] = atomic.LoadUint64(&d.written)
	m["qlen"] = atomic.LoadUint32(&d.qlen)
	m["opens"] = atomic.LoadUint32(&d.opens)
	m["closes"] = atomic.LoadUint32(&d.closes)
	return json.Marshal(m)
}
Example #25
func Timeout() {
	for {
		// A single-case select is just a channel receive; the delay is
		// re-read atomically on every iteration.
		<-time.After(time.Second * time.Duration(atomic.LoadUint32(&delay)))
		log.Print("Timeout...........", atomic.LoadUint32(&delay))
	}
}
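delay is assumed here to be a package-level uint32; because it is re-read atomically on every iteration, another goroutine can retune the interval at runtime with atomic.StoreUint32. A hypothetical sketch of that pairing:

package main

import (
	"log"
	"sync/atomic"
	"time"
)

var delay uint32 = 1 // seconds between log lines; tunable at runtime

func timeoutLoop() {
	for {
		<-time.After(time.Second * time.Duration(atomic.LoadUint32(&delay)))
		log.Print("Timeout...........", atomic.LoadUint32(&delay))
	}
}

func main() {
	go timeoutLoop()
	time.Sleep(2500 * time.Millisecond)
	atomic.StoreUint32(&delay, 2) // picked up on the next iteration
	time.Sleep(5 * time.Second)
}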
Example #26
// Get returns a buffer with length n.
func (p *BufferPool) Get(n int) []byte {
	atomic.AddUint32(&p.get, 1)

	if poolNum := p.poolNum(n); poolNum == 0 {
		// Fast path.
		if b, ok := p.pool[0].Get().([]byte); ok {
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				panic("not reached")
			}
		} else {
			atomic.AddUint32(&p.miss, 1)
		}

		return make([]byte, n, p.baseline0)
	} else {
		sizePtr := &p.size[poolNum-1]

		if b, ok := p.pool[poolNum].Get().([]byte); ok {
			switch {
			case cap(b) > n:
				atomic.AddUint32(&p.less, 1)
				return b[:n]
			case cap(b) == n:
				atomic.AddUint32(&p.equal, 1)
				return b[:n]
			default:
				atomic.AddUint32(&p.greater, 1)
				if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
					p.pool[poolNum].Put(b)
				}
			}
		} else {
			atomic.AddUint32(&p.miss, 1)
		}

		if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
			if size == 0 {
				atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
			} else {
				sizeMissPtr := &p.sizeMiss[poolNum-1]
				if atomic.AddUint32(sizeMissPtr, 1) == 20 {
					atomic.StoreUint32(sizePtr, uint32(n))
					atomic.StoreUint32(sizeMissPtr, 0)
				}
			}
			return make([]byte, n)
		} else {
			return make([]byte, n, size)
		}
	}
}
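The size-tracking branch above initializes a pool's target capacity with a single CompareAndSwapUint32, so only the first writer wins, and promotes the capacity after 20 undersized allocations. A stripped-down sketch of that set-once-then-promote pattern (all names are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
)

// noteRequest records a requested size n against the tracked capacity.
// The first caller sets the capacity; later, 20 oversized requests
// promote the capacity to the newer, larger n.
func noteRequest(size, sizeMiss *uint32, n uint32) {
	cur := atomic.LoadUint32(size)
	if n <= cur {
		return // current capacity already covers this request
	}
	if cur == 0 {
		// Set-once: only the first writer wins the race.
		atomic.CompareAndSwapUint32(size, 0, n)
		return
	}
	if atomic.AddUint32(sizeMiss, 1) == 20 {
		atomic.StoreUint32(size, n) // promote after repeated misses
		atomic.StoreUint32(sizeMiss, 0)
	}
}

func main() {
	var size, miss uint32
	noteRequest(&size, &miss, 64) // first request sets the capacity
	for i := 0; i < 20; i++ {
		noteRequest(&size, &miss, 128) // 20 misses promote it
	}
	fmt.Println(size) // 128
}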
Example #27
// Allocate will store the block position of the last allocated number/bit, so
// the search can continue from there. Hopefully this amortizes the O(N) cost of
// scanning the bitmap over time, when allocations and deallocations are
// balanced.
func (a *concurrentBitmap) Allocate() (uint64, error) {
	var (
		blocks    = a.mem.Blocks()
		lastBlock = uint32(a.Max() >> 5) // max / 32, each block has 32 bits
		hint      = a.hint
	)
	if r := a.Max() % 32; r != 0 {
		// needs some more bits in an extra block
		lastBlock++
	}
	if hint >= lastBlock {
		hint %= lastBlock
	}

blocks:
	for j, i := uint32(0), hint; j <= lastBlock; i++ {
		j++
		if i >= lastBlock {
			i %= lastBlock
		}
		base := uint64(i) << 5 // i * 32

		block := atomic.LoadUint32(&blocks[i])
		if block == 0xFFFFFFFF {
			continue // all being used
		}

	retry:
		// try all 32 bits on this block
		for mask, offset := uint32(0x80000000), uint32(0); mask != 0x00000000; mask >>= 1 {
			if i == lastBlock-1 {
				// this is the last block, only try the necessary bits
				if r := uint32(a.Max() % 32); r != 0 && offset >= r {
					break retry
				}
			}
			bitSet := block | mask
			if bitSet == block {
				offset++
				continue retry // bit was already allocated
			}
			if atomic.CompareAndSwapUint32(&blocks[i], block, bitSet) {
				// allocated! start from here next time
				atomic.StoreUint32(&a.hint, i)
				return base + uint64(offset), nil
			} else {
				// block has changed, reload and retry
				block = atomic.LoadUint32(&blocks[i])
				if block == 0xFFFFFFFF {
					continue blocks // all being used
				}
				goto retry
			}
		}
	}
	return 0, ErrNoFreeNumber
}
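The core move in Allocate is: load a 32-bit word, find a zero bit, and claim it with one CompareAndSwapUint32 so concurrent allocators cannot take the same bit. A stripped-down, single-word sketch (illustrative names; no hint or wraparound handling):

package main

import (
	"fmt"
	"sync/atomic"
)

// allocBit claims the lowest free bit in word and returns its index,
// or -1 if all 32 bits are taken.
func allocBit(word *uint32) int {
	for {
		old := atomic.LoadUint32(word)
		if old == 0xFFFFFFFF {
			return -1 // every bit is in use
		}
		// Find the lowest zero bit; guaranteed to exist here.
		bit := 0
		for old&(1<<uint(bit)) != 0 {
			bit++
		}
		if atomic.CompareAndSwapUint32(word, old, old|1<<uint(bit)) {
			return bit
		}
		// Lost the race; reload and retry.
	}
}

func main() {
	var w uint32
	fmt.Println(allocBit(&w), allocBit(&w)) // 0 1
}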
Example #28
// If the data size or the number of disk files exceeds its quota,
// we block client write operations.
func (filter *ProxyFilterImpl) PassFilter(op_code int) bool {
	// When we read the state of 'mfblocked', the state of 'mablocked' may
	// change from 'UNBLOCKED' to 'BLOCKED', so our implementation only
	// achieves a soft limit, not a hard limit. Since we have over-quota
	// storage space settings, this is not a big issue.
	return (op_code != OP_UPDATE && op_code != OP_INSERT) ||
		(atomic.LoadUint32(&filter.mablocked) == UNBLOCKED &&
			atomic.LoadUint32(&filter.mfblocked) == UNBLOCKED)
}
Example #29
File: pool.go Project: ovh/tat
func (p *ConnPool) Stats() *Stats {
	return &Stats{
		Requests:   atomic.LoadUint32(&p.stats.Requests),
		Hits:       atomic.LoadUint32(&p.stats.Hits),
		Timeouts:   atomic.LoadUint32(&p.stats.Timeouts),
		TotalConns: uint32(p.Len()),
		FreeConns:  uint32(p.FreeLen()),
	}
}
Example #30
func (p *ConnPool) Stats() *PoolStats {
	stats := p.stats // copy the struct, then refresh the counters below with atomic loads
	stats.Requests = atomic.LoadUint32(&p.stats.Requests)
	stats.Waits = atomic.LoadUint32(&p.stats.Waits)
	stats.Timeouts = atomic.LoadUint32(&p.stats.Timeouts)
	stats.TotalConns = uint32(p.Len())
	stats.FreeConns = uint32(p.FreeLen())
	return &stats
}