Example #1
File: reader.go Project: perryhau/nsq
// update the reader's ready state, propagating the new max-in-flight to each connection as appropriate
func (q *Reader) SetMaxInFlight(maxInFlight int) {
	if atomic.LoadInt32(&q.stopFlag) == 1 {
		return
	}

	if maxInFlight > MaxReadyCount {
		log.Printf("WARNING: tried to SetMaxInFlight() > %d, truncating...", MaxReadyCount)
		maxInFlight = MaxReadyCount
	}

	if q.maxInFlight == maxInFlight {
		return
	}

	q.maxInFlight = maxInFlight
	for _, c := range q.nsqConnections {
		if atomic.LoadInt32(&c.stopFlag) == 1 {
			continue
		}
		s := q.ConnectionMaxInFlight()
		if q.VerboseLogging {
			log.Printf("[%s] RDY %d", c, s)
		}
		atomic.StoreInt64(&c.rdyCount, int64(s))
		err := c.sendCommand(Ready(s))
		if err != nil {
			handleError(q, c, fmt.Sprintf("[%s] error sending RDY %d - %s", c, s, err.Error()))
		}
	}
}
Example #2
// Ensure that the mock's Tick channel sends at the correct time.
func TestMock_Tick(t *testing.T) {
	var n int32
	clock := NewMock()

	// Create a channel to increment every 10 seconds.
	go func() {
		tick := clock.Tick(10 * time.Second)
		for {
			<-tick
			atomic.AddInt32(&n, 1)
		}
	}()
	gosched()

	// Move clock forward to just before the first tick.
	clock.Add(9 * time.Second)
	if atomic.LoadInt32(&n) != 0 {
		t.Fatalf("expected 0, got %d", n)
	}

	// Move clock forward to the start of the first tick.
	clock.Add(1 * time.Second)
	if atomic.LoadInt32(&n) != 1 {
		t.Fatalf("expected 1, got %d", n)
	}

	// Move clock forward over several ticks.
	clock.Add(30 * time.Second)
	if atomic.LoadInt32(&n) != 4 {
		t.Fatalf("expected 4, got %d", n)
	}
}
Example #3
// ResetElectionTimeoutMs sets the minimum and maximum election timeouts to the
// passed values, and returns the old values.
func ResetElectionTimeoutMs(newMin, newMax int) (int, int) {
	oldMin := atomic.LoadInt32(&minimumElectionTimeoutMs)
	oldMax := atomic.LoadInt32(&maximumElectionTimeoutMs)
	atomic.StoreInt32(&minimumElectionTimeoutMs, int32(newMin))
	atomic.StoreInt32(&maximumElectionTimeoutMs, int32(newMax))
	return int(oldMin), int(oldMax)
}
Example #4
File: handshake.go Project: yuewko/coredns
// checkLimitsForObtainingNewCerts checks to see if name can be issued right
// now according to mitigating factors we keep track of and preferences the
// user has set. If a non-nil error is returned, do not issue a new certificate
// for name.
func checkLimitsForObtainingNewCerts(name string) error {
	// User can set hard limit for number of certs for the process to issue
	if onDemandMaxIssue > 0 && atomic.LoadInt32(OnDemandIssuedCount) >= onDemandMaxIssue {
		return fmt.Errorf("%s: maximum certificates issued (%d)", name, onDemandMaxIssue)
	}

	// Make sure name hasn't failed a challenge recently
	failedIssuanceMu.RLock()
	when, ok := failedIssuance[name]
	failedIssuanceMu.RUnlock()
	if ok {
		return fmt.Errorf("%s: throttled; refusing to issue cert since last attempt on %s failed", name, when.String())
	}

	// Make sure, if we've issued a few certificates already, that we haven't
	// issued any recently
	lastIssueTimeMu.Lock()
	since := time.Since(lastIssueTime)
	lastIssueTimeMu.Unlock()
	if atomic.LoadInt32(OnDemandIssuedCount) >= 10 && since < 10*time.Minute {
		return fmt.Errorf("%s: throttled; last certificate was obtained %v ago", name, since)
	}

	// 👍Good to go
	return nil
}
Example #5
File: consumer.go Project: pgpst/pgpst
// ConnectToNSQLookupd adds an nsqlookupd address to the list for this Consumer instance.
//
// If it is the first to be added, it initiates an HTTP request to discover nsqd
// producers for the configured topic.
//
// A goroutine is spawned to handle continual polling.
func (r *Consumer) ConnectToNSQLookupd(addr string) error {
	if atomic.LoadInt32(&r.stopFlag) == 1 {
		return errors.New("consumer stopped")
	}
	if atomic.LoadInt32(&r.runningHandlers) == 0 {
		return errors.New("no handlers")
	}

	if err := validatedLookupAddr(addr); err != nil {
		return err
	}

	atomic.StoreInt32(&r.connectedFlag, 1)

	r.mtx.Lock()
	for _, x := range r.lookupdHTTPAddrs {
		if x == addr {
			r.mtx.Unlock()
			return nil
		}
	}
	r.lookupdHTTPAddrs = append(r.lookupdHTTPAddrs, addr)
	numLookupd := len(r.lookupdHTTPAddrs)
	r.mtx.Unlock()

	// if this is the first one, kick off the go loop
	if numLookupd == 1 {
		r.queryLookupd()
		r.wg.Add(1)
		go r.lookupdLoop()
	}

	return nil
}
Example #6
File: node.go Project: flike/kingshard
func (n *Node) checkSlave() {
	n.RLock()
	if n.Slave == nil {
		n.RUnlock()
		return
	}
	slaves := make([]*DB, len(n.Slave))
	copy(slaves, n.Slave)
	n.RUnlock()

	for i := 0; i < len(slaves); i++ {
		if err := slaves[i].Ping(); err != nil {
			golog.Error("Node", "checkSlave", "Ping", 0, "db.Addr", slaves[i].Addr(), "error", err.Error())
		} else {
			if atomic.LoadInt32(&(slaves[i].state)) == Down {
				golog.Info("Node", "checkSlave", "Slave up", 0, "db.Addr", slaves[i].Addr())
				n.UpSlave(slaves[i].addr)
			}
			slaves[i].SetLastPing()
			if atomic.LoadInt32(&(slaves[i].state)) != ManualDown {
				atomic.StoreInt32(&(slaves[i].state), Up)
			}
			continue
		}

		if int64(n.DownAfterNoAlive) > 0 && time.Now().Unix()-slaves[i].GetLastPing() > int64(n.DownAfterNoAlive/time.Second) {
			golog.Info("Node", "checkSlave", "Slave down", 0,
				"db.Addr", slaves[i].Addr(),
				"slave_down_time", int64(n.DownAfterNoAlive/time.Second))
			//If can't ping slave after DownAfterNoAlive, set slave Down
			n.DownSlave(slaves[i].addr, Down)
		}
	}
}
Example #7
func (r *RouteFetcher) startEventCycle() {
	go func() {
		useCachedToken := true
		for {
			r.logger.Debug("fetching-token")
			token, err := r.UaaClient.FetchToken(useCachedToken)
			if err != nil {
				metrics.IncrementCounter(TokenFetchErrors)
				r.logger.Error("failed-to-fetch-token", err)
			} else {
				r.logger.Debug("token-fetched-successfully")
				if atomic.LoadInt32(&r.stopEventSource) == 1 {
					return
				}
				err = r.subscribeToEvents(token)
				if err != nil && err.Error() == "unauthorized" {
					useCachedToken = false
				} else {
					useCachedToken = true
				}
				if atomic.LoadInt32(&r.stopEventSource) == 1 {
					return
				}
				time.Sleep(time.Duration(r.SubscriptionRetryIntervalInSeconds) * time.Second)
			}
		}
	}()
}
Example #8
File: elevator.go Project: fordaz/katas
func (elevator *Elevator) animate() {
	i := 0
	for elevator.getNumScheduledStops() > 0 && i < 100000 {
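		// Note: the plain *elevator.CurrentFloor dereferences in this loop
		// bypass the atomic accessors; they are only safe if no other
		// goroutine touches CurrentFloor while animate runs.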
		fmt.Printf("  Moving %s, current floor %d\n", elevator.Direction, *elevator.CurrentFloor)
		if elevator.Direction == Up {
			if atomic.LoadInt32(elevator.CurrentFloor) < int32(elevator.numFloors-1) {
				atomic.AddInt32(elevator.CurrentFloor, 1)
				time.Sleep(10 * time.Millisecond)
			}
		}
		if elevator.Direction == Down {
			if atomic.LoadInt32(elevator.CurrentFloor) > int32(0) {
				atomic.AddInt32(elevator.CurrentFloor, -1)
				time.Sleep(10 * time.Millisecond)
			}
		}
		if elevator.scheduledStops[*elevator.CurrentFloor] == 1 {
			fmt.Printf("Visiting floor %d\n", *elevator.CurrentFloor)
			elevator.descheduleStop(*elevator.CurrentFloor)
		}
		i = i + 1
	}
	if i >= 100000 {
		panic("Something is really wrong, elevator went cuckoo")
	}

	elevator.stopElevator()
}
Example #9
File: debug.go Project: jdhenke/jdh
// prints message every second until returned thunk is executed.
// useful for determining what function calls are hanging.
func CheckHang(message string, args ...interface{}) func() {
	done := int32(0)
	go func() {

		// do nothing if done is called quickly; reduces printed messages
		time.Sleep(1000 * time.Millisecond)
		if atomic.LoadInt32(&done) != 0 {
			return
		}

		// loop until done, printing message at each interval
		for atomic.LoadInt32(&done) == 0 {
			fmt.Printf(message, args...)
			fmt.Printf("\t There are %v live goroutines.\n", runtime.NumGoroutine())
			time.Sleep(1000 * time.Millisecond)
		}

	}()

	// thunk to be called by client when done
	return func() {
		atomic.StoreInt32(&done, 1)
	}
}
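A usage sketch for CheckHang (the call site below is an assumption, not part of the source): defer the returned thunk so the periodic printing stops as soon as the guarded call returns.

func slowOperation() {
	// Starts printing after ~1s only if slowOperation is still running;
	// the deferred thunk silences it on return.
	done := CheckHang("slowOperation is still running...\n")
	defer done()

	// ... long-running work ...
}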
Example #10
// String implements fmt.Stringer.
func (p *peer) String() string {
	return fmt.Sprintf("Peer %s [%s]", p.id,
		fmt.Sprintf("reputation %3d, ", atomic.LoadInt32(&p.rep))+
			fmt.Sprintf("capacity %3d, ", atomic.LoadInt32(&p.capacity))+
			fmt.Sprintf("ignored %4d", p.ignored.Size()),
	)
}
Example #11
File: app_test.go Project: nedmax/tsuru
func (s *S) TestRemoveUnits(c *gocheck.C) {
	var calls int32
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
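		// Note: a separate Load followed by Store is not one atomic
		// increment and can lose updates under concurrent requests;
		// atomic.AddInt32(&calls, 1) would be the race-free form.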
		v := atomic.LoadInt32(&calls)
		atomic.StoreInt32(&calls, v+1)
		w.WriteHeader(http.StatusNoContent)
	}))
	srvc := service.Service{Name: "mysql", Endpoint: map[string]string{"production": ts.URL}}
	err := srvc.Create()
	c.Assert(err, gocheck.IsNil)
	defer s.conn.Services().Remove(bson.M{"_id": "mysql"})
	app := App{
		Name:      "chemistry",
		Framework: "python",
	}
	instance := service.ServiceInstance{
		Name:        "my-inst",
		ServiceName: "mysql",
		Teams:       []string{s.team.Name},
		Apps:        []string{app.Name},
	}
	instance.Create()
	defer s.conn.ServiceInstances().Remove(bson.M{"name": "my-inst"})
	err = s.conn.Apps().Insert(app)
	c.Assert(err, gocheck.IsNil)
	defer s.conn.Apps().Remove(bson.M{"name": app.Name})
	c.Assert(err, gocheck.IsNil)
	s.provisioner.Provision(&app)
	defer s.provisioner.Destroy(&app)
	app.AddUnits(4)
	otherApp := App{Name: app.Name, Units: app.Units}
	err = otherApp.RemoveUnits(2)
	c.Assert(err, gocheck.IsNil)
	ts.Close()
	units := s.provisioner.GetUnits(&app)
	c.Assert(units, gocheck.HasLen, 3) // when you provision you already have one, so it's 4+1-2 (in provisioner, in app struct we have 2)
	c.Assert(units[0].Name, gocheck.Equals, "chemistry/0")
	c.Assert(units[1].Name, gocheck.Equals, "chemistry/3")
	c.Assert(units[2].Name, gocheck.Equals, "chemistry/4")
	err = app.Get()
	c.Assert(err, gocheck.IsNil)
	c.Assert(app.Framework, gocheck.Equals, "python")
	c.Assert(app.Units, gocheck.HasLen, 2)
	c.Assert(app.Units[0].Name, gocheck.Equals, "chemistry/3")
	c.Assert(app.Units[1].Name, gocheck.Equals, "chemistry/4")
	ok := make(chan int8)
	go func() {
		for _ = range time.Tick(1e3) {
			if atomic.LoadInt32(&calls) == 2 {
				ok <- 1
				return
			}
		}
	}()
	select {
	case <-ok:
	case <-time.After(2e9):
		c.Fatal("Did not call service endpoint twice.")
	}
}
Example #12
File: gostrich.go Project: xianxu/gostrich
// Ticks returns the (count, error) tick pair for the previous second (c1, e1) and for the current second (c2, e2)
func (t *QpsTracker) Ticks() (c1, e1, c2, e2 int32) {
	c1 = atomic.LoadInt32(&t.c[int((atomic.LoadInt64(&t.active)+int64(1))%2)])
	e1 = atomic.LoadInt32(&t.e[int((atomic.LoadInt64(&t.active)+int64(1))%2)])
	c2 = atomic.LoadInt32(&t.c[int((atomic.LoadInt64(&t.active))%2)])
	e2 = atomic.LoadInt32(&t.e[int((atomic.LoadInt64(&t.active))%2)])
	return
}
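Each load in Ticks re-reads t.active, so the previous-slot pair (c1, e1) and the current-slot pair (c2, e2) can be computed against different values of the index if it flips mid-call. A variant that snapshots the index once (a sketch assuming the same QpsTracker fields) keeps all four loads on one observed index:

func (t *QpsTracker) TicksSnapshot() (c1, e1, c2, e2 int32) {
	active := atomic.LoadInt64(&t.active)
	prev, cur := int((active+1)%2), int(active%2)
	c1 = atomic.LoadInt32(&t.c[prev])
	e1 = atomic.LoadInt32(&t.e[prev])
	c2 = atomic.LoadInt32(&t.c[cur])
	e2 = atomic.LoadInt32(&t.e[cur])
	return
}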
Example #13
func (t *testCourse) Elect(k icarus.LoginSession) (bool, error) {
	conc := atomic.AddInt32(&t.concurrent, 1)
	if conc > 1 && t.noConcurrent {
		log.Fatalf("2 goroutines electing at the same time!")
	}
	defer atomic.AddInt32(&t.concurrent, -1)

	if t.forbidden {
		log.Fatalf("Entered forbidden elect function.")
	}

	log.Printf("Electing... Remaining %d, Elected %d", atomic.LoadInt32(&t.remaining), atomic.LoadInt32(&t.elected))
	if reflect.TypeOf(k).Name() != "string" {
		log.Fatalf("Wrong type of login session.")
	}
	if k.(string) != session {
		log.Fatalf("Wrong value of login session.")
	}

	atomic.AddInt32(&t.elected, 1)
	time.Sleep(time.Duration(rand.Float32()*3000) * time.Millisecond)
	if atomic.LoadInt32(&t.errorToMake) > 0 {
		atomic.AddInt32(&t.errorToMake, -1)
		return false, errors.New("False Alarm")
	} else {
		if atomic.LoadInt32(&t.remaining) > 0 {
			atomic.AddInt32(&t.remaining, -1)
			return false, nil
		} else {
			log.Println("Elected!")
			return true, nil
		}
	}
}
Example #14
File: syncdebug.go Project: edrex-duex/go4
func (m *RWMutexTracker) RLock() {
	m.logOnce.Do(m.startLogger)
	atomic.AddInt32(&m.nwaitr, 1)

	// Catch read-write-read lock. See if somebody (us? via
	// another goroutine?) already has a read lock, and then
	// somebody else is waiting to write, meaning our second read
	// will deadlock.
	if atomic.LoadInt32(&m.nhaver) > 0 && atomic.LoadInt32(&m.nwaitw) > 0 {
		buf := getBuf()
		buf = buf[:runtime.Stack(buf, false)]
		log.Printf("Potential R-W-R deadlock at: %s", buf)
		putBuf(buf)
	}

	m.mu.RLock()
	atomic.AddInt32(&m.nwaitr, -1)
	atomic.AddInt32(&m.nhaver, 1)

	gid := GoroutineID()
	m.hmu.Lock()
	defer m.hmu.Unlock()
	if m.holdr == nil {
		m.holdr = make(map[int64]bool)
	}
	if m.holdr[gid] {
		buf := getBuf()
		buf = buf[:runtime.Stack(buf, false)]
		log.Fatalf("Recursive call to RLock: %s", buf)
	}
	m.holdr[gid] = true
}
Example #15
File: reader.go Project: SohoStudio/nsq
func (q *Reader) ConnectToNSQ(addr string) error {
	if atomic.LoadInt32(&q.stopFlag) == 1 {
		return errors.New("reader stopped")
	}

	if atomic.LoadInt32(&q.runningHandlers) == 0 {
		return errors.New("no handlers")
	}

	_, ok := q.nsqConnections[addr]
	if ok {
		return ErrAlreadyConnected
	}

	log.Printf("[%s] connecting to nsqd", addr)

	connection, err := newNSQConn(addr, q.ReadTimeout, q.WriteTimeout)
	if err != nil {
		return err
	}

	err = connection.sendCommand(Subscribe(q.TopicName, q.ChannelName, q.ShortIdentifier, q.LongIdentifier))
	if err != nil {
		connection.Close()
		return fmt.Errorf("[%s] failed to subscribe to %s:%s - %s", connection, q.TopicName, q.ChannelName, err.Error())
	}

	q.nsqConnections[connection.String()] = connection

	go q.readLoop(connection)
	go q.finishLoop(connection)

	return nil
}
Example #16
/**
 * Returns true if this map contains no key-value mappings.
 */
func (this *ConcurrentMap) IsEmpty() bool {
	segments := this.segments
	/*
	 * If any segment's count is non-zero, the map is not empty.
	 * Check each segment's count for zero, recording the modCounts
	 * and their sum along the way.
	 */
	mc := make([]int32, len(segments))
	var mcsum int32 = 0
	for i := 0; i < len(segments); i++ {
		if atomic.LoadInt32(&segments[i].count) != 0 {
			return false
		} else {
			mc[i] = atomic.LoadInt32(&segments[i].modCount)
			mcsum += mc[i]
		}
	}

	/*
	 * If mcsum isn't zero, modifications were made during the first pass,
	 * so re-check each segment's count and whether its modCount changed,
	 * to avoid ABA problems in which an element was added to one segment
	 * and removed from another during traversal.
	 */
	if mcsum != 0 {
		for i := 0; i < len(segments); i++ {
			if atomic.LoadInt32(&segments[i].count) != 0 || mc[i] != atomic.LoadInt32(&segments[i].modCount) {
				return false
			}
		}
	}
	return true
}
Example #17
func (c *counter) increment() {
	cas := atomic.CompareAndSwapInt32
	old := atomic.LoadInt32(&c.val)
	for swp := cas(&c.val, old, old+1); !swp; swp = cas(&c.val, old, old+1) {
		old = atomic.LoadInt32(&c.val)
	}
}
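The load/compare-and-swap retry loop above is the long form of an atomic increment; atomic.AddInt32 performs the same read-modify-write in a single call. A minimal sketch against the same assumed counter type:

func (c *counter) incrementSimple() {
	atomic.AddInt32(&c.val, 1)
}

An explicit CAS loop only earns its keep when the new value depends on the old one in a way a plain add cannot express (clamping at a bound, conditional reset, and so on).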
Example #18
func (c *Client) publishMessage(subject string) {
	var err error
	var guid string

	c.delayPublish()

	if trace {
		log.Printf("%s: Sending message %d to %s.\n", c.clientID,
			atomic.LoadInt32(&c.publishCount), subject)
	}

	if c.config.PubAsync {
		guid, err = c.sc.PublishAsync(subject, c.payload, c.ah)
	} else {
		err = c.sc.Publish(subject, c.payload)
	}
	if err != nil {
		log.Fatalf("%s: Error publishing: %v.\n", c.clientID, err)
	}

	if trace {
		if guid == "" {
			guid = "N/A"
		}
		log.Printf("%s: Success sending %d (guid:%s) to %s.\n", c.clientID,
			atomic.LoadInt32(&c.publishCount), guid, subject)
	}
	atomic.AddInt32(&c.publishCount, 1)
}
Example #19
File: node.go Project: flike/kingshard
func (n *Node) checkMaster() {
	db := n.Master
	if db == nil {
		golog.Error("Node", "checkMaster", "Master is not alive", 0)
		return
	}

	if err := db.Ping(); err != nil {
		golog.Error("Node", "checkMaster", "Ping", 0, "db.Addr", db.Addr(), "error", err.Error())
	} else {
		if atomic.LoadInt32(&(db.state)) == Down {
			golog.Info("Node", "checkMaster", "Master up", 0, "db.Addr", db.Addr())
			n.UpMaster(db.addr)
		}
		db.SetLastPing()
		if atomic.LoadInt32(&(db.state)) != ManualDown {
			atomic.StoreInt32(&(db.state), Up)
		}
		return
	}

	if int64(n.DownAfterNoAlive) > 0 && time.Now().Unix()-db.GetLastPing() > int64(n.DownAfterNoAlive/time.Second) {
		golog.Info("Node", "checkMaster", "Master down", 0,
			"db.Addr", db.Addr(),
			"Master_down_time", int64(n.DownAfterNoAlive/time.Second))
		n.DownMaster(db.addr, Down)
	}
}
Example #20
func TestPeriodicallyPassiveUnregister(t *testing.T) {
	t.Parallel()

	stopch := make(chan bool)

	timesrc := mkSimTime()
	qt := newPeriodicallyInt(timesrc, 0, 1)
	defer qt.Stop()

	ran := int32(0)
	qt.Register(stopch, func(time.Time) bool {
		if atomic.LoadInt32(&ran) == 0 {
			close(stopch)
		}
		atomic.AddInt32(&ran, 1)
		return true
	})

	for i := 0; i < 5; i++ {
		timesrc.ch <- time.Now()
	}

	qt.Stop()
	<-timesrc.stopped

	// It's possible to get a run or two in before closing, but we
	// shouldn't see more than two because there's only one
	// worker.  That means one check with no worker, and one check
	// while the worker is busy.  Beyond that, either the workers
	// are all busy or we've detected it missing.
	if atomic.LoadInt32(&ran) > 2 {
		t.Fatalf("Ticker did not update expected number of times: %v", ran)
	}
}
Example #21
File: stats.go Project: lucmichalski/fae
func (this *stats) run() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	t1 := time.Now()

	var lastCalls int64
	for _ = range ticker.C {
		if neatStat {
			log.Printf("c:%6d qps:%20s errs:%10s",
				Concurrency,
				gofmt.Comma(this.callOk-lastCalls),
				gofmt.Comma(this.callErrs))
		} else {
			log.Printf("%s c:%d sessions:%s calls:%s qps:%s errs:%s conns:%d go:%d",
				time.Since(t1),
				Concurrency,
				gofmt.Comma(int64(atomic.LoadInt32(&this.sessionN))),
				gofmt.Comma(atomic.LoadInt64(&this.callOk)),
				gofmt.Comma(this.callOk-lastCalls),
				gofmt.Comma(this.callErrs),
				atomic.LoadInt32(&this.concurrentN),
				runtime.NumGoroutine())
		}

		lastCalls = this.callOk
	}
}
Example #22
// @rest GET /v1/status
func (this *manServer) statusHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	log.Info("status %s(%s)", r.RemoteAddr, getHttpRemoteIp(r))

	output := make(map[string]interface{})
	output["options"] = Options
	output["loglevel"] = logLevel.String()
	output["manager"] = manager.Default.Dump()
	pubConns := int(atomic.LoadInt32(&this.gw.pubServer.activeConnN))
	subConns := int(atomic.LoadInt32(&this.gw.subServer.activeConnN))
	output["pubconn"] = strconv.Itoa(pubConns)
	output["subconn"] = strconv.Itoa(subConns)
	output["hh_appends"] = strconv.FormatInt(hh.Default.AppendN(), 10)
	output["hh_delivers"] = strconv.FormatInt(hh.Default.DeliverN(), 10)
	output["goroutines"] = strconv.Itoa(runtime.NumGoroutine())

	var mem runtime.MemStats
	runtime.ReadMemStats(&mem)
	output["heap"] = gofmt.ByteSize(mem.HeapSys).String()
	output["objects"] = gofmt.Comma(int64(mem.HeapObjects))

	b, err := json.MarshalIndent(output, "", "    ")
	if err != nil {
		log.Error("%s(%s) %v", r.RemoteAddr, getHttpRemoteIp(r), err)
	}

	w.Write(b)
}
Example #23
File: client.go Project: clsung/gnatsd
// Lock should be held
func (c *client) initClient() {
	s := c.srv
	c.cid = atomic.AddUint64(&s.gcid, 1)
	c.bw = bufio.NewWriterSize(c.nc, startBufSize)
	c.subs = make(map[string]*subscription)
	c.debug = (atomic.LoadInt32(&debug) != 0)
	c.trace = (atomic.LoadInt32(&trace) != 0)

	// This is a scratch buffer used for processMsg()
	// The msg header starts with "MSG ",
	// in bytes that is [77 83 71 32].
	c.msgb = [msgScratchSize]byte{77, 83, 71, 32}

	// This is to track pending clients that have data to be flushed
	// after we process inbound msgs from our own connection.
	c.pcd = make(map[*client]struct{})

	// snapshot the string version of the connection
	conn := "-"
	if ip, ok := c.nc.(*net.TCPConn); ok {
		addr := ip.RemoteAddr().(*net.TCPAddr)
		conn = fmt.Sprintf("%s:%d", addr.IP, addr.Port)
	}

	switch c.typ {
	case CLIENT:
		c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid)
	case ROUTER:
		c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid)
	}
}
Example #24
File: raft_test.go Project: joelvim/sensit
func TestRaft_ApplyConcurrent_Timeout(t *testing.T) {
	// Make the cluster
	conf := inmemConfig(t)
	conf.HeartbeatTimeout = 80 * time.Millisecond
	conf.ElectionTimeout = 80 * time.Millisecond
	c := MakeCluster(1, t, conf)
	defer c.Close()

	// Wait for a leader
	leader := c.Leader()

	// Enough enqueues should cause at least one timeout...
	var didTimeout int32 = 0
	for i := 0; (i < 500) && (atomic.LoadInt32(&didTimeout) == 0); i++ {
		go func(i int) {
			future := leader.Apply([]byte(fmt.Sprintf("test%d", i)), time.Microsecond)
			if future.Error() == ErrEnqueueTimeout {
				atomic.StoreInt32(&didTimeout, 1)
			}
		}(i)
		// give the leader loop some other things to do in order to increase the odds of a timeout
		if i%5 == 0 {
			leader.VerifyLeader()
		}
	}

	// Wait
	time.Sleep(20 * time.Millisecond)

	// Some should have failed
	if atomic.LoadInt32(&didTimeout) == 0 {
		t.Fatalf("expected a timeout")
	}
}
Example #25
func TestPoll(t *testing.T) {
	invocations := 0
	f := ConditionFunc(func() (bool, error) {
		invocations++
		return true, nil
	})
	fp := fakePoller{max: 1}
	if err := pollInternal(fp.GetWaitFunc(time.Microsecond, time.Microsecond), f); err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if invocations != 1 {
		t.Errorf("Expected exactly one invocation, got %d", invocations)
	}
	used := atomic.LoadInt32(&fp.used)
	if used != 1 {
		t.Errorf("Expected exactly one tick, got %d", used)
	}

	expectedError := errors.New("Expected error")
	f = ConditionFunc(func() (bool, error) {
		return false, expectedError
	})
	fp = fakePoller{max: 1}
	if err := pollInternal(fp.GetWaitFunc(time.Microsecond, time.Microsecond), f); err == nil || err != expectedError {
		t.Fatalf("Expected error %v, got %v", expectedError, err)
	}
	if invocations != 1 {
		t.Errorf("Expected exactly one invocation, got %d", invocations)
	}
	used = atomic.LoadInt32(&fp.used)
	if used != 1 {
		t.Errorf("Expected exactly one tick, got %d", used)
	}
}
Example #26
// Add adds delta, which may be negative, to the WaitGroup counter.
// If the counter becomes zero, all goroutines blocked on Wait are released.
// If the counter goes negative, Add panics.
//
// Note that calls with a positive delta that occur when the counter is zero
// must happen before a Wait. Calls with a negative delta, or calls with a
// positive delta that start when the counter is greater than zero, may
// happen at any time.
// Typically this means the calls to Add should execute before the statement
// creating the goroutine or other event to be waited for.
// See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
	if raceenabled {
		_ = wg.m.state // trigger nil deref early
		if delta < 0 {
			// Synchronize decrements with Wait.
			raceReleaseMerge(unsafe.Pointer(wg))
		}
		raceDisable()
		defer raceEnable()
	}
	v := atomic.AddInt32(&wg.counter, int32(delta))
	if raceenabled {
		if delta > 0 && v == int32(delta) {
			// The first increment must be synchronized with Wait.
			// Need to model this as a read, because there can be
			// several concurrent wg.counter transitions from 0.
			raceRead(unsafe.Pointer(&wg.sema))
		}
	}
	if v < 0 {
		panic("sync: negative WaitGroup counter")
	}
	if v > 0 || atomic.LoadInt32(&wg.waiters) == 0 {
		return
	}
	wg.m.Lock()
	if atomic.LoadInt32(&wg.counter) == 0 {
		for i := int32(0); i < wg.waiters; i++ {
			runtime_Semrelease(wg.sema)
		}
		wg.waiters = 0
		wg.sema = nil
	}
	wg.m.Unlock()
}
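A minimal caller-side sketch of the rule in the comment above (hypothetical usage, assuming the usual sync import): Add executes before the statement that creates the goroutine, and each goroutine pairs it with Done.

var wg sync.WaitGroup
for i := 0; i < 3; i++ {
	wg.Add(1) // before the go statement, while the counter may still be zero
	go func() {
		defer wg.Done()
		// ... work ...
	}()
}
wg.Wait()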
Example #27
// Ensure that the mock's Ticker can be stopped.
func TestMock_Ticker_Stop(t *testing.T) {
	var n int32
	clock := NewMock()

	// Create a channel to increment every second.
	ticker := clock.Ticker(1 * time.Second)
	go func() {
		for {
			<-ticker.C
			atomic.AddInt32(&n, 1)
		}
	}()
	gosched()

	// Move clock forward.
	clock.Add(5 * time.Second)
	if atomic.LoadInt32(&n) != 5 {
		t.Fatalf("expected 5, got: %d", n)
	}

	ticker.Stop()

	// Move clock forward again.
	clock.Add(5 * time.Second)
	if atomic.LoadInt32(&n) != 5 {
		t.Fatalf("still expected 5, got: %d", n)
	}
}
Example #28
// lazyinit sets up all required file descriptors and starts 1+consumersCount
// goroutines. The producer goroutine blocks until file-system notifications
// occur. Then, all events are read from system buffer and sent to consumer
// goroutines which construct valid notify events. This method uses
// Double-Checked Locking optimization.
func (i *inotify) lazyinit() error {
	if atomic.LoadInt32(&i.fd) == invalidDescriptor {
		i.Lock()
		defer i.Unlock()
		if atomic.LoadInt32(&i.fd) == invalidDescriptor {
			fd, err := syscall.InotifyInit()
			if err != nil {
				return err
			}
			i.fd = int32(fd)
			if err = i.epollinit(); err != nil {
				_, _ = i.epollclose(), syscall.Close(int(fd)) // Ignore errors.
				i.fd = invalidDescriptor
				return err
			}
			esch := make(chan []*event)
			go i.loop(esch)
			i.wg.Add(consumersCount)
			for n := 0; n < consumersCount; n++ {
				go i.send(esch)
			}
		}
	}
	return nil
}
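The double-checked locking shape used above, distilled into a standalone sketch (the lazyResource type is hypothetical, assuming the usual sync/atomic imports): the atomic load keeps the common already-initialized path lock-free, and the re-check under the mutex prevents two racing callers from both running the initializer.

type lazyResource struct {
	mu          sync.Mutex
	initialized int32
}

func (l *lazyResource) ensureInit(init func() error) error {
	if atomic.LoadInt32(&l.initialized) == 1 {
		return nil // fast path, no lock taken
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if atomic.LoadInt32(&l.initialized) == 0 { // re-check under the lock
		if err := init(); err != nil {
			return err
		}
		atomic.StoreInt32(&l.initialized, 1)
	}
	return nil
}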
Example #29
File: client_v2.go Project: kzvezdarov/nsq
func (c *ClientV2) Stats() ClientStats {
	c.RLock()
	// TODO: deprecated, remove in 1.0
	name := c.ClientID

	clientId := c.ClientID
	hostname := c.Hostname
	userAgent := c.UserAgent
	c.RUnlock()
	return ClientStats{
		// TODO: deprecated, remove in 1.0
		Name: name,

		Version:       "V2",
		RemoteAddress: c.RemoteAddr().String(),
		ClientID:      clientId,
		Hostname:      hostname,
		UserAgent:     userAgent,
		State:         atomic.LoadInt32(&c.State),
		ReadyCount:    atomic.LoadInt64(&c.ReadyCount),
		InFlightCount: atomic.LoadInt64(&c.InFlightCount),
		MessageCount:  atomic.LoadUint64(&c.MessageCount),
		FinishCount:   atomic.LoadUint64(&c.FinishCount),
		RequeueCount:  atomic.LoadUint64(&c.RequeueCount),
		ConnectTime:   c.ConnectTime.Unix(),
		SampleRate:    atomic.LoadInt32(&c.SampleRate),
		TLS:           atomic.LoadInt32(&c.TLS) == 1,
		Deflate:       atomic.LoadInt32(&c.Deflate) == 1,
		Snappy:        atomic.LoadInt32(&c.Snappy) == 1,
	}
}
Example #30
func (dw *DocumentsWriter) subtractFlushedNumDocs(numFlushed int) {
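	// This load/CAS retry loop is the long form of
	// atomic.AddInt32(&dw.numDocsInRAM, -int32(numFlushed)).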
	oldValue := atomic.LoadInt32(&dw.numDocsInRAM)
	for !atomic.CompareAndSwapInt32(&dw.numDocsInRAM, oldValue, oldValue-int32(numFlushed)) {
		oldValue = atomic.LoadInt32(&dw.numDocsInRAM)
	}
	assert(atomic.LoadInt32(&dw.numDocsInRAM) >= 0)
}