Example #1
func (c *ClientV2) IsReadyForMessages() bool {
	if c.Channel.IsPaused() {
		return false
	}

	readyCount := atomic.LoadInt64(&c.ReadyCount)
	errCnt := atomic.LoadInt64(&c.subErrCnt)
	if errCnt > 3 {
		// slow down this client if it has accumulated errors
		readyCount = 1
	}
	inFlightCount := atomic.LoadInt64(&c.InFlightCount)
	deferCnt := atomic.LoadInt64(&c.DeferredCount)

	if nsqLog.Level() > 1 {
		nsqLog.LogDebugf("[%s] state rdy: %4d inflt: %4d, errCnt: %d",
			c, readyCount, inFlightCount, errCnt)
	}

	// deferred messages should not be counted as in flight
	if inFlightCount >= readyCount+deferCnt || readyCount <= 0 {
		return false
	}
	if deferCnt > readyCount*100 || deferCnt > 1000 {
		nsqLog.Infof("[%s] too many deferred messages: %v rdy: %4d inflt: %4d",
			c, deferCnt, readyCount, inFlightCount)

		return false
	}
	return true
}
Example #2
// ReportMsg satisfies the `pipeline.ReportingPlugin` interface, providing
// sandbox state information for the Heka report and dashboard.
func (s *SandboxDecoder) ReportMsg(msg *message.Message) error {
	if s.sb == nil {
		return fmt.Errorf("Decoder is not running")
	}
	s.reportLock.RLock()
	defer s.reportLock.RUnlock()

	message.NewIntField(msg, "Memory", int(s.sb.Usage(TYPE_MEMORY,
		STAT_CURRENT)), "B")
	message.NewIntField(msg, "MaxMemory", int(s.sb.Usage(TYPE_MEMORY,
		STAT_MAXIMUM)), "B")
	message.NewIntField(msg, "MaxInstructions", int(s.sb.Usage(
		TYPE_INSTRUCTIONS, STAT_MAXIMUM)), "count")
	message.NewIntField(msg, "MaxOutput", int(s.sb.Usage(TYPE_OUTPUT,
		STAT_MAXIMUM)), "B")
	message.NewInt64Field(msg, "ProcessMessageCount", atomic.LoadInt64(&s.processMessageCount), "count")
	message.NewInt64Field(msg, "ProcessMessageFailures", atomic.LoadInt64(&s.processMessageFailures), "count")
	message.NewInt64Field(msg, "ProcessMessageSamples", s.processMessageSamples, "count")

	var tmp int64
	if s.processMessageSamples > 0 {
		tmp = s.processMessageDuration / s.processMessageSamples
	}
	message.NewInt64Field(msg, "ProcessMessageAvgDuration", tmp, "ns")

	return nil
}
Example #3
// logThrottling handles logging of throttled events and has to be started as a
// goroutine. It stops once s.loopStopping is closed.
//
// Logging strategy: Whenever Throttle() is called and returns true, a signal
// is sent to s.throttled. The first time that happens, an Error is logged
// stating that the storage is now throttled. As long as signals continue to
// arrive via s.throttled at least once per minute, nothing else is logged.
// Once no signal has arrived for a minute, an Info is logged stating that the
// storage is no longer throttled. This resets things to the initial state,
// i.e. once a signal arrives again, the Error will be logged again.
func (s *memorySeriesStorage) logThrottling() {
	timer := time.NewTimer(time.Minute)
	timer.Stop()

	// Signal exit of the goroutine. Currently only needed by test code.
	defer close(s.logThrottlingStopped)

	for {
		select {
		case <-s.throttled:
			if !timer.Reset(time.Minute) {
				log.
					With("chunksToPersist", s.getNumChunksToPersist()).
					With("maxChunksToPersist", s.maxChunksToPersist).
					With("memoryChunks", atomic.LoadInt64(&numMemChunks)).
					With("maxToleratedMemChunks", int(float64(s.maxMemoryChunks)*toleranceFactorMemChunks)).
					Error("Storage needs throttling. Scrapes and rule evaluations will be skipped.")
			}
		case <-timer.C:
			log.
				With("chunksToPersist", s.getNumChunksToPersist()).
				With("maxChunksToPersist", s.maxChunksToPersist).
				With("memoryChunks", atomic.LoadInt64(&numMemChunks)).
				With("maxToleratedMemChunks", int(float64(s.maxMemoryChunks)*toleranceFactorMemChunks)).
				Info("Storage does not need throttling anymore.")
		case <-s.loopStopping:
			return
		}
	}
}
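
The comment above says a signal is sent to s.throttled every time Throttle() returns true; for that to be safe, the send must not block when the logging goroutine is busy. A minimal sketch of that producer side, assuming s.throttled is a chan struct{} (the method name signalThrottled is hypothetical, not from the original source):

// signalThrottled is a hypothetical producer-side sketch: it signals the
// logging goroutine without ever blocking the caller.
func (s *memorySeriesStorage) signalThrottled() {
	select {
	case s.throttled <- struct{}{}:
	default: // a signal is already pending; dropping this one is fine
	}
}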
Example #4
func (k *KafkaInput) ReportMsg(msg *message.Message) error {
	message.NewInt64Field(msg, "ProcessMessageCount",
		atomic.LoadInt64(&k.processMessageCount), "count")
	message.NewInt64Field(msg, "ProcessMessageFailures",
		atomic.LoadInt64(&k.processMessageFailures), "count")
	return nil
}
Example #5
// Stats returns current aggregate statistics about an ongoing or completed run.
// It is safe to call from concurrent goroutines.
func (f *Fetcher) Stats() FetcherStats {
	return FetcherStats{
		ItemsRead:    atomic.LoadInt64(&f.itemsRead),
		BytesRead:    atomic.LoadInt64(&f.bytesRead),
		CapacityUsed: float64(atomic.LoadInt64(&f.capacityUsed)) / 10,
	}
}
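
CapacityUsed is divided by 10 because the counter is stored in tenths of a unit: sync/atomic offers no float64 operations, so scaling to an integer keeps updates atomic. The writer side would look something like the sketch below (addCapacity is an assumed name, not from the original source):

// addCapacity is a hypothetical writer-side sketch: consumed capacity is
// accumulated in tenths of a unit so it fits in an int64.
func (f *Fetcher) addCapacity(units float64) {
	atomic.AddInt64(&f.capacityUsed, int64(units*10))
}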
Example #6
// GetServerStatus returns the current tracker server status
func GetServerStatus() ServerStatus {
	// Get system hostname
	hostname, _ := os.Hostname()

	// Get current memory profile
	mem := &runtime.MemStats{}
	runtime.ReadMemStats(mem)

	// Report memory usage in MB
	memMb := float64(mem.Alloc) / 1000 / 1000

	// Build status struct
	status := ServerStatus{
		os.Getpid(),
		hostname,
		runtime.GOOS,
		runtime.GOARCH,
		runtime.NumCPU(),
		runtime.NumGoroutine(),
		memMb,
		atomic.LoadInt64(&Static.HTTP.Total),
		atomic.LoadInt64(&Static.HTTP.Current),
	}

	// Return status struct
	return status
}
Example #7
func statusHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		return
	}
	stat := statusInfo{
		AppStartTime:  startTime,
		Connections:   getLoginCount(),
		Querys:        getQueryCount(false),
		QuerysPerSec:  atomic.LoadInt64(&querysPerSec),
		Recvs:         getRecvCount(false),
		RecvsPerSec:   atomic.LoadInt64(&recvsPerSec),
		ConnConfig:    int64(_Count),
		ConnStepOn:    atomic.LoadInt64(&connIndex),
		Communicating: 1 == atomic.LoadInt32(&allGo),
	}

	body, err := json.MarshalIndent(stat, "", "\t")

	if err != nil {
		glog.Errorf("Failed to Marshal, content: %v, error: %v\n", stat, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	w.Header().Add("Content-Type", "application/json;charset=UTF-8")
	if _, err := w.Write(body); err != nil {
		glog.Errorf("w.Write(\"%s\") error(%v)\n", string(body), err)
	}
}
Example #8
// Ticks returns the count and error ticks for the previous second (c1, e1)
// and the current second (c2, e2)
func (t *QpsTracker) Ticks() (c1, e1, c2, e2 int32) {
	c1 = atomic.LoadInt32(&t.c[int((atomic.LoadInt64(&t.active)+int64(1))%2)])
	e1 = atomic.LoadInt32(&t.e[int((atomic.LoadInt64(&t.active)+int64(1))%2)])
	c2 = atomic.LoadInt32(&t.c[int((atomic.LoadInt64(&t.active))%2)])
	e2 = atomic.LoadInt32(&t.e[int((atomic.LoadInt64(&t.active))%2)])
	return
}
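
Note that t.active is loaded four separate times, so if another goroutine rotates the active slot mid-call, the four values can mix two different windows. A variant that snapshots the index once (a sketch; TicksConsistent is not part of the original type):

// TicksConsistent reads the active index once so all four loads refer to a
// consistent pair of windows (assumes another goroutine rotates t.active).
func (t *QpsTracker) TicksConsistent() (c1, e1, c2, e2 int32) {
	active := int(atomic.LoadInt64(&t.active) % 2)
	prev := (active + 1) % 2
	c1 = atomic.LoadInt32(&t.c[prev])
	e1 = atomic.LoadInt32(&t.e[prev])
	c2 = atomic.LoadInt32(&t.c[active])
	e2 = atomic.LoadInt32(&t.e[active])
	return
}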
Example #9
// gcRunner is for testing only: it runs GC periodically so GC behavior can be
// profiled against a huge map.
func (eng *Engine) gcRunner(interval int) {
	tk := time.NewTicker(time.Duration(interval) * time.Second)
	defer tk.Stop()
	var pre, cnt int64
	var iopre, iocnt int64
	var rwpre, rwcnt int64
	var norepeatgc bool
	for {
		//
		cnt = atomic.LoadInt64(eng.aliveCount)
		iocnt = atomic.LoadInt64(eng.newCount)
		rwcnt = atomic.LoadInt64(eng.rwCount)
		if cnt > 0 {
			if eng.conf.gctest && (pre != cnt || iopre != iocnt || rwpre != rwcnt) {
				fmt.Printf("GC with %d connections.\n", cnt)
				runtime.GC()
				//fmt.Printf("GC done.\n")
				pre, iopre, rwpre = cnt, iocnt, rwcnt
			}
			norepeatgc = false
		} else if !norepeatgc {
			// even if eng.conf.gctest == false, still call FreeOSMemory when connections == 0
			norepeatgc = true
			fmt.Printf("FreeOSMemory with %d connections.\n", cnt)
			// free memory
			debug.FreeOSMemory()
			//fmt.Printf("FreeOSMemory done.\n")
			pre = cnt
		}
		<-tk.C
	}
}
Example #10
func (r *Retryable) tryWithTimeout() error {
	errors := &multierror.Error{}
	errChan := make(chan error, r.maxAttemptTimes)
	timer := time.NewTimer(r.maxDelay)
	defer timer.Stop()
	count := r.maxAttemptTimes

	go func() {
		for atomic.LoadInt64(&count) > 0 {
			atomic.AddInt64(&count, -1)
			errChan <- r.f()
			r.wait()
		}
	}()

	for {
		select {
		case err := <-errChan:
			errors = multierror.Append(errors, err)

			if err == nil {
				atomic.StoreInt64(&count, 0)
				return nil
			}

			if atomic.LoadInt64(&count) <= 0 {
				return errors.ErrorOrNil()
			}
		case <-timer.C:
			return ErrTimeout
		}
	}
}
Example #11
// Accepted checks if query at this moment should be accepted or rejected.
// If accepted, the EMA rate limiter updates its current EMA
func (e *emaRateLimiter) Accepted() bool {
	now := time.Now().UnixNano()
	instWaiting := now - atomic.LoadInt64(&e.timeOfLastRequest)
	for {
		avgWaitingNs := atomic.LoadInt64(&e.avgWaitingNs)
		newavgWaitingNs := int64((1.-wq)*float64(avgWaitingNs) + wq*float64(instWaiting))
		// glog.V(3).Infof("avgWaitingNs %d newavgWaitingNs %d", avgWaitingNs, newavgWaitingNs)
		if newavgWaitingNs < e.targetWaitingNs {
			atomic.AddInt64(&e.requestThrottledCount, 1)
			return false
		}
		// if(pendingRequests.size()>maxPendingQueueLength) {
		// pendingTooLongDiscarded.incrementAndGet();
		// return false;
		// }
		atomic.StoreInt64(&e.timeOfLastRequest, now)
		if !atomic.CompareAndSwapInt64(&e.avgWaitingNs, avgWaitingNs, newavgWaitingNs) {
			continue
		}
		if newavgWaitingNs < e.minWaitingNs {
			e.minWaitingNs = newavgWaitingNs
		}
		atomic.AddInt64(&e.requestAcceptedCount, 1)
		break
	}
	return true
}
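
To make the update rule concrete with illustrative numbers: with weight wq = 0.2, a current average gap avgWaitingNs = 1000ns, and an instantaneous gap instWaiting = 500ns, the new average is 0.8*1000 + 0.2*500 = 900ns; with a targetWaitingNs of, say, 950ns the request is rejected, because the smoothed arrival gap has dropped below the target. The CompareAndSwapInt64 retry makes the read-modify-write of avgWaitingNs safe under concurrency, though minWaitingNs is updated without atomics and can race.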
Example #12
// TryGet returns the next available resource. If none is available and
// capacity has not been reached, it creates a new one using the factory.
// Otherwise it returns nil with no error.
func (self *RoundRobin) TryGet() (resource Resource, err error) {
	self.mu.RLock()
	defer self.mu.RUnlock()
	idleTimeout := time.Duration(atomic.LoadInt64(&self.idleTimeout))
	for {
		select {
		case fw := <-self.resources:
			if idleTimeout > 0 && time.Now().After(fw.timeUsed.Add(idleTimeout)) {
				fw.resource.Close()
				atomic.AddInt64(&self.size, -1)
				continue
			}
			return fw.resource, nil
		default:
			if atomic.LoadInt64(&self.size) >= int64(cap(self.resources)) {
				return nil, nil
			}
			// Prevent thundering herd: optimistically increment
			// size before creating resource
			atomic.AddInt64(&self.size, 1)
			if resource, err = self.factory(); err != nil {
				atomic.AddInt64(&self.size, -1)
			}
			return resource, err
		}
	}
}
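
Because a saturated pool is reported as (nil, nil), callers must check the returned resource as well as the error. A hedged usage sketch (handle, pool.Put, and errPoolExhausted are illustrative assumptions, not from the original source):

var errPoolExhausted = errors.New("pool exhausted") // hypothetical sentinel

func handle(pool *RoundRobin) error {
	resource, err := pool.TryGet()
	if err != nil {
		return err // factory failed
	}
	if resource == nil {
		return errPoolExhausted // pool at capacity and nothing idle
	}
	defer pool.Put(resource) // hypothetical return-to-pool call
	// ... use resource ...
	return nil
}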
Example #13
// GetTok requests a token; it returns true if a token was obtained, false otherwise
func (tbq *TBucketQ) GetTok() bool {
	// attempt to obtain token from bucket
	for {
		if toks := atomic.LoadInt64(&tbq.tokens); toks > 0 {
			if atomic.CompareAndSwapInt64(&tbq.tokens, toks, toks-1) {
				return true
			}
			continue
		}
		break
	}
	// no tokens in the bucket, attempt to get on the queue
	var done bool
	for !done {
		if qcnt := atomic.LoadInt64(&tbq.qcnt); qcnt < tbq.maxq {
			done = atomic.CompareAndSwapInt64(&tbq.qcnt, qcnt, qcnt+1)
		} else {
			// queue is full, return false
			return false
		}
	}
	// on queue, wait until token received
	<-tbq.qch
	return true
}
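
GetTok parks the caller on tbq.qch once it is queued, so a separate goroutine has to hand out tokens: first to queued waiters, then to the bucket. A sketch of such a refiller, assuming tbq.qch is a chan struct{} and exactly one refill goroutine runs (the method and its rate parameter are assumptions, not from the original source):

// refill is a hypothetical single-goroutine refiller: each tick it serves
// queued waiters before banking leftover tokens in the bucket.
func (tbq *TBucketQ) refill(rate int64) {
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for range tick.C {
		n := rate
		for n > 0 && atomic.LoadInt64(&tbq.qcnt) > 0 {
			atomic.AddInt64(&tbq.qcnt, -1)
			tbq.qch <- struct{}{} // wakes one waiter blocked in GetTok
			n--
		}
		if n > 0 {
			atomic.AddInt64(&tbq.tokens, n) // note: no cap check in this sketch
		}
	}
}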
Example #14
func (input *S3OffsetInput) ReportMsg(msg *message.Message) error {
	message.NewInt64Field(msg, "ProcessMessageCount", atomic.LoadInt64(&input.processMessageCount), "count")
	message.NewInt64Field(msg, "ProcessMessageFailures", atomic.LoadInt64(&input.processMessageFailures), "count")
	message.NewInt64Field(msg, "ProcessMessageBytes", atomic.LoadInt64(&input.processMessageBytes), "B")

	return nil
}
Example #15
func getStatus(w http.ResponseWriter, r *http.Request) {
	status := make(map[string]interface{})

	sessionMut.Lock()
	status["numSessions"] = len(sessions)
	sessionMut.Unlock()
	status["numConnections"] = atomic.LoadInt64(&numConnections)
	status["numProxies"] = atomic.LoadInt64(&numProxies)
	status["bytesProxied"] = atomic.LoadInt64(&bytesProxied)
	status["goVersion"] = runtime.Version()
	status["goOS"] = runtime.GOOS
	status["goAarch"] = runtime.GOARCH
	status["goMaxProcs"] = runtime.GOMAXPROCS(-1)
	status["kbps10s1m5m15m30m60m"] = []int64{
		rc.rate(10/10) * 8 / 1000,
		rc.rate(60/10) * 8 / 1000,
		rc.rate(5*60/10) * 8 / 1000,
		rc.rate(15*60/10) * 8 / 1000,
		rc.rate(30*60/10) * 8 / 1000,
		rc.rate(60*60/10) * 8 / 1000,
	}

	bs, err := json.MarshalIndent(status, "", "    ")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.Write(bs)
}
Example #16
func (eng *Engine) stat() {
	tk := time.NewTicker(time.Duration(eng.conf.interval) * time.Second)
	defer tk.Stop()
	var preTs = time.Now()

	var pre, cnt int64
	var iopre, iocnt int64
	var rwpre, rwcnt int64

	for {
		<-tk.C
		//
		cnt = atomic.LoadInt64(eng.aliveCount)
		iocnt = atomic.LoadInt64(eng.newCount)
		rwcnt = atomic.LoadInt64(eng.rwCount)
		if pre != cnt || iopre != iocnt || rwpre != rwcnt {
			//
			esp := time.Since(preTs)
			if esp <= 0 {
				esp = 1
			}
			qps := float64((cnt-pre)*int64(time.Second)) / float64(esp)
			ioqps := float64((iocnt-iopre)*int64(time.Second)) / float64(esp)
			rwqps := float64((rwcnt-rwpre)*int64(time.Second)) / float64(esp)
			fmt.Printf("concurrent %d/%d, esp %v, connection change %f/s, new connection %f/s, read-write %f/s.\n", cnt, eng.conf.maxConcurrent, esp, qps, ioqps, rwqps)
			pre = cnt
			iopre = iocnt
			rwpre = rwcnt
			preTs = time.Now()
		}
	}
}
Example #17
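// work launches a single worker goroutine: it records a new high-water mark
// in MaxWorkers when the current worker count exceeds it, executes tasks from
// w.tasks, and exits when w.kill fires.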
func (w *WorkPool) work(id int) {
	go func() {
		cur := atomic.LoadInt64(&w.currentWorkers)
		maxo := atomic.LoadInt64(&w.MaxWorkers)

		if cur > maxo {
			atomic.StoreInt64(&w.MaxWorkers, cur)
		}

		// record that the pending worker addition is done
		atomic.AddInt64(&w.updatePending, -1)
	worker:
		for {
			select {
			case do := <-w.tasks:
				atomic.AddInt64(&w.activeWork, 1)
				w.execute(id, do)
				atomic.AddInt64(&w.executedWork, 1)
				atomic.AddInt64(&w.activeWork, -1)
			case <-w.kill:
				break worker
			}
		}

		atomic.AddInt64(&w.currentWorkers, -1)
		// record that the pending worker removal is done
		atomic.AddInt64(&w.updatePending, -1)
		w.man.Done()
	}()
}
Example #18
func TestRoutinePool(t *testing.T) {
	tt := testing2.Wrap(t)

	var done int64
	var jobs int64
	pool := New(func(job Job) {
		time.Sleep(1 * time.Millisecond)
		atomic.AddInt64(&done, 1)

	}, 20, 20, 0)
	for i := 0; i < 10; i++ {
		go func(i int) {
			for j := 0; j < 1000; j++ {
				atomic.AddInt64(&jobs, 1)
				tt.True(pool.Do(i*10 + j))
			}
		}(i)
	}

	time.Sleep(2 * time.Second)
	pool.Close()
	time.Sleep(2 * time.Second)
	t.Log(pool.Info())
	t.Log(atomic.LoadInt64(&jobs) - atomic.LoadInt64(&done))

	tt.False(pool.Do(123))
}
Example #19
// maybeQuiesce quiesces and closes the bucket if it has no observers and no
// recent activity. It returns true if the bucket is (now) closed.
func (b *Buckets) maybeQuiesce(name string) bool {
	bucket := b.Get(name)
	if bucket == nil {
		return true
	}

	lb, ok := bucket.(*livebucket)
	if !ok {
		b.Close(name, false)
		return true
	}

	if atomic.LoadInt64(&lb.observers) > 0 {
		return false
	}
	val := atomic.LoadInt64(&lb.activity)
	if val > 0 {
		atomic.AddInt64(&lb.activity, -val)
		return false
	}

	log.Printf("quiescing bucket: %v", name)
	defaultEventManager.sendEvent(name, "state",
		map[string]interface{}{"state": "quiesced"})
	b.Close(name, false)
	return true
}
Example #20
func (s *Service) Stats() ServiceStat {
	s.Lock()
	defer s.Unlock()

	stats := ServiceStat{
		Name:          s.Name,
		Addr:          s.Addr,
		VirtualHosts:  s.VirtualHosts,
		Balance:       s.Balance,
		CheckInterval: s.CheckInterval,
		Fall:          s.Fall,
		Rise:          s.Rise,
		ClientTimeout: int(s.ClientTimeout / time.Millisecond),
		ServerTimeout: int(s.ServerTimeout / time.Millisecond),
		DialTimeout:   int(s.DialTimeout / time.Millisecond),
		HTTPConns:     s.HTTPConns,
		HTTPErrors:    s.HTTPErrors,
		HTTPActive:    atomic.LoadInt64(&s.HTTPActive),
		Rcvd:          atomic.LoadInt64(&s.Rcvd),
		Sent:          atomic.LoadInt64(&s.Sent),
	}

	for _, b := range s.Backends {
		stats.Backends = append(stats.Backends, b.Stats())
		stats.Sent += b.Sent
		stats.Rcvd += b.Rcvd
		stats.Errors += b.Errors
		stats.Conns += b.Conns
		stats.Active += b.Active
	}

	return stats
}
Example #21
// heartbeat periodically pings ngrokd and verifies that pongs keep arriving,
// closing the connection if it goes stale
func (c *ClientModel) heartbeat(lastPongAddr *int64, conn conn.Conn) {
	lastPing := time.Unix(atomic.LoadInt64(lastPongAddr)-1, 0)
	ping := time.NewTicker(pingInterval)
	pongCheck := time.NewTicker(time.Second)

	defer func() {
		conn.Close()
		ping.Stop()
		pongCheck.Stop()
	}()

	for {
		select {
		case <-pongCheck.C:
			lastPong := time.Unix(0, atomic.LoadInt64(lastPongAddr))
			needPong := lastPong.Sub(lastPing) < 0
			pongLatency := time.Since(lastPing)

			if needPong && pongLatency > maxPongLatency {
				c.Info("Last ping: %v, Last pong: %v", lastPing, lastPong)
				c.Info("Connection stale, haven't gotten PongMsg in %d seconds", int(pongLatency.Seconds()))
				return
			}

		case <-ping.C:
			err := msg.WriteMsg(conn, &msg.Ping{})
			if err != nil {
				conn.Debug("Got error %v when writing PingMsg", err)
				return
			}
			lastPing = time.Now()
		}
	}
}
Example #22
func TestListenerLoader(t *testing.T) {
	listenFrom := &config.ListenFrom{
		ListenAddr:           workarounds.GolangDoesnotAllowPointerToStringLiteral("127.0.0.1:0"),
		ServerAcceptDeadline: workarounds.GolangDoesnotAllowPointerToTimeLiteral(time.Millisecond),
	}
	ctx := context.Background()
	forwardTo := dptest.NewBasicSink()
	listener, err := ListenerLoader(ctx, forwardTo, listenFrom)
	assert.NoError(t, err, "Should be ok to make")
	defer listener.Close()
	listeningDialAddress := fmt.Sprintf("127.0.0.1:%d", nettest.TCPPort(listener.psocket))
	assert.Equal(t, numStats, len(listener.Stats()), "Should have the expected number of stats")

	conn, err := net.Dial("tcp", listeningDialAddress)
	assert.NoError(t, err, "Should be ok to make")
	assert.Equal(t, int64(0), listener.stats.invalidDatapoints)
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%s %d %d\n\nINVALIDLINE", "ametric", 2, 2)
	_, err = buf.WriteTo(conn)
	conn.Close()
	assert.Equal(t, nil, err, "Should be ok to write")
	dp := forwardTo.Next()
	assert.Equal(t, "ametric", dp.Metric, "Should be metric")
	i := dp.Value.(datapoint.IntValue).Int()
	assert.Equal(t, int64(2), i, "Should get 2")

	for atomic.LoadInt64(&listener.stats.retriedListenErrors) == 0 {
		time.Sleep(time.Millisecond)
	}
	assert.Equal(t, int64(1), atomic.LoadInt64(&listener.stats.invalidDatapoints))
}
Example #23
// Fill calls fn with the available capacity remaining (capacity-fill) and
// fills the bucket with the number of tokens returned by fn. If the remaining
// capacity is 0, Fill returns 0, nil. If the remaining capacity is < 0, Fill
// returns 0, ErrBucketOverflow.
//
// If fn returns an error, it will be returned by Fill along with the remaining
// capacity.
//
// fn is provided the remaining capacity as a soft maximum; it is allowed to
// use more than the remaining capacity without incurring spillage, though
// this will cause subsequent calls to Fill to return ErrBucketOverflow until
// the next drain.
//
// If the bucket is closed when Fill is called, fn will not be executed and
// Fill will return with an error.
func (b *Bucket) Fill(fn func(int64) (int64, error)) (int64, error) {
	if b.closed() {
		log.Errorf("trafficshape: fill on closed bucket")
		return 0, errFillClosedBucket
	}

	fill := atomic.LoadInt64(&b.fill)
	capacity := atomic.LoadInt64(&b.capacity)

	switch {
	case fill < capacity:
		log.Debugf("trafficshape: under capacity (%d/%d)", fill, capacity)

		n, err := fn(capacity - fill)
		fill = atomic.AddInt64(&b.fill, n)

		return n, err
	case fill > capacity:
		log.Debugf("trafficshape: bucket overflow (%d/%d)", fill, capacity)

		return 0, ErrBucketOverflow
	}

	log.Debugf("trafficshape: bucket full (%d/%d)", fill, capacity)
	return 0, nil
}
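
A minimal caller sketch for Fill: the callback receives the remaining capacity and returns how many tokens it actually consumed (writeShaped, conn, and buf are illustrative, not from the original source):

// writeShaped is a hypothetical caller: it writes at most the bucket's
// remaining capacity from buf to conn and reports the bytes consumed.
func writeShaped(b *Bucket, conn net.Conn, buf []byte) (int64, error) {
	return b.Fill(func(remaining int64) (int64, error) {
		chunk := buf
		if int64(len(chunk)) > remaining {
			chunk = chunk[:remaining]
		}
		written, err := conn.Write(chunk)
		return int64(written), err
	})
}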
Example #24
func TestCounterSinkEvent(t *testing.T) {
	es := []*event.Event{
		{},
		{},
	}
	ctx := context.Background()
	bs := dptest.NewBasicSink()
	count := &Counter{}
	middleSink := NextWrap(count)(bs)
	go func() {
		// Allow time for us to get in the middle of a call
		time.Sleep(time.Millisecond)
		assert.Equal(t, int64(1), atomic.LoadInt64(&count.CallsInFlight), "After a sleep, should be in flight")
		datas := <-bs.EventsChan
		assert.Equal(t, 2, len(datas), "Original datas should be sent")
	}()
	middleSink.AddEvents(ctx, es)
	assert.Equal(t, int64(0), atomic.LoadInt64(&count.CallsInFlight), "Call is finished")
	assert.Equal(t, int64(0), atomic.LoadInt64(&count.TotalProcessErrors), "No errors so far (see above)")
	assert.Equal(t, numTests, len(count.Stats(map[string]string{})), "Just checking stats len()")

	bs.RetError(errors.New("nope"))
	middleSink.AddEvents(ctx, es)
	assert.Equal(t, int64(1), atomic.LoadInt64(&count.TotalProcessErrors), "Error should be sent through")
}
Example #25
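// addChar returns the child node for character c, creating it on first use;
// characters outside 'a'..'z' return the node unchanged. The child pointer is
// installed with CompareAndSwapPointer, and the node's minIdx/maxIdx bounds
// are maintained by the CAS retry loops below.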
func (node *Node) addChar(c *byte, w *wrk) *Node {
	*w.i = int64(*c - 'a')
	if *w.i < 0 || *w.i > 25 {
		return node
	}
	if w.tmp = (atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&(node.ptrs[*w.i]))))); w.tmp == nil {
		atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer((&node.ptrs[*w.i]))), w.tmp, unsafe.Pointer(newNode()))
		w.mn, w.mx = atomic.LoadInt64(&(node.minIdx)), atomic.LoadInt64(&(node.maxIdx))
		for {
			switch {
			case w.mn > *w.i:
				if !atomic.CompareAndSwapInt64(&(node.minIdx), w.mn, *w.i) {
					w.mn = atomic.LoadInt64(&(node.minIdx))
				} else {
					w.mn = *w.i
				}
			case w.mx < *w.i:
				if !atomic.CompareAndSwapInt64(&(node.maxIdx), w.mx, *w.i) {
					w.mx = atomic.LoadInt64(&(node.maxIdx))
				} else {
					w.mx = *w.i
				}
			default:
				return node.ptrs[*w.i]
			}
		}
	}
	return node.ptrs[*w.i]
}
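
The switch above interleaves two CAS retry loops that maintain running minimum and maximum indices. The same pattern in isolation, as a self-contained sketch:

// atomicMin lowers *addr to v if v is smaller, retrying until the CAS lands
// or another goroutine has installed an even smaller value.
func atomicMin(addr *int64, v int64) {
	for {
		cur := atomic.LoadInt64(addr)
		if v >= cur || atomic.CompareAndSwapInt64(addr, cur, v) {
			return
		}
	}
}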
Example #26
func (bt *BenchmarkTester) Run() error {

	if bt.task == nil {
		panic("BenchmarkTester.testing must be assigned!!!")
	}

	for i := 0; i < bt.concurrentLevel; i++ {
		bt.ready.Add(1)
		bt.finish.Add(1)
		go bt.taskLoop(i)
	}
	bt.ready.Wait()
	close(bt.testStart)

	for i := 0; i < bt.duration; i++ {
		now := time.Now().UnixNano()
		count1 := atomic.LoadInt64(&bt.Count)
		time.Sleep(time.Second)
		dlt := time.Now().UnixNano() - now
		count2 := atomic.LoadInt64(&bt.Count)
		if dlt <= 0 {
			dlt = 1
		}
		fmt.Printf("At %dth second : %d ops/s\n", i+1, (count2-count1)*int64(time.Second)/dlt)
	}

	atomic.StoreInt64(&bt.running, 0) //Notify taskLoop break
	bt.finish.Wait()                  //Wait taskLoop break
	bt.result()
	return nil
}
Example #27
// Stats returns stats on total connections, active connections, and total processing time
func (m *RequestCounter) Stats(dimensions map[string]string) []*datapoint.Datapoint {
	ret := []*datapoint.Datapoint{}
	stats := map[string]int64{
		"total_connections": atomic.LoadInt64(&m.TotalConnections),
		"total_time_ns":     atomic.LoadInt64(&m.TotalProcessingTimeNs),
	}
	for k, v := range stats {
		ret = append(
			ret,
			datapoint.New(
				k,
				dimensions,
				datapoint.NewIntValue(v),
				datapoint.Counter,
				time.Now()))
	}
	ret = append(
		ret,
		datapoint.New(
			"active_connections",
			dimensions,
			datapoint.NewIntValue(atomic.LoadInt64(&m.ActiveConnections)),
			datapoint.Gauge,
			time.Now()))
	return ret
}
Example #28
func TestAdd(t *testing.T) {
	a := &TestAggregator{}
	ra := NewRunningAggregator(a, &AggregatorConfig{
		Name: "TestRunningAggregator",
		Filter: Filter{
			NamePass: []string{"*"},
		},
		Period: time.Millisecond * 500,
	})
	assert.NoError(t, ra.Config.Filter.Compile())
	acc := testutil.Accumulator{}
	go ra.Run(&acc, make(chan struct{}))

	m := ra.MakeMetric(
		"RITest",
		map[string]interface{}{"value": int(101)},
		map[string]string{},
		telegraf.Untyped,
		time.Now().Add(time.Millisecond*150),
	)
	assert.False(t, ra.Add(m))

	for {
		time.Sleep(time.Millisecond)
		if atomic.LoadInt64(&a.sum) > 0 {
			break
		}
	}
	assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum))
}
Example #29
func (c *ClientV2) Stats() ClientStats {
	c.RLock()
	// TODO: deprecated, remove in 1.0
	name := c.ClientID

	clientId := c.ClientID
	hostname := c.Hostname
	userAgent := c.UserAgent
	c.RUnlock()
	return ClientStats{
		// TODO: deprecated, remove in 1.0
		Name: name,

		Version:       "V2",
		RemoteAddress: c.RemoteAddr().String(),
		ClientID:      clientId,
		Hostname:      hostname,
		UserAgent:     userAgent,
		State:         atomic.LoadInt32(&c.State),
		ReadyCount:    atomic.LoadInt64(&c.ReadyCount),
		InFlightCount: atomic.LoadInt64(&c.InFlightCount),
		MessageCount:  atomic.LoadUint64(&c.MessageCount),
		FinishCount:   atomic.LoadUint64(&c.FinishCount),
		RequeueCount:  atomic.LoadUint64(&c.RequeueCount),
		ConnectTime:   c.ConnectTime.Unix(),
		SampleRate:    atomic.LoadInt32(&c.SampleRate),
		TLS:           atomic.LoadInt32(&c.TLS) == 1,
		Deflate:       atomic.LoadInt32(&c.Deflate) == 1,
		Snappy:        atomic.LoadInt32(&c.Snappy) == 1,
	}
}
Example #30
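// updateStat wakes once a minute, snapshots the live-stream map, and persists
// each synced stream's online count, cache size, and service size via
// db.Update. For cate 0 the online count is the size of the per-IP map (which
// is reset after reading); for cate 1 it is an atomic counter.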
func updateStat() {
	for {
		time.Sleep(60 * time.Second)

		glog.V(2).Infoln("updateStat")

		save := make(map[int]*stat)

		liveIdMap.RLock()
		for liveId, st := range liveIdMap.m {
			save[liveId] = st
		}
		liveIdMap.RUnlock()

		for liveId, st := range save {
			if atomic.LoadInt64(&st.isSync) == 0 {
				continue
			}
			var online int64
			if st.cate == 0 {
				st.ipLock.Lock()
				online = int64(len(st.ipMap))
				st.ipMap = make(map[string]int, online)
				st.ipLock.Unlock()
			} else if st.cate == 1 {
				online = atomic.LoadInt64(&st.online)
			}
			db.Update(liveId, online,
				atomic.LoadInt64(&st.cacheSize),
				atomic.LoadInt64(&st.serviceSize))
		}
	}
}