Example 1
// NewMovingAverageWithGranularity makes a new MovingAverage
// using the interval and granularity settings provided. Granularity controls
// how accurate the moving average is within an interval, at the expense of
// increased memory usage (two int64 counters for each of the gran buckets).
func NewMovingAverageWithGranularity(interval time.Duration, gran int) *MovingAverage {
	if interval <= time.Duration(0) || gran <= 1 {
		return &MovingAverage{
			sums:   []int64{0},
			counts: []int64{0},
		}
	}

	r := &MovingAverage{
		sums:   make([]int64, gran),
		counts: make([]int64, gran),
	}

	go func() {
		i := 0
		t := time.NewTicker(interval / time.Duration(gran))
		for range t.C {
			i = r.index
			r.index = (r.index + 1) % gran

			// this is "as atomic" as easily possible...
			s := atomic.SwapInt64(&r.sums[r.index], 0)
			n := atomic.SwapInt64(&r.counts[r.index], 0)
			r.otherSums += r.sums[i] - s
			r.otherCounts += r.counts[i] - n
		}
	}()

	return r
}
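The heart of this example is the ticker goroutine: it rotates through a fixed ring of buckets and uses atomic.SwapInt64 to clear the slot it is about to reuse, so concurrent writers never observe a torn reset. A minimal, self-contained sketch of that rotation idea follows; bucketRing, Add and Sum are illustrative names assumed for the sketch, not part of the example's package.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// bucketRing is a hypothetical ring of counters. The slot about to be reused
// is cleared with atomic.SwapInt64 before the index advances to it.
type bucketRing struct {
	buckets []int64
	index   int64
}

func newBucketRing(n int, step time.Duration) *bucketRing {
	r := &bucketRing{buckets: make([]int64, n)}
	go func() {
		for range time.Tick(step) {
			next := (atomic.LoadInt64(&r.index) + 1) % int64(n)
			atomic.SwapInt64(&r.buckets[next], 0) // clear the slot being recycled
			atomic.StoreInt64(&r.index, next)
		}
	}()
	return r
}

// Add records a value into the currently active bucket.
func (r *bucketRing) Add(v int64) {
	atomic.AddInt64(&r.buckets[atomic.LoadInt64(&r.index)], v)
}

// Sum totals all buckets, i.e. roughly the last n*step worth of values.
func (r *bucketRing) Sum() (total int64) {
	for i := range r.buckets {
		total += atomic.LoadInt64(&r.buckets[i])
	}
	return total
}

func main() {
	r := newBucketRing(4, 250*time.Millisecond)
	r.Add(10)
	fmt.Println("sum:", r.Sum())
}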
Example 2
File: runner.go Project: hqr/surge
func (r *NodeRunnerBase) GetStats(reset bool) RunnerStats {
	s := r.rxqueue.GetStats(reset)
	if reset {
		s["txbytes"] = atomic.SwapInt64(&r.txbytestats, 0)
		s["rxbytes"] = atomic.SwapInt64(&r.rxbytestats, 0)
	} else {
		s["txbytes"] = atomic.LoadInt64(&r.txbytestats)
		s["rxbytes"] = atomic.LoadInt64(&r.rxbytestats)
	}
	return s
}
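This GetStats and Examples 6, 8, 13, 15 and 27 all use the same snapshot idiom: when the caller asks for a reset, SwapInt64 returns the accumulated value and zeroes the counter in one atomic step (a separate Load followed by a Store could lose increments that land in between); otherwise LoadInt64 merely peeks. A minimal sketch of the idiom, with counter and snapshot as illustrative names:

package main

import (
	"fmt"
	"sync/atomic"
)

var counter int64

// snapshot returns the current count. With reset, the read and the zeroing
// happen in a single atomic swap, so no concurrent increment is lost.
func snapshot(reset bool) int64 {
	if reset {
		return atomic.SwapInt64(&counter, 0)
	}
	return atomic.LoadInt64(&counter)
}

func main() {
	atomic.AddInt64(&counter, 3)
	fmt.Println(snapshot(false)) // 3, counter keeps its value
	fmt.Println(snapshot(true))  // 3, counter is now zero
	fmt.Println(snapshot(false)) // 0
}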
Example 3
File: lfs.go Project: allmad/madq
func init() {
	go func() {
		for _ = range time.Tick(time.Second) {
			a := atomic.SwapInt64(&a, 0)
			b := atomic.SwapInt64(&b, 0)
			c := atomic.SwapInt64(&c, 0)
			if b != 0 {
				println(time.Duration(a/b).String(), c/b, b)
			}
		}
	}()
}
Example 4
func (c *Counter) Flush(f FlusherSink) {
	val := atomic.SwapInt64(&c.val, 0)
	if val != 0 {
		n := Numeric64{Type: Int64, value: uint64(val)}
		f.EmitNumeric64(c.name, MeterCounter, n)
	}
}
Example 5
func requestCountReporter() {
	for {
		time.Sleep(time.Second)
		cur := atomic.SwapInt64(&counter, int64(0))
		log.Printf("%v requests", cur)
	}
}
Example 6
func getQueryCount(reset bool) int64 {
	if reset {
		return atomic.SwapInt64(&queryCount, 0)
	} else {
		return atomic.LoadInt64(&queryCount)
	}
}
Example 7
func (conn *ThrottledConn) Write(buffer []byte) (int, error) {

	// See comments in Read.

	conn.writeLock.Lock()
	defer conn.writeLock.Unlock()

	if atomic.LoadInt64(&conn.writeUnthrottledBytes) > 0 {
		n, err := conn.Conn.Write(buffer)
		atomic.AddInt64(&conn.writeUnthrottledBytes, -int64(n))
		return n, err
	}

	if atomic.LoadInt32(&conn.closeAfterExhausted) == 1 {
		conn.Conn.Close()
		return 0, errors.New("throttled conn exhausted")
	}

	rate := atomic.SwapInt64(&conn.writeBytesPerSecond, -1)

	if rate != -1 {
		if rate == 0 {
			conn.throttledWriter = conn.Conn
		} else {
			conn.throttledWriter = ratelimit.Writer(
				conn.Conn,
				ratelimit.NewBucketWithRate(float64(rate), rate))
		}
	}

	return conn.throttledWriter.Write(buffer)
}
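The Write method above uses SwapInt64 with a sentinel: the pending writeBytesPerSecond is swapped out and replaced by -1, so the rate limiter is reconfigured at most once per published change, even with many concurrent writers. A self-contained sketch of this consume-once idiom, with pendingRate, setRate and applyIfPending as illustrative names:

package main

import (
	"fmt"
	"sync/atomic"
)

const noChange = -1

// pendingRate holds a newly posted rate, or noChange when there is nothing
// left to apply.
var pendingRate int64 = noChange

// setRate publishes a new rate for the next writer to pick up.
func setRate(r int64) { atomic.StoreInt64(&pendingRate, r) }

// applyIfPending consumes a pending rate exactly once: the swap reads the
// value and marks it handled in a single atomic step.
func applyIfPending(apply func(int64)) {
	if r := atomic.SwapInt64(&pendingRate, noChange); r != noChange {
		apply(r)
	}
}

func main() {
	setRate(1024)
	applyIfPending(func(r int64) { fmt.Println("applying rate", r) }) // applies 1024
	applyIfPending(func(r int64) { fmt.Println("applying rate", r) }) // no-op
}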
Example 8
func getRecvCount(reset bool) int64 {
	if reset {
		return atomic.SwapInt64(&recvCount, 0)
	} else {
		return atomic.LoadInt64(&recvCount)
	}
}
Example 9
func cycleHandle() {
	var t1, t2 int64
	t2 = time.Now().UnixNano()

	for {
		time.Sleep(1 * time.Minute)

		t1 = t2
		t2 = time.Now().UnixNano()
		infoArray := proxy.HandleAccountInfo(t2 - t1)

		b, _ := json.Marshal(&infoArray)
		resp, err := client.Post("https://speedmao.com/userinfo", "application/json", bytes.NewReader(b))
		if err != nil {
			log.Println(err)
			continue
		}

		resp.Body.Close()
		if resp.StatusCode == 200 {
			for _, info := range infoArray {
				atomic.SwapInt64(&info.Transfer, 0)
			}
		} else {
			log.Println("post user info fail:", resp.Status)
		}
	}
}
Example 10
func (t *Terminal) Close() error {
	if atomic.SwapInt64(&t.closed, 1) != 0 {
		return nil
	}
	t.stopChan <- struct{}{}
	t.wg.Wait()
	return Restore(syscall.Stdin, t.state)
}
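Here SwapInt64(&t.closed, 1) acts as a once-flag: only the first caller sees the previous value 0 and runs the shutdown, which makes Close safe to call from several goroutines (Example 12 below is the same pattern with a different cleanup step). A minimal sketch, with resource as an illustrative type; in new code, sync.Once or atomic.CompareAndSwapInt64 would express the same intent.

package main

import (
	"fmt"
	"sync/atomic"
)

type resource struct {
	closed int64
}

// Close is idempotent: the swap returns the previous flag, so only the first
// caller, which observes 0, performs the teardown.
func (r *resource) Close() error {
	if atomic.SwapInt64(&r.closed, 1) != 0 {
		return nil // already closed by another caller
	}
	fmt.Println("releasing resources")
	return nil
}

func main() {
	r := &resource{}
	r.Close() // releases
	r.Close() // no-op
}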
Example 11
// report number of incoming zmq messages every second
func statsReporter() {
	for !interrupted {
		time.Sleep(1 * time.Second)
		msg_count := atomic.SwapInt64(&processed, 0)
		conn_count := atomic.LoadInt64(&ws_connections)
		logInfo("processed: %d, ws connections: %d", msg_count, conn_count)
	}
}
Example 12
func (t *Terminal) Close() error {
	if atomic.SwapInt64(&t.closed, 1) != 0 {
		return nil
	}
	t.stopChan <- struct{}{}
	t.wg.Wait()
	return t.ExitRawMode()
}
Example 13
File: node.go Project: hqr/surge
//
// stats
//
// GetStats implements the corresponding NodeRunnerInterface method for the
// GatewayUch common counters. Some of them are inc-ed inside this module,
// others - elsewhere, for instance in the concrete gateway's instance
// that embeds this GatewayUch
// The caller (such as, e.g., stats.go) will typically collect all the
// atomic counters and reset them to zeros to collect new values with the
// next iteration.
func (r *GatewayCommon) GetStats(reset bool) RunnerStats {
	var s = make(map[string]int64, 8)
	if reset {
		s["txbytes"] = atomic.SwapInt64(&r.txbytestats, 0)
		s["rxbytes"] = atomic.SwapInt64(&r.rxbytestats, 0)
		s["tio"] = atomic.SwapInt64(&r.tiostats, 0)
		s["chunk"] = atomic.SwapInt64(&r.chunkstats, 0)
		s["replica"] = atomic.SwapInt64(&r.replicastats, 0)
	} else {
		s["txbytes"] = atomic.LoadInt64(&r.txbytestats)
		s["rxbytes"] = atomic.LoadInt64(&r.rxbytestats)
		s["tio"] = atomic.LoadInt64(&r.tiostats)
		s["chunk"] = atomic.LoadInt64(&r.chunkstats)
		s["replica"] = atomic.LoadInt64(&r.replicastats)
	}
	return s
}
Example 14
func (r *RateLimit) Reset() {
	r.Lock()
	if atomic.SwapInt64(&r.waiting, 0) != 0 {
		r.wg.Done()
	}
	atomic.StoreInt64(&r.written, 0)
	r.Unlock()
}
Example 15
File: m4.go Project: hqr/surge
func (r *gatewayFour) GetStats(reset bool) RunnerStats {
	s := r.NodeRunnerBase.GetStats(true)
	if reset {
		s["tio"] = atomic.SwapInt64(&r.tiostats, 0)
	} else {
		s["tio"] = atomic.LoadInt64(&r.tiostats)
	}
	return s
}
Example 16
// Snapshot returns the number of values per second since the last snapshot,
// and resets the count to zero.
func (r *Rate) Snapshot() int64 {
	r.m.Lock()
	defer r.m.Unlock()
	now := time.Now().UnixNano()
	t := atomic.SwapInt64(&r.time, now)
	c := r.count.Snapshot()
	s := float64(c) / rateScale / float64(now-t)
	return Ceil(s * Scale(r.unit, time.Second))
}
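Note the swap on the timestamp: storing now and reading the previous time happen in one step, so the elapsed window used in the rate calculation is exact even if two snapshots race. A self-contained sketch of an events-per-second meter built on the same idea; eventMeter, Mark and the field names are assumptions for the sketch, not the API of the package above.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// eventMeter counts events and reports the rate since the previous Snapshot.
type eventMeter struct {
	count int64
	last  int64 // UnixNano of the previous snapshot
}

func newEventMeter() *eventMeter {
	return &eventMeter{last: time.Now().UnixNano()}
}

func (m *eventMeter) Mark() { atomic.AddInt64(&m.count, 1) }

// Snapshot returns events per second since the last call and resets the count.
func (m *eventMeter) Snapshot() float64 {
	now := time.Now().UnixNano()
	prev := atomic.SwapInt64(&m.last, now) // read old timestamp, store new one
	n := atomic.SwapInt64(&m.count, 0)
	return float64(n) / (float64(now-prev) / float64(time.Second))
}

func main() {
	m := newEventMeter()
	m.Mark()
	m.Mark()
	time.Sleep(100 * time.Millisecond)
	fmt.Printf("%.1f events/sec\n", m.Snapshot())
}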
Example 17
func runReporter(t time.Duration) {
	ticker := time.Tick(t)
	for _ = range ticker {
		c := atomic.SwapInt64(&Counter, 0)
		if c > 0 {
			log.Println("count:", c, "cps:", float64(c)/float64(t/time.Second))
		}
	}
}
Example 18
// Snapshot returns sample as a sorted array.
func (r *Reservoir) Snapshot() []int64 {
	r.m.Lock()
	defer r.m.Unlock()
	s := atomic.SwapInt64(&r.size, 0)
	v := make([]int64, min(int(s), len(r.values)))
	copy(v, r.values)
	r.values = make([]int64, cap(r.values))
	sorted(v)
	return v
}
Example 19
func (prod *ElasticSearch) updateMetrics() {
	duration := time.Since(prod.lastMetricUpdate)
	prod.lastMetricUpdate = time.Now()

	for index, counter := range prod.counters {
		count := atomic.SwapInt64(counter, 0)
		shared.Metric.Add(elasticMetricMessages+index, count)
		shared.Metric.SetF(elasticMetricMessagesSec+index, float64(count)/duration.Seconds())
	}
}
Example 20
func (p *BaseComponent) startReceivers() {
	atomic.SwapInt64(&p.inboxMessage, 0)
	atomic.SwapInt64(&p.inboxError, 0)

	wg := sync.WaitGroup{}
	for _, typedReceivers := range p.receivers {
		for _, receiver := range typedReceivers {
			wg.Add(1)
			go func(receiver MessageReceiver) {
				defer wg.Done()
				receiver.Start()
				for !receiver.IsRunning() {
					time.Sleep(time.Millisecond * 100)
				}
				EventCenter.PushEvent(EVENT_RECEIVER_STARTED, receiver.Metadata())
			}(receiver)
		}
	}
	wg.Wait()
}
Example 21
func (p *CounterMetric) Get() []dtests.Metric {
	now := time.Now()
	r := atomic.SwapInt64(&okcount, 0)
	e := atomic.SwapInt64(&errcount, 0)
	var times int64Slice

	throughput := float64(r*1000000000) / float64(now.Sub(p.time).Nanoseconds())
	p.time = now

	timeSlideLock.Lock()
	times = int64Slice(make([]int64, len(timesSlide)))
	copy(times, timesSlide)
	timesSlide = make([]int64, 0)
	timeSlideLock.Unlock()

	metrics := make([]dtests.Metric, 4)
	metrics[0] = dtests.Metric{Name: "throughput", Value: throughput, Time: now}
	metrics[3] = dtests.Metric{Name: "errors", Value: e, Time: now}
	var p95 int64 = 0
	var avr float64 = 0

	sort.Sort(times)
	size := len(times)
	if size > 0 {
		pos := int(0.95 * float64(size+1))
		if pos < 1 {
			p95 = times[0]
		} else if pos >= size {
			p95 = times[size-1]
		} else {
			p95 = times[pos]
		}
		for _, t := range times {
			avr += float64(t)
		}
		avr = avr / float64(size)
	}
	metrics[1] = dtests.Metric{Name: "AvrLatency", Value: avr, Time: now}
	metrics[2] = dtests.Metric{Name: "95Latency", Value: p95, Time: now}
	return metrics
}
Example 22
File: gauge.go Project: ACPK/atc
func (c *Gauge) Max() int {
	cur := atomic.LoadInt64(&c.cur)
	max := atomic.SwapInt64(&c.max, -1)

	if max == -1 {
		// no call to .Inc has occurred since last call to .Max;
		// highest value must be the current value
		return int(cur)
	}

	return int(max)
}
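The -1 returned by the swap means no update was recorded since the previous read, so the current value is the maximum by definition. The matching update path is not shown in the example; the sketch below is a hypothetical reconstruction of a reset-on-read maximum gauge, not the actual Inc from ACPK/atc.

package main

import (
	"fmt"
	"sync/atomic"
)

// resetMaxGauge tracks a current value and the maximum seen since the last
// Max call; -1 marks "nothing recorded since the last read".
type resetMaxGauge struct {
	cur int64
	max int64
}

func (g *resetMaxGauge) Inc() {
	cur := atomic.AddInt64(&g.cur, 1)
	for {
		max := atomic.LoadInt64(&g.max)
		if cur <= max && max != -1 {
			return
		}
		if atomic.CompareAndSwapInt64(&g.max, max, cur) {
			return // raised the recorded maximum
		}
	}
}

// Max returns the highest value seen since the previous call, then resets.
func (g *resetMaxGauge) Max() int64 {
	cur := atomic.LoadInt64(&g.cur)
	max := atomic.SwapInt64(&g.max, -1)
	if max == -1 {
		return cur // no Inc since the last read
	}
	return max
}

func main() {
	g := &resetMaxGauge{max: -1}
	g.Inc()
	g.Inc()
	fmt.Println(g.Max()) // 2
	fmt.Println(g.Max()) // still 2: the current value, nothing new recorded
}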
Example 23
// Close closes the connections.
func (h *GoHost) Close() {
	log.Printf("closing gohost: %p", h)
	h.dir.Close()

	h.PeerLock.Lock()
	for _, c := range h.peers {
		c.Close()
	}
	h.PeerLock.Unlock()

	atomic.SwapInt64(&h.closed, 1)
}
Example 24
// refresh updates the current store.  It double checks expired under lock with the assumption
// of optimistic concurrency with the other functions.
func (c *CachingMinionRegistry) refresh(force bool) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	if force || c.expired() {
		var err error
		c.minions, err = c.delegate.List()
		time := c.clock.Now()
		atomic.SwapInt64(&c.lastUpdate, time.Unix())
		return err
	}
	return nil
}
Example 25
// refresh updates the current store.  It double checks expired under lock with the assumption
// of optimistic concurrency with the other functions.
func (r *CachingRegistry) refresh(force bool) error {
	r.lock.Lock()
	defer r.lock.Unlock()
	if force || r.expired() {
		var err error
		r.minions, err = r.delegate.List()
		time := r.clock.Now()
		atomic.SwapInt64(&r.lastUpdate, time.Unix())
		return err
	}
	return nil
}
Example 26
// Get a snapshot of this route's current stats
// Getting a snapshot resets all statistics
func (s *RouteStats) Snapshot() Snapshot {
	hits := atomic.SwapInt64(&s.hits, 0)
	s.snapshot["2xx"] = atomic.SwapInt64(&s.oks, 0)
	s.snapshot["4xx"] = atomic.SwapInt64(&s.errors, 0)
	s.snapshot["5xx"] = atomic.SwapInt64(&s.failures, 0)
	s.snapshot["slow"] = atomic.SwapInt64(&s.slow, 0)
	s.snapshot["cached"] = atomic.SwapInt64(&s.cached, 0)
	s.snapshot["hits"] = hits

	s.sampleLock.Lock()
	sampleCount := int(s.sampleCount)
	s.sampleCount = 0
	s.samplesA, s.samplesB = s.samplesB, s.samplesA
	s.sampleLock.Unlock()

	if sampleCount > 0 {
		samples := s.samplesB[:sampleCount]
		sort.Ints(samples)
		for key, value := range STATS_PERCENTILES {
			s.snapshot[key] = percentile(samples, value, sampleCount)
		}
	} else {
		for key, _ := range STATS_PERCENTILES {
			s.snapshot[key] = 0
		}
	}
	return s.snapshot
}
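This snapshot mixes two techniques: SwapInt64 drains the plain counters, while the latency samples are double-buffered, with the active slice swapped out under a short lock so that recording can continue while the old buffer is sorted for percentiles. A minimal sketch of just the double-buffer part, with sampleBuffer as an illustrative type:

package main

import (
	"fmt"
	"sort"
	"sync"
)

// sampleBuffer records values into an active slice and lets a reader swap the
// whole slice out, so sorting never happens while holding up recorders.
type sampleBuffer struct {
	mu      sync.Mutex
	active  []int64
	standby []int64
}

func newSampleBuffer(capacity int) *sampleBuffer {
	return &sampleBuffer{
		active:  make([]int64, 0, capacity),
		standby: make([]int64, 0, capacity),
	}
}

// Record appends a sample, dropping it if the buffer is full.
func (b *sampleBuffer) Record(v int64) {
	b.mu.Lock()
	if len(b.active) < cap(b.active) {
		b.active = append(b.active, v)
	}
	b.mu.Unlock()
}

// Drain swaps the buffers and returns the samples collected so far, sorted.
func (b *sampleBuffer) Drain() []int64 {
	b.mu.Lock()
	samples := b.active
	b.active = b.standby[:0]
	b.standby = samples
	b.mu.Unlock()

	sort.Slice(samples, func(i, j int) bool { return samples[i] < samples[j] })
	return samples
}

func main() {
	b := newSampleBuffer(8)
	for _, v := range []int64{30, 10, 20} {
		b.Record(v)
	}
	fmt.Println(b.Drain()) // [10 20 30]
}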
Example 27
File: node.go Project: hqr/surge
//
// stats
//
// GetStats implements the corresponding NodeRunnerInterface method for the
// ServerUch common counters. Some of them may be inc-ed inside this module,
// others - elsewhere, for instance in the concrete server's instance
// that embeds this GatewayUch
// The caller (such as, e.g., stats.go) will typically collect all the
// atomic counters and reset them to zeros to collect new values with the
// next iteration.
func (r *ServerUch) GetStats(reset bool) RunnerStats {
	var a, d, w int64
	var s = make(map[string]int64, 8)
	elapsed := int64(Now.Sub(time.Time{})) // run time

	num, _ := r.disk.queueDepth(DqdBuffers)
	s["disk-frame-bufs"] = int64(num)

	if reset {
		s["txbytes"] = atomic.SwapInt64(&r.txbytestats, 0)
		s["rxbytes"] = atomic.SwapInt64(&r.rxbytestats, 0)
		r.timeResetStats = Now
	} else {
		s["txbytes"] = atomic.LoadInt64(&r.txbytestats)
		s["rxbytes"] = atomic.LoadInt64(&r.rxbytestats)
	}
	// cumulative
	d = atomic.LoadInt64(&r.rxbusydata)
	a = atomic.LoadInt64(&r.rxbusydatactrl)
	w = atomic.LoadInt64(&r.diskbusy)

	if d >= elapsed {
		s["rxbusydata"] = 100
	} else {
		s["rxbusydata"] = (d + 5) * 100 / elapsed
	}
	if a >= elapsed {
		s["rxbusy"] = 100
	} else {
		s["rxbusy"] = (a + 5) * 100 / elapsed
	}
	if w >= elapsed {
		s["diskbusy"] = 100
	} else {
		s["diskbusy"] = (w + 5) * 100 / elapsed
	}
	return s
}
Example 28
func ReportMetrics(clusterID string, kubeClient *kube.Client) {
	metrics.ID = clusterID
	metrics.PodID = uuid.NewWithoutDashes()
	metrics.Version = version.PrettyPrintVersion(version.Version)
	for {
		write := atomic.SwapInt64(&modified, 0)
		if write == 1 {
			externalMetrics(kubeClient, metrics)
			protolion.Info(metrics)
			reportSegment(metrics)
		}
		<-time.After(15 * time.Second)
	}
}
Example 29
func (m *Miner) getJob(s *StratumServer) *JobReplyData {
	t := s.currentBlockTemplate()
	height := atomic.SwapInt64(&m.LastBlockHeight, t.Height)

	if height == t.Height {
		return &JobReplyData{}
	}

	blob, extraNonce := t.nextBlob()
	job := &Job{Id: util.Random(), ExtraNonce: extraNonce, Height: t.Height, Difficulty: m.Difficulty}
	job.Submissions = make(map[string]bool)
	m.pushJob(job)
	reply := &JobReplyData{JobId: job.Id, Blob: blob, Target: m.TargetHex}
	return reply
}
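getJob swaps the new height in and compares it to the returned previous height, so recording the latest value and asking whether it changed are a single atomic step (a duplicate request for the same height gets an empty reply). A minimal sketch of this detect-change idiom, with lastSeen and observe as illustrative names:

package main

import (
	"fmt"
	"sync/atomic"
)

var lastSeen int64

// observe records the latest value and reports whether it differs from the
// previously recorded one; the swap makes check-and-update atomic.
func observe(v int64) (changed bool) {
	return atomic.SwapInt64(&lastSeen, v) != v
}

func main() {
	fmt.Println(observe(7)) // true: 0 -> 7
	fmt.Println(observe(7)) // false: unchanged
	fmt.Println(observe(8)) // true: 7 -> 8
}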
Example 30
// Set updates the value from a string representation in a thread-safe manner.
// This operation may return an error if the provided `input` doesn't parse, or the resulting value doesn't pass an
// optional validator.
// If a notifier is set on the value, it will be invoked in a separate go-routine.
func (d *DynInt64Value) Set(input string) error {
	val, err := strconv.ParseInt(input, 0, 64)
	if err != nil {
		return err
	}
	if d.validator != nil {
		if err := d.validator(val); err != nil {
			return err
		}
	}
	oldVal := atomic.SwapInt64(d.ptr, val)
	if d.notifier != nil {
		go d.notifier(oldVal, val)
	}
	return nil
}
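The swap's return value is what lets the notifier see both the old and the new setting without any extra locking. A self-contained sketch of the same update-and-notify pattern; dynInt is an illustrative type, not the flag library's API.

package main

import (
	"fmt"
	"strconv"
	"sync/atomic"
)

// dynInt is a dynamically updatable int64 with an optional change notifier.
type dynInt struct {
	val      int64
	notifier func(oldVal, newVal int64)
}

// Set parses the input and swaps it in; the swap returns the previous value,
// which is handed to the notifier together with the new one.
func (d *dynInt) Set(input string) error {
	v, err := strconv.ParseInt(input, 0, 64)
	if err != nil {
		return err
	}
	old := atomic.SwapInt64(&d.val, v)
	if d.notifier != nil {
		d.notifier(old, v)
	}
	return nil
}

func main() {
	d := &dynInt{notifier: func(o, n int64) { fmt.Printf("changed %d -> %d\n", o, n) }}
	_ = d.Set("42")  // changed 0 -> 42
	_ = d.Set("100") // changed 42 -> 100
}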