Example #1
func (t *batchTx) commit(stop bool) {
	var err error
	// commit the last tx
	if t.tx != nil {
		if t.pending == 0 && !stop {
			t.backend.mu.RLock()
			defer t.backend.mu.RUnlock()
			atomic.StoreInt64(&t.backend.size, t.tx.Size())
			return
		}
		err = t.tx.Commit()
		atomic.AddInt64(&t.backend.commits, 1)

		t.pending = 0
		if err != nil {
			log.Fatalf("mvcc: cannot commit tx (%s)", err)
		}
	}

	if stop {
		return
	}

	t.backend.mu.RLock()
	defer t.backend.mu.RUnlock()
	// begin a new tx
	t.tx, err = t.backend.db.Begin(true)
	if err != nil {
		log.Fatalf("mvcc: cannot begin tx (%s)", err)
	}
	atomic.StoreInt64(&t.backend.size, t.tx.Size())
}
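For context, a minimal sketch of the read side these stores pair with. The Size accessor below is an assumption about how callers would observe backend.size without taking the batch-transaction lock; only the size field comes from the example above.

// Hypothetical accessor: pairs with the atomic.StoreInt64(&t.backend.size, ...)
// calls in commit, so callers can read the size without locking.
func (b *backend) Size() int64 {
	return atomic.LoadInt64(&b.size)
}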
Example #2
// SetLimits modifies the rate limits of an existing
// ThrottledConn. It is safe to call SetLimits while
// other goroutines are calling Read/Write. This function
// will not block, and the new rate limits will be
// applied within Read/Write, but not necessarily until
// some further I/O at the previous rates.
func (conn *ThrottledConn) SetLimits(limits RateLimits) {

	// Using atomic instead of mutex to avoid blocking
	// this function on throttled I/O in an ongoing
	// read or write. Precise synchronized application
	// of the rate limit values is not required.

	// Negative rates are invalid, and -1 is a special
	// value used to signal the throttling initialized
	// state. Silently normalize negative values to 0.
	rate := limits.ReadBytesPerSecond
	if rate < 0 {
		rate = 0
	}
	atomic.StoreInt64(&conn.readBytesPerSecond, rate)
	atomic.StoreInt64(&conn.readUnthrottledBytes, limits.ReadUnthrottledBytes)

	rate = limits.WriteBytesPerSecond
	if rate < 0 {
		rate = 0
	}
	atomic.StoreInt64(&conn.writeBytesPerSecond, rate)
	atomic.StoreInt64(&conn.writeUnthrottledBytes, limits.WriteUnthrottledBytes)

	closeAfterExhausted := int32(0)
	if limits.CloseAfterExhausted {
		closeAfterExhausted = 1
	}
	atomic.StoreInt32(&conn.closeAfterExhausted, closeAfterExhausted)
}
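As the comments note, the new limits are picked up inside Read/Write via atomic loads rather than a mutex. A minimal sketch of that read side, assuming only the field names shown above; the helper itself is illustrative and not part of the original type.

// Hypothetical helper: these loads pair with the StoreInt64 calls in SetLimits,
// so a concurrent SetLimits never blocks an in-flight Read or Write.
func (conn *ThrottledConn) currentReadLimits() (bytesPerSecond, unthrottledBytes int64) {
	bytesPerSecond = atomic.LoadInt64(&conn.readBytesPerSecond)
	unthrottledBytes = atomic.LoadInt64(&conn.readUnthrottledBytes)
	return
}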
Example #3
// cronPrintCurrentStatus logs the regular status check banner
func cronPrintCurrentStatus() {
	// Grab server status
	stat := common.GetServerStatus()
	if stat == (common.ServerStatus{}) {
		log.Println("Could not print current status")
		return
	}

	// Regular status banner
	log.Printf("status - [goroutines: %d] [memory: %02.3f MB]", stat.NumGoroutine, stat.MemoryMB)

	// HTTP stats
	if common.Static.Config.HTTP {
		log.Printf("  http - [current: %d] [total: %d]", stat.HTTP.Current, stat.HTTP.Total)

		// Reset current HTTP counter
		atomic.StoreInt64(&common.Static.HTTP.Current, 0)
	}

	// UDP stats
	if common.Static.Config.UDP {
		log.Printf("   udp - [current: %d] [total: %d]", stat.UDP.Current, stat.UDP.Total)

		// Reset current UDP counter
		atomic.StoreInt64(&common.Static.UDP.Current, 0)
	}
}
Example #4
// newSegmentFile will close the current segment file and open a new one, updating bookkeeping info on the log
func (l *WAL) newSegmentFile() error {
	l.currentSegmentID++
	if l.currentSegmentWriter != nil {
		if err := l.currentSegmentWriter.close(); err != nil {
			return err
		}
		atomic.StoreInt64(&l.stats.OldBytes, int64(l.currentSegmentWriter.size))
	}

	fileName := filepath.Join(l.path, fmt.Sprintf("%s%05d.%s", WALFilePrefix, l.currentSegmentID, WALFileExtension))
	fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return err
	}
	l.currentSegmentWriter = NewWALSegmentWriter(fd)

	if stat, err := fd.Stat(); err == nil {
		l.lastWriteTime = stat.ModTime()
	}

	// Reset the current segment size stat
	atomic.StoreInt64(&l.stats.CurrentBytes, 0)

	return nil
}
Example #5
func (pc *KafkaPartitionConsumer) initOffset() bool {
	log.Infof("Initializing offset for topic %s, partition %d", pc.topic, pc.partition)
	for {
		offset, err := pc.client.GetOffset(pc.config.Group, pc.topic, pc.partition)
		if err != nil {
			if err == siesta.ErrUnknownTopicOrPartition {
				return pc.resetOffset()
			}
			log.Warning("Cannot get offset for group %s, topic %s, partition %d: %s\n", pc.config.Group, pc.topic, pc.partition, err)
			select {
			case <-pc.stop:
				{
					log.Warning("PartitionConsumer told to stop trying to get offset, returning")
					return false
				}
			default:
			}
		} else {
			validOffset := offset + 1
			log.Infof("Initialized offset to %d", validOffset)
			atomic.StoreInt64(&pc.offset, validOffset)
			atomic.StoreInt64(&pc.highwaterMarkOffset, validOffset)
			return true
		}
		time.Sleep(pc.config.InitOffsetBackoff)
	}
}
Example #6
func (d *Datum) stamp(timestamp time.Time) {
	if timestamp.IsZero() {
		atomic.StoreInt64(&d.Time, time.Now().UTC().UnixNano())
	} else {
		atomic.StoreInt64(&d.Time, timestamp.UnixNano())
	}
}
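A hedged counterpart for reading the value back: assuming d.Time holds the UnixNano value stored above, an accessor might look like the sketch below (the method name is illustrative).

// Hypothetical reader: converts the atomically stored UnixNano back to a time.Time.
func (d *Datum) timestamp() time.Time {
	return time.Unix(0, atomic.LoadInt64(&d.Time)).UTC()
}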
Example #7
func (c *Client) HealthCheck(url string) (err error) {
	r, err := defaults.Client(c.Client).Get(url)
	if err != nil {
		atomic.StoreInt64(&c.count, 0)
		//trace.Error(ctx, "Errors.Fail", err)
		log.Println("forensiq:", err)
		return
	}
	// Close the response body to avoid leaking the underlying connection.
	defer r.Body.Close()

	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		atomic.StoreInt64(&c.count, 0)
		//trace.Error(ctx, "Errors.Down", err)
		log.Println("forensiq: down")
		return
	}

	if result := string(body); r.StatusCode != http.StatusOK || len(result) == 0 || result[0] != '1' {
		atomic.StoreInt64(&c.count, 0)
		err = fmt.Errorf("%s: %s", r.Status, result)
		//trace.Error(ctx, "Errors.Status", err)
		log.Println("forensiq:", err)
		return
	}

	atomic.StoreInt64(&c.count, defaults.Int64(c.FailCount, 10))
	return
}
Example #8
func (r *Consumer) backoff() {

	if atomic.LoadInt32(&r.stopFlag) == 1 {
		atomic.StoreInt64(&r.backoffDuration, 0)
		return
	}

	// pick a random connection to test the waters
	conns := r.conns()
	if len(conns) == 0 {
		// backoff again
		backoffDuration := 1 * time.Second
		atomic.StoreInt64(&r.backoffDuration, backoffDuration.Nanoseconds())
		time.AfterFunc(backoffDuration, r.backoff)
		return
	}
	idx := r.rng.Intn(len(conns))
	choice := conns[idx]

	r.log(LogLevelWarning,
		"(%s) backoff timeout expired, sending RDY 1",
		choice.String())
	// while in backoff only ever let 1 message at a time through
	err := r.updateRDY(choice, 1)
	if err != nil {
		r.log(LogLevelWarning, "(%s) error updating RDY - %s", choice.String(), err)
		backoffDuration := 1 * time.Second
		atomic.StoreInt64(&r.backoffDuration, backoffDuration.Nanoseconds())
		time.AfterFunc(backoffDuration, r.backoff)
		return
	}

	atomic.StoreInt64(&r.backoffDuration, 0)
}
Example #9
func (s *intSampler) Clear() {
	atomic.StoreInt64(&s.count, 0)
	atomic.StoreInt64(&s.sum, 0)
	for i := range s.cache {
		atomic.StoreInt64(&s.cache[i], 0)
	}
}
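Clear writes every slot with an atomic store because observations may be recorded concurrently. A hedged sketch of what such a write path could look like; the Observe method and the ring-indexing scheme are assumptions, and only the count, sum, and cache fields come from the example above.

// Hypothetical write path racing with the Clear above.
func (s *intSampler) Observe(v int64) {
	n := atomic.AddInt64(&s.count, 1)
	atomic.AddInt64(&s.sum, v)
	// Keep recent observations in a fixed-size ring.
	atomic.StoreInt64(&s.cache[(n-1)%int64(len(s.cache))], v)
}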
Example #10
File: speed.go Project: hidu/tool
func (sp *Speed) printLog(now time.Time) {
	now_used := now.Sub(sp.LastTime).Seconds()
	sp.LastTime = now
	if now_used == 0 || sp.TotalSec == 0 {
		return
	}
	log_format := "total=%d,qps=%d,total_%ds=%d,speed=%.2fMps,total_size=%s,total_suc=%d," + fmt.Sprintf("total_%ds_suc", int64(now_used)) + "=%d"
	size_speed := float64(sp.TotalSecSize) / now_used / (1024 * 1024)

	TotalSize_str := ""
	if sp.TotalSize > gsize {
		TotalSize_str = fmt.Sprintf("%.2fG", float64(sp.TotalSize)/float64(gsize))
	} else if sp.TotalSize > msize {
		TotalSize_str = fmt.Sprintf("%.2fM", float64(sp.TotalSize)/float64(msize))
	} else {
		TotalSize_str = fmt.Sprintf("%.2fK", float64(sp.TotalSize)/float64(1024))
	}

	logMsg := fmt.Sprintf(log_format, sp.Total, int64(float64(sp.TotalSec)/now_used), int64(now_used), sp.TotalSec, size_speed, TotalSize_str, sp.TotalSuccess, sp.TotalSecSuccess)
	sp.PrintFn(logMsg, sp)

	atomic.StoreInt64(&sp.TotalSec, 0)
	atomic.StoreInt64(&sp.TotalSecSize, 0)
	atomic.StoreInt64(&sp.TotalSecSuccess, 0)
}
Example #11
func (t *batchTx) commit(stop bool) {
	var err error
	// commit the last tx
	if t.tx != nil {
		if t.pending == 0 && !stop {
			t.backend.mu.RLock()
			defer t.backend.mu.RUnlock()
			atomic.StoreInt64(&t.backend.size, t.tx.Size())
			return
		}
		start := time.Now()
		err = t.tx.Commit()
		commitDurations.Observe(time.Since(start).Seconds())
		atomic.AddInt64(&t.backend.commits, 1)

		t.pending = 0
		if err != nil {
			plog.Fatalf("cannot commit tx (%s)", err)
		}
	}

	if stop {
		return
	}

	t.backend.mu.RLock()
	defer t.backend.mu.RUnlock()
	// begin a new tx
	t.tx, err = t.backend.db.Begin(true)
	if err != nil {
		plog.Fatalf("cannot begin tx (%s)", err)
	}
	atomic.StoreInt64(&t.backend.size, t.tx.Size())
}
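commitDurations above is presumably a Prometheus histogram registered elsewhere in the package. A hedged sketch of what that declaration could look like, using github.com/prometheus/client_golang/prometheus; the metric name, help text, and buckets are assumptions.

// Hypothetical metric declaration observed by commit above.
var commitDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
	Namespace: "etcd",
	Subsystem: "disk",
	Name:      "backend_commit_duration_seconds",
	Help:      "The latency distribution of backend commits.",
	Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
})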
Example #12
func midiLoop(s *portmidi.Stream) {
	noteOn := make([]int64, 0, 128)
	for e := range s.Listen() {
		switch e.Status {
		case 144: // note on
			on := false
			for _, n := range noteOn {
				if n == e.Data1 {
					on = true
				}
			}
			if !on {
				noteOn = append(noteOn, e.Data1)
			}
			atomic.StoreInt64(&midiNote, e.Data1)
			atomic.StoreInt64(&midiGate, 1)
		case 128: // note off
			for i, n := range noteOn {
				if n == e.Data1 {
					copy(noteOn[i:], noteOn[i+1:])
					noteOn = noteOn[:len(noteOn)-1]
				}
			}
			if len(noteOn) > 0 {
				n := noteOn[len(noteOn)-1]
				atomic.StoreInt64(&midiNote, n)
			} else {
				atomic.StoreInt64(&midiGate, 0)
			}
		}
	}
}
Example #13
func (r *Consumer) resume() {
	if atomic.LoadInt32(&r.stopFlag) == 1 {
		atomic.StoreInt64(&r.backoffDuration, 0)
		return
	}

	// pick a random connection to test the waters
	conns := r.conns()
	if len(conns) == 0 {
		r.log(LogLevelWarning, "no connection available to resume")
		r.log(LogLevelWarning, "backing off for %.04f seconds", 1)
		r.backoff(time.Second)
		return
	}
	idx := r.rng.Intn(len(conns))
	choice := conns[idx]

	r.log(LogLevelWarning,
		"(%s) backoff timeout expired, sending RDY 1",
		choice.String())

	// while in backoff only ever let 1 message at a time through
	err := r.updateRDY(choice, 1)
	if err != nil {
		r.log(LogLevelWarning, "(%s) error resuming RDY 1 - %s", choice.String(), err)
		r.log(LogLevelWarning, "backing off for %.04f seconds", 1)
		r.backoff(time.Second)
		return
	}

	atomic.StoreInt64(&r.backoffDuration, 0)
}
Example #14
func (t *Task) Run() {
	var tag int64
	t.wg.Add(1)
	defer t.wg.Done()
LOOP:
	for {
		select {
		case <-t.pauseCh:
			atomic.StoreInt64(&tag, 1)
		case <-t.continueCh:
			atomic.StoreInt64(&tag, 0)
		case <-t.stopCh:
			break LOOP
		default:
			if atomic.LoadInt64(&tag) == 0 {
				t.do(t.index + 1)
				t.index++
				if t.index >= t.cnt {
					atomic.StoreInt64(&t.state, FINISH)
					break LOOP
				}
			} else {
				time.Sleep(time.Second * 1)
			}
		}
	}
}
Example #15
func (w *worker) work(messages chan *Msg) {
	for {
		select {
		case message := <-messages:
			atomic.StoreInt64(&w.startedAt, time.Now().UTC().Unix())
			w.currentMsg = message

			if w.process(message) {
				w.manager.confirm <- message
			}

			atomic.StoreInt64(&w.startedAt, 0)
			w.currentMsg = nil

			// Attempt to tell the fetcher we're finished.
			// This lets a fetcher that has slept after
			// detecting an empty queue requery the queue
			// immediately once we finish work.
			select {
			case w.manager.fetch.FinishedWork() <- true:
			default:
			}
		case w.manager.fetch.Ready() <- true:
			// Signaled to fetcher that we're
			// ready to accept a message
		case <-w.stop:
			w.exit <- true
			return
		}
	}
}
Example #16
// FlushCounter flushes the per-second counters.
func (counter *Counter) FlushCounter() {
	atomic.StoreInt64(&counter.OldClientQPS, counter.ClientQPS)
	atomic.StoreInt64(&counter.OldErrLogTotal, counter.ErrLogTotal)
	atomic.StoreInt64(&counter.OldSlowLogTotal, counter.SlowLogTotal)

	atomic.StoreInt64(&counter.ClientQPS, 0)
}
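A hedged sketch of the read side: a stats reporter consuming the snapshot taken by FlushCounter would use matching atomic loads. The accessor name is illustrative; only the OldClientQPS field comes from the example above.

// Hypothetical accessor pairing with the stores in FlushCounter.
func (counter *Counter) LastIntervalQPS() int64 {
	return atomic.LoadInt64(&counter.OldClientQPS)
}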
Example #17
func (f *File) Open() error {
	atomic.StoreInt64(&f.at, 0)
	atomic.StoreInt64(&f.readDirCount, 0)
	f.fileData.Lock()
	f.closed = false
	f.fileData.Unlock()
	return nil
}
Example #18
func (self *ResultData) Clear() {
	atomic.StoreInt64(&self.end_time, 0)
	atomic.StoreInt64(&self.begin_time, 0)
	atomic.StoreInt32(&self.msg_send, 0)
	atomic.StoreInt32(&self.msg_succ, 0)
	atomic.StoreInt32(&self.msg_failed, 0)
	atomic.StoreInt32(&self.msg_error, 0)
}
Example #19
File: raf.go Project: influx6/gu
// Toggle switches the state of the clock from paused to resumed and vice versa.
func (c *Clock) Toggle() {
	if atomic.LoadInt64(&c.paused) < 0 {
		atomic.StoreInt64(&c.paused, 1)
		return
	}

	atomic.StoreInt64(&c.paused, -1)
}
Example #20
func Run() {
	atomic.StoreInt64(&now, time.Now().Unix())
	log.Println("  Starting internal clock")
	go func() {
		for t := range time.Tick(time.Second) {
			atomic.StoreInt64(&now, t.Unix())
		}
	}()
}
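The point of caching the time in a package-level variable is that readers can get the current Unix time with a single atomic load. A minimal sketch of the accessor they would use, assuming the package exposes one; the function name is an assumption.

// Hypothetical accessor: pairs with the StoreInt64 in the ticker goroutine above.
func Now() int64 {
	return atomic.LoadInt64(&now)
}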
Example #21
File: m7.go Project: hqr/surge
// Similar to unicast models in this package,
// startNewChunk is in effect a minimalistic embedded client.
// The client, via startNewChunk, generates a new random chunk if and once
// the required number of replicas (configStorage.numReplicas)
// of the previous chunk are transmitted,
// written to disks and then ACK-ed by the respective servers.
//
// Since all the gateways in the model run the same code, the cumulative
// effect of randomly generated chunks by many dozens or hundreds of
// gateways means that there's probably no need, simulation-wise,
// to have each gateway generate multiple chunks simultaneously.
//
func (r *gatewaySeven) startNewChunk() {
	assert(r.chunk == nil)
	r.chunk = NewChunk(r, configStorage.sizeDataChunk*1024)

	r.numreplicas = 0
	r.rzvgroup.init(0, true) // cleanup

	// given a pseudo-random chunk id, select a multicast group
	// to store this chunk
	ngid := r.selectNgtGroup(r.chunk.cid, r.prevgroupid)
	ngobj := NewNgtGroup(ngid)

	// create flow and tios
	targets := ngobj.getmembers()
	assert(len(targets) == configReplicast.sizeNgtGroup)

	// new IO request and new multicast flow
	tioparent := NewTioRr(r, r.putpipeline, r.chunk)
	mcastflow := NewFlow(r, r.chunk.cid, tioparent, ngobj)

	// children tios; each server must see its own copy of
	// the message
	for _, srv := range targets {
		NewTioRr(r, r.putpipeline, tioparent, r.chunk, mcastflow, srv)
	}
	assert(tioparent.GetNumTioChildren() == ngobj.getCount(), fmt.Sprintf("%d != %d", len(tioparent.children), ngobj.getCount()))

	mcastflow.setbw(0)
	mcastflow.totalbytes = r.chunk.sizeb
	mcastflow.rb = &DummyRateBucket{}

	log("gwy-new-mcastflow-tio", mcastflow.String(), tioparent.String())

	// start negotiating; each server in the negotiating group
	// will subsequently respond with a bid
	//
	// atomic on/off here and elsewhere is done to make sure that
	// all the put-request messages are issued at
	// the same exact system tick, to simulate multicast IP router
	// simultaneously transmitting a buffered frame on all designated ports
	//
	atomic.StoreInt64(&r.NowMcasting, 1)
	for _, srv := range targets {
		tio := tioparent.GetTioChild(srv)
		ev := newMcastChunkPutRequestEvent(r, ngobj, r.chunk, srv, tio)
		tio.next(ev, SmethodDirectInsert)
	}
	atomic.StoreInt64(&r.NowMcasting, 0)

	r.rb.use(int64(configNetwork.sizeControlPDU * 8))
	mcastflow.GetRb().use(int64(configNetwork.sizeControlPDU * 8))
	mcastflow.timeTxDone = Now.Add(configNetwork.durationControlPDU)

	atomic.AddInt64(&r.txbytestats, int64(configNetwork.sizeControlPDU))
	r.prevgroupid = ngid
}
Example #22
// Reset resets all recorded data.
func (f *FuncStats) Reset() {
	atomic.StoreInt64(&f.current, 0)
	atomic.StoreInt64(&f.highwater, 0)
	f.parentsAndMutex.Lock()
	f.errors = make(map[string]int64, len(f.errors))
	f.panics = 0
	f.successTimes.Reset()
	f.failureTimes.Reset()
	f.parentsAndMutex.Unlock()
}
Example #23
func (cache *Cache) Clear() {
	for i := 0; i < 256; i++ {
		cache.locks[i].Lock()
		newSeg := newSegment(len(cache.segments[i].rb.data), i)
		cache.segments[i] = newSeg
		cache.locks[i].Unlock()
	}
	atomic.StoreInt64(&cache.hitCount, 0)
	atomic.StoreInt64(&cache.missCount, 0)
}
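Clear resets the hit and miss counters with atomic stores because lookups update them concurrently. A hedged sketch of a companion accessor that reads them with matching atomic loads; the HitRate method is an assumption, only the two counter fields come from the example.

// Hypothetical hit-rate accessor using matching atomic loads.
func (cache *Cache) HitRate() float64 {
	hits := atomic.LoadInt64(&cache.hitCount)
	misses := atomic.LoadInt64(&cache.missCount)
	total := hits + misses
	if total == 0 {
		return 0
	}
	return float64(hits) / float64(total)
}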
Example #24
func (t *Terminal) ioloop() {
	t.wg.Add(1)
	defer t.wg.Done()
	var (
		isEscape       bool
		isEscapeEx     bool
		expectNextChar bool
	)

	buf := bufio.NewReader(Stdin)
	for {
		if !expectNextChar {
			atomic.StoreInt64(&t.isReading, 0)
			select {
			case <-t.kickChan:
				atomic.StoreInt64(&t.isReading, 1)
			case <-t.stopChan:
				return
			}
		}
		expectNextChar = false
		r, _, err := buf.ReadRune()
		if err != nil {
			break
		}

		if isEscape {
			isEscape = false
			if r == CharEscapeEx {
				expectNextChar = true
				isEscapeEx = true
				continue
			}
			r = escapeKey(r)
		} else if isEscapeEx {
			isEscapeEx = false
			r = escapeExKey(r, buf)
		}

		expectNextChar = true
		switch r {
		case CharEsc:
			if t.cfg.VimMode {
				t.outchan <- r
				break
			}
			isEscape = true
		case CharInterrupt, CharEnter, CharCtrlJ:
			expectNextChar = false
			fallthrough
		default:
			t.outchan <- r
		}
	}
}
Example #25
// cronStatsReset triggers a reset of certain statistic counters at regular intervals
func cronStatsReset() {
	// Trigger events every hour
	hour := time.NewTicker(1 * time.Hour)

	// Trigger events every half hour
	halfHour := time.NewTicker(30 * time.Minute)

	// Trigger events every minute
	minute := time.NewTicker(1 * time.Minute)

	for {
		select {
		case <-hour.C:
			// Reset hourly stats counters
			atomic.StoreInt64(&common.Static.API.Hour, 0)
			atomic.StoreInt64(&common.Static.HTTP.Hour, 0)
			atomic.StoreInt64(&common.Static.UDP.Hour, 0)
		case <-halfHour.C:
			// Reset half-hourly stats counters
			atomic.StoreInt64(&common.Static.API.HalfHour, 0)
			atomic.StoreInt64(&common.Static.HTTP.HalfHour, 0)
			atomic.StoreInt64(&common.Static.UDP.HalfHour, 0)
		case <-minute.C:
			// Reset minute stats counters
			atomic.StoreInt64(&common.Static.API.Minute, 0)
			atomic.StoreInt64(&common.Static.HTTP.Minute, 0)
			atomic.StoreInt64(&common.Static.UDP.Minute, 0)
		}
	}
}
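The counters being reset here would be incremented from request handlers with atomic adds, pairing with the periodic StoreInt64 resets. A hedged sketch of that increment side; recordAPIHit is illustrative, and the field names come from the example above.

// Hypothetical increment path pairing with the periodic resets above.
func recordAPIHit() {
	atomic.AddInt64(&common.Static.API.Minute, 1)
	atomic.AddInt64(&common.Static.API.HalfHour, 1)
	atomic.AddInt64(&common.Static.API.Hour, 1)
}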
Example #26
// issueSLOKs checks client progress against each candidate seed spec
// and seeds SLOKs when the client traffic levels are achieved. After
// checking progress, and if the SLOK time period has changed since
// progress was last recorded, progress is reset. Partial, insufficient
// progress is intentionally dropped when the time period rolls over.
// Derived SLOKs are cached to avoid redundant CPU intensive operations.
// All issued SLOKs are retained in the client state for the duration
// of the client's session.
func (state *ClientSeedState) issueSLOKs() {

	// Concurrency: the caller must lock state.mutex.

	if state.scheme == nil {
		return
	}

	progressSLOKTime := time.Unix(0, state.progressSLOKTime)

	for index, progress := range state.progress {

		seedSpec := state.scheme.SeedSpecs[index]

		if progress.exceeds(&seedSpec.Targets) {

			ref := &slokReference{
				PropagationChannelID: state.propagationChannelID,
				SeedSpecID:           string(seedSpec.ID),
				Time:                 progressSLOKTime,
			}

			state.scheme.derivedSLOKCacheMutex.RLock()
			slok, ok := state.scheme.derivedSLOKCache[*ref]
			state.scheme.derivedSLOKCacheMutex.RUnlock()
			if !ok {
				slok = deriveSLOK(state.scheme, ref)
				state.scheme.derivedSLOKCacheMutex.Lock()
				state.scheme.derivedSLOKCache[*ref] = slok
				state.scheme.derivedSLOKCacheMutex.Unlock()
			}

			// Previously issued SLOKs are not re-added to
			// the payload.
			if state.issuedSLOKs[string(slok.ID)] == nil {
				state.issuedSLOKs[string(slok.ID)] = slok
				state.payloadSLOKs = append(state.payloadSLOKs, slok)
			}
		}
	}

	slokTime := getSLOKTime(state.scheme.SeedPeriodNanoseconds)

	if slokTime != atomic.LoadInt64(&state.progressSLOKTime) {
		atomic.StoreInt64(&state.progressSLOKTime, slokTime)
		// The progress map structure is not reset or modified; instead
		// the mapped accumulator values are zeroed. Concurrently, port
		// forward relay goroutines continue to add to these accumulators.
		for _, progress := range state.progress {
			atomic.StoreInt64(&progress.BytesRead, 0)
			atomic.StoreInt64(&progress.BytesWritten, 0)
			atomic.StoreInt64(&progress.PortForwardDurationNanoseconds, 0)
		}
	}
}
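The comment above mentions port forward relay goroutines concurrently adding to these accumulators. A hedged sketch of that add side; the method and the seedProgress receiver name are assumptions, while the field names come from the reset loop above.

// Hypothetical accumulation path racing with the atomic resets in issueSLOKs.
func (progress *seedProgress) recordRelay(bytesRead, bytesWritten int64, duration time.Duration) {
	atomic.AddInt64(&progress.BytesRead, bytesRead)
	atomic.AddInt64(&progress.BytesWritten, bytesWritten)
	atomic.AddInt64(&progress.PortForwardDurationNanoseconds, int64(duration))
}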
Example #27
func (t *tsoTask) Run() error {
	if t.Interrupted() {
		return errors.New("task has been interrupted already")
	}

	// enter here means I am the leader tso.
	tso := t.tso

	log.Debugf("tso leader %s run task", tso)

	if err := tso.syncTimestamp(); err != nil {
		// interrupt here
		log.Errorf("sync timestmap err %v, interrupt this task", err)

		atomic.StoreInt64(&t.interrupted, 1)
		return errors.Trace(err)
	}

	atomic.StoreInt64(&tso.isLeader, 1)

	tsTicker := time.NewTicker(time.Duration(updateTimestampStep) * time.Millisecond)

	defer func() {
		atomic.StoreInt64(&tso.isLeader, 0)

		tsTicker.Stop()

		// we should close all connections.
		t.closeAllConns()
		log.Debugf("tso leader %s task end", tso)
	}()

	for {
		select {
		case err := <-t.interruptCh:
			log.Debugf("tso leader %s is interrupted, err %v", tso, err)

			// we will interrupt this task, and we can't run this task again.
			atomic.StoreInt64(&t.interrupted, 1)
			return errors.Trace(err)
		case <-t.stopCh:
			log.Debugf("tso leader %s is stopped", tso)

			// we will stop this task, maybe run again later.
			return errors.New("task is stopped")
		case conn := <-t.connCh:
			// handle connection below
			tso.handleConn(conn)
		case <-tsTicker.C:
			if err := tso.updateTimestamp(); err != nil {
				return errors.Trace(err)
			}
		}
	}
}
Example #28
// The slave should also update the pub size stat,
// since the slave needs to sync with the leader (which will cost write performance).
func (self *TopicHistoryStatsInfo) UpdateHourlySize(curPubSize int64) {
	now := int32(time.Now().Hour())
	lastBucket := self.lastHour % 24
	if now != self.lastHour {
		lastBucket = now % 24
		atomic.StoreInt64(&self.HourlyPubSize[lastBucket], 0)
		atomic.StoreInt32(&self.lastHour, now)
	}
	atomic.AddInt64(&self.HourlyPubSize[lastBucket], curPubSize-self.lastPubSize)
	atomic.StoreInt64(&self.lastPubSize, curPubSize)
}
Example #29
func (q *Reader) sendRDY(c *nsqConn, count int64) error {
	var buf bytes.Buffer
	atomic.AddInt64(&q.totalRdyCount, atomic.LoadInt64(&c.rdyCount)*-1+count)
	atomic.StoreInt64(&c.rdyCount, count)
	atomic.StoreInt64(&c.lastRdyCount, count)
	err := c.sendCommand(&buf, Ready(int(count)))
	if err != nil {
		handleError(q, c, fmt.Sprintf("[%s] error sending RDY %d - %s", c, count, err.Error()))
		return err
	}
	return nil
}
Example #30
func (c *controlConn) connect(endpoints []string) error {
	// initial connection attempt: try to connect to each endpoint to get an initial
	// list of nodes.

	// shuffle endpoints so not all drivers will connect to the same initial
	// node.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	perm := r.Perm(len(endpoints))
	shuffled := make([]string, len(endpoints))

	for i, endpoint := range endpoints {
		shuffled[perm[i]] = endpoint
	}

	// store that we are not connected so that a reconnect won't happen if we error
	atomic.StoreInt64(&c.connecting, -1)

	var (
		conn *Conn
		err  error
	)

	for _, addr := range shuffled {
		conn, err = c.session.connect(JoinHostPort(addr, c.session.cfg.Port), c)
		if err != nil {
			log.Printf("gocql: unable to control conn dial %v: %v\n", addr, err)
			continue
		}

		if err = c.registerEvents(conn); err != nil {
			conn.Close()
			continue
		}

		// we should fetch the initial ring here and update initial host data. So that
		// when we return from here we have a ring topology ready to go.
		break
	}

	if conn == nil {
		// this is fatal, not going to connect a session
		return err
	}

	c.conn.Store(conn)
	atomic.StoreInt64(&c.connecting, 0)

	c.closeWg.Add(1)
	go c.heartBeat()

	return nil
}