Code Example #1
File: reader.go Project: xchapter7x/govmomi
func bpsLoop(ch <-chan Report, dst *uint64) {
	l := list.New()

	for {
		var tch <-chan time.Time

		// Set up a timer for when the front of the list becomes stale.
		if e := l.Front(); e != nil {
			dt := time.Second - time.Now().Sub(e.Value.(readerReport).t)
			tch = time.After(dt)
		}

		select {
		case q, ok := <-ch:
			if !ok {
				return
			}

			l.PushBack(q)
		case <-tch:
			l.Remove(l.Front())
		}

		// Compute new bps
		if l.Len() == 0 {
			atomic.StoreUint64(dst, 0)
		} else {
			f := l.Front().Value.(readerReport)
			b := l.Back().Value.(readerReport)
			atomic.StoreUint64(dst, uint64(b.pos-f.pos))
		}
	}
}
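A minimal usage sketch for the example above (the wiring and names below are assumptions, not govmomi code): a producer feeds readerReport values on the channel, while any goroutine samples the rolling bytes-per-second figure with an atomic load.

// Hypothetical wiring for bpsLoop; bps and ch are illustrative names.
var bps uint64
ch := make(chan Report)
go bpsLoop(ch, &bps)

// Producer side: publish the reader's position as bytes flow.
ch <- readerReport{t: time.Now(), pos: 4096}

// Any goroutine can sample the one-second rate without locking.
fmt.Printf("throughput: %d B/s\n", atomic.LoadUint64(&bps))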
Code Example #2
File: main.go Project: freeformz/log-shuttle
func read(r io.ReadCloser, lines chan<- string) {
	var drops, reads uint64
	go func() {
		for range time.Tick(time.Second) {
			d := atomic.LoadUint64(&drops)
			r := atomic.LoadUint64(&reads)
			atomic.StoreUint64(&drops, 0)
			atomic.StoreUint64(&reads, 0)
			fmt.Fprintf(os.Stdout, "reads=%d drops=%d\n", r, d)
		}
	}()
	rdr := bufio.NewReader(r)
	for {
		line, err := rdr.ReadString('\n')
		// Drop the line if the lines buffer is full.
		// Set buffSize to reduce drops.
		if err == nil {
			select {
			case lines <- line:
				atomic.AddUint64(&reads, 1)
			default:
				atomic.AddUint64(&drops, 1)
			}
		} else {
			r.Close()
			return
		}
	}
}
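A plausible call site for read, assuming the function above (os.Stdin and the channel capacity are illustrative; the original exposes the capacity as its buffSize option):

// Hypothetical usage: the bounded channel absorbs bursts; once it is
// full, read drops lines instead of blocking the producer.
lines := make(chan string, 1024) // a larger buffer means fewer drops
go read(os.Stdin, lines)
for line := range lines {
	fmt.Print(line) // stand-in consumer; note read never closes lines
}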
Code Example #3
File: logger.go Project: christianbak/sawmill
// SendEvent queues the given event.
// The event's `Id` field will be updated with a value that can be used by
// Sync(). This value is also provided as the return value for convenience.
func (logger *Logger) SendEvent(logEvent *event.Event) uint64 {
	logEvent.Id = atomic.AddUint64(&logger.lastEventId, 1)

	logger.mutex.RLock()
	for _, eventHandlerSpec := range logger.eventHandlerMap {
		if true { //TODO make dropping configurable per-handler
			select {
			case eventHandlerSpec.eventChannel <- logEvent:
				atomic.StoreUint64(&eventHandlerSpec.lastSentEventId, logEvent.Id)
			default:
				fmt.Fprintf(os.Stderr, "Unable to send event to handler. Buffer full. handler=%s\n", eventHandlerSpec.name)
				//TODO generate an event for this, but track a time-last-dropped so we don't
				// send the "event dropped" message to the handler that is dropping: if we
				// last dropped < X seconds ago, don't generate another one.
			}
		} else {
			eventHandlerSpec.eventChannel <- logEvent
			atomic.StoreUint64(&eventHandlerSpec.lastSentEventId, logEvent.Id)
		}
	}
	logger.mutex.RUnlock()

	if logger.GetSync() {
		logger.Sync(logEvent.Id)
	}

	return logEvent.Id
}
Code Example #4
// InitialState implements the raft.Storage interface.
func (r *Range) InitialState() (raftpb.HardState, raftpb.ConfState, error) {
	var hs raftpb.HardState
	found, err := engine.MVCCGetProto(r.rm.Engine(), keys.RaftHardStateKey(r.Desc().RaftID),
		proto.ZeroTimestamp, true, nil, &hs)
	if err != nil {
		return raftpb.HardState{}, raftpb.ConfState{}, err
	}
	if !found {
		// We don't have a saved HardState, so set up the defaults.
		if r.isInitialized() {
			// Set the initial log term.
			hs.Term = raftInitialLogTerm
			hs.Commit = raftInitialLogIndex

			atomic.StoreUint64(&r.lastIndex, raftInitialLogIndex)
		} else {
			// This is a new range we are receiving from another node. Start
			// from zero so we will receive a snapshot.
			atomic.StoreUint64(&r.lastIndex, 0)
		}
	}

	var cs raftpb.ConfState
	// For uninitialized ranges, membership is unknown at this point.
	if found || r.isInitialized() {
		for _, rep := range r.Desc().Replicas {
			cs.Nodes = append(cs.Nodes, uint64(proto.MakeRaftNodeID(rep.NodeID, rep.StoreID)))
		}
	}

	return hs, cs, nil
}
Code Example #5
File: server.go Project: robszumski/etcd
// apply takes entries received from Raft (after they have been committed)
// and applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint64, bool) {
	var applied uint64
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			var r pb.Request
			pbutil.MustUnmarshal(&r, e.Data)
			s.w.Trigger(r.ID, s.applyRequest(r))
		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			shouldstop, err := s.applyConfChange(cc, confState)
			s.w.Trigger(cc.ID, err)
			if shouldstop {
				return applied, true
			}
		default:
			log.Panicf("entry type should be either EntryNormal or EntryConfChange")
		}
		atomic.StoreUint64(&s.raftIndex, e.Index)
		atomic.StoreUint64(&s.raftTerm, e.Term)
		applied = e.Index
	}
	return applied, false
}
Code Example #6
File: range.go Project: huaxling/cockroach
// NewRange initializes the range using the given metadata.
func NewRange(desc *proto.RangeDescriptor, rm rangeManager) (*Range, error) {
	r := &Range{
		rm:          rm,
		cmdQ:        NewCommandQueue(),
		tsCache:     NewTimestampCache(rm.Clock()),
		respCache:   NewResponseCache(desc.RaftID, rm.Engine()),
		pendingCmds: map[cmdIDKey]*pendingCmd{},
	}
	r.setDescWithoutProcessUpdate(desc)

	lastIndex, err := r.loadLastIndex()
	if err != nil {
		return nil, err
	}
	atomic.StoreUint64(&r.lastIndex, lastIndex)

	appliedIndex, err := r.loadAppliedIndex(r.rm.Engine())
	if err != nil {
		return nil, err
	}
	atomic.StoreUint64(&r.appliedIndex, appliedIndex)

	lease, err := loadLeaderLease(r.rm.Engine(), desc.RaftID)
	if err != nil {
		return nil, err
	}
	atomic.StorePointer(&r.lease, unsafe.Pointer(lease))

	if r.stats, err = newRangeStats(desc.RaftID, rm.Engine()); err != nil {
		return nil, err
	}

	return r, nil
}
Code Example #7
File: server.go Project: johnmccawley/origin
// apply takes entries received from Raft (after they have been committed)
// and applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint64, bool) {
	var applied uint64
	var shouldstop bool
	var err error
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			// The raft state machine may generate a noop entry during leader
			// confirmation. Skip it in advance to avoid potential bugs in the future.
			if len(e.Data) == 0 {
				select {
				case s.forceVersionC <- struct{}{}:
				default:
				}
				break
			}
			var r pb.Request
			pbutil.MustUnmarshal(&r, e.Data)
			s.w.Trigger(r.ID, s.applyRequest(r))
		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			shouldstop, err = s.applyConfChange(cc, confState)
			s.w.Trigger(cc.ID, err)
		default:
			plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
		}
		atomic.StoreUint64(&s.r.index, e.Index)
		atomic.StoreUint64(&s.r.term, e.Term)
		applied = e.Index
	}
	return applied, shouldstop
}
Code Example #8
File: conn.go Project: nordicdyno/gophers
func (c *Conn) ResetStats() {
	atomic.StoreUint64(&c.s.Recv, 0)
	atomic.StoreUint64(&c.s.Sent, 0)
	c.s.FirstRecv = time.Time{}
	c.s.FirstSend = time.Time{}
	c.s.LastRecv = time.Time{}
	c.s.LastSend = time.Time{}
}
Code Example #9
File: cache.go Project: kjplatz/vic
// Reset resets the cache
func (c *Cache) Reset() {
	c.Lock()
	defer c.Unlock()

	// drop the old map for GC and reset counters
	c.m = make(map[string]*Item, c.capacity)
	atomic.StoreUint64(&c.hits, 0)
	atomic.StoreUint64(&c.misses, 0)
}
Code Example #10
func (f *fakeWriter) Write(message *events.Envelope) {
	if message.GetEventType() == events.Envelope_ValueMetric {

		switch message.GetValueMetric().GetName() {
		case "Uptime":
			atomic.StoreUint64(&f.lastUptime, uint64(message.GetValueMetric().GetValue()))
		case "LinuxFileDescriptor":
			atomic.StoreUint64(&f.openFileDescriptors, uint64(message.GetValueMetric().GetValue()))
		}
	}
}
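Note that uint64(message.GetValueMetric().GetValue()) truncates the fractional part, which is fine for whole-valued metrics like Uptime. If the exact float64 had to survive the atomic store, the bit-reinterpretation trick from code example #26 would apply; a self-contained sketch:

// Sketch: preserve the exact float64 bits across an atomic store/load.
bits := math.Float64bits(3.75)
var slot uint64
atomic.StoreUint64(&slot, bits)
value := math.Float64frombits(atomic.LoadUint64(&slot)) // 3.75 exactly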
Code Example #11
File: mrsw.go Project: catgatp/gol
// RLock read-locks the resource resursId for the given thread.
// It double-checks the writer to catch a write lock taken concurrently.
func (t *TControl) RLock(threadId uint16, resursId uint64) {
	for {
		if atomic.LoadUint64(&t.writer) != resursId {
			atomic.StoreUint64(&t.readers[threadId], resursId)
			if atomic.LoadUint64(&t.writer) != resursId {
				return
			}
			atomic.StoreUint64(&t.readers[threadId], unlocked)
		}
		t.sleep()
	}
}
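The second load is what makes this safe: a writer may claim resursId between the first check and the reader's store, so the reader re-checks and backs off if it lost the race. The matching unlock would simply clear the per-thread slot; a sketch (RUnlock is an assumption, not shown in catgatp/gol):

// Hypothetical counterpart: releasing the read lock clears this
// thread's slot so a writer no longer sees it as a reader.
func (t *TControl) RUnlock(threadId uint16) {
	atomic.StoreUint64(&t.readers[threadId], unlocked)
}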
Code Example #12
File: vbucket_state.go Project: scottcagno/cbgb
func (t *VBMeta) update(from *VBMeta) *VBMeta {
	t.State = parseVBState(from.State).String()
	fromCas := atomic.LoadUint64(&from.LastCas)
	if atomic.LoadUint64(&t.LastCas) < fromCas {
		atomic.StoreUint64(&t.LastCas, fromCas)
	}
	metaCas := atomic.LoadUint64(&from.MetaCas)
	if atomic.LoadUint64(&t.MetaCas) < metaCas {
		atomic.StoreUint64(&t.MetaCas, metaCas)
	}
	return t
}
Code Example #13
File: sniffer.go Project: dilgerma/scope
func (s *Sniffer) loop(src gopacket.ZeroCopyPacketDataSource, on, off time.Duration) {
	var (
		process = uint64(1)               // initially enabled
		total   = uint64(0)               // total packets seen
		count   = uint64(0)               // count of packets captured
		packets = make(chan Packet, 1024) // decoded packets
		rpt     = report.MakeReport()     // the report we build
		turnOn  = (<-chan time.Time)(nil) // signal to start capture (initially enabled)
		turnOff = time.After(on)          // signal to stop capture
		done    = make(chan struct{})     // when src is finished, we're done too
	)

	// As a special case, if our off duty cycle is zero, i.e. 100% sample
	// rate, we simply disable the turn-off signal channel.
	if off == 0 {
		turnOff = nil
	}

	go func() {
		s.read(src, packets, &process, &total, &count)
		close(done)
	}()

	for {
		select {
		case p := <-packets:
			s.Merge(p, &rpt)

		case <-turnOn:
			atomic.StoreUint64(&process, 1) // enable packet capture
			turnOn = nil                    // disable the on switch
			turnOff = time.After(on)        // enable the off switch

		case <-turnOff:
			atomic.StoreUint64(&process, 0) // disable packet capture
			turnOn = time.After(off)        // enable the on switch
			turnOff = nil                   // disable the off switch

		case c := <-s.reports:
			rpt.Sampling.Count = atomic.LoadUint64(&count)
			rpt.Sampling.Total = atomic.LoadUint64(&total)
			interpolateCounts(rpt)
			c <- rpt
			atomic.StoreUint64(&count, 0)
			atomic.StoreUint64(&total, 0)
			rpt = report.MakeReport()

		case <-done:
			return
		}
	}
}
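The process, total, and count words are shared with s.read, whose signature is visible from the call above; presumably it counts every packet but only decodes while the duty cycle has capture enabled. A sketch of that read side (an assumption; the Packet field name is hypothetical):

// Hypothetical shape of s.read: total counts every packet seen,
// count only those captured while *process == 1.
func (s *Sniffer) read(src gopacket.ZeroCopyPacketDataSource, packets chan<- Packet, process, total, count *uint64) {
	for {
		data, _, err := src.ZeroCopyReadPacketData()
		if err != nil {
			return // src is finished; the caller then closes done
		}
		atomic.AddUint64(total, 1)
		if atomic.LoadUint64(process) == 0 {
			continue // capture disabled: drop the packet
		}
		atomic.AddUint64(count, 1)
		packets <- Packet{Data: data} // decoding elided in this sketch
	}
}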
Code Example #14
File: rule.go Project: Rocknrenew/filtron
func (r *Rule) Validate(ctx *fasthttp.RequestCtx, rs types.ResponseState) types.ResponseState {
	curTime := uint64(time.Now().Unix())
	if r.Limit != 0 && curTime-atomic.LoadUint64(&r.lastTick) >= r.Interval {
		atomic.StoreUint64(&r.filterMatchCount, 0)
		atomic.StoreUint64(&r.lastTick, curTime)
		for _, a := range r.Aggregations {
			a.Lock()
			a.Values = make(map[string]uint64)
			a.Unlock()
		}
	}
	for _, t := range r.Filters {
		if _, found := t.Match(ctx); !found {
			return types.UNTOUCHED
		}
	}
	matched := false
	state := rs
	if len(r.Aggregations) == 0 {
		if atomic.AddUint64(&r.filterMatchCount, 1) > r.Limit {
			matched = true
		}
	} else {
		for _, a := range r.Aggregations {
			if a.Get(ctx) > r.Limit {
				matched = true
			}
		}
	}
	if matched {
		atomic.AddUint64(&r.MatchCount, 1)
		for _, a := range r.Actions {
			// Skip serving actions if we already had one
			s := a.GetResponseState()
			if state == types.SERVED && s == types.SERVED {
				continue
			}
			err := a.Act(r.Name, ctx)
			// TODO error handling
			if err != nil {
				fmt.Println("meh", err)
			}
			if s > state {
				state = s
			}
		}
	}
	validateRuleList(&r.SubRules, &state, ctx)
	return state
}
Code Example #15
File: server.go Project: RomainVabre/origin
// apply takes entries received from Raft (after they have been committed)
// and applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint64, bool) {
	var applied uint64
	var shouldstop bool
	for i := range es {
		e := es[i]
		// set the consistent index of the entry currently being applied
		s.consistIndex.setConsistentIndex(e.Index)
		switch e.Type {
		case raftpb.EntryNormal:
			// The raft state machine may generate a noop entry during leader
			// confirmation. Skip it in advance to avoid potential bugs in the future.
			if len(e.Data) == 0 {
				select {
				case s.forceVersionC <- struct{}{}:
				default:
				}
				break
			}

			var raftReq pb.InternalRaftRequest
			if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
				var r pb.Request
				pbutil.MustUnmarshal(&r, e.Data)
				s.w.Trigger(r.ID, s.applyRequest(r))
			} else {
				switch {
				case raftReq.V2 != nil:
					req := raftReq.V2
					s.w.Trigger(req.ID, s.applyRequest(*req))
				default:
					s.w.Trigger(raftReq.ID, s.applyV3Request(&raftReq))
				}
			}
		case raftpb.EntryConfChange:
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			removedSelf, err := s.applyConfChange(cc, confState)
			shouldstop = shouldstop || removedSelf
			s.w.Trigger(cc.ID, err)
		default:
			plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
		}
		atomic.StoreUint64(&s.r.index, e.Index)
		atomic.StoreUint64(&s.r.term, e.Term)
		applied = e.Index
	}
	return applied, shouldstop
}
Code Example #16
File: histograms.go Project: RSellathurai/rend
func extractAndReset(h *hist) *hdat {
	h.lock.Lock()

	// Swap primary and secondary, then reset the new primary's counters.
	h.prim, h.sec = h.sec, h.prim

	atomic.StoreUint64(&h.prim.count, 0)
	atomic.StoreUint64(&h.prim.kept, 0)
	atomic.StoreUint64(&h.prim.total, 0)
	atomic.StoreUint64(&h.prim.max, 0)
	atomic.StoreUint64(&h.prim.min, math.MaxUint64)

	h.lock.Unlock()

	return h.sec
}
Code Example #17
File: histograms.go Project: RSellathurai/rend
func newHdat() *hdat {
	ret := &hdat{
		buf: make([]uint64, buflen+1),
	}
	atomic.StoreUint64(&ret.min, math.MaxUint64)
	return ret
}
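For reference, a struct layout consistent with both histogram functions above (an assumption; the field comments are guesses from the names, and the real definition lives in the rend project). Every mutable field is a uint64 so it can be manipulated with sync/atomic:

// Hypothetical hdat layout matching the fields used above.
type hdat struct {
	count uint64   // observations offered
	kept  uint64   // observations actually sampled into buf
	total uint64   // running sum of observations
	max   uint64   // largest observation seen
	min   uint64   // smallest observation; initialized to math.MaxUint64
	buf   []uint64 // sampled observations, buflen+1 entries
}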
Code Example #18
File: hello.go Project: tussion/FrameworkBenchmarks
func mainHandler(ctx *fasthttp.RequestCtx) {
	// Performance hack for prefork mode - periodically close keepalive
	// connections for evenly distributing connections among available
	// processes.
	if *prefork {
		maxDuration := maxConnDuration + time.Millisecond*time.Duration(atomic.LoadUint64(&connDurationJitter))
		if time.Since(ctx.ConnTime()) > maxDuration {
			atomic.StoreUint64(&connDurationJitter, uint64(rand.Intn(100)))
			ctx.SetConnectionClose()
		}
	}

	path := ctx.Path()
	switch string(path) {
	case "/plaintext":
		plaintextHandler(ctx)
	case "/json":
		jsonHandler(ctx)
	case "/db":
		dbHandler(ctx)
	case "/queries":
		queriesHandler(ctx)
	case "/fortune":
		fortuneHandler(ctx)
	case "/update":
		updateHandler(ctx)
	default:
		ctx.Error("unexpected path", fasthttp.StatusBadRequest)
	}
}
Code Example #19
File: watcher_fsevents_cgo.go Project: kaocs/notify
//export gostream
func gostream(_, ctx unsafe.Pointer, n C.size_t, paths, flags, ids uintptr) {
	const (
		offchar = unsafe.Sizeof((*C.char)(nil))
		offflag = unsafe.Sizeof(C.FSEventStreamEventFlags(0))
		offid   = unsafe.Sizeof(C.FSEventStreamEventId(0))
	)
	if n == 0 {
		return
	}
	ev := make([]FSEvent, 0, int(n))
	for i := uintptr(0); i < uintptr(n); i++ {
		switch flags := *(*uint32)(unsafe.Pointer((flags + i*offflag))); {
		case flags&uint32(FSEventsEventIdsWrapped) != 0:
			atomic.StoreUint64(&since, uint64(C.FSEventsGetCurrentEventId()))
		default:
			ev = append(ev, FSEvent{
				Path:  C.GoString(*(**C.char)(unsafe.Pointer(paths + i*offchar))),
				Flags: flags,
				ID:    *(*uint64)(unsafe.Pointer(ids + i*offid)),
			})
		}
	}
	(*(*streamFunc)(ctx))(ev)
}
Code Example #20
func (nca *NoCommitArbiter) Run() error {
	if err := nca.ensureStart(); err != nil {
		return err
	}
	defer nca.markStop()

	nca.offsets = make(chan int64, 256)

	// reset stat variables
	atomic.StoreUint64(&nca.processed, 0)
	nca.startTime = time.Now().Local()

	nca.markReady()

arbiterLoop:
	for {
		select {
		case <-nca.WaitForCloseChannel():
			glog.V(1).Infof("Stop event triggered [url:%s]", nca.config.Url)
			break arbiterLoop
		case offset := <-nca.offsets:
			glog.V(1).Infof("Read offset from Transporter [topic:%s][partition:%d][url:%s][offset:%d]",
				nca.manager.Topic, nca.manager.Partition, nca.config.Url, offset)
			atomic.AddUint64(&nca.processed, 1)
		}
	}

	return nil
}
Code Example #21
// append the given entries to the raft log.
func (r *Replica) append(batch engine.Engine, entries []raftpb.Entry) error {
	if len(entries) == 0 {
		return nil
	}
	for _, ent := range entries {
		err := engine.MVCCPutProto(batch, nil, keys.RaftLogKey(r.RangeID, ent.Index),
			roachpb.ZeroTimestamp, nil, &ent)
		if err != nil {
			return err
		}
	}
	lastIndex := entries[len(entries)-1].Index
	prevLastIndex := atomic.LoadUint64(&r.lastIndex)
	// Delete any previously appended log entries which never committed.
	for i := lastIndex + 1; i <= prevLastIndex; i++ {
		err := engine.MVCCDelete(batch, nil,
			keys.RaftLogKey(r.RangeID, i), roachpb.ZeroTimestamp, nil)
		if err != nil {
			return err
		}
	}

	// Commit the batch and update the last index.
	if err := setLastIndex(batch, r.RangeID, lastIndex); err != nil {
		return err
	}
	batch.Defer(func() {
		atomic.StoreUint64(&r.lastIndex, lastIndex)
	})

	return nil
}
Code Example #22
File: ring.go Project: amareshp/go-datastructures
// Get will return the next item in the queue.  This call will block
// if the queue is empty.  This call will unblock when an item is added
// to the queue or Dispose is called on the queue.  An error will be returned
// if the queue is disposed.
func (rb *RingBuffer) Get() (interface{}, error) {
	var n *node
	pos := atomic.LoadUint64(&rb.dequeue)
	i := 0
L:
	for {
		if atomic.LoadUint64(&rb.disposed) == 1 {
			return nil, ErrDisposed
		}

		n = rb.nodes[pos&rb.mask]
		seq := atomic.LoadUint64(&n.position)
		switch dif := int64(seq) - int64(pos+1); { // signed: a uint64 dif could never be < 0
		case dif == 0:
			if atomic.CompareAndSwapUint64(&rb.dequeue, pos, pos+1) {
				break L
			}
		case dif < 0:
			panic(`Ring buffer in compromised state during a get operation.`)
		default:
			pos = atomic.LoadUint64(&rb.dequeue)
		}

		if i == 10000 {
			runtime.Gosched() // free up the cpu before the next iteration
			i = 0
		} else {
			i++
		}
	}
	data := n.data
	n.data = nil
	atomic.StoreUint64(&n.position, pos+rb.mask+1)
	return data, nil
}
Code Example #23
// Dequeue removes and returns the oldest element from the ring buffer.
// It also returns true if the operation is successful, false otherwise.
// On an empty queue it returns (nil, false) rather than blocking.
func (rb *RingBuffer) Dequeue() (data interface{}, b bool) {
	var cell *ring_cell
	pos := atomic.LoadUint64(&rb.dequeue_pos_)
	i := 0
Loop:
	for {
		cell = rb.buffer_[pos&rb.buffer_mask_]
		seq := atomic.LoadUint64(&cell.sequence_)
		switch dif := int64(seq) - int64(pos) - 1; { // signed so the empty (< 0) case can fire
		case dif == 0:
			if atomic.CompareAndSwapUint64(&rb.dequeue_pos_, pos, pos+1) {
				break Loop
			}
		case dif < 0:
			return nil, false
		default:
			pos = atomic.LoadUint64(&rb.dequeue_pos_)
		}
		// freeup the cpu
		if i >= freeup_threshold {
			runtime.Gosched()
			i = 0
		} else {
			i++
		}
	}
	data = cell.data_
	atomic.StoreUint64(&cell.sequence_, pos+rb.buffer_mask_+1)
	b = true
	return data, b
}
Code Example #24
// Put adds the provided item to the queue.  If the queue is full, this
// call will block until an item is removed from the queue or Dispose is
// called on the queue.  An error will be returned if the queue is disposed.
func (rb *RingBuffer) Put(item interface{}) error {
	var n *node
	pos := atomic.LoadUint64(&rb.queue)
	i := 0
L:
	for {
		if atomic.LoadUint64(&rb.disposed) == 1 {
			return disposedError
		}

		n = rb.nodes[pos&rb.mask]
		seq := atomic.LoadUint64(&n.position)
		switch dif := int64(seq) - int64(pos); { // signed: a uint64 dif could never be < 0
		case dif == 0:
			if atomic.CompareAndSwapUint64(&rb.queue, pos, pos+1) {
				break L
			}
		case dif < 0:
			panic(`Ring buffer in a compromised state during a put operation.`)
		default:
			pos = atomic.LoadUint64(&rb.queue)
		}

		if i == 10000 {
			runtime.Gosched() // free up the cpu before the next iteration
			i = 0
		} else {
			i++
		}
	}

	n.data = item
	atomic.StoreUint64(&n.position, pos+1)
	return nil
}
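Get and Put together form a Vyukov-style bounded MPMC queue: every slot carries a position (sequence) word, a producer claims a slot when position == pos, a consumer when position == pos+1, and each side hands the slot back to the other by storing a new position after copying the data. The pos&rb.mask indexing only works for power-of-two capacities; a constructor sketch consistent with both methods (hypothetical, with assumed field types):

// Hypothetical constructor for the RingBuffer used by Get and Put above.
func newRingBuffer(size uint64) *RingBuffer {
	// size must be a power of two so that pos&rb.mask wraps correctly.
	rb := &RingBuffer{
		nodes: make([]*node, size),
		mask:  size - 1,
	}
	for i := uint64(0); i < size; i++ {
		// Each slot starts empty, ready for the producer at position i.
		rb.nodes[i] = &node{position: i}
	}
	return rb
}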
Code Example #25
File: counter.go Project: lyuyun/loggregator
func (c *Counter) Reset() {
	c.lock.Lock()
	defer c.lock.Unlock()

	atomic.AddUint64(&c.total, atomic.LoadUint64(&c.value))
	atomic.StoreUint64(&c.value, 0)
}
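Reset takes the lock so the add-then-zero pair cannot interleave with another Reset, while readers can presumably stay lock-free; a sketch of such accessors (hypothetical, not shown in loggregator):

// Hypothetical lock-free read side to pair with Reset above.
func (c *Counter) Value() uint64 {
	return atomic.LoadUint64(&c.value)
}

func (c *Counter) Total() uint64 {
	return atomic.LoadUint64(&c.total)
}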
Code Example #26
File: gauges.go Project: Netflix/rend
// SetFloatGauge sets a gauge by the ID returned from AddFloatGauge to the value given.
func SetFloatGauge(id uint32, value float64) {
	// The float64 value needs to be converted into an int64 here because
	// there is no atomic store for float values. This is a literal
	// reinterpretation of the same exact bits.
	v2 := math.Float64bits(value)
	atomic.StoreUint64(&floatgauges[id], v2)
}
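The read side reverses the reinterpretation with math.Float64frombits; a sketch (GetFloatGauge is a hypothetical name, not necessarily rend's API):

// Hypothetical read side: load the bits atomically, then reinterpret
// them back into the original float64.
func GetFloatGauge(id uint32) float64 {
	return math.Float64frombits(atomic.LoadUint64(&floatgauges[id]))
}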
Code Example #27
// Enqueue adds a new element to the tail of the ring buffer.
// It returns true if the operation is successful, false otherwise.
// On a full queue it returns false rather than blocking.
func (rb *RingBuffer) Enqueue(data interface{}) bool {
	var cell *ring_cell
	pos := atomic.LoadUint64(&rb.enqueue_pos_)
	i := 0
Loop:
	for {
		cell = rb.buffer_[pos&rb.buffer_mask_]
		seq := atomic.LoadUint64(&cell.sequence_)
		switch dif := int64(seq) - int64(pos); { // signed so the full (< 0) case can fire
		case dif == 0:
			if atomic.CompareAndSwapUint64(&rb.enqueue_pos_, pos, pos+1) {
				break Loop
			}
		case dif < 0:
			return false
		default:
			pos = atomic.LoadUint64(&rb.enqueue_pos_)
		}
		// freeup the cpu
		if i >= freeup_threshold {
			runtime.Gosched()
			i = 0
		} else {
			i++
		}
	}

	cell.data_ = data
	atomic.StoreUint64(&cell.sequence_, pos+1)
	return true
}
Code Example #28
File: database.go Project: upper/db
func newTxID() uint64 {
	if atomic.LoadUint64(&lastTxID) == math.MaxUint64 {
		atomic.StoreUint64(&lastTxID, 0)
		return 0
	}
	return atomic.AddUint64(&lastTxID, 1)
}
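The load and the AddUint64 are two separate atomic operations, so two goroutines can both pass the MaxUint64 check before either stores the zero. A compare-and-swap loop closes that window; a sketch with the same wraparound behavior (not upper/db's code):

// Hypothetical race-free variant: uint64 arithmetic wraps old+1 to 0
// at math.MaxUint64, and CAS installs the increment in one atomic step.
func newTxIDCAS() uint64 {
	for {
		old := atomic.LoadUint64(&lastTxID)
		next := old + 1
		if atomic.CompareAndSwapUint64(&lastTxID, old, next) {
			return next
		}
	}
}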
Code Example #29
File: cache.go Project: li-ang/influxdb
// Snapshot will take a snapshot of the current cache, add it to the slice of caches that
// are being flushed, and reset the current cache with new values
func (c *Cache) Snapshot() (*Cache, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.snapshotting {
		return nil, ErrSnapshotInProgress
	}

	c.snapshotting = true
	c.snapshotAttempts++ // increment the number of times we tried to do this

	// If no snapshot exists, create a new one, otherwise update the existing snapshot
	if c.snapshot == nil {
		store, err := newring(ringShards)
		if err != nil {
			return nil, err
		}

		c.snapshot = &Cache{
			store: store,
		}
	}

	// Append the current cache values to the snapshot. Because we're accessing
	// the Cache we need to call f on each partition in serial.
	if err := c.store.applySerial(func(k string, e *entry) error {
		e.mu.RLock()
		defer e.mu.RUnlock()
		snapshotEntry, ok := c.snapshot.store.entry(k)
		if ok {
			if err := snapshotEntry.add(e.values); err != nil {
				return err
			}
		} else {
			c.snapshot.store.add(k, e)
			snapshotEntry = e
		}
		c.snapshotSize += uint64(Values(e.values).Size())
		if e.needSort {
			snapshotEntry.needSort = true
		}
		return nil
	}); err != nil {
		return nil, err
	}

	snapshotSize := c.Size() // record the number of bytes written into a snapshot

	// Reset the cache's store.
	c.store.reset()
	atomic.StoreUint64(&c.size, 0)
	c.lastSnapshot = time.Now()

	c.updateMemSize(-int64(snapshotSize)) // decrement the number of bytes in cache
	c.updateCachedBytes(snapshotSize)     // increment the number of bytes added to the snapshot
	c.updateSnapshots()

	return c.snapshot, nil
}
Code Example #30
File: control.go Project: tnextday/seaweedfs
func (c *controlConn) reconnect(refreshring bool) {
	if !atomic.CompareAndSwapUint64(&c.connecting, 0, 1) {
		return
	}

	success := false
	defer func() {
		// debounce reconnect a little
		if success {
			go func() {
				time.Sleep(500 * time.Millisecond)
				atomic.StoreUint64(&c.connecting, 0)
			}()
		} else {
			atomic.StoreUint64(&c.connecting, 0)
		}
	}()

	oldConn := c.conn.Load().(*Conn)

	// TODO: should have our own round-robin for hosts so that we can try each
	// in succession and guarantee that we get a different host each time.
	host, conn := c.session.pool.Pick(nil)
	if conn == nil {
		return
	}

	newConn, err := Connect(conn.addr, conn.cfg, c, c.session)
	if err != nil {
		host.Mark(err)
		// TODO: add log handler for things like this
		return
	}

	host.Mark(nil)
	c.conn.Store(newConn)
	success = true

	if oldConn != nil {
		oldConn.Close()
	}

	if refreshring && c.session.cfg.DiscoverHosts {
		c.session.hostSource.refreshRing()
	}
}