func (node *Node) addChar(c *byte, w *wrk) *Node {
	// Convert before subtracting so out-of-range bytes go negative instead of
	// wrapping around (byte arithmetic would make the < 0 check dead code).
	*w.i = int64(*c) - 'a'
	if *w.i < 0 || *w.i > 25 {
		return node
	}
	if w.tmp = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&node.ptrs[*w.i]))); w.tmp == nil {
		// If this CAS loses, another goroutine installed the child first;
		// fall through and use whichever node won.
		atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&node.ptrs[*w.i])), w.tmp, unsafe.Pointer(newNode()))
		w.mn, w.mx = atomic.LoadInt64(&node.minIdx), atomic.LoadInt64(&node.maxIdx)
		for {
			switch {
			case w.mn > *w.i:
				if !atomic.CompareAndSwapInt64(&node.minIdx, w.mn, *w.i) {
					w.mn = atomic.LoadInt64(&node.minIdx)
				} else {
					w.mn = *w.i
				}
			case w.mx < *w.i:
				if !atomic.CompareAndSwapInt64(&node.maxIdx, w.mx, *w.i) {
					w.mx = atomic.LoadInt64(&node.maxIdx)
				} else {
					w.mx = *w.i
				}
			default:
				// Reload atomically: the plain read would race with the CAS above.
				return (*Node)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&node.ptrs[*w.i]))))
			}
		}
	}
	// The child already existed; w.tmp holds the pointer we loaded atomically.
	return (*Node)(w.tmp)
}
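// A hedged sketch of the layout implied by addChar above. The field types are
// inferred from usage; newNode, its sentinel values, and the wrk scratch
// struct are reconstructions, not the original definitions.
package trie // hypothetical package name

import "unsafe"

type Node struct {
	ptrs   [26]*Node // one child slot per letter 'a'..'z', installed by CAS
	minIdx int64     // smallest occupied child index, lowered by CAS
	maxIdx int64     // largest occupied child index, raised by CAS
}

type wrk struct {
	i      *int64         // scratch child index
	tmp    unsafe.Pointer // scratch child pointer
	mn, mx int64          // scratch min/max snapshots
}

// newNode starts minIdx above and maxIdx below every valid index, so the
// first insertion's CAS in addChar always applies (assumed sentinels).
func newNode() *Node {
	return &Node{minIdx: 26, maxIdx: -1}
}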
// GetTok requests a token; it returns true if a token was obtained, false otherwise.
func (tbq *TBucketQ) GetTok() bool {
	// Attempt to obtain a token from the bucket.
	for {
		if toks := atomic.LoadInt64(&tbq.tokens); toks > 0 {
			if atomic.CompareAndSwapInt64(&tbq.tokens, toks, toks-1) {
				return true
			}
			continue
		}
		break
	}
	// No tokens in the bucket; attempt to get on the queue.
	var done bool
	for !done {
		if qcnt := atomic.LoadInt64(&tbq.qcnt); qcnt < tbq.maxq {
			done = atomic.CompareAndSwapInt64(&tbq.qcnt, qcnt, qcnt+1)
		} else {
			// Queue is full.
			return false
		}
	}
	// On the queue; wait until a token is received.
	<-tbq.qch
	return true
}
// Decrement decrements the value of c by 1.
func (c *Counter) Decrement() {
	old := atomic.LoadInt64(&c.v)
	swapped := atomic.CompareAndSwapInt64(&c.v, old, old-1)
	for !swapped {
		old = atomic.LoadInt64(&c.v)
		swapped = atomic.CompareAndSwapInt64(&c.v, old, old-1)
	}
}
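// Self-contained usage sketch for Decrement above. The Counter layout (a
// single int64 field v) is an assumption based on how the method uses it.
// For a plain decrement, atomic.AddInt64(&c.v, -1) is equivalent and simpler;
// the CAS loop earns its keep only when the new value is a non-additive
// function of the old one.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type Counter struct{ v int64 }

func (c *Counter) Decrement() {
	for {
		old := atomic.LoadInt64(&c.v)
		if atomic.CompareAndSwapInt64(&c.v, old, old-1) {
			return
		}
	}
}

func main() {
	var c Counter
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Decrement()
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&c.v)) // always -100
}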
// InsertRows inserts rows into the partition.
func (p *MemoryPartition) InsertRows(rows []partition.Row) error {
	if p.readOnly {
		return errors.New("partition/memory: read only")
	}
	// Guard against empty input so the min/max CAS loops below don't clamp
	// the partition bounds to the zero values of minTS and maxTS.
	if len(rows) == 0 {
		return nil
	}
	if p.wal != nil {
		_, err := p.wal.Append(wal.WALEntry{
			Operation: wal.OperationInsert,
			Rows:      rows,
		})
		if err != nil {
			return err
		}
	}
	var (
		minTS int64
		maxTS int64
	)
	for i, row := range rows {
		if i == 0 {
			minTS = row.Timestamp
			maxTS = row.Timestamp
		}
		if row.Timestamp < minTS {
			minTS = row.Timestamp
		}
		if row.Timestamp > maxTS {
			maxTS = row.Timestamp
		}
		source := p.getOrCreateSource(row.Source)
		metric := source.getOrCreateMetric(row.Metric)
		metric.insertPoints([]partition.Point{row.Point})
	}
	// Lower the partition's minimum timestamp if this batch went below it.
	for min := atomic.LoadInt64(&p.minTS); min > minTS; min = atomic.LoadInt64(&p.minTS) {
		if atomic.CompareAndSwapInt64(&p.minTS, min, minTS) {
			break
		}
	}
	// Raise the partition's maximum timestamp if this batch went above it.
	for max := atomic.LoadInt64(&p.maxTS); max < maxTS; max = atomic.LoadInt64(&p.maxTS) {
		if atomic.CompareAndSwapInt64(&p.maxTS, max, maxTS) {
			break
		}
	}
	return nil
}
// tick grants tokens to requests waiting in the queue, or adds tokens to the
// bucket, each time the ticker goes off.
func (tbq *TBucketQ) tick() {
	for {
		select {
		case <-tbq.cch:
			// Close event received; stop the timer and return.
			tbq.ticker.Stop()
			return
		case <-tbq.prch:
			// Pause event received; listen for close or resume events.
			select {
			case <-tbq.cch:
				// Close event received; stop the timer and return.
				tbq.ticker.Stop()
				return
			case <-tbq.prch:
				// Resume event received.
			}
		case <-tbq.ticker.C:
			// Hand tokens to any requests waiting on the queue channel first.
			burst := tbq.burst
			if atomic.LoadInt64(&tbq.qcnt) > 0 {
				for i := int64(0); i < tbq.burst; i++ {
					if qcnt := atomic.LoadInt64(&tbq.qcnt); qcnt > 0 {
						tbq.qch <- struct{}{}
						atomic.AddInt64(&tbq.qcnt, -1)
						burst--
					} else {
						// No waiters left; stop handing out tokens.
						break
					}
				}
			}
			if burst == 0 {
				continue
			}
			// No requests remaining in the queue; attempt to add the leftover
			// token(s) to the bucket.
			var done bool
			for !done {
				// Add token(s) to the bucket if it is not already full.
				if toks := atomic.LoadInt64(&tbq.tokens); toks < tbq.bsize {
					if toks+burst >= tbq.bsize {
						// Adding the full burst would overflow; fill to capacity.
						done = atomic.CompareAndSwapInt64(&tbq.tokens, toks, tbq.bsize)
					} else {
						done = atomic.CompareAndSwapInt64(&tbq.tokens, toks, toks+burst)
					}
				} else {
					// Bucket is full; throw the token(s) away.
					done = true
				}
			}
		}
	}
}
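// A hedged sketch of the TBucketQ layout implied by GetTok and tick above.
// Field types are inferred from usage; names match the methods, but the real
// struct and its constructor are not shown in the original.
package tbq // hypothetical package name

import "time"

type TBucketQ struct {
	tokens int64         // tokens currently in the bucket (CAS-updated)
	bsize  int64         // bucket capacity
	burst  int64         // tokens granted per tick
	qcnt   int64         // requests currently waiting (CAS-updated)
	maxq   int64         // queue capacity
	qch    chan struct{} // waiting requests block on this channel in GetTok
	cch    chan struct{} // close signal observed by tick
	prch   chan struct{} // pause/resume signal observed by tick
	ticker *time.Ticker  // drives token grants in tick
}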
// Take attempts to take n tokens out of the bucket.
// If tokens == 0, nothing will be taken.
// If n <= tokens, n tokens will be taken.
// If n > tokens, all tokens will be taken.
//
// This method is thread-safe.
func (b *Bucket) Take(n int64) (taken int64) {
	for {
		if tokens := atomic.LoadInt64(&b.tokens); tokens == 0 {
			return 0
		} else if n <= tokens {
			if !atomic.CompareAndSwapInt64(&b.tokens, tokens, tokens-n) {
				continue
			}
			return n
		} else if atomic.CompareAndSwapInt64(&b.tokens, tokens, 0) {
			// Spill
			return tokens
		}
	}
}
// Put attempts to add n tokens to the bucket.
// If tokens == capacity, nothing will be added.
// If n <= capacity - tokens, n tokens will be added.
// If n > capacity - tokens, capacity - tokens will be added.
//
// This method is thread-safe.
func (b *Bucket) Put(n int64) (added int64) {
	for {
		if tokens := atomic.LoadInt64(&b.tokens); tokens == b.capacity {
			return 0
		} else if left := b.capacity - tokens; n <= left {
			if !atomic.CompareAndSwapInt64(&b.tokens, tokens, tokens+n) {
				continue
			}
			return n
		} else if atomic.CompareAndSwapInt64(&b.tokens, tokens, b.capacity) {
			return left
		}
	}
}
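// Self-contained usage sketch for Take and Put above. The Bucket layout and
// the NewBucket constructor are assumptions inferred from how the methods use
// tokens and capacity; Take and Put are repeated verbatim so this compiles.
package main

import (
	"fmt"
	"sync/atomic"
)

type Bucket struct {
	tokens   int64
	capacity int64
}

func NewBucket(capacity int64) *Bucket {
	return &Bucket{tokens: capacity, capacity: capacity}
}

func (b *Bucket) Take(n int64) (taken int64) {
	for {
		if tokens := atomic.LoadInt64(&b.tokens); tokens == 0 {
			return 0
		} else if n <= tokens {
			if !atomic.CompareAndSwapInt64(&b.tokens, tokens, tokens-n) {
				continue
			}
			return n
		} else if atomic.CompareAndSwapInt64(&b.tokens, tokens, 0) {
			return tokens
		}
	}
}

func (b *Bucket) Put(n int64) (added int64) {
	for {
		if tokens := atomic.LoadInt64(&b.tokens); tokens == b.capacity {
			return 0
		} else if left := b.capacity - tokens; n <= left {
			if !atomic.CompareAndSwapInt64(&b.tokens, tokens, tokens+n) {
				continue
			}
			return n
		} else if atomic.CompareAndSwapInt64(&b.tokens, tokens, b.capacity) {
			return left
		}
	}
}

func main() {
	b := NewBucket(10)
	fmt.Println(b.Take(4))  // 4
	fmt.Println(b.Take(20)) // 6: more than available, so it spills and takes all
	fmt.Println(b.Put(25))  // 10: only capacity - tokens are added
}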
// Run runs the worker. It waits for tasks on the channel; when it receives
// one, it tries to reserve it. If successful, it executes the work function
// and signals readiness after the function returns. If it cannot reserve the
// task, it forgets the task completely.
func (w *Worker) Run() {
	var t *Task
	yield := make(chan int)
	for {
		t = <-w.queue
		if t == nil {
			// Stop on a nil task.
			break
		}
		w.ntotal++
		if atomic.CompareAndSwapInt64(&t.workerId, 0, w.id) {
			go func() {
				t.work(yield)
				sendOK(t.ready)
			}()
			// Wait for the current task to yield.
			<-yield
			w.nexec++
			if t.next != nil {
				tn := t.next
				// Schedule the next task in the task chain.
				go func() {
					w.sched.Schedule(tn)
				}()
				t.next = nil
			}
		}
	}
	sendOK(w.sched.done)
}
// addAsync adds one async worker to the pool.
func (m *longTimeTaskOperate) addAsync() {
	m.Add(1)
	defer func() {
		if err := recover(); err != nil {
			Tlog.Warn("async task ["+m.taskname+"] exited abnormally", err)
			Tlog.Warn(string(debug.Stack()))
			atomic.AddInt64(&m.curAsyncPoolNum, -1)
		}
		//Tlog.Debug("async task ["+m.taskname+"] exited: remaining", m.curAsyncPoolNum)
		m.Done()
	}()
	//Tlog.Debug("async task ["+m.taskname+"] added: current count", m.curAsyncPoolNum)
	for {
		select {
		case task := <-m.asyncchan:
			m.execTask(task)
			runtime.Gosched()
		case <-time.After(m.asyncPoolIdelTime):
			// Load the pool size atomically; it is CASed below and mutated
			// concurrently by other workers.
			if poolnum := atomic.LoadInt64(&m.curAsyncPoolNum); poolnum > m.minAsyncPoolNum {
				if !atomic.CompareAndSwapInt64(&m.curAsyncPoolNum, poolnum, poolnum-1) {
					continue
				}
				//Tlog.Debug("async task [" + m.taskname + "] exiting while idle")
				return
			}
		}
	}
}
func (p *TSimpleServer) Stop() error {
	if atomic.CompareAndSwapInt64(&p.stopped, 0, 1) {
		p.quit <- struct{}{}
		p.serverTransport.Interrupt()
	}
	return nil
}
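// Stop above uses CAS as a run-once guard: only the caller that flips stopped
// from 0 to 1 performs shutdown, so concurrent or repeated Stop calls are
// safe. A self-contained sketch of the same pattern with hypothetical names:
package main

import (
	"fmt"
	"sync/atomic"
)

type server struct {
	stopped int64
	quit    chan struct{}
}

func (s *server) Stop() {
	if atomic.CompareAndSwapInt64(&s.stopped, 0, 1) {
		close(s.quit) // runs exactly once, no matter how many callers race
		fmt.Println("shut down")
	}
}

func main() {
	s := &server{quit: make(chan struct{})}
	s.Stop()
	s.Stop() // no-op: the CAS fails, so the body is skipped
}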
// Accepted checks whether a query at this moment should be accepted or
// rejected. If accepted, the EMA rate limiter updates its current EMA.
func (e *emaRateLimiter) Accepted() bool {
	now := time.Now().UnixNano()
	instWaiting := now - atomic.LoadInt64(&e.timeOfLastRequest)
	for {
		avgWaitingNs := atomic.LoadInt64(&e.avgWaitingNs)
		newavgWaitingNs := int64((1.-wq)*float64(avgWaitingNs) + wq*float64(instWaiting))
		// glog.V(3).Infof("avgWaitingNs %d newavgWaitingNs %d", avgWaitingNs, newavgWaitingNs)
		if newavgWaitingNs < e.targetWaitingNs {
			atomic.AddInt64(&e.requestThrottledCount, 1)
			return false
		}
		atomic.StoreInt64(&e.timeOfLastRequest, now)
		if !atomic.CompareAndSwapInt64(&e.avgWaitingNs, avgWaitingNs, newavgWaitingNs) {
			continue
		}
		if newavgWaitingNs < e.minWaitingNs {
			e.minWaitingNs = newavgWaitingNs
		}
		atomic.AddInt64(&e.requestAcceptedCount, 1)
		break
	}
	return true
}
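// Accepted above maintains an exponential moving average of inter-arrival
// times: avg' = (1-wq)*avg + wq*inst. The weight wq comes from elsewhere in
// the original package; the value below is illustrative only.
package main

import "fmt"

const wq = 0.1 // illustrative EMA weight; the real constant is not shown above

func emaUpdate(avgNs, instNs int64) int64 {
	return int64((1.-wq)*float64(avgNs) + wq*float64(instNs))
}

func main() {
	// A 2000ns gap pulls a 1000ns average up by wq of the difference.
	fmt.Println(emaUpdate(1000, 2000)) // 1100
}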
func Close() {
	rw.Lock()
	defer rw.Unlock()
	// Only the first caller may close the channel; closing an already-closed
	// channel panics, so gate close(c) on the CAS succeeding.
	if atomic.CompareAndSwapInt64(&flag, 0, 1) {
		close(c)
	}
}
func AtomicCompareAndSwapImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	atomicObj := Car(args)
	if !ObjectP(atomicObj) || ObjectType(atomicObj) != "Atomic" {
		err = ProcessError(fmt.Sprintf("atomic-compare-and-swap! expects an Atomic object but received %s.", ObjectType(atomicObj)), env)
		return
	}
	pointer := (*int64)(ObjectValue(atomicObj))

	oldObj := Cadr(args)
	if !IntegerP(oldObj) {
		err = ProcessError(fmt.Sprintf("atomic-compare-and-swap! expects an Integer as its second argument but received %s.", TypeName(TypeOf(oldObj))), env)
		return
	}

	newObj := Caddr(args)
	if !IntegerP(newObj) {
		err = ProcessError(fmt.Sprintf("atomic-compare-and-swap! expects an Integer as its third argument but received %s.", TypeName(TypeOf(newObj))), env)
		return
	}

	oldVal := IntegerValue(oldObj)
	newVal := IntegerValue(newObj)
	swapped := atomic.CompareAndSwapInt64(pointer, oldVal, newVal)
	return BooleanWithValue(swapped), nil
}
func (c *Client) internalConnLost(err error) {
	// Load the lost flag atomically; it is CASed from multiple goroutines.
	clost := atomic.LoadInt64(&c.lost)
	if clost == _LOSTING {
		WARN.Println(CLI, c.options.ClientID+":internalConnLost closing quit 1")
		return
	}
	if !atomic.CompareAndSwapInt64(&c.lost, clost, _LOSTING) {
		WARN.Println(CLI, c.options.ClientID+":internalConnLost closing quit 2")
		return
	}
	select {
	case <-c.stop:
		// Someone else has already closed the channel; must be an error.
	default:
		close(c.stop)
	}
	c.conn.Close()
	WARN.Println(CLI, c.options.ClientID+":internalConnLost close wait")
	c.workers.Wait()
	WARN.Println(CLI, c.options.ClientID+":internalConnLost wait end closed:", c.options.AutoReconnect)
	if c.IsConnected() {
		if c.options.OnConnectionLost != nil {
			go c.options.OnConnectionLost(c, err)
		}
		if c.options.AutoReconnect {
			DEBUG.Println(CLI, c.options.ClientID+":auto reconnect")
			go c.reconnect()
		} else {
			c.setConnected(false)
		}
	}
}
// AcquireCancellable tries to decrease the semaphore's value by n. It panics
// if n is negative. If the value is smaller than n, it waits until the value
// grows large enough. If the cancel channel becomes readable before that
// happens, the request is cancelled. Returns true if the semaphore was
// decreased, false if the operation was cancelled.
func (s *Semaphore) AcquireCancellable(n int, cancel <-chan struct{}) bool {
	if n < 0 {
		panic("Semaphore.AcquireCancellable called with negative decrement")
	}
	// Fast path: CAS the value down while it is large enough.
	v := atomic.LoadInt64(&s.value)
	for v >= int64(n) {
		if atomic.CompareAndSwapInt64(&s.value, v, v-int64(n)) {
			return true
		}
		v = atomic.LoadInt64(&s.value)
	}
	// Slow path: take the acquire "mutex" (a one-slot channel), or cancel.
	select {
	case <-cancel:
		return false
	case <-s.acquireMu:
	}
	defer func() { s.acquireMu <- struct{}{} }()
	v = atomic.AddInt64(&s.value, int64(-n))
	for v < 0 {
		select {
		case <-cancel:
			// Undo the reservation before giving up.
			atomic.AddInt64(&s.value, int64(n))
			return false
		case <-s.wake:
			v = atomic.LoadInt64(&s.value)
		}
	}
	return true
}
func (df *myDataFile) Read() (rsn int64, d Data, err error) {
	// Read and advance the read offset atomically.
	var offset int64
	for {
		offset = atomic.LoadInt64(&df.roffset)
		if atomic.CompareAndSwapInt64(&df.roffset, offset, offset+int64(df.dataLen)) {
			break
		}
	}
	// Read one data block.
	rsn = offset / int64(df.dataLen)
	bytes := make([]byte, df.dataLen)
	df.fmutex.RLock()
	defer df.fmutex.RUnlock()
	for {
		_, err = df.f.ReadAt(bytes, offset)
		if err != nil {
			if err == io.EOF {
				// The block has not been written yet; wait for a writer's signal.
				df.rcond.Wait()
				continue
			}
			return
		}
		d = bytes
		return
	}
}
func (df *myDataFile) Write(d Data) (wsn int64, err error) {
	// Read and advance the write offset atomically.
	var offset int64
	for {
		offset = atomic.LoadInt64(&df.woffset)
		if atomic.CompareAndSwapInt64(&df.woffset, offset, offset+int64(df.dataLen)) {
			break
		}
	}
	// Write one data block; wsn is the serial number of the block.
	wsn = offset / int64(df.dataLen)
	// Copy into a fixed-size buffer so every block is exactly dataLen bytes
	// (truncating long data, zero-padding short data) and blocks stay aligned.
	bytes := make([]byte, df.dataLen)
	copy(bytes, d)
	df.fmutex.Lock()
	defer df.fmutex.Unlock()
	// Write at the reserved offset; writing at the file's current position
	// could interleave blocks out of order under concurrent writers.
	_, err = df.f.WriteAt(bytes, offset)
	// Notify a waiting reader.
	df.rcond.Signal()
	return
}
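// A hedged sketch of the myDataFile layout implied by Read and Write above.
// Field types are inferred from usage; the Data type, the constructor, and
// the rcond initialization (a cond tied to the read lock, which makes the
// Wait in Read legal) are reconstructions, not the original code.
package datafile // hypothetical package name

import (
	"os"
	"sync"
)

type Data []byte

type myDataFile struct {
	f       *os.File     // underlying data file
	fmutex  sync.RWMutex // guards file I/O
	rcond   *sync.Cond   // readers wait here for blocks not yet written
	woffset int64        // next write offset, advanced by CAS in Write
	roffset int64        // next read offset, advanced by CAS in Read
	dataLen uint32       // fixed block size
}

func newMyDataFile(path string, dataLen uint32) (*myDataFile, error) {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	df := &myDataFile{f: f, dataLen: dataLen}
	df.rcond = sync.NewCond(df.fmutex.RLocker())
	return df, nil
}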
// Value returns the current value of the counter and resets it to zero.
func (c *counter) Value() (val int64) {
	val = atomic.LoadInt64(&c.val)
	for !atomic.CompareAndSwapInt64(&c.val, val, 0) {
		val = atomic.LoadInt64(&c.val)
	}
	return val
}
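// Value above is a read-and-reset: the CAS loop retries until it trades the
// current value for zero. atomic.SwapInt64 (available since Go 1.2) does the
// same thing in one call. A self-contained sketch of the equivalence (the
// counter layout is an assumption):
package main

import (
	"fmt"
	"sync/atomic"
)

type counter struct{ val int64 }

func (c *counter) Value() int64 {
	return atomic.SwapInt64(&c.val, 0) // same effect as the CAS loop above
}

func main() {
	c := &counter{}
	atomic.AddInt64(&c.val, 5)
	fmt.Println(c.Value()) // 5
	fmt.Println(c.Value()) // 0: already reset
}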
func (f *FakeFile) Seek(offset int64, whence int) (int64, error) {
	ncurrent, err := f.reader().Seek(offset, whence)
	// CAS-loop until the new position is published; this is equivalent to an
	// unconditional atomic.StoreInt64(&f.current, ncurrent).
	old := atomic.LoadInt64(&f.current)
	for !atomic.CompareAndSwapInt64(&f.current, old, ncurrent) {
		old = atomic.LoadInt64(&f.current)
	}
	return ncurrent, err
}
// CompareAndSwapWithTTL atomically compares the value at key to the
// old value. If it matches, it sets it to the new value and returns
// true. Otherwise, it returns false. If the key does not exist in the
// store, it returns false with no error. It ignores the ttl.
func (ms *MemStore) CompareAndSwapWithTTL(key string, old, new int64, _ time.Duration) (bool, error) {
	valP, ok := ms.get(key, false)
	if !ok {
		return false, nil
	}
	return atomic.CompareAndSwapInt64(valP, old, new), nil
}
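// Self-contained usage sketch for CompareAndSwapWithTTL above. The map-backed
// MemStore, its get method, and the constructor literal are assumptions; the
// original store's internals are not shown.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type MemStore struct {
	mu   sync.RWMutex
	vals map[string]*int64
}

func (ms *MemStore) get(key string, _ bool) (*int64, bool) {
	ms.mu.RLock()
	defer ms.mu.RUnlock()
	p, ok := ms.vals[key]
	return p, ok
}

func (ms *MemStore) CompareAndSwapWithTTL(key string, old, new int64, _ time.Duration) (bool, error) {
	valP, ok := ms.get(key, false)
	if !ok {
		return false, nil
	}
	return atomic.CompareAndSwapInt64(valP, old, new), nil
}

func main() {
	v := int64(1)
	ms := &MemStore{vals: map[string]*int64{"hits": &v}}
	ok, _ := ms.CompareAndSwapWithTTL("hits", 1, 2, time.Minute)
	fmt.Println(ok, atomic.LoadInt64(&v)) // true 2
	ok, _ = ms.CompareAndSwapWithTTL("hits", 1, 3, time.Minute)
	fmt.Println(ok) // false: the value is now 2, not 1
}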
func getLock(source int64) bool {
	for i := 0; i <= sm.config.LockRetryTimes; i++ {
		if atomic.CompareAndSwapInt64(&sm.state, 0, source) {
			return true
		}
		time.Sleep(sm.config.LockRetryInterval)
	}
	return false
}
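// getLock above is a bounded spin lock: state == 0 means free, and a nonzero
// source value identifies the holder. A self-contained sketch with the
// surrounding state machine (sm and its config) replaced by hypothetical
// locals, plus the matching release, which the original does not show:
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var state int64

func getLock(source int64, retries int, interval time.Duration) bool {
	for i := 0; i <= retries; i++ {
		if atomic.CompareAndSwapInt64(&state, 0, source) {
			return true
		}
		time.Sleep(interval)
	}
	return false
}

func releaseLock(source int64) bool {
	// Only the current holder (state == source) may free the lock.
	return atomic.CompareAndSwapInt64(&state, source, 0)
}

func main() {
	fmt.Println(getLock(7, 3, time.Millisecond)) // true: lock was free
	fmt.Println(getLock(9, 1, time.Millisecond)) // false: held by 7
	fmt.Println(releaseLock(7))                  // true
}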
func (c *ConsulBackend) AdvertiseActive(active bool) error {
	c.serviceLock.Lock()
	defer c.serviceLock.Unlock()

	// Vault is still bootstrapping
	if c.service == nil {
		return nil
	}

	// Save a cached copy of the active state: no way to query Core
	c.active = active

	// Ensure serial registration to the Consul agent. Allow for
	// concurrent calls to update active status while a single task
	// attempts, until successful, to update the Consul Agent.
	if !c.disableRegistration && atomic.CompareAndSwapInt64(&c.registrationLock, 0, 1) {
		defer atomic.CompareAndSwapInt64(&c.registrationLock, 1, 0)

		// Retry agent registration until successful
		for {
			c.service.Tags = serviceTags(c.active)
			agent := c.client.Agent()
			err := agent.ServiceRegister(c.service)
			if err == nil {
				// Success
				return nil
			}

			c.logger.Printf("[WARN] consul: service registration failed: %v", err)
			c.serviceLock.Unlock()
			time.Sleep(registrationRetryInterval)
			c.serviceLock.Lock()

			if !c.running {
				// Shutting down
				return err
			}
		}
	}

	// Successful concurrent update to active state
	return nil
}
// Close releases all resources on this subscriber.
func (s *Server) Close() {
	if atomic.CompareAndSwapInt64(&s.closed, 0, 1) {
		s.lck.Lock()
		defer s.lck.Unlock()
		close(s.closeSig)
		s.srv.Close()
		s.srv.CloseClientConnections()
	}
}
// updateLastLogTs advances lastLogTs to val, but never moves it backwards.
func (l *Logger) updateLastLogTs(val int64) {
	for {
		prev := atomic.LoadInt64(&l.lastLogTs)
		if val <= prev {
			return
		}
		if atomic.CompareAndSwapInt64(&l.lastLogTs, prev, val) {
			return
		}
	}
}
func (rl *rateLimiter) Check() bool {
	for {
		if v := atomic.LoadInt64(&rl.v); v > 0 {
			if atomic.CompareAndSwapInt64(&rl.v, v, v-1) {
				return true
			}
		} else {
			return false
		}
	}
}
func (m *Master) increaseMinWorkersTo(num int64) {
	x := atomic.LoadInt64(&m.minWorkers)
	for x < num {
		if atomic.CompareAndSwapInt64(&m.minWorkers, x, num) {
			return
		}
		x = atomic.LoadInt64(&m.minWorkers)
	}
}
// Update returns true if and only if 'current' is the highest value ever seen.
func (hwm *HighWaterMark) Update(current int64) bool {
	for {
		old := atomic.LoadInt64((*int64)(hwm))
		if current <= old {
			return false
		}
		if atomic.CompareAndSwapInt64((*int64)(hwm), old, current) {
			return true
		}
	}
}
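// Update above is the classic monotonic-maximum CAS loop; updateLastLogTs
// earlier in this section is the same pattern. A self-contained usage sketch
// (the int64-backed named type mirrors the snippet; values are illustrative):
package main

import (
	"fmt"
	"sync/atomic"
)

type HighWaterMark int64

func (hwm *HighWaterMark) Update(current int64) bool {
	for {
		old := atomic.LoadInt64((*int64)(hwm))
		if current <= old {
			return false
		}
		if atomic.CompareAndSwapInt64((*int64)(hwm), old, current) {
			return true
		}
	}
}

func main() {
	var hwm HighWaterMark
	fmt.Println(hwm.Update(5)) // true: new high
	fmt.Println(hwm.Update(3)) // false: below the mark
	fmt.Println(hwm.Update(9)) // true
}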
// inc advances the cursor position c[0] by one, wrapping with the mask in
// c[1] (which must be a power of two minus one), and returns the new position.
func (c *cursor) inc() int {
	for {
		v1 := atomic.LoadInt64(&c[0])
		v2 := (v1 + 1) & c[1]
		if atomic.CompareAndSwapInt64(&c[0], v1, v2) {
			return int(v2)
		}
	}
}
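// Self-contained usage sketch for inc above: with a mask of 7 the cursor
// cycles through 0..7. The two-element array layout mirrors the snippet; the
// initial values are illustrative.
package main

import (
	"fmt"
	"sync/atomic"
)

type cursor [2]int64

func (c *cursor) inc() int {
	for {
		v1 := atomic.LoadInt64(&c[0])
		v2 := (v1 + 1) & c[1]
		if atomic.CompareAndSwapInt64(&c[0], v1, v2) {
			return int(v2)
		}
	}
}

func main() {
	c := &cursor{7, 7}   // position 7, mask 7 (ring of size 8)
	fmt.Println(c.inc()) // 0: wrapped around
	fmt.Println(c.inc()) // 1
}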
// reuseFileNum reuses the given file number.
func (s *session) reuseFileNum(num int64) {
	for {
		// Load the next file number atomically; it is CASed below and
		// updated concurrently elsewhere.
		old, x := atomic.LoadInt64(&s.stNextFileNum), num
		if old != x+1 {
			// num is not the most recently allocated number; nothing to reuse.
			x = old
		}
		if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) {
			break
		}
	}
}
// get claims a free cursor (one whose first element holds the sentinel -1)
// by swapping in val, busy-spinning until a slot frees up.
func (s cursorSlice) get(val int) *cursor {
	v1 := int64(-1)
	v2 := int64(val)
	for {
		for i := range s {
			if atomic.CompareAndSwapInt64(&s[i][0], v1, v2) {
				return s[i]
			}
		}
	}
}
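// get above hands out cursors from a fixed pool, spinning until a slot whose
// first element holds the sentinel -1 can be claimed. A self-contained sketch
// with a matching put; put is an assumption, since the original only shows get:
package main

import (
	"fmt"
	"sync/atomic"
)

type cursor [2]int64

type cursorSlice []*cursor

func (s cursorSlice) get(val int) *cursor {
	v1 := int64(-1)
	v2 := int64(val)
	for {
		for i := range s {
			if atomic.CompareAndSwapInt64(&s[i][0], v1, v2) {
				return s[i]
			}
		}
	}
}

// put releases a cursor claimed by get by restoring the free sentinel.
func (s cursorSlice) put(c *cursor) {
	atomic.StoreInt64(&c[0], -1)
}

func main() {
	s := cursorSlice{{-1, 0}, {-1, 0}}
	c := s.get(42)
	fmt.Println(c[0]) // 42: slot claimed
	s.put(c)
	fmt.Println(c[0]) // -1: slot free again
}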