// RLock read-locks a resource for a thread.
// Uses a double-check pattern: publish the read intent, re-check the writer,
// and back off and retry if a writer holds the resource.
func (t *TControl) RLock(threadId uint16, resursId string) {
	var wlock *string
	for {
		wlock = (*string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&t.writer))))
		if wlock == nil || *wlock != resursId {
			atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&t.readers[threadId])), unsafe.Pointer(&resursId))
			wlock = (*string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&t.writer))))
			if wlock == nil || *wlock != resursId {
				return
			}
			atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&t.readers[threadId])), nil)
		}
		t.sleep()
	}
}
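// For symmetry, a minimal sketch of the matching unlock: clearing the reader
// slot lets a blocked writer proceed. RUnlock is a hypothetical name and is
// not part of the snippet above.
func (t *TControl) RUnlock(threadId uint16) {
	// Publishing nil withdraws this thread's read claim.
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&t.readers[threadId])), nil)
}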
func (b *Bucket) Refresh() error {
	pool := b.pool
	tmpb := &Bucket{}
	err := pool.client.parseURLResponse(b.URI, tmpb)
	if err != nil {
		return err
	}
	newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList))
	for i := range newcps {
		if b.ah != nil {
			newcps[i] = newConnectionPool(
				tmpb.VBSMJson.ServerList[i],
				b.ah, PoolSize, PoolOverflow)
		} else {
			newcps[i] = newConnectionPool(
				tmpb.VBSMJson.ServerList[i],
				b.authHandler(), PoolSize, PoolOverflow)
		}
	}
	b.replaceConnPools(newcps)
	tmpb.ah = b.ah
	atomic.StorePointer(&b.vBucketServerMap, unsafe.Pointer(&tmpb.VBSMJson))
	atomic.StorePointer(&b.nodeList, unsafe.Pointer(&tmpb.NodesJSON))
	return nil
}
func (p *partitionstore) visitItems(start []byte, withValue bool,
	visitor func(*item) bool) (err error) {
	keys, changes := p.colls()
	var vErr error
	v := func(kItem *gkvlite.Item) bool {
		i := (*item)(atomic.LoadPointer(&kItem.Transient))
		if i != nil {
			return visitor(i)
		}
		var cItem *gkvlite.Item
		cItem, vErr = changes.GetItem(kItem.Val, true)
		if vErr != nil {
			return false
		}
		if cItem == nil {
			return true // TODO: track this case; might have been compacted away.
		}
		i = (*item)(atomic.LoadPointer(&cItem.Transient))
		if i != nil {
			atomic.StorePointer(&kItem.Transient, unsafe.Pointer(i))
			return visitor(i)
		}
		i = &item{key: kItem.Key}
		if vErr = i.fromValueBytes(cItem.Val); vErr != nil {
			return false
		}
		atomic.StorePointer(&cItem.Transient, unsafe.Pointer(i))
		atomic.StorePointer(&kItem.Transient, unsafe.Pointer(i))
		return visitor(i)
	}
	if err := p.visit(keys, start, true, v); err != nil {
		return err
	}
	return vErr
}
// Set background error.
func (d *DB) seterr(err error) {
	if err == nil {
		atomic.StorePointer(&d.err, nil)
	} else {
		atomic.StorePointer(&d.err, unsafe.Pointer(&errWrap{err}))
	}
}
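// A minimal load-side sketch pairing with seterr, assuming d.err is an
// unsafe.Pointer and errWrap carries the error in a field named err; geterr
// and that field name are assumptions, not from the source.
func (d *DB) geterr() error {
	if p := (*errWrap)(atomic.LoadPointer(&d.err)); p != nil {
		return p.err
	}
	return nil
}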
func (nloc *nodeLoc) Copy(src *nodeLoc) *nodeLoc {
	if src == nil {
		return nloc.Copy(empty_nodeLoc)
	}
	atomic.StorePointer(&nloc.loc, unsafe.Pointer(src.Loc()))
	atomic.StorePointer(&nloc.node, unsafe.Pointer(src.Node()))
	return nloc
}
func (i *itemLoc) Copy(src *itemLoc) {
	if src == nil {
		i.Copy(empty_itemLoc)
		return
	}
	atomic.StorePointer(&i.loc, unsafe.Pointer(src.Loc()))
	atomic.StorePointer(&i.item, unsafe.Pointer(src.Item()))
}
func (p *partitionstore) collsPauseSwap(cb func() (keys, changes *gkvlite.Collection)) {
	p.lock.Lock()
	defer p.lock.Unlock()
	k, c := cb()
	// Update the changes first, so that readers see a key index that's older.
	atomic.StorePointer(&p.changes, unsafe.Pointer(c))
	atomic.StorePointer(&p.keys, unsafe.Pointer(k))
}
func TestAtomicPointer() {
	b1 := &B{
		B1: 3,
		B2: "33",
	}
	atomic.StorePointer(&bp, unsafe.Pointer(b1))
	fmt.Println(*((*B)(bp))) // {3 33}

	// atomic.StorePointer(&unsafe.Pointer(b), unsafe.Pointer(b1))
	// yonka_test\test_json.go:1472: cannot take the address of unsafe.Pointer(b)

	b := &B{1, "11"}
	p := unsafe.Pointer(b)
	atomic.StorePointer(&p, unsafe.Pointer(b1))
	fmt.Println(*((*B)(p))) // {3 33}
	fmt.Println(*b)         // {1 11}
}
func (db *DB) Preparex(query string) (stmt Stmt, err error) {
	var m stmtCache
	if p := (*stmtCache)(atomic.LoadPointer(&db.stmtCachePtr)); p != nil {
		m = *p
		if stmt = m[query]; stmt.Stmt != nil {
			return
		}
	}
	db.stmtCachePtrMutex.Lock()
	defer db.stmtCachePtrMutex.Unlock()
	if p := (*stmtCache)(atomic.LoadPointer(&db.stmtCachePtr)); p != nil {
		m = *p
		if stmt = m[query]; stmt.Stmt != nil {
			return
		}
	}
	stmtx, err := db.DB.Preparex(query)
	if err != nil {
		return
	}
	stmt = Stmt{Stmt: stmtx}
	m2 := make(stmtCache, len(m)+1)
	for k, v := range m {
		m2[k] = v
	}
	m2[query] = stmt
	atomic.StorePointer(&db.stmtCachePtr, unsafe.Pointer(&m2))
	return
}
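// A usage sketch for the double-checked statement cache above: concurrent
// callers hit the lock-free fast path once a query has been prepared, and the
// mutex is only taken on a miss. The query text and the Get call target are
// illustrative assumptions (stmt.Stmt is presumed to be a *sqlx.Stmt).
func lookupUserID(db *DB, name string) (int64, error) {
	stmt, err := db.Preparex("SELECT id FROM users WHERE name = ?")
	if err != nil {
		return 0, err
	}
	var id int64
	err = stmt.Stmt.Get(&id, name)
	return id, err
}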
func (nloc *nodeLoc) write(o *Store) error {
	if nloc != nil && nloc.Loc().isEmpty() {
		node := nloc.Node()
		if node == nil {
			return nil
		}
		offset := atomic.LoadInt64(&o.size)
		length := ploc_length + ploc_length + ploc_length + 8 + 8
		b := make([]byte, length)
		pos := 0
		pos = node.item.Loc().write(b, pos)
		pos = node.left.Loc().write(b, pos)
		pos = node.right.Loc().write(b, pos)
		binary.BigEndian.PutUint64(b[pos:pos+8], node.numNodes)
		pos += 8
		binary.BigEndian.PutUint64(b[pos:pos+8], node.numBytes)
		pos += 8
		if pos != length {
			return fmt.Errorf("nodeLoc.write() pos: %v didn't match length: %v",
				pos, length)
		}
		if _, err := o.file.WriteAt(b, offset); err != nil {
			return err
		}
		atomic.StoreInt64(&o.size, offset+int64(length))
		atomic.StorePointer(&nloc.loc,
			unsafe.Pointer(&ploc{Offset: offset, Length: uint32(length)}))
	}
	return nil
}
func ParseConfig(cfg string) {
	if cfg == "" {
		log.Fatalln("config file not specified: use -c $filename")
	}
	if !file.IsExist(cfg) {
		log.Fatalln("config file specified not found:", cfg)
	}
	ConfigFile = cfg
	configContent, err := file.ToTrimString(cfg)
	if err != nil {
		log.Fatalln("read config file", cfg, "error:", err.Error())
	}
	var c GlobalConfig
	err = json.Unmarshal([]byte(configContent), &c)
	if err != nil {
		log.Fatalln("parse config file", cfg, "error:", err.Error())
	}
	if c.Migrate.Enabled && len(c.Migrate.Cluster) == 0 {
		c.Migrate.Enabled = false
	}
	// set config
	atomic.StorePointer(&ptr, unsafe.Pointer(&c))
	log.Println("g.ParseConfig ok, file", cfg)
}
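// A minimal read-side sketch, assuming ptr is the package-level unsafe.Pointer
// written by ParseConfig above; Config is a hypothetical accessor name.
func Config() *GlobalConfig {
	return (*GlobalConfig)(atomic.LoadPointer(&ptr))
}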
func ReadFirst(b []byte, v interface{}) (read bool, total int, err error) {
	var t unsafe.Pointer
	var r *[2]marshal.Reader
	// Load the cached reader atomically; a plain read would race with the
	// CompareAndSwap below.
	if t = atomic.LoadPointer(&readerCache); t != nil {
		if atomic.CompareAndSwapPointer(&readerCache, t, nil) {
			r = (*[2]marshal.Reader)(t)
			*r = [2]marshal.Reader{{Body: b}, {}}
			goto Got
		}
	}
	r = &[2]marshal.Reader{{Body: b}, {}}
Got:
	total = r[0].IntUint32()
	if total > 0 {
		sz := r[0].IntUint32()
		if r[0].Err == nil {
			r[1].Body = r[0].Slice(sz + 4)
			err = ReadRawTuple(&r[1], v)
			read = true
		} else {
			err = r[0].Err
		}
	}
	atomic.StorePointer(&readerCache, unsafe.Pointer(r))
	return
}
func (m *SharedMap) SetValue(key KeyType, value ValType) {
	m.writeLock.Lock()
	defer m.writeLock.Unlock()
	newCurrent := m.Copy()
	(*newCurrent)[key] = value
	atomic.StorePointer(&m.current, unsafe.Pointer(newCurrent))
}
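// A minimal lock-free reader sketch for the copy-on-write map above, assuming
// m.current is an unsafe.Pointer and m.Copy() returns a *map[KeyType]ValType;
// GetValue is a hypothetical name, not from the source.
func (m *SharedMap) GetValue(key KeyType) (ValType, bool) {
	current := (*map[KeyType]ValType)(atomic.LoadPointer(&m.current))
	v, ok := (*current)[key]
	return v, ok
}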
// reload re-reads the PKCS#12 keystore and atomically swaps in the parsed
// certificate.
func (c *certificate) reload() error {
	keystoreBytes, err := ioutil.ReadFile(c.keystorePath)
	if err != nil {
		return err
	}
	pemBlocks, err := pkcs12.ToPEM(keystoreBytes, c.keystorePass)
	if err != nil {
		return err
	}
	var pemBytes []byte
	for _, block := range pemBlocks {
		pemBytes = append(pemBytes, pem.EncodeToMemory(block)...)
	}
	certAndKey, err := tls.X509KeyPair(pemBytes, pemBytes)
	if err != nil {
		return err
	}
	certAndKey.Leaf, err = x509.ParseCertificate(certAndKey.Certificate[0])
	if err != nil {
		return err
	}
	atomic.StorePointer(&c.cached, unsafe.Pointer(&certAndKey))
	return nil
}
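// A minimal read-side sketch pairing with reload, assuming c.cached is an
// unsafe.Pointer to a tls.Certificate; getCertificate is a hypothetical
// helper shaped to fit tls.Config.GetCertificate, so a server can pick up a
// reloaded certificate without restarting.
func (c *certificate) getCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
	cert := (*tls.Certificate)(atomic.LoadPointer(&c.cached))
	if cert == nil {
		return nil, errors.New("certificate not loaded yet")
	}
	return cert, nil
}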
// disperse2 is a helper function for subdivide().
//
// It places all points in the tree in the appropriate quadrant,
// and clears the points of this tree.
func (q *LockfreeQuadtree) disperse2() {
	for {
		oldPoints := (*PointList)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&q.Points))))
		if oldPoints == nil || oldPoints.Length == 0 {
			break
		}
		newPoints := *oldPoints
		p := *newPoints.First.Point
		newPoints.First = newPoints.First.Next
		newPoints.Length--
		ok := atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&q.Points)),
			unsafe.Pointer(oldPoints), unsafe.Pointer(&newPoints))
		if !ok {
			continue
		}
		ok = q.Nw.Insert(&p) || q.Ne.Insert(&p) || q.Sw.Insert(&p) || q.Se.Insert(&p)
		// debug
		if !ok {
			panic("quadtree contained a point outside boundary")
		}
	}
	// No compare-and-swap needed here: the field must be nil now, and if someone
	// else set it first, setting it again doesn't hurt. The store does need to
	// be atomic, however; otherwise Query() might read a pointer that was
	// half-set to nil.
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&q.Points)), nil)
}
func (s *bucketstore) compactSwapFile(bsf *bucketstorefile, compactPath string) error {
	idx, ver, err := parseStoreFileName(filepath.Base(bsf.path), STORE_FILE_SUFFIX)
	if err != nil {
		return err
	}
	nextName := makeStoreFileName(idx, ver+1, STORE_FILE_SUFFIX)
	nextPath := path.Join(filepath.Dir(bsf.path), nextName)
	if err = os.Rename(compactPath, nextPath); err != nil {
		return err
	}
	nextFile, err := fileService.OpenFile(nextPath, os.O_RDWR|os.O_CREATE)
	if err != nil {
		return err
	}
	nextBSF := NewBucketStoreFile(nextPath, nextFile, bsf.stats)
	nextStore, err := gkvlite.NewStore(nextBSF)
	if err != nil {
		// TODO: Rollback the previous *.orig rename.
		return err
	}
	nextBSF.store = nextStore
	atomic.StorePointer(&s.bsf, unsafe.Pointer(nextBSF))
	bsf.apply(func() {
		bsf.purge = true // Mark the old file as purgable.
	})
	return nil
}
// newMem creates a new memdb and freezes the old one; needs external
// synchronization.
func (d *DB) newMem() (m *memdb.DB, err error) {
	s := d.s
	num := s.allocFileNum()
	w, err := newJournalWriter(s.getJournalFile(num))
	if err != nil {
		s.reuseFileNum(num)
		return
	}
	old := d.journal
	d.journal = w
	if old != nil {
		old.close()
		d.fjournal = old
	}
	d.fseq = d.seq
	m = memdb.New(s.cmp)
	mem := &memSet{cur: m}
	if old := d.getMem_NB(); old != nil {
		mem.froze = old.cur
	}
	atomic.StorePointer(&d.mem, unsafe.Pointer(mem))
	return
}
// Commit is used to finalize this transaction.
// This is a noop for read transactions.
func (txn *Txn) Commit() {
	// Noop for a read transaction
	if !txn.write {
		return
	}

	// Check if already aborted or committed
	if txn.rootTxn == nil {
		return
	}

	// Commit each sub-transaction scoped to (table, index)
	for key, subTxn := range txn.modified {
		path := indexPath(key.Table, key.Index)
		final := subTxn.Commit()
		txn.rootTxn.Insert(path, final)
	}

	// Update the root of the DB
	newRoot := txn.rootTxn.Commit()
	atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot))

	// Clear the txn
	txn.rootTxn = nil
	txn.modified = nil

	// Release the writer lock since this txn is now invalid
	txn.db.writer.Unlock()

	// Run the deferred functions, if any
	for i := len(txn.after); i > 0; i-- {
		fn := txn.after[i-1]
		fn()
	}
}
// Truncate drops all data in BoundedTable.
func (t *BoundedTable) Truncate() {
	// just reset everything.
	for i := int64(0); i < t.capacity; i++ {
		atomic.StorePointer(&t.records[i], unsafe.Pointer(nil))
	}
	t.cursor = 0
}
// NewRange initializes the range using the given metadata.
func NewRange(desc *proto.RangeDescriptor, rm rangeManager) (*Range, error) {
	r := &Range{
		rm:          rm,
		cmdQ:        NewCommandQueue(),
		tsCache:     NewTimestampCache(rm.Clock()),
		respCache:   NewResponseCache(desc.RaftID, rm.Engine()),
		pendingCmds: map[cmdIDKey]*pendingCmd{},
	}
	r.setDescWithoutProcessUpdate(desc)

	lastIndex, err := r.loadLastIndex()
	if err != nil {
		return nil, err
	}
	atomic.StoreUint64(&r.lastIndex, lastIndex)

	appliedIndex, err := r.loadAppliedIndex(r.rm.Engine())
	if err != nil {
		return nil, err
	}
	atomic.StoreUint64(&r.appliedIndex, appliedIndex)

	lease, err := loadLeaderLease(r.rm.Engine(), desc.RaftID)
	if err != nil {
		return nil, err
	}
	atomic.StorePointer(&r.lease, unsafe.Pointer(lease))

	if r.stats, err = newRangeStats(desc.RaftID, rm.Engine()); err != nil {
		return nil, err
	}
	return r, nil
}
// copyToGen returns a copy of this I-node copied to the given generation.
func (i *iNode) copyToGen(gen *generation, ctrie *ctrie) *iNode {
	nin := &iNode{gen: gen}
	main := gcasRead(i, ctrie)
	atomic.StorePointer(
		(*unsafe.Pointer)(unsafe.Pointer(&nin.main)), unsafe.Pointer(main))
	return nin
}
// Load loads DLL file d.Name into memory. It returns an error if it fails.
// Load will not try to load the DLL if it is already loaded into memory.
func (d *LazyDLL) Load() error {
	// Non-racy version of:
	// if d.dll != nil {
	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil {
		return nil
	}
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.dll != nil {
		return nil
	}

	// kernel32.dll is special, since it's where LoadLibraryEx comes from.
	// The kernel already special-cases its name, so it's always
	// loaded from system32.
	var dll *DLL
	var err error
	if d.Name == "kernel32.dll" {
		dll, err = LoadDLL(d.Name)
	} else {
		dll, err = loadLibraryEx(d.Name, d.System)
	}
	if err != nil {
		return err
	}

	// Non-racy version of:
	// d.dll = dll
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll))
	return nil
}
func (v *VBucket) setVBMeta(newMeta *VBMeta) (err error) {
	// This should only be called when holding the bucketstore
	// service/apply "lock", to ensure a Flush between changes stream
	// update and COLL_VBMETA update is atomic.
	var j []byte
	j, err = json.Marshal(newMeta)
	if err != nil {
		return err
	}
	k := []byte(fmt.Sprintf("%d", v.vbid))
	i := &item{
		key:  nil, // A nil key means it's a VBMeta change.
		cas:  newMeta.MetaCas,
		data: j,
	}
	deltaItemBytes, err := v.ps.set(i, nil)
	if err != nil {
		return err
	}
	if err = v.bs.collMeta(COLL_VBMETA).Set(k, j); err != nil {
		return err
	}
	atomic.StorePointer(&v.meta, unsafe.Pointer(newMeta))
	atomic.AddInt64(&v.stats.ItemBytes, deltaItemBytes)
	atomic.AddInt64(v.bucketItemBytes, deltaItemBytes)
	return nil
}
// updateTicket fetches a fresh jsapi_ticket from the WeChat server, stores it
// in the cache, and returns it.
func (srv *DefaultTicketServer) updateTicket(currentTicket string) (ticket *jsapiTicket, cached bool, err error) {
	if currentTicket != "" {
		if p := (*jsapiTicket)(atomic.LoadPointer(&srv.ticketCache)); p != nil && currentTicket != p.Ticket {
			// No need to adjust p.ExpiresIn; it is not used when cached == true.
			return p, true, nil
		}
	}

	var incompleteURL = "https://api.weixin.qq.com/cgi-bin/ticket/getticket?type=jsapi&access_token="
	var result struct {
		core.Error
		jsapiTicket
	}
	if err = srv.coreClient.GetJSON(incompleteURL, &result); err != nil {
		atomic.StorePointer(&srv.ticketCache, nil)
		return
	}
	if result.ErrCode != core.ErrCodeOK {
		atomic.StorePointer(&srv.ticketCache, nil)
		err = &result.Error
		return
	}

	// Because of network latency, leave a buffer before the jsapi_ticket expiry.
	switch {
	case result.ExpiresIn > 31556952: // 60*60*24*365.2425
		atomic.StorePointer(&srv.ticketCache, nil)
		err = errors.New("expires_in too large: " + strconv.FormatInt(result.ExpiresIn, 10))
		return
	case result.ExpiresIn > 60*60:
		result.ExpiresIn -= 60 * 10
	case result.ExpiresIn > 60*30:
		result.ExpiresIn -= 60 * 5
	case result.ExpiresIn > 60*5:
		result.ExpiresIn -= 60
	case result.ExpiresIn > 60:
		result.ExpiresIn -= 10
	default:
		atomic.StorePointer(&srv.ticketCache, nil)
		err = errors.New("expires_in too small: " + strconv.FormatInt(result.ExpiresIn, 10))
		return
	}

	ticketCopy := result.jsapiTicket
	atomic.StorePointer(&srv.ticketCache, unsafe.Pointer(&ticketCopy))
	ticket = &ticketCopy
	return
}
func (s *Server) updateNow() {
	tc := calculateTimeCounter(true)
	// write atomically
	atomic.StorePointer(&s.tcPool, unsafe.Pointer(&tc))
	if log.V(4) {
		log.Infoln("updateTimeCounterThread", len(tc))
	}
}
// UnmarshalJSON unmarshals the JSON representation of the root node's file
// location.
func (t *Collection) UnmarshalJSON(d []byte) error {
	p := ploc{}
	if err := json.Unmarshal(d, &p); err != nil {
		return err
	}
	atomic.StorePointer(&t.root, unsafe.Pointer(&nodeLoc{loc: unsafe.Pointer(&p)}))
	return nil
}
// NewDistSender returns a batch.Sender instance which connects to the
// Cockroach cluster via the supplied gossip instance. Supplying a
// DistSenderContext or the fields within is optional. For omitted values, sane
// defaults will be used.
func NewDistSender(ctx *DistSenderContext, gossip *gossip.Gossip) *DistSender {
	if ctx == nil {
		ctx = &DistSenderContext{}
	}
	clock := ctx.Clock
	if clock == nil {
		clock = hlc.NewClock(hlc.UnixNano)
	}
	ds := &DistSender{
		clock:  clock,
		gossip: gossip,
	}
	if ctx.nodeDescriptor != nil {
		atomic.StorePointer(&ds.nodeDescriptor, unsafe.Pointer(ctx.nodeDescriptor))
	}
	rcSize := ctx.RangeDescriptorCacheSize
	if rcSize <= 0 {
		rcSize = defaultRangeDescriptorCacheSize
	}
	rdb := ctx.RangeDescriptorDB
	if rdb == nil {
		rdb = ds
	}
	ds.rangeCache = newRangeDescriptorCache(rdb, int(rcSize))
	lcSize := ctx.LeaderCacheSize
	if lcSize <= 0 {
		lcSize = defaultLeaderCacheSize
	}
	ds.leaderCache = newLeaderCache(int(lcSize))
	if ctx.RangeLookupMaxRanges <= 0 {
		ds.rangeLookupMaxRanges = defaultRangeLookupMaxRanges
	}
	if ctx.TransportFactory != nil {
		ds.transportFactory = ctx.TransportFactory
	}
	ds.rpcRetryOptions = base.DefaultRetryOptions()
	if ctx.RPCRetryOptions != nil {
		ds.rpcRetryOptions = *ctx.RPCRetryOptions
	}
	if ctx.RPCContext != nil {
		ds.rpcContext = ctx.RPCContext
		if ds.rpcRetryOptions.Closer == nil {
			ds.rpcRetryOptions.Closer = ds.rpcContext.Stopper.ShouldDrain()
		}
	}
	if ctx.Tracer != nil {
		ds.Tracer = ctx.Tracer
	} else {
		ds.Tracer = tracing.NewTracer()
	}
	if ctx.SendNextTimeout != 0 {
		ds.sendNextTimeout = ctx.SendNextTimeout
	} else {
		ds.sendNextTimeout = defaultSendNextTimeout
	}
	return ds
}
func (s *Server) reloadConf() error {
	cfg, err := readConfig(s.conffile)
	if err != nil {
		return err
	}
	atomic.StorePointer(&s.conf, unsafe.Pointer(cfg))
	log.Printf("reload config file ok")
	return nil
}
// SetServiceRoots atomically updates the service_roots field, so that
// service_roots can be replaced without disrupting any GET or PUT operations
// that might already be in progress.
func (this *KeepClient) SetServiceRoots(svc []string) {
	// Must be sorted for ShuffledServiceRoots() to produce consistent results.
	roots := make([]string, len(svc))
	copy(roots, svc)
	sort.Strings(roots)
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&this.service_roots)),
		unsafe.Pointer(&roots))
}
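// A minimal read-side sketch, assuming service_roots is published as a
// *[]string via the store above; getServiceRoots is a hypothetical helper.
func (this *KeepClient) getServiceRoots() []string {
	p := (*[]string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&this.service_roots))))
	if p == nil {
		return nil
	}
	return *p
}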
func (s *Stack) tryPushExchanger(v interface{}) bool {
	// TODO: time out
	slotptr := &s.exchangers[rand.Intn(nExchangers)]
	for i := 0; i < 100; i++ {
		oldSlot := atomic.LoadPointer(slotptr)
		switch (*exchanger)(oldSlot).state {
		case stateFree:
			newSlot := unsafe.Pointer(&exchanger{
				value: v,
				state: statePushing,
			})
			if !atomic.CompareAndSwapPointer(slotptr, oldSlot, newSlot) {
				break
			}
			for j := 0; j < 100; j++ {
				currentSlot := atomic.LoadPointer(slotptr)
				if (*exchanger)(currentSlot).state != stateExchanging {
					continue
				}
				atomic.StorePointer(slotptr, unsafe.Pointer(newExchanger()))
				return true
			}
			if !atomic.CompareAndSwapPointer(slotptr, newSlot, oldSlot) {
				atomic.StorePointer(slotptr, unsafe.Pointer(newExchanger()))
				return true
			}
			return false
		case statePushing:
			return false
		case statePoping:
			newSlot := unsafe.Pointer(&exchanger{
				value: v,
				state: stateExchanging,
			})
			if !atomic.CompareAndSwapPointer(slotptr, oldSlot, newSlot) {
				break
			}
			return true
		case stateExchanging:
			return false
		}
	}
	return false
}