func (l Lease) removeFrom(b backend.Backend) { key := int64ToBytes(int64(l.ID)) b.BatchTx().Lock() b.BatchTx().UnsafeDelete(leaseBucketName, key) b.BatchTx().Unlock() }
func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) { ckey := backendClusterVersionKey() tx := be.BatchTx() tx.Lock() defer tx.Unlock() tx.UnsafePut(clusterBucketName, ckey, []byte(ver.String())) }
func mustCreateBackendBuckets(be backend.Backend) { tx := be.BatchTx() tx.Lock() defer tx.Unlock() tx.UnsafeCreateBucket(membersBucketName) tx.UnsafeCreateBucket(membersRemovedBuckedName) tx.UnsafeCreateBucket(clusterBucketName) }
func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) { mkey := backendMemberKey(id) tx := be.BatchTx() tx.Lock() tx.UnsafeDelete(membersBucketName, mkey) tx.UnsafePut(membersRemovedBuckedName, mkey, []byte("removed")) tx.Unlock() }
func mustSaveMemberToBackend(be backend.Backend, m *Member) { mkey := backendMemberKey(m.ID) mvalue, err := json.Marshal(m) if err != nil { plog.Panicf("marshal raftAttributes should never fail: %v", err) } tx := be.BatchTx() tx.Lock() tx.UnsafePut(membersBucketName, mkey, mvalue) tx.Unlock() }
func (l Lease) persistTo(b backend.Backend) { key := int64ToBytes(int64(l.ID)) lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.TTL)} val, err := lpb.Marshal() if err != nil { panic("failed to marshal lease proto item") } b.BatchTx().Lock() b.BatchTx().UnsafePut(leaseBucketName, key, val) b.BatchTx().Unlock() }
func WriteKV(be backend.Backend, kv mvccpb.KeyValue) { ibytes := newRevBytes() revToBytes(revision{main: kv.ModRevision}, ibytes) d, err := kv.Marshal() if err != nil { plog.Fatalf("cannot marshal event: %v", err) } be.BatchTx().Lock() be.BatchTx().UnsafePut(keyBucketName, ibytes, d) be.BatchTx().Unlock() }
func NewAuthStore(be backend.Backend) *authStore { tx := be.BatchTx() tx.Lock() tx.UnsafeCreateBucket(authBucketName) tx.UnsafeCreateBucket(authUsersBucketName) tx.UnsafeCreateBucket(authRolesBucketName) tx.Unlock() be.ForceCommit() return &authStore{ be: be, } }
func NewAuthStore(be backend.Backend) *authStore { tx := be.BatchTx() tx.Lock() tx.UnsafeCreateBucket(authBucketName) tx.UnsafeCreateBucket(authUsersBucketName) tx.UnsafeCreateBucket(authRolesBucketName) tx.Unlock() be.ForceCommit() return &authStore{ be: be, simpleTokens: make(map[string]string), } }
func (as *authStore) Recover(be backend.Backend) { enabled := false as.be = be tx := be.BatchTx() tx.Lock() _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) if len(vs) == 1 { if bytes.Equal(vs[0], authEnabled) { enabled = true } } tx.Unlock() as.enabledMu.Lock() as.enabled = enabled as.enabledMu.Unlock() }
// Restore points the store at a new backend and resets all in-memory
// state so it can be rebuilt from that backend's contents.
//
// NOTE(review): statement order matters — background work is stopped
// (stop channel closed, scheduler stopped) before any field is
// replaced, and restore() runs only after a fresh scheduler and stop
// channel are in place.
func (s *store) Restore(b backend.Backend) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Stop in-flight background work before swapping state.
	close(s.stopc)
	s.fifoSched.Stop()

	// Rebind to the new backend and reset revision bookkeeping.
	s.b = b
	s.kvindex = newTreeIndex()
	s.currentRev = revision{main: 1}
	s.compactMainRev = -1
	s.tx = b.BatchTx()
	s.txnID = -1
	s.fifoSched = schedule.NewFIFOScheduler()
	s.stopc = make(chan struct{})

	// Rebuild the index and revision state from the backend contents.
	return s.restore()
}
func UpdateConsistentIndex(be backend.Backend, index uint64) { tx := be.BatchTx() tx.Lock() defer tx.Unlock() var oldi uint64 _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0) if len(vs) != 0 { oldi = binary.BigEndian.Uint64(vs[0]) } if index <= oldi { return } bs := make([]byte, 8) binary.BigEndian.PutUint64(bs, index) tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs) }
func prepareBackend() backend.Backend { var be backend.Backend bch := make(chan struct{}) dbpath := path.Join(migrateDatadir, "member", "snap", "db") go func() { defer close(bch) be = backend.New(dbpath, time.Second, 10000) }() select { case <-bch: case <-time.After(time.Second): fmt.Fprintf(os.Stderr, "waiting for etcd to close and release its lock on %q\n", dbpath) <-bch } tx := be.BatchTx() tx.Lock() tx.UnsafeCreateBucket([]byte("key")) tx.UnsafeCreateBucket([]byte("meta")) tx.Unlock() return be }