func ExampleLease_create() { cli, err := clientv3.New(clientv3.Config{ Endpoints: endpoints, DialTimeout: dialTimeout, }) if err != nil { log.Fatal(err) } defer cli.Close() kvc := clientv3.NewKV(cli) lapi := clientv3.NewLease(cli) defer lapi.Close() // minimum lease TTL is 5-second resp, err := lapi.Create(context.TODO(), 5) if err != nil { log.Fatal(err) } // after 5 seconds, the key 'foo' will be removed _, err = kvc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID))) if err != nil { log.Fatal(err) } }
func ExampleLease_keepAlive() { cli, err := clientv3.New(clientv3.Config{ Endpoints: endpoints, DialTimeout: dialTimeout, }) if err != nil { log.Fatal(err) } defer cli.Close() kvc := clientv3.NewKV(cli) lapi := clientv3.NewLease(cli) defer lapi.Close() resp, err := lapi.Create(context.TODO(), 5) if err != nil { log.Fatal(err) } _, err = kvc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID))) if err != nil { log.Fatal(err) } // the key 'foo' will be kept forever _, err = lapi.KeepAlive(context.TODO(), lease.LeaseID(resp.ID)) if err != nil { log.Fatal(err) } }
func TestLeaseRevoke(t *testing.T) { defer testutil.AfterTest(t) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clientv3.NewLease(clus.RandClient()) defer lapi.Close() kv := clientv3.NewKV(clus.RandClient()) resp, err := lapi.Create(context.Background(), 10) if err != nil { t.Errorf("failed to create lease %v", err) } _, err = lapi.Revoke(context.Background(), lease.LeaseID(resp.ID)) if err != nil { t.Errorf("failed to revoke lease %v", err) } _, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID))) if err != v3rpc.ErrLeaseNotFound { t.Fatalf("err = %v, want %v", err, v3rpc.ErrLeaseNotFound) } }
func ExampleLease_revoke() { cli, err := clientv3.New(clientv3.Config{ Endpoints: endpoints, DialTimeout: dialTimeout, }) if err != nil { log.Fatal(err) } defer cli.Close() resp, err := cli.Create(context.TODO(), 5) if err != nil { log.Fatal(err) } _, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID))) if err != nil { log.Fatal(err) } // revoking lease expires the key attached to its lease ID _, err = cli.Revoke(context.TODO(), lease.LeaseID(resp.ID)) if err != nil { log.Fatal(err) } gresp, err := cli.Get(context.TODO(), "foo") if err != nil { log.Fatal(err) } fmt.Println("number of keys:", len(gresp.Kvs)) // number of keys: 0 }
func ExampleLease_keepAliveOnce() { cli, err := clientv3.New(clientv3.Config{ Endpoints: endpoints, DialTimeout: dialTimeout, }) if err != nil { log.Fatal(err) } defer cli.Close() resp, err := cli.Create(context.TODO(), 5) if err != nil { log.Fatal(err) } _, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID))) if err != nil { log.Fatal(err) } // to renew the lease only once _, err = cli.KeepAliveOnce(context.TODO(), lease.LeaseID(resp.ID)) if err != nil { log.Fatal(err) } }
func checkRequestLeases(le lease.Lessor, reqs []*pb.RequestUnion) error { for _, requ := range reqs { preq := requ.RequestPut if preq == nil || lease.LeaseID(preq.Lease) == lease.NoLease { continue } if l := le.Lookup(lease.LeaseID(preq.Lease)); l == nil { return lease.ErrLeaseNotFound } } return nil }
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { id := lease.LeaseID(resp.ID) l.mu.Lock() defer l.mu.Unlock() ka, ok := l.keepAlives[id] if !ok { return } if resp.TTL <= 0 { // lease expired; close all keep alive channels delete(l.keepAlives, id) ka.Close() return } // send update to all channels nextDeadline := time.Now().Add(1 + time.Duration(resp.TTL/3)*time.Second) for _, ch := range ka.chs { select { case ch <- (*LeaseKeepAliveResponse)(resp): ka.deadline = nextDeadline default: } } }
func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) return } b, err := ioutil.ReadAll(r.Body) if err != nil { http.Error(w, "error reading body", http.StatusBadRequest) return } lreq := pb.LeaseKeepAliveRequest{} if err := lreq.Unmarshal(b); err != nil { http.Error(w, "error unmarshalling request", http.StatusBadRequest) return } ttl, err := h.l.Renew(lease.LeaseID(lreq.ID)) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } // TODO: fill out ResponseHeader resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl} v, err := resp.Marshal() if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/protobuf") w.Write(v) }
func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { for { req, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } ttl, err := ls.le.LeaseRenew(lease.LeaseID(req.ID)) if err != nil { if err == lease.ErrLeaseNotFound { return ErrLeaseNotFound } // TODO: handle not primary error by forwarding renew requests to leader panic("TODO: handle not primary error by forwarding renew requests to leader") } resp := &pb.LeaseKeepAliveResponse{ID: req.ID, TTL: ttl} err = stream.Send(resp) if err != nil { return err } } }
func checkRequestLeases(le lease.Lessor, reqs []*etcdserverpb.RequestUnion) error { for _, requ := range reqs { tv, ok := requ.Request.(*etcdserverpb.RequestUnion_RequestPut) if !ok { continue } preq := tv.RequestPut if preq == nil || lease.LeaseID(preq.Lease) == lease.NoLease { continue } if l := le.Lookup(lease.LeaseID(preq.Lease)); l == nil { return lease.ErrLeaseNotFound } } return nil }
func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { for { req, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } ttl, err := ls.le.LeaseRenew(lease.LeaseID(req.ID)) if err == lease.ErrLeaseNotFound { return rpctypes.ErrLeaseNotFound } if err != nil && err != lease.ErrLeaseNotFound { return err } resp := &pb.LeaseKeepAliveResponse{ID: req.ID, TTL: ttl} err = stream.Send(resp) if err != nil { return err } } }
func TestLeaseKeepAlive(t *testing.T) { defer testutil.AfterTest(t) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clientv3.NewLease(clus.RandClient()) resp, err := lapi.Create(context.Background(), 10) if err != nil { t.Errorf("failed to create lease %v", err) } rc, kerr := lapi.KeepAlive(context.Background(), lease.LeaseID(resp.ID)) if kerr != nil { t.Errorf("failed to keepalive lease %v", kerr) } kresp, ok := <-rc if !ok { t.Errorf("chan is closed, want not closed") } if kresp.ID != resp.ID { t.Errorf("ID = %x, want %x", kresp.ID, resp.ID) } lapi.Close() _, ok = <-rc if ok { t.Errorf("chan is not closed, want lease Close() closes chan") } }
func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error { for _, requ := range reqs { tv, ok := requ.Request.(*pb.RequestOp_RequestPut) if !ok { continue } preq := tv.RequestPut if preq == nil || lease.LeaseID(preq.Lease) == lease.NoLease { continue } if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil { return lease.ErrLeaseNotFound } } return nil }
func testKVPutMultipleTimes(t *testing.T, f putFunc) { b, tmpPath := backend.NewDefaultTmpBackend() s := NewStore(b, &lease.FakeLessor{}, nil) defer cleanup(s, b, tmpPath) for i := 0; i < 10; i++ { base := int64(i + 1) rev := f(s, []byte("foo"), []byte("bar"), lease.LeaseID(base)) if rev != base+1 { t.Errorf("#%d: rev = %d, want %d", i, rev, base+1) } kvs, _, err := s.Range([]byte("foo"), nil, 0, 0) if err != nil { t.Fatal(err) } wkvs := []storagepb.KeyValue{ {Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: base + 1, Version: base, Lease: base}, } if !reflect.DeepEqual(kvs, wkvs) { t.Errorf("#%d: kvs = %+v, want %+v", i, kvs, wkvs) } } }
func applyLeaseCreate(le lease.Lessor, lc *pb.LeaseCreateRequest) (*pb.LeaseCreateResponse, error) { l, err := le.Grant(lease.LeaseID(lc.ID), lc.TTL) resp := &pb.LeaseCreateResponse{} if err == nil { resp.ID = int64(l.ID) resp.TTL = l.TTL } return resp, err }
func applyLeaseGrant(lessor lease.Lessor, req *etcdserverpb.LeaseGrantRequest) (*etcdserverpb.LeaseGrantResponse, error) { l, err := lessor.Grant(lease.LeaseID(req.ID), req.TTL) resp := &etcdserverpb.LeaseGrantResponse{} if err == nil { resp.ID = int64(l.ID) resp.TTL = l.TTL } return resp, err }
func applyPut(txnID int64, kv dstorage.KV, p *pb.PutRequest) (*pb.PutResponse, error) { resp := &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} var ( rev int64 err error ) if txnID != noTxn { rev, err = kv.TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease)) if err != nil { return nil, err } } else { rev = kv.Put(p.Key, p.Value, lease.LeaseID(p.Lease)) } resp.Header.Revision = rev return resp, nil }
func (a *applierV3backend) LeaseCreate(lc *pb.LeaseCreateRequest) (*pb.LeaseCreateResponse, error) { l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) resp := &pb.LeaseCreateResponse{} if err == nil { resp.ID = int64(l.ID) resp.TTL = l.TTL } return resp, err }
func TestKVPut(t *testing.T) { defer testutil.AfterTest(t) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) lapi := clientv3.NewLease(clus.RandClient()) defer lapi.Close() kv := clientv3.NewKV(clus.RandClient()) ctx := context.TODO() resp, err := lapi.Create(context.Background(), 10) if err != nil { t.Fatalf("failed to create lease %v", err) } tests := []struct { key, val string leaseID lease.LeaseID }{ {"foo", "bar", lease.NoLease}, {"hello", "world", lease.LeaseID(resp.ID)}, } for i, tt := range tests { if _, err := kv.Put(ctx, tt.key, tt.val, clientv3.WithLease(tt.leaseID)); err != nil { t.Fatalf("#%d: couldn't put %q (%v)", i, tt.key, err) } resp, err := kv.Get(ctx, tt.key) if err != nil { t.Fatalf("#%d: couldn't get key (%v)", i, err) } if len(resp.Kvs) != 1 { t.Fatalf("#%d: expected 1 key, got %d", i, len(resp.Kvs)) } if !bytes.Equal([]byte(tt.val), resp.Kvs[0].Value) { t.Errorf("#%d: val = %s, want %s", i, tt.val, resp.Kvs[0].Value) } if tt.leaseID != lease.LeaseID(resp.Kvs[0].Lease) { t.Errorf("#%d: val = %d, want %d", i, tt.leaseID, resp.Kvs[0].Lease) } } }
// Put applies a PutRequest against the backend KV, either within an open
// transaction (txnID != noTxn) or directly. When p.PrevKv is set, the
// prior key-value is read before the write and returned in resp.PrevKv.
func (a *applierV3backend) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) {
	resp := &pb.PutResponse{}
	resp.Header = &pb.ResponseHeader{}
	var (
		rev int64
		err error
	)
	// Read the previous value before writing so PrevKv reflects the state
	// prior to this put.
	var rr *mvcc.RangeResult
	if p.PrevKv {
		if txnID != noTxn {
			rr, err = a.s.KV().TxnRange(txnID, p.Key, nil, mvcc.RangeOptions{})
			if err != nil {
				return nil, err
			}
		} else {
			rr, err = a.s.KV().Range(p.Key, nil, mvcc.RangeOptions{})
			if err != nil {
				return nil, err
			}
		}
	}
	if txnID != noTxn {
		rev, err = a.s.KV().TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease))
		if err != nil {
			return nil, err
		}
	} else {
		leaseID := lease.LeaseID(p.Lease)
		if leaseID != lease.NoLease {
			// reject puts that reference a lease the lessor does not know
			if l := a.s.lessor.Lookup(leaseID); l == nil {
				return nil, lease.ErrLeaseNotFound
			}
		}
		rev = a.s.KV().Put(p.Key, p.Value, leaseID)
	}
	resp.Header.Revision = rev
	if rr != nil && len(rr.KVs) != 0 {
		resp.PrevKv = &rr.KVs[0]
	}
	return resp, nil
}
// LeaseTimeToLive reports a lease's remaining TTL (and, when r.Keys is
// set, the keys attached to it). The leader answers from its local
// lessor; a follower forwards the request to the leader's lease HTTP
// endpoint, retrying until the bounded context expires.
func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
	if s.Leader() == s.ID() {
		// primary; timetolive directly from leader
		le := s.lessor.Lookup(lease.LeaseID(r.ID))
		if le == nil {
			return nil, lease.ErrLeaseNotFound
		}
		// TODO: fill out ResponseHeader
		resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
		if r.Keys {
			ks := le.Keys()
			kbs := make([][]byte, len(ks))
			for i := range ks {
				kbs[i] = []byte(ks[i])
			}
			resp.Keys = kbs
		}
		return resp, nil
	}

	// bound the forwarding attempts by the configured request timeout
	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()

	// forward to leader
	for cctx.Err() == nil {
		leader, err := s.waitLeader(cctx)
		if err != nil {
			return nil, err
		}
		// try each advertised peer URL of the leader until one answers
		for _, url := range leader.PeerURLs {
			lurl := url + leasehttp.LeaseInternalPrefix
			resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
			if err == nil {
				return resp.LeaseTimeToLiveResponse, nil
			}
			// a definitive not-found from the leader is final; other
			// errors fall through to retry
			if err == lease.ErrLeaseNotFound {
				return nil, err
			}
		}
	}
	return nil, ErrTimeout
}
// sessionLease returns the session lease ID shared by all users of the
// given client, creating the lease and a background keep-alive goroutine
// on first use. The lease is renewed until the keep-alive stream fails,
// at which point all per-client state is torn down.
func (clm *clientLeaseMgr) sessionLease(client *clientv3.Client, ttl int64) (lease.LeaseID, error) {
	clm.mu.Lock()
	defer clm.mu.Unlock()
	// reuse an existing session lease for this client, if any
	if lka, ok := clm.leases[client]; ok {
		return lka.id, nil
	}

	resp, err := client.Lease.LeaseCreate(context.TODO(), &pb.LeaseCreateRequest{TTL: ttl})
	if err != nil {
		return lease.NoLease, err
	}
	id := lease.LeaseID(resp.ID)

	ctx, cancel := context.WithCancel(context.Background())
	keepAlive, err := client.Lease.LeaseKeepAlive(ctx)
	if err != nil || keepAlive == nil {
		return lease.NoLease, err
	}

	lka := &leaseKeepAlive{id: id, donec: make(chan struct{})}
	clm.leases[client] = lka

	// keep the lease alive until connection error
	go func() {
		defer func() {
			// teardown on exit: stop the stream, drop the cached lease,
			// cancel the stream context, and signal waiters via donec
			keepAlive.CloseSend()
			clm.mu.Lock()
			delete(clm.leases, client)
			clm.mu.Unlock()
			cancel()
			close(lka.donec)
		}()

		ttl := resp.TTL
		for {
			lreq := &pb.LeaseKeepAliveRequest{ID: int64(id)}
			select {
			case <-lka.donec:
				return
			case <-time.After(time.Duration(ttl/2) * time.Second):
				// renew at half the TTL to stay ahead of expiry
			}
			if err := keepAlive.Send(lreq); err != nil {
				break
			}
			resp, err := keepAlive.Recv()
			if err != nil {
				break
			}
			// server may adjust the TTL; track the latest value
			ttl = resp.TTL
		}
	}()

	return id, nil
}
func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) resp := &pb.LeaseGrantResponse{} if err == nil { resp.ID = int64(l.ID) resp.TTL = l.TTL resp.Header = &pb.ResponseHeader{Revision: a.s.KV().Rev()} } return resp, err }
func (l *lessor) recvKeepAliveLoop() { if !l.initStream() { l.Close() return } for { stream := l.getKeepAliveStream() resp, err := stream.Recv() if err != nil { err = l.switchRemoteAndStream(err) if err != nil { l.Close() return } continue } l.mu.Lock() lch, ok := l.keepAlives[lease.LeaseID(resp.ID)] if !ok { l.mu.Unlock() continue } if resp.TTL <= 0 { close(lch) delete(l.deadlines, lease.LeaseID(resp.ID)) delete(l.keepAlives, lease.LeaseID(resp.ID)) } else { select { case lch <- (*LeaseKeepAliveResponse)(resp): l.deadlines[lease.LeaseID(resp.ID)] = time.Now().Add(1 + time.Duration(resp.TTL/3)*time.Second) default: } } l.mu.Unlock() } }
// delete writes a tombstone for key at the next main revision, records
// the tombstone in the in-memory index, and detaches the key from its
// lease (if any) by reading back the value stored at rev.
func (s *store) delete(key []byte, rev revision) {
	mainrev := s.currentRev.main + 1

	// encode {mainrev, sub} and mark it as a tombstone revision
	ibytes := newRevBytes()
	revToBytes(revision{main: mainrev, sub: s.currentRev.sub}, ibytes)
	ibytes = appendMarkTombstone(ibytes)

	// tombstone value carries only the key
	kv := storagepb.KeyValue{
		Key: key,
	}

	d, err := kv.Marshal()
	if err != nil {
		log.Fatalf("storage: cannot marshal event: %v", err)
	}

	s.tx.UnsafePut(keyBucketName, ibytes, d)
	err = s.kvindex.Tombstone(key, revision{main: mainrev, sub: s.currentRev.sub})
	if err != nil {
		log.Fatalf("storage: cannot tombstone an existing key (%s): %v", string(key), err)
	}
	s.changes = append(s.changes, kv)
	s.currentRev.sub += 1

	// re-read the previous value at rev to learn its lease attachment;
	// ibytes is reused as the lookup key
	ibytes = newRevBytes()
	revToBytes(rev, ibytes)
	_, vs := s.tx.UnsafeRange(keyBucketName, ibytes, nil, 0)

	kv.Reset()
	// NOTE(review): assumes rev names an existing entry so vs is
	// non-empty — an empty result would panic on vs[0]; confirm callers
	// guarantee this.
	if err = kv.Unmarshal(vs[0]); err != nil {
		log.Fatalf("storage: cannot unmarshal value: %v", err)
	}

	if lease.LeaseID(kv.Lease) != lease.NoLease {
		err = s.le.Detach(lease.LeaseID(kv.Lease), []lease.LeaseItem{{Key: string(kv.Key)}})
		if err != nil {
			log.Fatalf("storage: cannot detach %v", err)
		}
	}
}
// LeaseTimeToLive returns the remaining TTL for a lease (and the keys
// attached to it when r.Keys is set). The leader answers from its local
// lessor; a follower forwards the request to the leader over the lease
// HTTP endpoint, trying each peer URL once.
func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
	if s.Leader() == s.ID() {
		// primary; timetolive directly from leader
		le := s.lessor.Lookup(lease.LeaseID(r.ID))
		if le == nil {
			return nil, lease.ErrLeaseNotFound
		}
		// TODO: fill out ResponseHeader
		resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL}
		if r.Keys {
			ks := le.Keys()
			kbs := make([][]byte, len(ks))
			for i := range ks {
				kbs[i] = []byte(ks[i])
			}
			resp.Keys = kbs
		}
		return resp, nil
	}

	// manually request to leader
	leader, err := s.waitLeader()
	if err != nil {
		return nil, err
	}

	var lresp *pb.LeaseTimeToLiveResponse
	// try each peer URL of the leader until one responds successfully
	for _, url := range leader.PeerURLs {
		lurl := url + leasehttp.LeaseInternalPrefix
		var iresp *leasepb.LeaseInternalResponse
		iresp, err = leasehttp.TimeToLiveHTTP(ctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
		if err == nil {
			lresp = iresp.LeaseTimeToLiveResponse
			break
		}
	}
	// NOTE(review): if every peer fails, this returns (nil, nil) — callers
	// must tolerate a nil response; consider propagating err instead.
	return lresp, nil
}
// TODO: add a client that can connect to all the members of cluster via unix sock. // TODO: test handle more complicated failures. func TestLeaseKeepAliveHandleFailure(t *testing.T) { t.Skip("test it when we have a cluster client") defer testutil.AfterTest(t) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) // TODO: change this line to get a cluster client lapi := clientv3.NewLease(clus.RandClient()) resp, err := lapi.Create(context.Background(), 10) if err != nil { t.Errorf("failed to create lease %v", err) } rc, kerr := lapi.KeepAlive(context.Background(), lease.LeaseID(resp.ID)) if kerr != nil { t.Errorf("failed to keepalive lease %v", kerr) } kresp := <-rc if kresp.ID != resp.ID { t.Errorf("ID = %x, want %x", kresp.ID, resp.ID) } // restart the connected member. clus.Members[0].Stop(t) select { case <-rc: t.Fatalf("unexpected keepalive") case <-time.After(10*time.Second/3 + 1): } // recover the member. clus.Members[0].Restart(t) kresp = <-rc if kresp.ID != resp.ID { t.Errorf("ID = %x, want %x", kresp.ID, resp.ID) } lapi.Close() _, ok := <-rc if ok { t.Errorf("chan is not closed, want lease Close() closes chan") } }
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { l.mu.Lock() defer l.mu.Unlock() lch, ok := l.keepAlives[lease.LeaseID(resp.ID)] if !ok { return } if resp.TTL <= 0 { close(lch) delete(l.deadlines, lease.LeaseID(resp.ID)) delete(l.keepAlives, lease.LeaseID(resp.ID)) return } select { case lch <- (*LeaseKeepAliveResponse)(resp): l.deadlines[lease.LeaseID(resp.ID)] = time.Now().Add(1 + time.Duration(resp.TTL/3)*time.Second) default: } }
func (a *applierV3backend) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { resp := &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} var ( rev int64 err error ) if txnID != noTxn { rev, err = a.s.KV().TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease)) if err != nil { return nil, err } } else { leaseID := lease.LeaseID(p.Lease) if leaseID != lease.NoLease { if l := a.s.lessor.Lookup(leaseID); l == nil { return nil, lease.ErrLeaseNotFound } } rev = a.s.KV().Put(p.Key, p.Value, leaseID) } resp.Header.Revision = rev return resp, nil }
func applyPut(txnID int64, kv dstorage.KV, le lease.Lessor, p *pb.PutRequest) (*pb.PutResponse, error) { resp := &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} var ( rev int64 err error ) if txnID != noTxn { rev, err = kv.TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease)) if err != nil { return nil, err } } else { leaseID := lease.LeaseID(p.Lease) if leaseID != lease.NoLease { if l := le.Lookup(leaseID); l == nil { return nil, lease.ErrLeaseNotFound } } rev = kv.Put(p.Key, p.Value, leaseID) } resp.Header.Revision = rev return resp, nil }