Example #1
func ExampleLease_revoke() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.Create(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(resp.ID)))
	if err != nil {
		log.Fatal(err)
	}

	// revoking lease expires the key attached to its lease ID
	_, err = cli.Revoke(context.TODO(), clientv3.LeaseID(resp.ID))
	if err != nil {
		log.Fatal(err)
	}

	gresp, err := cli.Get(context.TODO(), "foo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("number of keys:", len(gresp.Kvs))
	// Output: number of keys: 0
}
Example #2
func ExampleLease_keepAliveOnce() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.Grant(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(resp.ID)))
	if err != nil {
		log.Fatal(err)
	}

	// to renew the lease only once
	_, err = cli.KeepAliveOnce(context.TODO(), clientv3.LeaseID(resp.ID))
	if err != nil {
		log.Fatal(err)
	}
}
Example #3
func ExampleLease_keepAlive() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.Create(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(resp.ID)))
	if err != nil {
		log.Fatal(err)
	}

	// the key 'foo' will be kept forever
	_, err = cli.KeepAlive(context.TODO(), clientv3.LeaseID(resp.ID))
	if err != nil {
		log.Fatal(err)
	}
}
Example #4
func TestLeaseRevoke(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	kv := clientv3.NewKV(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	_, err = lapi.Revoke(context.Background(), clientv3.LeaseID(resp.ID))
	if err != nil {
		t.Errorf("failed to revoke lease %v", err)
	}

	_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(resp.ID)))
	if err != rpctypes.ErrLeaseNotFound {
		t.Fatalf("err = %v, want %v", err, rpctypes.ErrLeaseNotFound)
	}
}
Example #5
func getPutOp(cmd *cobra.Command, args []string) (string, string, []clientv3.OpOption) {
	if len(args) == 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("put command needs 1 argument and input from stdin or 2 arguments."))
	}

	key := args[0]
	value, err := argOrStdin(args, os.Stdin, 1)
	if err != nil {
		ExitWithError(ExitBadArgs, fmt.Errorf("put command needs 1 argument and input from stdin or 2 arguments."))
	}

	// leaseStr and putPrevKV are package-level variables bound to the put command's flags.
	id, err := strconv.ParseInt(leaseStr, 16, 64)
	if err != nil {
		ExitWithError(ExitBadArgs, fmt.Errorf("bad lease ID (%v), expecting ID in Hex", err))
	}

	opts := []clientv3.OpOption{}
	if id != 0 {
		opts = append(opts, clientv3.WithLease(clientv3.LeaseID(id)))
	}
	if putPrevKV {
		opts = append(opts, clientv3.WithPrevKV())
	}

	return key, value, opts
}
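A minimal sketch, not from the original source, of how getPutOp might be wired into the "put" command handler; the handler name and the use of commandCtx, mustClientFromCmd, and ExitWithError are assumptions modeled on the other etcdctl command functions in this listing.

// Hypothetical handler consuming getPutOp's key, value, and options.
func putCommandFunc(cmd *cobra.Command, args []string) {
	key, value, opts := getPutOp(cmd, args)

	ctx, cancel := commandCtx(cmd)
	_, err := mustClientFromCmd(cmd).Put(ctx, key, value, opts...)
	cancel()
	if err != nil {
		ExitWithError(ExitError, fmt.Errorf("failed to put key (%v)", err))
	}
	fmt.Println("OK")
}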
Example #6
// NewSession gets the leased session for a client.
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
	ops := &sessionOptions{ttl: defaultSessionTTL}
	for _, opt := range opts {
		opt(ops)
	}

	resp, err := client.Grant(client.Ctx(), int64(ops.ttl))
	if err != nil {
		return nil, err
	}
	id := v3.LeaseID(resp.ID)

	ctx, cancel := context.WithCancel(client.Ctx())
	keepAlive, err := client.KeepAlive(ctx, id)
	if err != nil || keepAlive == nil {
		return nil, err
	}

	donec := make(chan struct{})
	s := &Session{client: client, id: id, cancel: cancel, donec: donec}

	// keep the lease alive until client error or cancelled context
	go func() {
		defer close(donec)
		for range keepAlive {
			// eat messages until keep alive channel closes
		}
	}()

	return s, nil
}
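A hedged usage sketch, assumed to live in the same package as Session since it touches the unexported id, cancel, and donec fields set by the constructor above; the function name and key are illustrative only.

// Sketch: attach a key to the session's lease, then stop renewing so the
// server expires the lease (and the key) after its TTL.
func putUnderSession(client *v3.Client, key, val string) error {
	s, err := NewSession(client)
	if err != nil {
		return err
	}
	// the key survives only while the session's lease is kept alive
	if _, err := client.Put(client.Ctx(), key, val, v3.WithLease(s.id)); err != nil {
		return err
	}
	s.cancel()
	<-s.donec
	return nil
}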
Example #7
func TestLeaseKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	rc, kerr := lapi.KeepAlive(context.Background(), clientv3.LeaseID(resp.ID))
	if kerr != nil {
		t.Errorf("failed to keepalive lease %v", kerr)
	}

	kresp, ok := <-rc
	if !ok {
		t.Errorf("chan is closed, want not closed")
	}

	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	lapi.Close()

	_, ok = <-rc
	if ok {
		t.Errorf("chan is not closed, want lease Close() closes chan")
	}
}
Example #8
func leaseFromArgs(arg string) v3.LeaseID {
	id, err := strconv.ParseInt(arg, 16, 64)
	if err != nil {
		ExitWithError(ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err))
	}
	return v3.LeaseID(id)
}
Example #9
func TestKVPut(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Fatalf("failed to create lease %v", err)
	}

	tests := []struct {
		key, val string
		leaseID  clientv3.LeaseID
	}{
		{"foo", "bar", clientv3.NoLease},
		{"hello", "world", clientv3.LeaseID(resp.ID)},
	}

	for i, tt := range tests {
		if _, err := kv.Put(ctx, tt.key, tt.val, clientv3.WithLease(tt.leaseID)); err != nil {
			t.Fatalf("#%d: couldn't put %q (%v)", i, tt.key, err)
		}
		resp, err := kv.Get(ctx, tt.key)
		if err != nil {
			t.Fatalf("#%d: couldn't get key (%v)", i, err)
		}
		if len(resp.Kvs) != 1 {
			t.Fatalf("#%d: expected 1 key, got %d", i, len(resp.Kvs))
		}
		if !bytes.Equal([]byte(tt.val), resp.Kvs[0].Value) {
			t.Errorf("#%d: val = %s, want %s", i, tt.val, resp.Kvs[0].Value)
		}
		if tt.leaseID != clientv3.LeaseID(resp.Kvs[0].Lease) {
			t.Errorf("#%d: val = %d, want %d", i, tt.leaseID, resp.Kvs[0].Lease)
		}
	}
}
Example #10
func (c *etcdCtx) RenewLease(lease int64) error {
	lid := etcdv3.LeaseID(lease)
	resp, err := c.etcd.Client.KeepAliveOnce(c.getContext(), lid)
	if err != nil {
		if err == rpctypes.ErrLeaseNotFound {
			return torus.ErrLeaseNotFound
		}
		return err
	}
	clog.Tracef("updated lease for %d, TTL %d", resp.ID, resp.TTL)
	return nil
}
Example #11
// ttlOpts returns client options based on given ttl.
// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length
func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {
	if ttl == 0 {
		return nil, nil
	}
	// TODO: one lease per TTL key is expensive. For the current use case we could batch keys created
	// within a long window into the same lease. We should benchmark this and optimize the performance.
	lcr, err := s.client.Lease.Grant(ctx, ttl)
	if err != nil {
		return nil, err
	}
	return []clientv3.OpOption{clientv3.WithLease(clientv3.LeaseID(lcr.ID))}, nil
}
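A minimal sketch of how the returned options might be consumed, assuming s.client is a *clientv3.Client as the Lease.Grant call above suggests; the method name and signature are illustrative, not part of the original store.

// Hypothetical caller: write a key that the server expires after roughly ttl seconds.
func (s *store) putWithTTL(ctx context.Context, key, val string, ttl int64) error {
	opts, err := s.ttlOpts(ctx, ttl)
	if err != nil {
		return err
	}
	_, err = s.client.Put(ctx, key, val, opts...)
	return err
}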
Example #12
// TODO: add a client that can connect to all the members of cluster via unix sock.
// TODO: test handle more complicated failures.
func TestLeaseKeepAliveHandleFailure(t *testing.T) {
	t.Skip("test it when we have a cluster client")

	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// TODO: change this line to get a cluster client
	lapi := clientv3.NewLease(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	rc, kerr := lapi.KeepAlive(context.Background(), clientv3.LeaseID(resp.ID))
	if kerr != nil {
		t.Errorf("failed to keepalive lease %v", kerr)
	}

	kresp := <-rc
	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	// restart the connected member.
	clus.Members[0].Stop(t)

	select {
	case <-rc:
		t.Fatalf("unexpected keepalive")
	case <-time.After(10*time.Second/3 + 1):
	}

	// recover the member.
	clus.Members[0].Restart(t)

	kresp = <-rc
	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	lapi.Close()

	_, ok := <-rc
	if ok {
		t.Errorf("chan is not closed, want lease Close() closes chan")
	}
}
Example #13
func TestLeaseNotFoundError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	kv := clientv3.NewKV(clus.RandClient())

	_, err := kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(500)))
	if err != rpctypes.ErrLeaseNotFound {
		t.Fatalf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err)
	}
}
Example #14
func (c *etcdCtx) RegisterPeer(lease int64, p *models.PeerInfo) error {
	if lease == 0 {
		return errors.New("no lease")
	}
	promOps.WithLabelValues("register-peer").Inc()
	p.LastSeen = time.Now().UnixNano()
	data, err := p.Marshal()
	if err != nil {
		return err
	}

	lid := etcdv3.LeaseID(lease)
	_, err = c.etcd.Client.Put(
		c.getContext(), MkKey("nodes", p.UUID), string(data), etcdv3.WithLease(lid))
	return err
}
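The lease passed to RegisterPeer has to be granted and renewed elsewhere; the following hedged sketch, not taken from torus, shows one way to drive it together with RenewLease from example #10. Everything except RegisterPeer, RenewLease, and the etcd client calls is made up.

// Hypothetical wiring: grant a lease, register the peer under it, then renew
// the lease on a ticker at half the TTL until renewal fails.
func (c *etcdCtx) registerAndHeartbeat(p *models.PeerInfo, ttl int64) error {
	grant, err := c.etcd.Client.Grant(c.getContext(), ttl)
	if err != nil {
		return err
	}
	lease := int64(grant.ID)

	if err := c.RegisterPeer(lease, p); err != nil {
		return err
	}

	ticker := time.NewTicker(time.Duration(ttl) * time.Second / 2)
	defer ticker.Stop()
	for range ticker.C {
		if err := c.RenewLease(lease); err != nil {
			return err
		}
	}
	return nil
}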
Example #15
// leaseRevokeCommandFunc executes the "lease revoke" command.
func leaseRevokeCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("lease revoke command needs 1 argument"))
	}

	id, err := strconv.ParseInt(args[0], 16, 64)
	if err != nil {
		ExitWithError(ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err))
	}

	_, err = mustClientFromCmd(cmd).Revoke(context.TODO(), v3.LeaseID(id))
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to revoke lease (%v)\n", err)
		return
	}
	fmt.Printf("lease %016x revoked\n", id)
}
Example #16
// leaseRevokeCommandFunc executes the "lease revoke" command.
func leaseRevokeCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("lease revoke command needs 1 argument"))
	}

	id, err := strconv.ParseInt(args[0], 16, 64)
	if err != nil {
		ExitWithError(ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err))
	}

	ctx, cancel := commandCtx(cmd)
	_, err = mustClientFromCmd(cmd).Revoke(ctx, v3.LeaseID(id))
	cancel()
	if err != nil {
		ExitWithError(ExitError, fmt.Errorf("failed to revoke lease (%v)\n", err))
	}
	fmt.Printf("lease %016x revoked\n", id)
}
Example #17
func TestLeaseKeepAliveOnce(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	_, err = lapi.KeepAliveOnce(context.Background(), clientv3.LeaseID(resp.ID))
	if err != nil {
		t.Errorf("failed to keepalive lease %v", err)
	}
}
Example #18
func (b *blockEtcd) Lock(lease int64) error {
	if lease == 0 {
		return torus.ErrInvalid
	}
	k := etcd.MkKey("volumemeta", etcd.Uint64ToHex(uint64(b.vid)), "blocklock")
	tx := b.Etcd.Client.Txn(b.getContext()).If(
		etcdv3.Compare(etcdv3.Version(k), "=", 0),
	).Then(
		etcdv3.OpPut(k, b.Etcd.UUID(), etcdv3.WithLease(etcdv3.LeaseID(lease))),
	)
	resp, err := tx.Commit()
	if err != nil {
		return err
	}
	if !resp.Succeeded {
		return torus.ErrLocked
	}
	return nil
}
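For symmetry, a hedged sketch of what a matching unlock could look like; this is not torus's actual Unlock, and it assumes the standard errors package is imported. It deletes the lock key only if this node still owns it, mirroring the transaction style of Lock above.

// Hypothetical counterpart to Lock: release the block lock if we still hold it.
func (b *blockEtcd) unlockSketch() error {
	k := etcd.MkKey("volumemeta", etcd.Uint64ToHex(uint64(b.vid)), "blocklock")
	tx := b.Etcd.Client.Txn(b.getContext()).If(
		etcdv3.Compare(etcdv3.Value(k), "=", b.Etcd.UUID()),
	).Then(
		etcdv3.OpDelete(k),
	)
	resp, err := tx.Commit()
	if err != nil {
		return err
	}
	if !resp.Succeeded {
		return errors.New("block lock not held by this node")
	}
	return nil
}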
Example #19
// leaseKeepAliveCommandFunc executes the "lease keep-alive" command.
func leaseKeepAliveCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("lease keep-alive command needs lease ID as argument"))
	}

	id, err := strconv.ParseInt(args[0], 16, 64)
	if err != nil {
		ExitWithError(ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err))
	}

	respc, kerr := mustClientFromCmd(cmd).KeepAlive(context.TODO(), v3.LeaseID(id))
	if kerr != nil {
		ExitWithError(ExitBadConnection, kerr)
	}

	for resp := range respc {
		fmt.Printf("lease %016x keepalived with TTL(%d)\n", resp.ID, resp.TTL)
	}
	fmt.Printf("lease %016x expired or revoked.\n", id)
}
Example #20
func ExampleLease_create() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// minimum lease TTL is 5 seconds
	resp, err := cli.Create(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	// after 5 seconds, the key 'foo' will be removed
	_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(resp.ID)))
	if err != nil {
		log.Fatal(err)
	}
}
Example #21
// NewSession gets the leased session for a client.
func NewSession(client *v3.Client) (*Session, error) {
	clientSessions.mu.Lock()
	defer clientSessions.mu.Unlock()
	if s, ok := clientSessions.sessions[client]; ok {
		return s, nil
	}

	resp, err := client.Grant(client.Ctx(), sessionTTL)
	if err != nil {
		return nil, err
	}
	id := v3.LeaseID(resp.ID)

	ctx, cancel := context.WithCancel(client.Ctx())
	keepAlive, err := client.KeepAlive(ctx, id)
	if err != nil || keepAlive == nil {
		return nil, err
	}

	donec := make(chan struct{})
	s := &Session{client: client, id: id, cancel: cancel, donec: donec}
	clientSessions.sessions[client] = s

	// keep the lease alive until client error or cancelled context
	go func() {
		defer func() {
			clientSessions.mu.Lock()
			delete(clientSessions.sessions, client)
			clientSessions.mu.Unlock()
			close(donec)
		}()
		for range keepAlive {
			// eat messages until keep alive channel closes
		}
	}()

	return s, nil
}
Example #22
func TestWatchEventType(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)

	client := cluster.RandClient()
	ctx := context.Background()
	watchChan := client.Watch(ctx, "/", clientv3.WithPrefix())

	if _, err := client.Put(ctx, "/toDelete", "foo"); err != nil {
		t.Fatalf("Put failed: %v", err)
	}
	if _, err := client.Put(ctx, "/toDelete", "bar"); err != nil {
		t.Fatalf("Put failed: %v", err)
	}
	if _, err := client.Delete(ctx, "/toDelete"); err != nil {
		t.Fatalf("Delete failed: %v", err)
	}
	lcr, err := client.Lease.Grant(ctx, 1)
	if err != nil {
		t.Fatalf("lease create failed: %v", err)
	}
	if _, err := client.Put(ctx, "/toExpire", "foo", clientv3.WithLease(clientv3.LeaseID(lcr.ID))); err != nil {
		t.Fatalf("Put failed: %v", err)
	}

	tests := []struct {
		et       storagepb.Event_EventType
		isCreate bool
		isModify bool
	}{{
		et:       clientv3.EventTypePut,
		isCreate: true,
	}, {
		et:       clientv3.EventTypePut,
		isModify: true,
	}, {
		et: clientv3.EventTypeDelete,
	}, {
		et:       clientv3.EventTypePut,
		isCreate: true,
	}, {
		et: clientv3.EventTypeDelete,
	}}

	var res []*clientv3.Event

	for {
		select {
		case wres := <-watchChan:
			res = append(res, wres.Events...)
		case <-time.After(10 * time.Second):
			t.Fatalf("Should receive %d events and then break out loop", len(tests))
		}
		if len(res) == len(tests) {
			break
		}
	}

	for i, tt := range tests {
		ev := res[i]
		if tt.et != ev.Type {
			t.Errorf("#%d: event type want=%s, get=%s", i, tt.et, ev.Type)
		}
		if tt.isCreate && !ev.IsCreate() {
			t.Errorf("#%d: event should be CreateEvent", i)
		}
		if tt.isModify && !ev.IsModify() {
			t.Errorf("#%d: event should be ModifyEvent", i)
		}
	}
}
Example #23
func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
	opts := []clientv3.OpOption{}
	opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease)))

	return clientv3.OpPut(string(r.Key), string(r.Value), opts...)
}
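A short sketch of the conversion in use; the helper name and parameters are illustrative, only the pb.PutRequest field names and the clientv3 KV.Do call come from the standard etcd APIs used above.

// Hypothetical caller: build a PutRequest around a lease ID and execute it
// through a clientv3 KV handle.
func doPut(ctx context.Context, kv clientv3.KV, key, val string, leaseID clientv3.LeaseID) error {
	r := &pb.PutRequest{Key: []byte(key), Value: []byte(val), Lease: int64(leaseID)}
	_, err := kv.Do(ctx, PutRequestToOp(r))
	return err
}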