Example #1
func ExampleLease_create() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)
	lapi := clientv3.NewLease(cli)
	defer lapi.Close()

	// the minimum lease TTL is 5 seconds
	resp, err := lapi.Create(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	// after 5 seconds, the key 'foo' will be removed
	_, err = kvc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID)))
	if err != nil {
		log.Fatal(err)
	}
}
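This example targets an early clientv3 release in which leases are created with Create and the lease ID has to be converted through the lease package. For comparison, here is a minimal sketch of the same flow against the later clientv3 API, assuming the call renamed to Grant and a typed clientv3.LeaseID; the function name and parameter are illustrative only:

func exampleLeaseGrantSketch(cli *clientv3.Client) {
	// grant a 5-second lease; newer clients expose the lease API directly
	resp, err := cli.Grant(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}
	// resp.ID is already a clientv3.LeaseID, so no conversion is needed;
	// the key is removed automatically once the lease expires
	if _, err := cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID)); err != nil {
		log.Fatal(err)
	}
}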
Example #2
func TestKVPutError(t *testing.T) {
	defer testutil.AfterTest(t)

	var (
		maxReqBytes = 1.5 * 1024 * 1024
		quota       = int64(maxReqBytes * 1.2)
	)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	_, err := kv.Put(ctx, "", "bar")
	if err != rpctypes.ErrEmptyKey {
		t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
	}

	_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100))) // 1.5MB
	if err != rpctypes.ErrRequestTooLarge {
		t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
	}

	_, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50)))
	if err != nil { // below quota
		t.Fatal(err)
	}

	time.Sleep(500 * time.Millisecond) // give enough time for commit

	_, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
	if err != rpctypes.ErrNoSpace { // over quota
		t.Fatalf("expected %v, got %v", rpctypes.ErrNoSpace, err)
	}
}
Example #3
func TestKVCompactError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	for i := 0; i < 5; i++ {
		if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
			t.Fatalf("couldn't put 'foo' (%v)", err)
		}
	}
	_, err := kv.Compact(ctx, 6)
	if err != nil {
		t.Fatalf("couldn't compact 6 (%v)", err)
	}

	_, err = kv.Compact(ctx, 6)
	if err != rpctypes.ErrCompacted {
		t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err)
	}

	_, err = kv.Compact(ctx, 100)
	if err != rpctypes.ErrFutureRev {
		t.Fatalf("expected %v, got %v", rpctypes.ErrFutureRev, err)
	}
}
Example #4
func TestLeaseRevoke(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	kv := clientv3.NewKV(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	_, err = lapi.Revoke(context.Background(), lease.LeaseID(resp.ID))
	if err != nil {
		t.Errorf("failed to revoke lease %v", err)
	}

	_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID)))
	if err != v3rpc.ErrLeaseNotFound {
		t.Fatalf("err = %v, want %v", err, v3rpc.ErrLeaseNotFound)
	}
}
Example #5
func ExampleLease_keepAlive() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)
	lapi := clientv3.NewLease(cli)
	defer lapi.Close()

	resp, err := lapi.Create(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	_, err = kvc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID)))
	if err != nil {
		log.Fatal(err)
	}

	// the key 'foo' will be kept forever
	_, err = lapi.KeepAlive(context.TODO(), lease.LeaseID(resp.ID))
	if err != nil {
		log.Fatal(err)
	}
}
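In later clientv3 releases KeepAlive returns a channel of keep-alive responses rather than just an error, and the lease stays alive only while that channel is consumed. A hedged sketch against that newer API (names are illustrative):

func exampleLeaseKeepAliveSketch(cli *clientv3.Client) {
	resp, err := cli.Grant(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}
	// the returned channel delivers one response per keep-alive; a real program
	// keeps reading from it for as long as the lease should stay alive
	ch, kaerr := cli.KeepAlive(context.TODO(), resp.ID)
	if kaerr != nil {
		log.Fatal(kaerr)
	}
	if ka := <-ch; ka != nil {
		fmt.Println("remaining TTL:", ka.TTL)
	}
}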
Example #6
func TestKVDelete(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	presp, err := kv.Put(ctx, "foo", "")
	if err != nil {
		t.Fatalf("couldn't put 'foo' (%v)", err)
	}
	if presp.Header.Revision != 2 {
		t.Fatalf("presp.Header.Revision got %d, want %d", presp.Header.Revision, 2)
	}
	resp, err := kv.Delete(ctx, "foo")
	if err != nil {
		t.Fatalf("couldn't delete key (%v)", err)
	}
	if resp.Header.Revision != 3 {
		t.Fatalf("resp.Header.Revision got %d, want %d", resp.Header.Revision, 3)
	}
	gresp, err := kv.Get(ctx, "foo")
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(gresp.Kvs) > 0 {
		t.Fatalf("gresp.Kvs got %+v, want none", gresp.Kvs)
	}
}
Example #7
func TestTxnWriteFail(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.Client(0))
	clus.Members[0].Stop(t)
	<-clus.Members[0].StopNotify()

	resp, err := kv.Txn().Then(clientv3.OpPut("foo", "bar", 0)).Commit()
	if err == nil {
		t.Fatalf("expected error, got response %v", resp)
	}

	// reconnect so cluster terminate doesn't complain about double-close
	clus.Members[0].Restart(t)

	// and ensure the put didn't take
	gresp, gerr := kv.Get("foo", 0)
	if gerr != nil {
		t.Fatal(gerr)
	}
	if len(gresp.Kvs) != 0 {
		t.Fatalf("expected no keys, got %v", gresp.Kvs)
	}
}
Example #8
func ExampleKV_compact() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kvc.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	compRev := resp.Header.Revision // specify compact revision of your choice

	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
	err = kvc.Compact(ctx, compRev)
	cancel()
	if err != nil {
		log.Fatal(err)
	}
}
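Later clientv3 releases return a *CompactResponse alongside the error and accept compaction options. A hedged sketch of the same compaction using the WithCompactPhysical option (assumed to be available in the client version at hand), reusing requestTimeout from the example above:

func exampleCompactPhysicalSketch(kvc clientv3.KV, compRev int64) {
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	defer cancel()
	// block until the backend has physically reclaimed space for the compacted revisions
	if _, err := kvc.Compact(ctx, compRev, clientv3.WithCompactPhysical()); err != nil {
		log.Fatal(err)
	}
}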
Example #9
func Demo(key string, auth3 *Auth3) {
	cli, err := Get().API(auth3)
	if err != nil {
		log.Fatal(err)
	}

	kvc := clientv3.NewKV(cli)

	log.Println("init key ")
	if err := initKey(key, kvc); err != nil {
		log.Fatal(err)
	}

	for i := 0; i < 1000; i++ {
		value := strconv.Itoa(i)
		log.Println("begin ", value)

		if err := require(key, value, kvc); err != nil {
			log.Fatal(err)
		}

		if err := release(key, value, kvc); err != nil {
			log.Fatal(err)
		}

		log.Printf("end %s\n\n", value)
	}
}
Example #10
func ExampleKV_get() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)

	_, err = kvc.Put(context.TODO(), "foo", "bar")
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kvc.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// foo : bar
}
Example #11
// txnCommandFunc executes the "txn" command.
func txnCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("txn command does not accept argument."))
	}

	if !txnInteractive {
		ExitWithError(ExitBadFeature, fmt.Errorf("txn command only supports interactive mode"))
	}

	reader := bufio.NewReader(os.Stdin)

	txn := clientv3.NewKV(mustClientFromCmd(cmd)).Txn(context.Background())
	fmt.Println("compares:")
	txn.If(readCompares(reader)...)
	fmt.Println("success requests (get, put, delete):")
	txn.Then(readOps(reader)...)
	fmt.Println("failure requests (get, put, delete):")
	txn.Else(readOps(reader)...)

	resp, err := txn.Commit()
	if err != nil {
		ExitWithError(ExitError, err)
	}

	display.Txn(*resp)
}
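The same kind of transaction can be built programmatically rather than read from stdin. A minimal sketch, assuming a connected *clientv3.Client named cli, chaining If/Then/Else in one call:

func txnSketch(cli *clientv3.Client) {
	// put "foo" only if its current value is "bar"; otherwise read it back
	resp, err := clientv3.NewKV(cli).Txn(context.Background()).
		If(clientv3.Compare(clientv3.Value("foo"), "=", "bar")).
		Then(clientv3.OpPut("foo", "baz")).
		Else(clientv3.OpGet("foo")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("transaction succeeded:", resp.Succeeded)
}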
Example #12
func TestKVPutWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)

	// wait for the election timeout, so member[0] will not have a leader.
	var (
		electionTicks = 10
		tickDuration  = 10 * time.Millisecond
	)
	time.Sleep(time.Duration(3*electionTicks) * tickDuration)

	kv := clientv3.NewKV(clus.Client(0))
	_, err := kv.Put(clientv3.WithRequireLeader(context.Background()), "foo", "bar")
	if err != rpctypes.ErrNoLeader {
		t.Fatal(err)
	}

	// clients may give timeout errors since the members are stopped; take
	// the clients so that terminating the cluster won't complain
	clus.Client(1).Close()
	clus.Client(2).Close()
	clus.TakeClient(1)
	clus.TakeClient(2)
}
Example #13
func ExampleKV_getSortedPrefix() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)

	for i := range make([]int, 3) {
		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
		_, err = kvc.Put(ctx, fmt.Sprintf("key_%d", i), "value")
		cancel()
		if err != nil {
			log.Fatal(err)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kvc.Get(ctx, "key", clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// key_2 : value
	// key_1 : value
	// key_0 : value
}
Example #14
func TestKVNewAfterClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	clus.TakeClient(0)
	if err := cli.Close(); err != nil {
		t.Fatal(err)
	}

	donec := make(chan struct{})
	go func() {
		kv := clientv3.NewKV(cli)
		if _, err := kv.Get(context.TODO(), "foo"); err != grpc.ErrClientConnClosing {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
		}
		close(donec)
	}()
	select {
	case <-time.After(3 * time.Second):
		t.Fatal("kv.Get took too long")
	case <-donec:
	}
}
Example #15
func TestTxnReadRetry(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.Client(0))
	clus.Members[0].Stop(t)
	<-clus.Members[0].StopNotify()

	donec := make(chan struct{})
	go func() {
		ctx := context.TODO()
		_, err := kv.Txn(ctx).Then(clientv3.OpGet("foo")).Commit()
		if err != nil {
			t.Fatalf("expected response, got error %v", err)
		}
		donec <- struct{}{}
	}()
	// wait for txn to fail on disconnect
	time.Sleep(100 * time.Millisecond)

	// restart node; client should resume
	clus.Members[0].Restart(t)
	select {
	case <-donec:
	case <-time.After(5 * time.Second):
		t.Fatalf("waited too long")
	}
}
Example #16
func TestTxnWriteFail(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.Client(0))
	ctx := context.TODO()

	clus.Members[0].Stop(t)
	<-clus.Members[0].StopNotify()

	donec := make(chan struct{})
	go func() {
		resp, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit()
		if err == nil {
			t.Fatalf("expected error, got response %v", resp)
		}
		donec <- struct{}{}
	}()

	dialTimeout := 5 * time.Second
	select {
	case <-time.After(2*dialTimeout + time.Second):
		t.Fatalf("timed out waiting for txn to fail")
	case <-donec:
		// don't restart cluster until txn errors out
	}

	go func() {
		// reconnect so terminate doesn't complain about double-close
		clus.Members[0].Restart(t)
		// wait for etcdserver to come back up (otherwise CI races and the get request times out)
		time.Sleep(2 * time.Second)
		donec <- struct{}{}

		// and ensure the put didn't take
		gresp, gerr := kv.Get(ctx, "foo")
		if gerr != nil {
			t.Fatal(gerr)
		}
		if len(gresp.Kvs) != 0 {
			t.Fatalf("expected no keys, got %v", gresp.Kvs)
		}
		donec <- struct{}{}
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for restart")
	case <-donec:
	}

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for get")
	case <-donec:
	}
}
Example #17
func NewDoubleBarrier(client *clientv3.Client, key string, count int) *DoubleBarrier {
	return &DoubleBarrier{
		client: client,
		kv:     clientv3.NewKV(client),
		ctx:    context.TODO(),
		key:    key,
		count:  count,
	}
}
Example #18
func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) {
	respchan := make(chan clientv3.GetResponse, 1024)
	errchan := make(chan error, 1)

	kapi := clientv3.NewKV(s.c)
	// if rev is not specified, we will choose the most recent revision.
	if s.rev == 0 {
		resp, err := kapi.Get(ctx, "foo")
		if err != nil {
			errchan <- err
			close(respchan)
			close(errchan)
			return respchan, errchan
		}
		s.rev = resp.Header.Revision
	}

	go func() {
		defer close(respchan)
		defer close(errchan)

		var key string

		opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev)}

		if len(s.prefix) == 0 {
			// If len(s.prefix) == 0, we will sync the entire key-value space.
			// We then range from the smallest key (0x00) to the end.
			opts = append(opts, clientv3.WithFromKey())
			key = "\x00"
		} else {
			// If len(s.prefix) != 0, we will sync the key-value space with the given prefix.
			// We then range from the prefix to the next prefix if it exists, or to the
			// end of the keyspace if it does not.
			opts = append(opts, clientv3.WithPrefix())
			key = s.prefix
		}

		for {
			resp, err := kapi.Get(ctx, key, opts...)
			if err != nil {
				errchan <- err
				return
			}

			respchan <- (clientv3.GetResponse)(*resp)

			if !resp.More {
				return
			}
			// move to next key
			key = string(append(resp.Kvs[len(resp.Kvs)-1].Key, 0))
		}
	}()

	return respchan, errchan
}
Example #19
// delCommandFunc executes the "del" command.
func delCommandFunc(cmd *cobra.Command, args []string) {
	key, opts := getDelOp(cmd, args)
	c := mustClientFromCmd(cmd)
	kvapi := clientv3.NewKV(c)
	resp, err := kvapi.Delete(context.TODO(), key, opts...)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	display.Del(*resp)
}
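The same Delete call can also remove an entire prefix instead of a single key. A minimal sketch, with the "job/" prefix chosen purely for illustration:

func deletePrefixSketch(c *clientv3.Client) {
	// delete every key starting with "job/" and report how many were removed
	resp, err := clientv3.NewKV(c).Delete(context.TODO(), "job/", clientv3.WithPrefix())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted", resp.Deleted, "keys")
}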
Example #20
// putCommandFunc executes the "put" command.
func putCommandFunc(cmd *cobra.Command, args []string) {
	key, value, opts := getPutOp(cmd, args)

	c := mustClientFromCmd(cmd)
	kvapi := clientv3.NewKV(c)
	resp, err := kvapi.Put(context.TODO(), key, value, opts...)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	display.Put(*resp)
}
Example #21
// NewEphemeralKV creates a new key/value pair associated with a session lease
func NewEphemeralKV(client *v3.Client, key, val string) (*EphemeralKV, error) {
	s, err := concurrency.NewSession(client)
	if err != nil {
		return nil, err
	}
	k, err := NewKV(v3.NewKV(client), key, val, s.Lease())
	if err != nil {
		return nil, err
	}
	return &EphemeralKV{*k}, nil
}
Example #22
func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
	defer testutil.AfterTest(t)

	// accelerate report interval so test terminates quickly
	oldpi := v3rpc.GetProgressReportInterval()
	// using atomics to avoid race warnings
	v3rpc.SetProgressReportInterval(3 * time.Second)
	pi := 3 * time.Second
	defer func() { v3rpc.SetProgressReportInterval(oldpi) }()

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	wc := clientv3.NewWatcher(clus.RandClient())
	defer wc.Close()

	opts := []clientv3.OpOption{clientv3.WithProgressNotify()}
	if watchOnPut {
		opts = append(opts, clientv3.WithPrefix())
	}
	rch := wc.Watch(context.Background(), "foo", opts...)

	select {
	case resp := <-rch: // wait for notification
		if len(resp.Events) != 0 {
			t.Fatalf("resp.Events expected none, got %+v", resp.Events)
		}
	case <-time.After(2 * pi):
		t.Fatalf("watch response expected in %v, but timed out", pi)
	}

	kvc := clientv3.NewKV(clus.RandClient())
	if _, err := kvc.Put(context.TODO(), "foox", "bar"); err != nil {
		t.Fatal(err)
	}

	select {
	case resp := <-rch:
		if resp.Header.Revision != 2 {
			t.Fatalf("resp.Header.Revision expected 2, got %d", resp.Header.Revision)
		}
		if watchOnPut { // expect the put event if the watch covers the put key
			ev := []*clientv3.Event{{Type: clientv3.EventTypePut,
				Kv: &mvccpb.KeyValue{Key: []byte("foox"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}}}
			if !reflect.DeepEqual(ev, resp.Events) {
				t.Fatalf("expected %+v, got %+v", ev, resp.Events)
			}
		} else if len(resp.Events) != 0 { // otherwise expect an empty progress notification
			t.Fatalf("expected no events, but got %+v", resp.Events)
		}
	case <-time.After(2 * pi):
		t.Fatalf("watch response expected in %v, but timed out", pi)
	}
}
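For comparison, a plain watch without progress notifications is just a loop over the watch channel. A minimal sketch, assuming a connected *clientv3.Client:

func watchSketch(cli *clientv3.Client) {
	wc := clientv3.NewWatcher(cli)
	defer wc.Close()
	// print every change to "foo" until the context is cancelled or the channel closes
	for wresp := range wc.Watch(context.Background(), "foo") {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}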
Example #23
// TestWatchResumeCompacted checks that the watcher gracefully closes when it
// tries to resume to a revision that has been compacted out of the store.
func TestWatchResumeCompacted(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// create a waiting watcher at rev 1
	w := clientv3.NewWatcher(clus.Client(0))
	defer w.Close()
	wch := w.Watch(context.Background(), "foo", clientv3.WithRev(1))
	select {
	case w := <-wch:
		t.Errorf("unexpected message from wch %v", w)
	default:
	}
	clus.Members[0].Stop(t)

	ticker := time.After(time.Second * 10)
	for clus.WaitLeader(t) <= 0 {
		select {
		case <-ticker:
			t.Fatalf("failed to wait for new leader")
		default:
			time.Sleep(10 * time.Millisecond)
		}
	}

	// put some data and compact away
	kv := clientv3.NewKV(clus.Client(1))
	for i := 0; i < 5; i++ {
		if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
			t.Fatal(err)
		}
	}
	if _, err := kv.Compact(context.TODO(), 3); err != nil {
		t.Fatal(err)
	}

	clus.Members[0].Restart(t)

	// get compacted error message
	wresp, ok := <-wch
	if !ok {
		t.Fatalf("expected wresp, but got closed channel")
	}
	if wresp.Err() != rpctypes.ErrCompacted {
		t.Fatalf("wresp.Err() expected %v, but got %v", rpctypes.ErrCompacted, wresp.Err())
	}
	// ensure the channel is closed
	if wresp, ok = <-wch; ok {
		t.Fatalf("expected closed channel, but got %v", wresp)
	}
}
Example #24
func TestLeaseTimeToLive(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	kv := clientv3.NewKV(clus.RandClient())
	keys := []string{"foo1", "foo2"}
	for i := range keys {
		if _, err = kv.Put(context.TODO(), keys[i], "bar", clientv3.WithLease(resp.ID)); err != nil {
			t.Fatal(err)
		}
	}

	lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys())
	if lerr != nil {
		t.Fatal(lerr)
	}
	if lresp.ID != resp.ID {
		t.Fatalf("leaseID expected %d, got %d", resp.ID, lresp.ID)
	}
	if lresp.GrantedTTL != int64(10) {
		t.Fatalf("GrantedTTL expected %d, got %d", 10, lresp.GrantedTTL)
	}
	if lresp.TTL == 0 || lresp.TTL > lresp.GrantedTTL {
		t.Fatalf("unexpected TTL %d (granted %d)", lresp.TTL, lresp.GrantedTTL)
	}
	ks := make([]string, len(lresp.Keys))
	for i := range lresp.Keys {
		ks[i] = string(lresp.Keys[i])
	}
	sort.Strings(ks)
	if !reflect.DeepEqual(ks, keys) {
		t.Fatalf("keys expected %v, got %v", keys, ks)
	}

	lresp, lerr = lapi.TimeToLive(context.Background(), resp.ID)
	if lerr != nil {
		t.Fatal(lerr)
	}
	if len(lresp.Keys) != 0 {
		t.Fatalf("unexpected keys %+v", lresp.Keys)
	}
}
Example #25
// TestKVGetRetry ensures get will retry on disconnect.
func TestKVGetRetry(t *testing.T) {
	defer testutil.AfterTest(t)

	clusterSize := 3
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize})
	defer clus.Terminate(t)

	// pick a follower: killing the leader and the ensuing election
	// could leave the client with no endpoint to reconnect to
	fIdx := (clus.WaitLeader(t) + 1) % clusterSize

	kv := clientv3.NewKV(clus.Client(fIdx))
	ctx := context.TODO()

	if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
		t.Fatal(err)
	}

	clus.Members[fIdx].Stop(t)

	donec := make(chan struct{})
	go func() {
		// Get will fail at first, but the client reconnects and retries
		gresp, gerr := kv.Get(ctx, "foo")
		if gerr != nil {
			t.Fatal(gerr)
		}
		wkvs := []*mvccpb.KeyValue{
			{
				Key:            []byte("foo"),
				Value:          []byte("bar"),
				CreateRevision: 2,
				ModRevision:    2,
				Version:        1,
			},
		}
		if !reflect.DeepEqual(gresp.Kvs, wkvs) {
			t.Fatalf("bad get: got %v, want %v", gresp.Kvs, wkvs)
		}
		donec <- struct{}{}
	}()

	time.Sleep(100 * time.Millisecond)
	clus.Members[fIdx].Restart(t)

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for get")
	case <-donec:
	}
}
Example #26
func TestLeaseNotFoundError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	kv := clientv3.NewKV(clus.RandClient())

	_, err := kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(500)))
	if err != rpctypes.ErrLeaseNotFound {
		t.Fatalf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err)
	}
}
Example #27
// TestKVGetRetry ensures get will retry on disconnect.
func TestKVGetRetry(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.Client(0))
	ctx := context.TODO()

	if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
		t.Fatal(err)
	}

	clus.Members[0].Stop(t)
	<-clus.Members[0].StopNotify()

	donec := make(chan struct{})
	go func() {
		// Get will fail at first, but the client reconnects and retries
		gresp, gerr := kv.Get(ctx, "foo")
		if gerr != nil {
			t.Fatal(gerr)
		}
		wkvs := []*storagepb.KeyValue{
			{
				Key:            []byte("foo"),
				Value:          []byte("bar"),
				CreateRevision: 2,
				ModRevision:    2,
				Version:        1,
			},
		}
		if !reflect.DeepEqual(gresp.Kvs, wkvs) {
			t.Fatalf("bad get: got %v, want %v", gresp.Kvs, wkvs)
		}
		donec <- struct{}{}
	}()

	time.Sleep(100 * time.Millisecond)
	clus.Members[0].Restart(t)

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for get")
	case <-donec:
	}
}
Example #28
func TestTxnWriteFail(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.Client(0))

	clus.Members[0].Stop(t)

	txnc, getc := make(chan struct{}), make(chan struct{})
	go func() {
		ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
		defer cancel()
		resp, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit()
		if err == nil {
			t.Fatalf("expected error, got response %v", resp)
		}
		close(txnc)
	}()

	go func() {
		select {
		case <-time.After(5 * time.Second):
			t.Fatalf("timed out waiting for txn fail")
		case <-txnc:
		}
		// and ensure the put didn't take
		gresp, gerr := clus.Client(1).Get(context.TODO(), "foo")
		if gerr != nil {
			t.Fatal(gerr)
		}
		if len(gresp.Kvs) != 0 {
			t.Fatalf("expected no keys, got %v", gresp.Kvs)
		}
		close(getc)
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for get")
	case <-getc:
	}

	// reconnect so terminate doesn't complain about double-close
	clus.Members[0].Restart(t)
}
Example #29
func Example() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close() // make sure to close the client

	kvc := clientv3.NewKV(cli)

	_, err = kvc.Put(context.TODO(), "foo", "bar")
	if err != nil {
		log.Fatal(err)
	}
}
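A Put can also report the key's previous value when asked. A hedged sketch using the WithPrevKV option from later clientv3 releases:

func putWithPrevKVSketch(kvc clientv3.KV) {
	// overwrite "foo" and print the value it held before the write
	resp, err := kvc.Put(context.TODO(), "foo", "baz", clientv3.WithPrevKV())
	if err != nil {
		log.Fatal(err)
	}
	if resp.PrevKv != nil {
		fmt.Printf("previous value: %s\n", resp.PrevKv.Value)
	}
}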
Example #30
// compactionCommandFunc executes the "compaction" command.
func compactionCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("compaction command needs 1 argument."))
	}

	rev, err := strconv.ParseInt(args[0], 10, 64)
	if err != nil {
		ExitWithError(ExitError, err)
	}

	c := mustClient(cmd)
	if cerr := clientv3.NewKV(c).Compact(context.TODO(), rev); cerr != nil {
		ExitWithError(ExitError, cerr)
		return
	}
	fmt.Println("compacted revision", rev)
}