Example 1
func ExampleLease_create() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)
	lapi := clientv3.NewLease(cli)
	defer lapi.Close()

	// minimum lease TTL is 5 seconds
	resp, err := lapi.Create(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	// after 5 seconds, the key 'foo' will be removed
	_, err = kvc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID)))
	if err != nil {
		log.Fatal(err)
	}
}
Example 2
// TestV3LeaseExists creates a lease on a random client, then sends a keepalive on another
// client to confirm it's visible to the whole cluster.
func TestV3LeaseExists(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	// create lease
	lresp, err := pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{TTL: 30})
	if err != nil {
		t.Fatal(err)
	}
	if lresp.Error != "" {
		t.Fatal(lresp.Error)
	}

	// confirm keepalive
	lac, err := pb.NewLeaseClient(clus.RandConn()).LeaseKeepAlive(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	defer lac.CloseSend()
	if err = lac.Send(&pb.LeaseKeepAliveRequest{ID: lresp.ID}); err != nil {
		t.Fatal(err)
	}
	if _, err = lac.Recv(); err != nil {
		t.Fatal(err)
	}
}
Example 3
func observe(c *clientv3.Client, election string) error {
	e := concurrency.NewElection(context.TODO(), c, election)
	ctx, cancel := context.WithCancel(context.TODO())

	donec := make(chan struct{})
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, os.Interrupt, os.Kill)
	go func() {
		<-sigc
		cancel()
	}()

	go func() {
		for resp := range e.Observe(ctx) {
			display.Get(resp)
		}
		close(donec)
	}()

	<-donec

	select {
	case <-ctx.Done():
	default:
		return errors.New("elect: observer lost")
	}

	return nil
}
Example 4
func testMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
	// stream lock acquisitions
	lockedC := make(chan *concurrency.Mutex, 1)
	for i := 0; i < waiters; i++ {
		go func() {
			m := concurrency.NewMutex(context.TODO(), chooseClient(), "test-mutex")
			if err := m.Lock(context.TODO()); err != nil {
				t.Fatalf("could not wait on lock (%v)", err)
			}
			lockedC <- m
		}()
	}
	// unlock locked mutexes
	timerC := time.After(time.Duration(waiters) * time.Second)
	for i := 0; i < waiters; i++ {
		select {
		case <-timerC:
			t.Fatalf("timed out waiting for lock %d", i)
		case m := <-lockedC:
			// lock acquired with m
			select {
			case <-lockedC:
				t.Fatalf("lock %d followers did not wait", i)
			default:
			}
			if err := m.Unlock(); err != nil {
				t.Fatalf("could not release lock (%v)", err)
			}
		}
	}
}
Example 5
// Create etcd directory structure from a map, slice or struct.
func Create(kapi client.KeysAPI, path string, val reflect.Value) error {
	switch val.Kind() {
	case reflect.Ptr:
		orig := val.Elem()
		if !orig.IsValid() {
			return nil
		}
		if err := Create(kapi, path, orig); err != nil {
			return err
		}
	case reflect.Interface:
		orig := val.Elem()
		if err := Create(kapi, path, orig); err != nil {
			return err
		}
	case reflect.Struct:
		for i := 0; i < val.NumField(); i++ {
			t := val.Type().Field(i)
			k := t.Tag.Get("etcd")
			if err := Create(kapi, path+"/"+k, val.Field(i)); err != nil {
				return err
			}
		}
	case reflect.Map:
		if strings.HasPrefix(pathx.Base(path), "_") {
			log.Printf("create hidden directory in etcd: %s", path)
		}
		for _, k := range val.MapKeys() {
			v := val.MapIndex(k)
			if err := Create(kapi, path+"/"+k.String(), v); err != nil {
				return err
			}
		}
	case reflect.Slice:
		for i := 0; i < val.Len(); i++ {
			if err := Create(kapi, fmt.Sprintf("%s/%d", path, i), val.Index(i)); err != nil {
				return err
			}
		}
	case reflect.String:
		if strings.HasPrefix(pathx.Base(path), "_") {
			log.Printf("set hidden key in etcd: %s", path)
		}
		_, err := kapi.Set(context.TODO(), path, val.String(), nil)
		if err != nil {
			return err
		}
	case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64:
		if strings.HasPrefix(pathx.Base(path), "_") {
			log.Printf("set hidden key in etcd: %s", path)
		}
		_, err := kapi.Set(context.TODO(), path, fmt.Sprintf("%v", val.Interface()), nil)
		if err != nil {
			return err
		}
	default:
		return fmt.Errorf("unsupported type: %s for path: %s", val.Kind(), path)
	}

	return nil
}
Example 6
// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	kvc := pb.NewKVClient(clus.RandConn())
	key := []byte("foo")
	preq := &pb.PutRequest{Key: key, Lease: 123456}
	tests := []func(){
		// put case
		func() {
			if presp, err := kvc.Put(context.TODO(), preq); err == nil {
				t.Errorf("succeeded put key. req: %v. resp: %v", preq, presp)
			}
		},
		// txn success case
		func() {
			txn := &pb.TxnRequest{}
			txn.Success = append(txn.Success, &pb.RequestUnion{RequestPut: preq})
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn success. req: %v. resp: %v", txn, tresp)
			}
		},
		// txn failure case
		func() {
			txn := &pb.TxnRequest{}
			txn.Failure = append(txn.Failure, &pb.RequestUnion{RequestPut: preq})
			cmp := &pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			}
			txn.Compare = append(txn.Compare, cmp)
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn failure. req: %v. resp: %v", txn, tresp)
			}
		},
		// bad lease in the failure branch is ignored when the success branch applies
		func() {
			txn := &pb.TxnRequest{}
			rreq := &pb.RangeRequest{Key: []byte("bar")}
			txn.Success = append(txn.Success, &pb.RequestUnion{RequestRange: rreq})
			txn.Failure = append(txn.Failure, &pb.RequestUnion{RequestPut: preq})
			if tresp, err := kvc.Txn(context.TODO(), txn); err != nil {
				t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp)
			}
		},
	}

	for i, f := range tests {
		f()
		// key shouldn't have been stored
		rreq := &pb.RangeRequest{Key: key}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("#%d. could not rangereq (%v)", i, err)
		} else if len(rresp.Kvs) != 0 {
			t.Errorf("#%d. expected no keys, got %v", i, rresp)
		}
	}
}
Example 7
// TestBlockProposal ensures that a node blocks proposals while it does not
// know who the current leader is, and accepts proposals once it learns the
// current leader.
func TestBlockProposal(t *testing.T) {
	n := newNode()
	r := newRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
	go n.run(r)
	defer n.Stop()

	errc := make(chan error, 1)
	go func() {
		errc <- n.Propose(context.TODO(), []byte("somedata"))
	}()

	testutil.ForceGosched()
	select {
	case err := <-errc:
		t.Errorf("err = %v, want blocking", err)
	default:
	}

	n.Campaign(context.TODO())
	testutil.ForceGosched()
	select {
	case err := <-errc:
		if err != nil {
			t.Errorf("err = %v, want %v", err, nil)
		}
	default:
		t.Errorf("blocking proposal, want unblocking")
	}
}
Example 8
// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
func TestV3LeaseKeepAlive(t *testing.T) {
	testLeaseRemoveLeasedKey(t, func(clus *clusterV3, leaseID int64) error {
		lc := pb.NewLeaseClient(clus.RandConn())
		lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
		lac, err := lc.LeaseKeepAlive(context.TODO())
		if err != nil {
			return err
		}
		defer lac.CloseSend()

		// renew long enough so lease would've expired otherwise
		for i := 0; i < 3; i++ {
			if err = lac.Send(lreq); err != nil {
				return err
			}
			lresp, rxerr := lac.Recv()
			if rxerr != nil {
				return rxerr
			}
			if lresp.ID != leaseID {
				return fmt.Errorf("expected lease ID %v, got %v", leaseID, lresp.ID)
			}
			time.Sleep(time.Duration(lresp.TTL/2) * time.Second)
		}
		_, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
		return err
	})
}
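The testLeaseRemoveLeasedKey helper used above (and again in later lease tests) is not part of this listing. The sketch below is a hedged reconstruction inferred from the callback signature func(clus *clusterV3, leaseID int64) error: it attaches a key to a fresh lease, runs the supplied lease action, and then checks that the key is gone. The name testLeaseRemoveLeasedKeySketch, the TTL, and the key are assumptions, not the project's actual helper.

// testLeaseRemoveLeasedKeySketch is a hedged reconstruction of the harness the
// lease tests above appear to rely on; it is not the original helper.
func testLeaseRemoveLeasedKeySketch(t *testing.T, act func(clus *clusterV3, leaseID int64) error) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	// create a lease and attach a key to it
	lresp, err := pb.NewLeaseClient(clus.RandConn()).LeaseCreate(
		context.TODO(), &pb.LeaseCreateRequest{TTL: 5})
	if err != nil {
		t.Fatal(err)
	}
	kvc := pb.NewKVClient(clus.RandConn())
	preq := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID}
	if _, err := kvc.Put(context.TODO(), preq); err != nil {
		t.Fatal(err)
	}

	// run the lease action under test (revoke, keepalive-then-revoke, ...)
	if err := act(clus, lresp.ID); err != nil {
		t.Fatal(err)
	}

	// once the lease is gone, the attached key should be removed as well
	rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
	if err != nil {
		t.Fatal(err)
	}
	if len(rresp.Kvs) != 0 {
		t.Fatalf("lease removed but key remains: %+v", rresp.Kvs)
	}
}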
Example 9
// TestNodePropose ensures that node.Propose sends the given proposal to the underlying raft.
func TestNodePropose(t *testing.T) {
	msgs := []raftpb.Message{}
	appendStep := func(r *raft, m raftpb.Message) {
		msgs = append(msgs, m)
	}

	n := newNode()
	s := NewMemoryStorage()
	r := newRaft(1, []uint64{1}, 10, 1, s)
	go n.run(r)
	n.Campaign(context.TODO())
	for {
		rd := <-n.Ready()
		s.Append(rd.Entries)
		// once this raft becomes leader, switch its step function to appendStep to capture proposals
		if rd.SoftState.Lead == r.id {
			r.step = appendStep
			n.Advance()
			break
		}
		n.Advance()
	}
	n.Propose(context.TODO(), []byte("somedata"))
	n.Stop()

	if len(msgs) != 1 {
		t.Fatalf("len(msgs) = %d, want %d", len(msgs), 1)
	}
	if msgs[0].Type != raftpb.MsgProp {
		t.Errorf("msg type = %d, want %d", msgs[0].Type, raftpb.MsgProp)
	}
	if !reflect.DeepEqual(msgs[0].Entries[0].Data, []byte("somedata")) {
		t.Errorf("data = %v, want %v", msgs[0].Entries[0].Data, []byte("somedata"))
	}
}
Example 10
func ExampleLease_keepAliveOnce() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)
	lapi := clientv3.NewLease(cli)
	defer lapi.Close()

	resp, err := lapi.Create(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	_, err = kvc.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID)))
	if err != nil {
		log.Fatal(err)
	}

	// to renew the lease only once
	_, err = lapi.KeepAliveOnce(context.TODO(), lease.LeaseID(resp.ID))
	if err != nil {
		log.Fatal(err)
	}
}
Example 11
// TestV3LeaseCreateByID ensures leases may be created with a given ID.
func TestV3LeaseCreateByID(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// create fixed lease
	lresp, err := toGRPC(clus.RandClient()).Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{ID: 1, TTL: 1})
	if err != nil {
		t.Errorf("could not create lease 1 (%v)", err)
	}
	if lresp.ID != 1 {
		t.Errorf("got id %v, wanted id %v", lresp.ID, 1)
	}

	// create duplicate fixed lease
	lresp, err = toGRPC(clus.RandClient()).Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{ID: 1, TTL: 1})
	if err != v3rpc.ErrLeaseExist {
		t.Error(err)
	}

	// create fresh fixed lease
	lresp, err = toGRPC(clus.RandClient()).Lease.LeaseCreate(
		context.TODO(),
		&pb.LeaseCreateRequest{ID: 2, TTL: 1})
	if err != nil {
		t.Errorf("could not create lease 2 (%v)", err)
	}
	if lresp.ID != 2 {
		t.Errorf("got id %v, wanted id %v", lresp.ID, 2)
	}
}
Example 12
func ExampleLease_revoke() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.Create(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(resp.ID)))
	if err != nil {
		log.Fatal(err)
	}

	// revoking lease expires the key attached to its lease ID
	_, err = cli.Revoke(context.TODO(), clientv3.LeaseID(resp.ID))
	if err != nil {
		log.Fatal(err)
	}

	gresp, err := cli.Get(context.TODO(), "foo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("number of keys:", len(gresp.Kvs))
	// Output: number of keys: 0
}
Example 13
func ExampleLease_keepAlive() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.Create(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(resp.ID)))
	if err != nil {
		log.Fatal(err)
	}

	// the key 'foo' is kept alive as long as KeepAlive keeps renewing the lease
	_, err = cli.KeepAlive(context.TODO(), clientv3.LeaseID(resp.ID))
	if err != nil {
		log.Fatal(err)
	}
}
Example 14
// TestSTMConflict tests that conflicts are retried.
func TestSTMConflict(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	etcdc := clus.RandClient()
	keys := make([]string, 5)
	for i := 0; i < len(keys); i++ {
		keys[i] = fmt.Sprintf("foo-%d", i)
		if _, err := etcdc.Put(context.TODO(), keys[i], "100"); err != nil {
			t.Fatalf("could not make key (%v)", err)
		}
	}

	errc := make(chan error)
	for i := range keys {
		curEtcdc := clus.RandClient()
		srcKey := keys[i]
		applyf := func(stm concurrency.STM) error {
			src := stm.Get(srcKey)
			// must be a different key to avoid double-adding
			dstKey := srcKey
			for dstKey == srcKey {
				dstKey = keys[rand.Intn(len(keys))]
			}
			dst := stm.Get(dstKey)
			srcV, _ := strconv.ParseInt(src, 10, 64)
			dstV, _ := strconv.ParseInt(dst, 10, 64)
			xfer := int64(rand.Intn(int(srcV)) / 2)
			stm.Put(srcKey, fmt.Sprintf("%d", srcV-xfer))
			stm.Put(dstKey, fmt.Sprintf("%d", dstV+xfer))
			return nil
		}
		go func() {
			_, err := concurrency.NewSTMRepeatable(context.TODO(), curEtcdc, applyf)
			errc <- err
		}()
	}

	// wait for txns
	for range keys {
		if err := <-errc; err != nil {
			t.Fatalf("apply failed (%v)", err)
		}
	}

	// ensure sum matches initial sum
	sum := 0
	for _, oldkey := range keys {
		rk, err := etcdc.Get(context.TODO(), oldkey)
		if err != nil {
			t.Fatalf("couldn't fetch key %s (%v)", oldkey, err)
		}
		v, _ := strconv.ParseInt(string(rk.Kvs[0].Value), 10, 64)
		sum += int(v)
	}
	if sum != len(keys)*100 {
		t.Fatalf("bad sum. got %d, expected %d", sum, len(keys)*100)
	}
}
Example 15
// TestV3LeaseSwitch tests a key can be switched from one lease to another.
func TestV3LeaseSwitch(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	key := "foo"

	// create lease
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	lresp1, err1 := toGRPC(clus.RandClient()).Lease.LeaseCreate(ctx, &pb.LeaseCreateRequest{TTL: 30})
	if err1 != nil {
		t.Fatal(err1)
	}
	lresp2, err2 := toGRPC(clus.RandClient()).Lease.LeaseCreate(ctx, &pb.LeaseCreateRequest{TTL: 30})
	if err2 != nil {
		t.Fatal(err2)
	}

	// attach key on lease1 then switch it to lease2
	put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID}
	_, err := toGRPC(clus.RandClient()).KV.Put(ctx, put1)
	if err != nil {
		t.Fatal(err)
	}
	put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID}
	_, err = toGRPC(clus.RandClient()).KV.Put(ctx, put2)
	if err != nil {
		t.Fatal(err)
	}

	// revoking lease1 should not remove the key
	_, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID})
	if err != nil {
		t.Fatal(err)
	}
	rreq := &pb.RangeRequest{Key: []byte("foo")}
	rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
	if err != nil {
		t.Fatal(err)
	}
	if len(rresp.Kvs) != 1 {
		t.Fatalf("unexpected removal of key")
	}

	// revoking lease2 should remove the key
	_, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID})
	if err != nil {
		t.Fatal(err)
	}
	rresp, err = toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
	if err != nil {
		t.Fatal(err)
	}
	if len(rresp.Kvs) != 0 {
		t.Fatalf("lease removed but key remains")
	}
}
Example 16
// TestV3WatchCancel tests Watch API cancellation.
func TestV3WatchCancel(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())

	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}

	if err := wStream.Send(&pb.WatchRequest{CancelRequest: &pb.WatchCancelRequest{WatchId: wresp.WatchId}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}

	// watch got canceled, so this should block
	rCh := make(chan *pb.WatchResponse)
	go func() {
		resp, _ := wStream.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		t.Errorf("unexpected response is received %+v", nr)
	case <-time.After(2 * time.Second):
	}
	wStream.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		t.Errorf("rv, ok got = %v %v, want = nil true", rv, ok)
	}

	clus.Terminate(t)
}
Example 17
// TestSTMSerialize tests that a serializable STM transaction observes a consistent snapshot of the keys it reads.
func TestSTMSerialize(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	etcdc := clus.RandClient()

	// set up initial keys
	keys := make([]string, 5)
	for i := 0; i < len(keys); i++ {
		keys[i] = fmt.Sprintf("foo-%d", i)
	}

	// update keys in full batches
	updatec := make(chan struct{})
	go func() {
		defer close(updatec)
		for i := 0; i < 5; i++ {
			s := fmt.Sprintf("%d", i)
			ops := []v3.Op{}
			for _, k := range keys {
				ops = append(ops, v3.OpPut(k, s))
			}
			if _, err := etcdc.Txn(context.TODO()).Then(ops...).Commit(); err != nil {
				t.Fatalf("couldn't put keys (%v)", err)
			}
			updatec <- struct{}{}
		}
	}()

	// read all keys in txn, make sure all values match
	errc := make(chan error)
	for range updatec {
		curEtcdc := clus.RandClient()
		applyf := func(stm concurrency.STM) error {
			vs := []string{}
			for i := range keys {
				vs = append(vs, stm.Get(keys[i]))
			}
			for i := range vs {
				if vs[0] != vs[i] {
					return fmt.Errorf("got vs[%d] = %v, want %v", i, vs[i], vs[0])
				}
			}
			return nil
		}
		go func() {
			_, err := concurrency.NewSTMSerializable(context.TODO(), curEtcdc, applyf)
			errc <- err
		}()
	}

	for i := 0; i < 5; i++ {
		if err := <-errc; err != nil {
			t.Error(err)
		}
	}
}
Example 18
// TestElectionFailover tests that an election fails over to a new candidate when the current leader's session is closed.
func TestElectionFailover(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	defer dropSessionLease(clus)

	cctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	// first leader (elected)
	e := concurrency.NewElection(clus.clients[0], "test-election")
	if err := e.Campaign(context.TODO(), "foo"); err != nil {
		t.Fatalf("failed volunteer (%v)", err)
	}

	// check first leader
	resp, ok := <-e.Observe(cctx)
	if !ok {
		t.Fatalf("could not wait for first election; channel closed")
	}
	s := string(resp.Kvs[0].Value)
	if s != "foo" {
		t.Fatalf("wrong election result. got %s, wanted foo", s)
	}

	// next leader
	electedc := make(chan struct{})
	go func() {
		ee := concurrency.NewElection(clus.clients[1], "test-election")
		if eer := ee.Campaign(context.TODO(), "bar"); eer != nil {
			t.Fatal(eer)
		}
		electedc <- struct{}{}
	}()

	// invoke leader failover
	session, serr := concurrency.NewSession(clus.clients[0])
	if serr != nil {
		t.Fatal(serr)
	}
	if err := session.Close(); err != nil {
		t.Fatal(err)
	}

	// check new leader
	e = concurrency.NewElection(clus.clients[2], "test-election")
	resp, ok = <-e.Observe(cctx)
	if !ok {
		t.Fatalf("could not wait for second election; channel closed")
	}
	s = string(resp.Kvs[0].Value)
	if s != "bar" {
		t.Fatalf("wrong election result. got %s, wanted bar", s)
	}

	// leader must ack election (otherwise, Campaign may see closed conn)
	<-electedc
}
Example 19
func (kv *kv) do(op Op) (*pb.ResponseUnion, error) {
	for {
		var err error
		switch op.t {
		// TODO: handle other ops
		case tRange:
			var resp *pb.RangeResponse
			r := &pb.RangeRequest{Key: op.key, RangeEnd: op.end, Limit: op.limit, Revision: op.rev}
			if op.sort != nil {
				r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
				r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
			}

			resp, err = kv.getRemote().Range(context.TODO(), r)
			if err == nil {
				respu := &pb.ResponseUnion_ResponseRange{ResponseRange: resp}
				return &pb.ResponseUnion{Response: respu}, nil
			}
		case tPut:
			var resp *pb.PutResponse
			r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID)}
			resp, err = kv.getRemote().Put(context.TODO(), r)
			if err == nil {
				respu := &pb.ResponseUnion_ResponsePut{ResponsePut: resp}
				return &pb.ResponseUnion{Response: respu}, nil
			}
		case tDeleteRange:
			var resp *pb.DeleteRangeResponse
			r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end}
			resp, err = kv.getRemote().DeleteRange(context.TODO(), r)
			if err == nil {
				respu := &pb.ResponseUnion_ResponseDeleteRange{ResponseDeleteRange: resp}
				return &pb.ResponseUnion{Response: respu}, nil
			}
		default:
			panic("Unknown op")
		}

		if isRPCError(err) {
			return nil, err
		}

		// do not retry on modifications
		if op.isWrite() {
			go kv.switchRemote(err)
			return nil, err
		}

		if nerr := kv.switchRemote(err); nerr != nil {
			return nil, nerr
		}
	}
}
Example 20
func addNodeHandler(resp http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	node_id := vars["node_id"]

	fmt.Printf("addNode Request, node_id:%v\n", node_id)

	node_id_int, err := strconv.Atoi(node_id)
	if err != nil {
		fmt.Printf("failed to convert node_id to an integer\n")
		util.Response(nil, http.StatusBadRequest, resp)
		return
	}

	// add the peer
	if flag := addPeer(raft.Peer{uint64(node_id_int), nil}); !flag {
		fmt.Printf("add peer failure\n")
		util.Response(nil, http.StatusBadRequest, resp)
		return
	}

	// start the node
	node := startNode(uint64(node_id_int), peers, nt.nodeNetwork(uint64(node_id_int)))
	nodes = append(nodes, node)
	node.Node.Campaign(context.TODO())

	cc := raftpb.ConfChange{
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  uint64(node_id_int),
		Context: []byte(node_id),
	}

	// propose the conf change through one of the existing nodes
	for _, n := range nodes {
		if n.id == uint64(node_id_int) {
			continue
		}

		err = n.ProposeConfChange(context.TODO(), cc)
		if err != nil {
			fmt.Printf("node ProposeConfChange failure, err:%v\n", err)
			util.Response(nil, http.StatusBadRequest, resp)
			return
		}
		break
		//st := n.ApplyConfChange(cc)
		//fmt.Printf("state: %v\n", st.String())
	}

	resp.Header().Set("Content-Type", "application/json")
	util.Response(nil, http.StatusOK, resp)
}
Example 21
// TestMultiNodeProposeConfig ensures that multiNode.ProposeConfChange
// sends the given configuration proposal to the underlying raft.
func TestMultiNodeProposeConfig(t *testing.T) {
	mn := newMultiNode(1)
	go mn.run()
	s := NewMemoryStorage()
	mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, s), []Peer{{ID: 1}})
	mn.Campaign(context.TODO(), 1)
	proposed := false
	var lastIndex uint64
	var ccdata []byte
	for {
		rds := <-mn.Ready()
		rd := rds[1]
		s.Append(rd.Entries)
		// propose the conf change once this raft becomes leader
		if !proposed && rd.SoftState.Lead == mn.id {
			cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}
			var err error
			ccdata, err = cc.Marshal()
			if err != nil {
				t.Fatal(err)
			}
			mn.ProposeConfChange(context.TODO(), 1, cc)
			proposed = true
		}
		mn.Advance(rds)

		var err error
		lastIndex, err = s.LastIndex()
		if err != nil {
			t.Fatal(err)
		}
		if lastIndex >= 3 {
			break
		}
	}
	mn.Stop()

	entries, err := s.Entries(lastIndex, lastIndex+1, noLimit)
	if err != nil {
		t.Fatal(err)
	}
	if len(entries) != 1 {
		t.Fatalf("len(entries) = %d, want %d", len(entries), 1)
	}
	if entries[0].Type != raftpb.EntryConfChange {
		t.Fatalf("type = %v, want %v", entries[0].Type, raftpb.EntryConfChange)
	}
	if !bytes.Equal(entries[0].Data, ccdata) {
		t.Errorf("data = %v, want %v", entries[0].Data, ccdata)
	}
}
Example 22
func testWatchReconnRequest(t *testing.T, wctx *watchctx) {
	donec, stopc := make(chan struct{}), make(chan struct{}, 1)
	go func() {
		timer := time.After(2 * time.Second)
		defer close(donec)
		// take down watcher connection
		for {
			wctx.wclient.ActiveConnection().Close()
			select {
			case <-timer:
				// spinning on close may live lock reconnection
				return
			case <-stopc:
				return
			default:
			}
		}
	}()
	// should reconnect when requesting watch
	if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil {
		t.Fatalf("expected non-nil channel")
	}

	// wait for disconnections to stop
	stopc <- struct{}{}
	<-donec

	// ensure watcher works
	putAndWatch(t, wctx, "a", "a")
}
Example 23
func TestKVDelete(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clientv3.NewKV(clus.RandClient())
	ctx := context.TODO()

	presp, err := kv.Put(ctx, "foo", "")
	if err != nil {
		t.Fatalf("couldn't put 'foo' (%v)", err)
	}
	if presp.Header.Revision != 2 {
		t.Fatalf("presp.Header.Revision got %d, want %d", presp.Header.Revision, 2)
	}
	resp, err := kv.Delete(ctx, "foo")
	if err != nil {
		t.Fatalf("couldn't delete key (%v)", err)
	}
	if resp.Header.Revision != 3 {
		t.Fatalf("resp.Header.Revision got %d, want %d", resp.Header.Revision, 3)
	}
	gresp, err := kv.Get(ctx, "foo")
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(gresp.Kvs) > 0 {
		t.Fatalf("gresp.Kvs got %+v, want none", gresp.Kvs)
	}
}
Example 24
func TestLeaseRevoke(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clientv3.NewLease(clus.RandClient())
	defer lapi.Close()

	kv := clientv3.NewKV(clus.RandClient())

	resp, err := lapi.Create(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	_, err = lapi.Revoke(context.Background(), lease.LeaseID(resp.ID))
	if err != nil {
		t.Errorf("failed to revoke lease %v", err)
	}

	_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID)))
	if err != v3rpc.ErrLeaseNotFound {
		t.Fatalf("err = %v, want %v", err, v3rpc.ErrLeaseNotFound)
	}
}
Example 25
func ExampleKV_get() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)

	_, err = kvc.Put(context.TODO(), "foo", "bar")
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := kvc.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// Output: foo : bar
}
Example 26
// TestV3LeaseRevoke ensures a key is deleted once its lease is revoked.
func TestV3LeaseRevoke(t *testing.T) {
	testLeaseRemoveLeasedKey(t, func(clus *clusterV3, leaseID int64) error {
		lc := pb.NewLeaseClient(clus.RandConn())
		_, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
		return err
	})
}
Example 27
// deletePrefix issues a DeleteRange request that removes every key under the given prefix.
func deletePrefix(kv pb.KVClient, prefix string) (*pb.DeleteRangeResponse, error) {
	return kv.DeleteRange(
		context.TODO(),
		&pb.DeleteRangeRequest{
			Key:      []byte(prefix),
			RangeEnd: []byte(prefixEnd(prefix))})
}
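The prefixEnd helper called above is not shown in this listing. The sketch below illustrates how a prefix range end is commonly computed for etcd range and delete-range requests, by incrementing the last byte of the prefix that is not 0xff; the name prefixEndSketch and its exact behavior are assumptions, not the code deletePrefix actually calls.

// prefixEndSketch returns a key just past every key that shares prefix, suitable
// as the RangeEnd of a prefix DeleteRange. It mirrors the usual etcd convention
// but is only a sketch of the helper that is not shown here.
func prefixEndSketch(prefix string) string {
	end := []byte(prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return string(end[:i+1])
		}
	}
	// the prefix is all 0xff bytes; "\x00" conventionally means "no upper bound"
	return "\x00"
}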
Example 28
File: kv.go Project: jkhelil/etcd
func (kv *kv) do(op Op) (*pb.ResponseUnion, error) {
	for {
		var err error
		switch op.t {
		// TODO: handle other ops
		case tRange:
			var resp *pb.RangeResponse
			// TODO: setup sorting
			r := &pb.RangeRequest{Key: op.key, RangeEnd: op.end, Limit: op.limit, Revision: op.rev}
			resp, err = kv.remote.Range(context.TODO(), r)
			if err == nil {
				return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseRange{resp}}, nil
			}
		default:
			panic("Unknown op")
		}

		if isRPCError(err) {
			return nil, err
		}

		newConn, cerr := kv.c.retryConnection(kv.conn, err)
		if cerr != nil {
			// TODO: return client lib defined connection error
			return nil, cerr
		}
		kv.conn = newConn
		kv.remote = pb.NewKVClient(kv.conn)
	}
}
Example 29
// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
func TestV3LeaseKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)
	testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
		lc := toGRPC(clus.RandClient()).Lease
		lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		lac, err := lc.LeaseKeepAlive(ctx)
		if err != nil {
			return err
		}
		defer lac.CloseSend()

		// renew long enough so lease would've expired otherwise
		for i := 0; i < 3; i++ {
			if err = lac.Send(lreq); err != nil {
				return err
			}
			lresp, rxerr := lac.Recv()
			if rxerr != nil {
				return rxerr
			}
			if lresp.ID != leaseID {
				return fmt.Errorf("expected lease ID %v, got %v", leaseID, lresp.ID)
			}
			time.Sleep(time.Duration(lresp.TTL/2) * time.Second)
		}
		_, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
		return err
	})
}
Example 30
// TestNodeStep ensures that node.Step sends msgProp to propc chan
// and other kinds of messages to recvc chan.
func TestNodeStep(t *testing.T) {
	for i, msgn := range raftpb.MessageType_name {
		n := &node{
			propc: make(chan raftpb.Message, 1),
			recvc: make(chan raftpb.Message, 1),
		}
		msgt := raftpb.MessageType(i)
		n.Step(context.TODO(), raftpb.Message{Type: msgt})
		// Proposals go to the propc chan. Others go to the recvc chan.
		if msgt == raftpb.MsgProp {
			select {
			case <-n.propc:
			default:
				t.Errorf("%d: cannot receive %s on propc chan", msgt, msgn)
			}
		} else {
			if msgt == raftpb.MsgBeat || msgt == raftpb.MsgHup {
				select {
				case <-n.recvc:
					t.Errorf("%d: step should ignore %s", msgt, msgn)
				default:
				}
			} else {
				select {
				case <-n.recvc:
				default:
					t.Errorf("%d: cannot receive %s on recvc chan", msgt, msgn)
				}
			}
		}
	}
}