Example #1
// txnCommandFunc executes the "txn" command.
func txnCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("txn command does not accept argument."))
	}

	reader := bufio.NewReader(os.Stdin)

	next := compareState
	txn := &pb.TxnRequest{}
	for next != nil {
		next = next(txn, reader)
	}

	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitError, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}
	kv := pb.NewKVClient(conn)

	resp, err := kv.Txn(context.Background(), txn)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	if resp.Succeeded {
		fmt.Println("executed success request list")
	} else {
		fmt.Println("executed failure request list")
	}
}
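Note: compareState is the head of a chain of state functions; each reads one line from stdin, fills in part of the TxnRequest, and returns the next state (nil ends the loop). The named type this implies is not shown in the example; a minimal sketch, with the hypothetical name txnState:

// Sketch only: a plausible shape for compareState and its successors.
type txnState func(txn *pb.TxnRequest, r *bufio.Reader) txnState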
Example #2
File: put.go Project: navneetk/etcd
func benchPut(conn *grpc.ClientConn, key []byte, kc, n, c, size int) {
	wg.Add(c)
	requests := make(chan *etcdserverpb.PutRequest, n)

	v := make([]byte, size)
	_, err := rand.Read(v)
	if err != nil {
		fmt.Printf("failed to generate value: %v\n", err)
		os.Exit(1)
	}

	for i := 0; i < c; i++ {
		go put(etcdserverpb.NewKVClient(conn), requests)
	}

	suffixb := make([]byte, 8)
	suffix := 0
	for i := 0; i < n; i++ {
		binary.BigEndian.PutUint64(suffixb, uint64(suffix))
		r := &etcdserverpb.PutRequest{
			Key:   append(key, suffixb...),
			Value: v,
		}
		requests <- r
		if suffix > kc {
			suffix = 0
		}
		suffix++
	}
	close(requests)
}
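The put worker that benchPut fans out to is defined elsewhere in the file; a minimal sketch, assuming the package-level wg that wg.Add(c) above refers to:

// Sketch only: drain the requests channel until it is closed, issuing
// one Put RPC per request, then signal the shared WaitGroup.
func put(kvc etcdserverpb.KVClient, requests <-chan *etcdserverpb.PutRequest) {
	defer wg.Done()
	for r := range requests {
		if _, err := kvc.Put(context.Background(), r); err != nil {
			fmt.Printf("failed to put: %v\n", err)
		}
	}
}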
Example #3
File: kv.go Project: jkhelil/etcd
func (kv *kv) do(op Op) (*pb.ResponseUnion, error) {
	for {
		var err error
		switch op.t {
		// TODO: handle other ops
		case tRange:
			var resp *pb.RangeResponse
			// TODO: setup sorting
			r := &pb.RangeRequest{Key: op.key, RangeEnd: op.end, Limit: op.limit, Revision: op.rev}
			resp, err = kv.remote.Range(context.TODO(), r)
			if err == nil {
				return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseRange{resp}}, nil
			}
		default:
			panic("Unknown op")
		}

		if isRPCError(err) {
			return nil, err
		}

		newConn, cerr := kv.c.retryConnection(kv.conn, err)
		if cerr != nil {
			// TODO: return client lib defined connection error
			return nil, cerr
		}
		kv.conn = newConn
		kv.remote = pb.NewKVClient(kv.conn)
	}
}
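The retry loop above gives up only when isRPCError reports a server-side failure; its body is not shown. A hedged sketch of one plausible implementation, relying on the "rpc error:" prefix that grpc-go attaches to RPC-level errors:

// Sketch only: RPC-level errors come back from the server and are not
// worth retrying; anything else is assumed to be a broken connection.
func isRPCError(err error) bool {
	return strings.HasPrefix(err.Error(), "rpc error:")
}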
Example #4
func (c *cluster) compactKV(rev int64) error {
	var (
		conn *grpc.ClientConn
		err  error
	)

	if rev <= 0 {
		return nil
	}

	for i, u := range c.GRPCURLs {
		conn, err = grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err = kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true})
		cancel()
		conn.Close()
		if err != nil {
			if strings.Contains(err.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("%s is already compacted with %d (%v)", u, rev, err)
				err = nil // in case compact was requested more than once
			}
		}
	}
	return err
}
Example #5
func (c *cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}

	for i, u := range c.GRPCURLs {
		conn, derr := grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if derr != nil {
			err = derr
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true})
		cancel()
		conn.Close()
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("%s is already compacted with %d (%v)", u, rev, cerr)
			} else {
				err = cerr
			}
		}
	}
	return err
}
Example #6
func (c *cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}

	for i, m := range c.Members {
		u := m.ClientURL
		conn, derr := m.dialGRPC()
		if derr != nil {
			plog.Printf("[compact kv #%d] dial error %v (endpoint %s)", i, derr, u)
			err = derr
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		plog.Printf("[compact kv #%d] starting (endpoint %s)", i, u)
		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true}, grpc.FailFast(false))
		cancel()
		conn.Close()
		succeed := true
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("[compact kv #%d] already compacted (endpoint %s)", i, u)
			} else {
				plog.Warningf("[compact kv #%d] error %v (endpoint %s)", i, cerr, u)
				err = cerr
				succeed = false
			}
		}
		if succeed {
			plog.Printf("[compact kv #%d] done (endpoint %s)", i, u)
		}
	}
	return err
}
Example #7
// txnCommandFunc executes the "txn" command.
func txnCommandFunc(c *cli.Context) {
	if len(c.Args()) != 0 {
		panic("unexpected args")
	}

	reader := bufio.NewReader(os.Stdin)

	next := compareState
	txn := &pb.TxnRequest{}
	for next != nil {
		next = next(txn, reader)
	}

	conn, err := grpc.Dial(c.GlobalString("endpoint"))
	if err != nil {
		panic(err)
	}
	kv := pb.NewKVClient(conn)

	resp, err := kv.Txn(context.Background(), txn)
	if err != nil {
		fmt.Println(err)
		return
	}
	if resp.Succeeded {
		fmt.Println("executed success request list")
	} else {
		fmt.Println("executed failure request list")
	}
}
Example #8
// checkConsistency stops the cluster for a moment and gets the hashes of the KV storages.
func (c *cluster) checkConsistency() error {
	hashes := make(map[string]uint32)
	for _, u := range c.GRPCURLs {
		conn, err := grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			return err
		}
		kvc := pb.NewKVClient(conn)

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		resp, err := kvc.Hash(ctx, &pb.HashRequest{})
		cancel()
		conn.Close()
		if err != nil {
			return err
		}

		hashes[u] = resp.Hash
	}

	if !checkConsistency(hashes) {
		return fmt.Errorf("check consistency fails: %v", hashes)
	}
	return nil
}
Example #9
func (s *keyStresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())

	s.wg.Add(s.N)
	s.conn = conn
	s.cancel = cancel

	kvc := pb.NewKVClient(conn)

	var stressEntries = []stressEntry{
		{weight: 0.7, f: newStressPut(kvc, s.keySuffixRange, s.keySize)},
		{
			weight: 0.7 * float32(s.keySize) / float32(s.keyLargeSize),
			f:      newStressPut(kvc, s.keySuffixRange, s.keyLargeSize),
		},
		{weight: 0.07, f: newStressRange(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressRangeInterval(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressDelete(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressDeleteInterval(kvc, s.keySuffixRange)},
	}
	s.stressTable = createStressTable(stressEntries)

	for i := 0; i < s.N; i++ {
		go s.run(ctx)
	}

	plog.Infof("keyStresser %q is started", s.Endpoint)
	return nil
}
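createStressTable and the per-entry weights imply weighted random selection. A sketch of such a table, assuming a stressFunc function type behind the f fields above:

// Sketch only: choose picks an entry with probability proportional to
// its weight, so the weights above need not sum to 1.
type stressTable struct {
	entries    []stressEntry
	sumWeights float32
}

func createStressTable(entries []stressEntry) *stressTable {
	st := stressTable{entries: entries}
	for _, e := range entries {
		st.sumWeights += e.weight
	}
	return &st
}

func (st *stressTable) choose() stressFunc {
	v := rand.Float32() * st.sumWeights
	var sum float32
	for _, e := range st.entries {
		sum += e.weight
		if v < sum {
			return e.f
		}
	}
	return st.entries[len(st.entries)-1].f // guard against float rounding
}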
Example #10
// rangeCommandFunc executes the "range" command.
func rangeCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) == 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("range command needs arguments."))
	}

	var rangeEnd []byte
	key := []byte(args[0])
	if len(args) > 1 {
		rangeEnd = []byte(args[1])
	}

	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitError, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.RangeRequest{Key: key, RangeEnd: rangeEnd}

	resp, err := kv.Range(context.Background(), req)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s %s\n", string(kv.Key), string(kv.Value))
	}
}
Example #11
// deleteRangeCommandFunc executes the "deleteRange" command.
func deleteRangeCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) == 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("delete-range command needs arguments."))
	}

	var rangeEnd []byte
	key := []byte(args[0])
	if len(args) > 1 {
		rangeEnd = []byte(args[1])
	}

	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitError, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.DeleteRangeRequest{Key: key, RangeEnd: rangeEnd}

	if _, err = kv.DeleteRange(context.Background(), req); err != nil {
		ExitWithError(ExitError, err)
	}

	if rangeEnd != nil {
		fmt.Printf("range [%s, %s) is deleted\n", string(key), string(rangeEnd))
	} else {
		fmt.Printf("key %s is deleted\n", string(key))
	}
}
Example #12
// putCommandFunc executes the "put" command.
func putCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 2 {
		ExitWithError(ExitBadArgs, fmt.Errorf("put command needs 2 arguments."))
	}

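	// leaseStr is assumed to be a package-level flag value holding the lease ID in hex.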
	id, err := strconv.ParseInt(leaseStr, 16, 64)
	if err != nil {
		ExitWithError(ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err))
	}

	key := []byte(args[0])
	value := []byte(args[1])

	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitError, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.PutRequest{Key: key, Value: value, Lease: id}

	if _, err = kv.Put(context.Background(), req); err != nil {
		ExitWithError(ExitError, err)
	}
	fmt.Printf("%s %s\n", key, value)
}
Example #13
func (s *stresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	s.mu.Unlock()

	kvc := pb.NewKVClient(conn)

	for i := 0; i < s.N; i++ {
		go s.run(ctx, kvc)
	}

	plog.Printf("stresser %q is started", s.Endpoint)
	return nil
}
Example #14
func newClient(cfg *Config) (*Client, error) {
	if cfg == nil {
		cfg = &Config{RetryDialer: dialEndpointList}
	}
	var creds *credentials.TransportAuthenticator
	if cfg.TLS != nil {
		tlscfg, err := cfg.TLS.ClientConfig()
		if err != nil {
			return nil, err
		}
		c := credentials.NewTLS(tlscfg)
		creds = &c
	}
	// use a temporary skeleton client to bootstrap first connection
	conn, err := cfg.RetryDialer(&Client{cfg: *cfg, creds: creds})
	if err != nil {
		return nil, err
	}
	return &Client{
		KV:      pb.NewKVClient(conn),
		Lease:   pb.NewLeaseClient(conn),
		Watch:   pb.NewWatchClient(conn),
		Cluster: pb.NewClusterClient(conn),
		conn:    conn,
		cfg:     *cfg,
		creds:   creds,
	}, nil
}
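A hedged usage sketch: once newClient succeeds, the generated gRPC stubs hang directly off the Client. Here cfg is assumed to carry whatever endpoint list dialEndpointList consumes:

// Sketch only: error handling is left to the caller.
c, err := newClient(cfg)
if err != nil {
	return err
}
if _, err = c.KV.Put(context.Background(),
	&pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
	return err
}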
Example #15
// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	kvc := pb.NewKVClient(clus.RandConn())
	key := []byte("foo")
	preq := &pb.PutRequest{Key: key, Lease: 123456}
	tests := []func(){
		// put case
		func() {
			if presp, err := kvc.Put(context.TODO(), preq); err == nil {
				t.Errorf("succeeded put key. req: %v. resp: %v", preq, presp)
			}
		},
		// txn success case
		func() {
			txn := &pb.TxnRequest{}
			txn.Success = append(txn.Success, &pb.RequestUnion{RequestPut: preq})
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn success. req: %v. resp: %v", txn, tresp)
			}
		},
		// txn failure case
		func() {
			txn := &pb.TxnRequest{}
			txn.Failure = append(txn.Failure, &pb.RequestUnion{RequestPut: preq})
			cmp := &pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			}
			txn.Compare = append(txn.Compare, cmp)
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn failure. req: %v. resp: %v", txn, tresp)
			}
		},
		// ignore bad lease in failure on success txn
		func() {
			txn := &pb.TxnRequest{}
			rreq := &pb.RangeRequest{Key: []byte("bar")}
			txn.Success = append(txn.Success, &pb.RequestUnion{RequestRange: rreq})
			txn.Failure = append(txn.Failure, &pb.RequestUnion{RequestPut: preq})
			if tresp, err := kvc.Txn(context.TODO(), txn); err != nil {
				t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp)
			}
		},
	}

	for i, f := range tests {
		f()
		// key shouldn't have been stored
		rreq := &pb.RangeRequest{Key: key}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("#%d. could not rangereq (%v)", i, err)
		} else if len(rresp.Kvs) != 0 {
			t.Errorf("#%d. expected no keys, got %v", i, rresp)
		}
	}
}
Example #16
// deleteRangeCommandFunc executes the "delegeRange" command.
func deleteRangeCommandFunc(c *cli.Context) {
	if len(c.Args()) == 0 {
		panic("bad arg")
	}

	var rangeEnd []byte
	key := []byte(c.Args()[0])
	if len(c.Args()) > 1 {
		rangeEnd = []byte(c.Args()[1])
	}
	conn, err := grpc.Dial(c.GlobalString("endpoint"))
	if err != nil {
		panic(err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.DeleteRangeRequest{Key: key, RangeEnd: rangeEnd}

	if _, err = kv.DeleteRange(context.Background(), req); err != nil {
		panic(err)
	}

	if rangeEnd != nil {
		fmt.Printf("range [%s, %s) is deleted\n", string(key), string(rangeEnd))
	} else {
		fmt.Printf("key %s is deleted\n", string(key))
	}
}
Example #17
func toGRPC(c *clientv3.Client) grpcAPI {
	return grpcAPI{
		pb.NewClusterClient(c.ActiveConnection()),
		pb.NewKVClient(c.ActiveConnection()),
		pb.NewLeaseClient(c.ActiveConnection()),
		pb.NewWatchClient(c.ActiveConnection()),
	}
}
Example #18
// TestV3WatchCancel tests Watch APIs cancellation.
func TestV3WatchCancel(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())

	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}

	if err := wStream.Send(&pb.WatchRequest{CancelRequest: &pb.WatchCancelRequest{WatchId: wresp.WatchId}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}

	// watch got canceled, so this should block
	rCh := make(chan *pb.WatchResponse)
	go func() {
		resp, _ := wStream.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		t.Errorf("unexpected response is received %+v", nr)
	case <-time.After(2 * time.Second):
	}
	wStream.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		t.Errorf("rv, ok got = %v %v, want = nil true", rv, ok)
	}

	clus.Terminate(t)
}
Example #19
func TestV3TxnTooManyOps(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	kvc := pb.NewKVClient(clus.RandConn())

	addCompareOps := func(txn *pb.TxnRequest) {
		txn.Compare = append(txn.Compare,
			&pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			})
	}
	addSuccessOps := func(txn *pb.TxnRequest) {
		txn.Success = append(txn.Success,
			&pb.RequestUnion{
				Request: &pb.RequestUnion_RequestPut{
					RequestPut: &pb.PutRequest{
						Key:   []byte("bar"),
						Value: []byte("bar"),
					},
				},
			})
	}
	addFailureOps := func(txn *pb.TxnRequest) {
		txn.Failure = append(txn.Failure,
			&pb.RequestUnion{
				Request: &pb.RequestUnion_RequestPut{
					RequestPut: &pb.PutRequest{
						Key:   []byte("bar"),
						Value: []byte("bar"),
					},
				},
			})
	}

	tests := []func(txn *pb.TxnRequest){
		addCompareOps,
		addSuccessOps,
		addFailureOps,
	}

	for i, tt := range tests {
		txn := &pb.TxnRequest{}
		for j := 0; j < v3rpc.MaxOpsPerTxn+1; j++ {
			tt(txn)
		}

		_, err := kvc.Txn(context.Background(), txn)
		if err != v3rpc.ErrTooManyOps {
			t.Errorf("#%d: err = %v, want %v", i, err, v3rpc.ErrTooManyOps)
		}
	}
}
Example #20
func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, errW := wAPI.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}

	creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
		CancelRequest: &pb.WatchCancelRequest{
			WatchId: wresp.WatchId}}}
	if err := wStream.Send(creq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}

	// watch got canceled, so this should block
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}

	clus.Terminate(t)
}
Example #21
File: kv.go Project: khogeland/etcd
func NewKV(c *Client) KV {
	conn := c.ActiveConnection()
	remote := pb.NewKVClient(conn)

	return &kv{
		conn:   conn,
		remote: remote,

		c: c,
	}
}
Example #22
func (s *stresser) Stress() error {
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	defer conn.Close()
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.mu.Unlock()

	kvc := pb.NewKVClient(conn)

	for i := 0; i < s.N; i++ {
		go func(i int) {
			defer wg.Done()
			for {
				// TODO: 10-second is enough timeout to cover leader failure
				// and immediate leader election. Find out what other cases this
				// could be timed out.
				putctx, putcancel := context.WithTimeout(ctx, 10*time.Second)
				_, err := kvc.Put(putctx, &pb.PutRequest{
					Key:   []byte(fmt.Sprintf("foo%d", rand.Intn(s.KeySuffixRange))),
					Value: []byte(randStr(s.KeySize)),
				})
				putcancel()
				if err != nil {
					if grpc.ErrorDesc(err) == context.DeadlineExceeded.Error() {
						// This retries when request is triggered at the same time as
						// leader failure. When we terminate the leader, the request to
						// that leader cannot be processed, and times out. Also requests
						// to followers cannot be forwarded to the old leader, so timing out
						// as well. We want to keep stressing until the cluster elects a
						// new leader and start processing requests again.
						continue
					}
					return
				}
				s.mu.Lock()
				s.success++
				s.mu.Unlock()
			}
		}(i)
	}

	<-ctx.Done()
	return nil
}
Example #23
File: kv.go Project: khogeland/etcd
func (kv *kv) switchRemote(prevErr error) error {
	newConn, err := kv.c.retryConnection(kv.conn, prevErr)
	if err != nil {
		return err
	}

	kv.mu.Lock()
	defer kv.mu.Unlock()

	kv.conn = newConn
	kv.remote = pb.NewKVClient(kv.conn)
	return nil
}
Example #24
func newClient(conn *grpc.ClientConn, cfg *Config) *Client {
	if cfg == nil {
		cfg = &Config{RetryDialer: dialEndpointList}
	}
	return &Client{
		KV:      pb.NewKVClient(conn),
		Lease:   pb.NewLeaseClient(conn),
		Watch:   pb.NewWatchClient(conn),
		Cluster: pb.NewClusterClient(conn),
		conn:    conn,
		cfg:     *cfg,
	}
}
Example #25
File: get.go Project: navneetk/etcd
func benchGet(conn *grpc.ClientConn, key, rangeEnd []byte, n, c int) {
	wg.Add(c)
	requests := make(chan struct{}, n)

	for i := 0; i < c; i++ {
		go get(etcdserverpb.NewKVClient(conn), key, rangeEnd, requests)
	}

	for i := 0; i < n; i++ {
		requests <- struct{}{}
	}
	close(requests)
}
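As with benchPut, the get worker is not shown; a minimal sketch under the same assumption of a package-level wg:

// Sketch only: every token on the channel triggers one Range RPC over
// the fixed key interval.
func get(kvc etcdserverpb.KVClient, key, rangeEnd []byte, requests <-chan struct{}) {
	defer wg.Done()
	for range requests {
		req := &etcdserverpb.RangeRequest{Key: key, RangeEnd: rangeEnd}
		if _, err := kvc.Range(context.Background(), req); err != nil {
			fmt.Printf("failed to range: %v\n", err)
		}
	}
}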
Example #26
File: put.go Project: ikatson/etcd
func putFunc(cmd *cobra.Command, args []string) {
	if keySpaceSize <= 0 {
		fmt.Fprintf(os.Stderr, "expected positive --key-space-size, got (%v)\n", keySpaceSize)
		os.Exit(1)
	}

	results = make(chan result)
	requests := make(chan etcdserverpb.PutRequest, totalClients)
	bar = pb.New(putTotal)

	k, v := make([]byte, keySize), mustRandBytes(valSize)

	conns := make([]*grpc.ClientConn, totalConns)
	for i := range conns {
		conns[i] = mustCreateConn()
	}

	clients := make([]etcdserverpb.KVClient, totalClients)
	for i := range clients {
		clients[i] = etcdserverpb.NewKVClient(conns[i%int(totalConns)])
	}

	bar.Format("Bom !")
	bar.Start()

	for i := range clients {
		wg.Add(1)
		go doPut(context.Background(), clients[i], requests)
	}

	pdoneC := printReport(results)

	go func() {
		for i := 0; i < putTotal; i++ {
			if seqKeys {
				binary.PutVarint(k, int64(i%keySpaceSize))
			} else {
				binary.PutVarint(k, int64(rand.Intn(keySpaceSize)))
			}
			requests <- etcdserverpb.PutRequest{Key: k, Value: v}
		}
		close(requests)
	}()

	wg.Wait()

	bar.Finish()

	close(results)
	<-pdoneC
}
Example #27
File: range.go Project: ngaut/etcd
func rangeFunc(cmd *cobra.Command, args []string) {
	if len(args) == 0 || len(args) > 2 {
		fmt.Fprintln(os.Stderr, cmd.Usage())
		os.Exit(1)
	}

	k := []byte(args[0])
	var end []byte
	if len(args) == 2 {
		end = []byte(args[1])
	}

	results = make(chan *result, rangeTotal)
	requests := make(chan *etcdserverpb.RangeRequest, rangeTotal)
	bar = pb.New(rangeTotal)

	conns := make([]*grpc.ClientConn, totalConns)
	for i := range conns {
		conns[i] = mustCreateConn()
	}

	clients := make([]etcdserverpb.KVClient, totalClients)
	for i := range clients {
		clients[i] = etcdserverpb.NewKVClient(conns[i%int(totalConns)])
	}

	bar.Format("Bom !")
	bar.Start()

	for i := range clients {
		wg.Add(1)
		go doRange(clients[i], requests)
	}

	start := time.Now()
	for i := 0; i < rangeTotal; i++ {
		r := &etcdserverpb.RangeRequest{
			Key:      k,
			RangeEnd: end,
		}
		requests <- r
	}
	close(requests)

	wg.Wait()

	bar.Finish()
	printReport(rangeTotal, results, time.Since(start))
}
Example #28
// setHealthKey sets health key on all given urls.
func setHealthKey(us []string) error {
	for _, u := range us {
		conn, err := grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			return fmt.Errorf("%v (%s)", err, u)
		}
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		kvc := pb.NewKVClient(conn)
		_, err = kvc.Put(ctx, &pb.PutRequest{Key: []byte("health"), Value: []byte("good")})
		cancel()
		if err != nil {
			return err
		}
	}
	return nil
}
Example #29
func (kv *kv) switchRemote(prevErr error) error {
	// Usually it's a bad idea to lock on network i/o but here it's OK
	// since the link is down and new requests can't be processed anyway.
	// Likewise, if connecting stalls, closing the Client can break the
	// lock via context cancelation.
	kv.mu.Lock()
	defer kv.mu.Unlock()

	newConn, err := kv.c.retryConnection(kv.conn, prevErr)
	if err != nil {
		return err
	}

	kv.conn = newConn
	kv.remote = pb.NewKVClient(kv.conn)
	return nil
}
Example #30
// putCommandFunc executes the "put" command.
func putCommandFunc(c *cli.Context) {
	if len(c.Args()) != 2 {
		panic("bad arg")
	}

	key := []byte(c.Args()[0])
	value := []byte(c.Args()[1])
	conn, err := grpc.Dial(c.GlobalString("endpoint"))
	if err != nil {
		panic(err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.PutRequest{Key: key, Value: value}

	if _, err = kv.Put(context.Background(), req); err != nil {
		panic(err)
	}
	fmt.Printf("%s %s\n", key, value)
}