Code example #1
File: txn_command.go Project: ikatson/etcd
// txnCommandFunc executes the "txn" command.
func txnCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("txn command does not accept argument."))
	}

	reader := bufio.NewReader(os.Stdin)

	next := compareState
	txn := &pb.TxnRequest{}
	for next != nil {
		next = next(txn, reader)
	}

	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitError, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}
	kv := pb.NewKVClient(conn)

	resp, err := kv.Txn(context.Background(), txn)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	if resp.Succeeded {
		fmt.Println("executed success request list")
	} else {
		fmt.Println("executed failure request list")
	}
}
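Most of the command snippets on this page repeat the same pattern: dial the endpoint, wrap the connection with pb.NewKVClient, issue one RPC with a bounded context, and check the error before touching the response. A minimal, self-contained sketch of that pattern follows; the endpoint address, the etcdserverpb import path, and the choice of a Range request are assumptions for illustration, not taken from any single snippet above.

package main

import (
	"fmt"
	"log"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb" // assumed import path for this etcd vintage
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Dial a single endpoint without TLS, as most snippets above do.
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()

	kv := pb.NewKVClient(conn)

	// One Range RPC with a timeout; check the error before reading the response.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	resp, err := kv.Range(ctx, &pb.RangeRequest{Key: []byte("foo")})
	cancel()
	if err != nil {
		log.Fatalf("range failed: %v", err)
	}
	for _, kvp := range resp.Kvs {
		fmt.Printf("%s %s\n", kvp.Key, kvp.Value)
	}
}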
Code example #2
File: put.go Project: navneetk/etcd
func benchPut(conn *grpc.ClientConn, key []byte, kc, n, c, size int) {
	wg.Add(c)
	requests := make(chan *etcdserverpb.PutRequest, n)

	v := make([]byte, size)
	_, err := rand.Read(v)
	if err != nil {
		fmt.Printf("failed to generate value: %v\n", err)
		os.Exit(1)
		return
	}

	for i := 0; i < c; i++ {
		go put(etcdserverpb.NewKVClient(conn), requests)
	}

	suffixb := make([]byte, 8)
	suffix := 0
	for i := 0; i < n; i++ {
		binary.BigEndian.PutUint64(suffixb, uint64(suffix))
		r := &etcdserverpb.PutRequest{
			Key:   append(key, suffixb...),
			Value: v,
		}
		requests <- r
		if suffix > kc {
			suffix = 0
		}
		suffix++
	}
	close(requests)
}
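benchPut only shows the producer side; the put worker it launches is not included in the snippet. A plausible sketch of such a worker is shown below, assuming it drains the requests channel, issues one Put per request, and relies on the same package-level wg as benchPut; the actual implementation in this project may differ.

// put is a hypothetical reconstruction of the worker started by benchPut:
// it consumes PutRequests until the channel is closed, then marks the
// shared WaitGroup as done.
func put(kvc etcdserverpb.KVClient, requests <-chan *etcdserverpb.PutRequest) {
	defer wg.Done()
	for r := range requests {
		if _, err := kvc.Put(context.Background(), r); err != nil {
			fmt.Printf("put failed: %v\n", err)
		}
	}
}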
Code example #3
File: kv.go Project: jkhelil/etcd
func (kv *kv) do(op Op) (*pb.ResponseUnion, error) {
	for {
		var err error
		switch op.t {
		// TODO: handle other ops
		case tRange:
			var resp *pb.RangeResponse
			// TODO: setup sorting
			r := &pb.RangeRequest{Key: op.key, RangeEnd: op.end, Limit: op.limit, Revision: op.rev}
			resp, err = kv.remote.Range(context.TODO(), r)
			if err == nil {
				return &pb.ResponseUnion{Response: &pb.ResponseUnion_ResponseRange{resp}}, nil
			}
		default:
			panic("Unknown op")
		}

		if isRPCError(err) {
			return nil, err
		}

		newConn, cerr := kv.c.retryConnection(kv.conn, err)
		if cerr != nil {
			// TODO: return client lib defined connection error
			return nil, cerr
		}
		kv.conn = newConn
		kv.remote = pb.NewKVClient(kv.conn)
	}
}
Code example #4
File: cluster.go Project: luxas/flannel
func (c *cluster) compactKV(rev int64) error {
	var (
		conn *grpc.ClientConn
		err  error
	)

	if rev <= 0 {
		return nil
	}

	for i, u := range c.GRPCURLs {
		conn, err = grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err = kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true})
		cancel()
		conn.Close()
		if err != nil {
			if strings.Contains(err.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("%s is already compacted with %d (%v)", u, rev, err)
				err = nil // in case compact was requested more than once
			}
		}
	}
	return err
}
Code example #5
File: cluster.go Project: dnaeon/etcd
func (c *cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}

	for i, u := range c.GRPCURLs {
		conn, derr := grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if derr != nil {
			err = derr
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true})
		cancel()
		conn.Close()
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("%s is already compacted with %d (%v)", u, rev, cerr)
			} else {
				err = cerr
			}
		}
	}
	return err
}
Code example #6
File: cluster.go Project: pulcy/vault-monkey
func (c *cluster) compactKV(rev int64, timeout time.Duration) (err error) {
	if rev <= 0 {
		return nil
	}

	for i, m := range c.Members {
		u := m.ClientURL
		conn, derr := m.dialGRPC()
		if derr != nil {
			plog.Printf("[compact kv #%d] dial error %v (endpoint %s)", i, derr, u)
			err = derr
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		plog.Printf("[compact kv #%d] starting (endpoint %s)", i, u)
		_, cerr := kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true}, grpc.FailFast(false))
		cancel()
		conn.Close()
		succeed := true
		if cerr != nil {
			if strings.Contains(cerr.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("[compact kv #%d] already compacted (endpoint %s)", i, u)
			} else {
				plog.Warningf("[compact kv #%d] error %v (endpoint %s)", i, cerr, u)
				err = cerr
				succeed = false
			}
		}
		if succeed {
			plog.Printf("[compact kv #%d] done (endpoint %s)", i, u)
		}
	}
	return err
}
Code example #7
File: txn_command.go Project: navneetk/etcd
// txnCommandFunc executes the "txn" command.
func txnCommandFunc(c *cli.Context) {
	if len(c.Args()) != 0 {
		panic("unexpected args")
	}

	reader := bufio.NewReader(os.Stdin)

	next := compareState
	txn := &pb.TxnRequest{}
	for next != nil {
		next = next(txn, reader)
	}

	conn, err := grpc.Dial(c.GlobalString("endpoint"))
	if err != nil {
		panic(err)
	}
	kv := pb.NewKVClient(conn)

	resp, err := kv.Txn(context.Background(), txn)
	if err != nil {
		fmt.Println(err)
		return
	}
	if resp.Succeeded {
		fmt.Println("executed success request list")
	} else {
		fmt.Println("executed failure request list")
	}
}
Code example #8
File: tester.go Project: Longbow98/etcd
// checkConsistency stops the cluster for a moment and gets the hashes of the KV storages.
func (c *cluster) checkConsistency() error {
	hashes := make(map[string]uint32)
	for _, u := range c.GRPCURLs {
		conn, err := grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			return err
		}
		kvc := pb.NewKVClient(conn)

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		resp, err := kvc.Hash(ctx, &pb.HashRequest{})
		cancel()
		if err != nil {
			return err
		}

		hashes[u] = resp.Hash
	}

	if !checkConsistency(hashes) {
		return fmt.Errorf("check consistency fails: %v", hashes)
	}
	return nil
}
Code example #9
File: key_stresser.go Project: pulcy/vault-monkey
func (s *keyStresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())

	s.wg.Add(s.N)
	s.conn = conn
	s.cancel = cancel

	kvc := pb.NewKVClient(conn)

	var stressEntries = []stressEntry{
		{weight: 0.7, f: newStressPut(kvc, s.keySuffixRange, s.keySize)},
		{
			weight: 0.7 * float32(s.keySize) / float32(s.keyLargeSize),
			f:      newStressPut(kvc, s.keySuffixRange, s.keyLargeSize),
		},
		{weight: 0.07, f: newStressRange(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressRangeInterval(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressDelete(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressDeleteInterval(kvc, s.keySuffixRange)},
	}
	s.stressTable = createStressTable(stressEntries)

	for i := 0; i < s.N; i++ {
		go s.run(ctx)
	}

	plog.Infof("keyStresser %q is started", s.Endpoint)
	return nil
}
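stressEntry, createStressTable, and newStressPut are defined elsewhere in this project and are not part of the snippet. One way a weighted stress table could work is sketched below, picking an entry with probability proportional to its weight; the type names and the signature of f are illustrative assumptions, not the project's actual definitions.

// Hypothetical sketch of a weighted stress table.
type stressEntry struct {
	weight float32
	f      func(context.Context) error
}

type stressTable struct {
	entries    []stressEntry
	sumWeights float32
}

func createStressTable(entries []stressEntry) *stressTable {
	st := stressTable{entries: entries}
	for _, e := range entries {
		st.sumWeights += e.weight
	}
	return &st
}

// choose returns a stress function, weighted by each entry's weight.
func (st *stressTable) choose() func(context.Context) error {
	target := rand.Float32() * st.sumWeights
	var sum float32
	idx := len(st.entries) - 1
	for i := range st.entries {
		sum += st.entries[i].weight
		if sum >= target {
			idx = i
			break
		}
	}
	return st.entries[idx].f
}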
Code example #10
File: range_command.go Project: ikatson/etcd
// rangeCommandFunc executes the "range" command.
func rangeCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) == 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("range command needs arguments."))
	}

	var rangeEnd []byte
	key := []byte(args[0])
	if len(args) > 1 {
		rangeEnd = []byte(args[1])
	}

	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitError, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.RangeRequest{Key: key, RangeEnd: rangeEnd}

	resp, err := kv.Range(context.Background(), req)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s %s\n", string(kv.Key), string(kv.Value))
	}
}
Code example #11
File: delete_range_command.go Project: ikatson/etcd
// deleteRangeCommandFunc executes the "deleteRange" command.
func deleteRangeCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) == 0 {
		ExitWithError(ExitBadArgs, fmt.Errorf("delete-range command needs arguments."))
	}

	var rangeEnd []byte
	key := []byte(args[0])
	if len(args) > 1 {
		rangeEnd = []byte(args[1])
	}

	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitError, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.DeleteRangeRequest{Key: key, RangeEnd: rangeEnd}

	if _, err := kv.DeleteRange(context.Background(), req); err != nil {
		ExitWithError(ExitError, err)
	}

	if rangeEnd != nil {
		fmt.Printf("range [%s, %s) is deleted\n", string(key), string(rangeEnd))
	} else {
		fmt.Printf("key %s is deleted\n", string(key))
	}
}
Code example #12
File: put_command.go Project: Timer/etcd
// putCommandFunc executes the "put" command.
func putCommandFunc(cmd *cobra.Command, args []string) {
	if len(args) != 2 {
		ExitWithError(ExitBadArgs, fmt.Errorf("put command needs 2 arguments."))
	}

	id, err := strconv.ParseInt(leaseStr, 16, 64)
	if err != nil {
		ExitWithError(ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err))
	}

	key := []byte(args[0])
	value := []byte(args[1])

	endpoint, err := cmd.Flags().GetString("endpoint")
	if err != nil {
		ExitWithError(ExitError, err)
	}
	conn, err := grpc.Dial(endpoint)
	if err != nil {
		ExitWithError(ExitBadConnection, err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.PutRequest{Key: key, Value: value, Lease: id}

	if _, err := kv.Put(context.Background(), req); err != nil {
		ExitWithError(ExitError, err)
	}
	fmt.Printf("%s %s\n", key, value)
}
Code example #13
File: stresser.go Project: yuya008/etcd
func (s *stresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.rateLimiter = rate.NewLimiter(rate.Every(time.Second), s.qps)
	s.mu.Unlock()

	kvc := pb.NewKVClient(conn)

	for i := 0; i < s.N; i++ {
		go s.run(ctx, kvc)
	}

	plog.Printf("stresser %q is started", s.Endpoint)
	return nil
}
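The run method launched by this Stress is not shown. Assuming it honors the rate limiter configured above, a minimal sketch could block on rateLimiter.Wait before every request and exit when the context is canceled; the field names (keySuffixRange, keySize) and the randStr helper are borrowed from the neighboring snippets and may not match this project.

// Hypothetical sketch of the per-goroutine stress loop: wait for a
// rate-limiter token, issue one Put, and stop once the context is canceled.
func (s *stresser) run(ctx context.Context, kvc pb.KVClient) {
	defer s.wg.Done()
	for {
		// Wait returns an error as soon as ctx is canceled, ending the loop.
		if err := s.rateLimiter.Wait(ctx); err != nil {
			return
		}
		putctx, putcancel := context.WithTimeout(ctx, 10*time.Second)
		_, err := kvc.Put(putctx, &pb.PutRequest{
			Key:   []byte(fmt.Sprintf("foo%d", rand.Intn(s.keySuffixRange))),
			Value: []byte(randStr(s.keySize)),
		})
		putcancel()
		if err != nil && grpc.ErrorDesc(err) != context.DeadlineExceeded.Error() {
			// Deadline errors keep the worker retrying (e.g. during leader
			// elections); anything else stops it.
			return
		}
	}
}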
Code example #14
File: client.go Project: obeattie/etcd
func newClient(cfg *Config) (*Client, error) {
	if cfg == nil {
		cfg = &Config{RetryDialer: dialEndpointList}
	}
	var creds *credentials.TransportAuthenticator
	if cfg.TLS != nil {
		tlscfg, err := cfg.TLS.ClientConfig()
		if err != nil {
			return nil, err
		}
		c := credentials.NewTLS(tlscfg)
		creds = &c
	}
	// use a temporary skeleton client to bootstrap first connection
	conn, err := cfg.RetryDialer(&Client{cfg: *cfg, creds: creds})
	if err != nil {
		return nil, err
	}
	return &Client{
		KV:      pb.NewKVClient(conn),
		Lease:   pb.NewLeaseClient(conn),
		Watch:   pb.NewWatchClient(conn),
		Cluster: pb.NewClusterClient(conn),
		conn:    conn,
		cfg:     *cfg,
		creds:   creds,
	}, nil
}
Code example #15
File: v3_grpc_test.go Project: lijianwei123/etcd
// TestV3PutMissingLease ensures that a Put on a key with a bogus lease fails.
func TestV3PutMissingLease(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	kvc := pb.NewKVClient(clus.RandConn())
	key := []byte("foo")
	preq := &pb.PutRequest{Key: key, Lease: 123456}
	tests := []func(){
		// put case
		func() {
			if presp, err := kvc.Put(context.TODO(), preq); err == nil {
				t.Errorf("succeeded put key. req: %v. resp: %v", preq, presp)
			}
		},
		// txn success case
		func() {
			txn := &pb.TxnRequest{}
			txn.Success = append(txn.Success, &pb.RequestUnion{RequestPut: preq})
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn success. req: %v. resp: %v", txn, tresp)
			}
		},
		// txn failure case
		func() {
			txn := &pb.TxnRequest{}
			txn.Failure = append(txn.Failure, &pb.RequestUnion{RequestPut: preq})
			cmp := &pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			}
			txn.Compare = append(txn.Compare, cmp)
			if tresp, err := kvc.Txn(context.TODO(), txn); err == nil {
				t.Errorf("succeeded txn failure. req: %v. resp: %v", txn, tresp)
			}
		},
		// ignore bad lease in failure on success txn
		func() {
			txn := &pb.TxnRequest{}
			rreq := &pb.RangeRequest{Key: []byte("bar")}
			txn.Success = append(txn.Success, &pb.RequestUnion{RequestRange: rreq})
			txn.Failure = append(txn.Failure, &pb.RequestUnion{RequestPut: preq})
			if tresp, err := kvc.Txn(context.TODO(), txn); err != nil {
				t.Errorf("failed good txn. req: %v. resp: %v", txn, tresp)
			}
		},
	}

	for i, f := range tests {
		f()
		// key shouldn't have been stored
		rreq := &pb.RangeRequest{Key: key}
		rresp, err := kvc.Range(context.TODO(), rreq)
		if err != nil {
			t.Errorf("#%d. could not rangereq (%v)", i, err)
		} else if len(rresp.Kvs) != 0 {
			t.Errorf("#%d. expected no keys, got %v", i, rresp)
		}
	}
}
Code example #16
File: delete_range_command.go Project: navneetk/etcd
// deleteRangeCommandFunc executes the "deleteRange" command.
func deleteRangeCommandFunc(c *cli.Context) {
	if len(c.Args()) == 0 {
		panic("bad arg")
	}

	var rangeEnd []byte
	key := []byte(c.Args()[0])
	if len(c.Args()) > 1 {
		rangeEnd = []byte(c.Args()[1])
	}
	conn, err := grpc.Dial(c.GlobalString("endpoint"))
	if err != nil {
		panic(err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.DeleteRangeRequest{Key: key, RangeEnd: rangeEnd}

	if _, err := kv.DeleteRange(context.Background(), req); err != nil {
		panic(err)
	}

	if rangeEnd != nil {
		fmt.Printf("range [%s, %s) is deleted\n", string(key), string(rangeEnd))
	} else {
		fmt.Printf("key %s is deleted\n", string(key))
	}
}
Code example #17
File: cluster.go Project: rhuss/gofabric8
func toGRPC(c *clientv3.Client) grpcAPI {
	return grpcAPI{
		pb.NewClusterClient(c.ActiveConnection()),
		pb.NewKVClient(c.ActiveConnection()),
		pb.NewLeaseClient(c.ActiveConnection()),
		pb.NewWatchClient(c.ActiveConnection()),
	}
}
Code example #18
File: v3_grpc_test.go Project: BruceZhou2012/etcd
// TestV3WatchCancel tests Watch APIs cancellation.
func TestV3WatchCancel(t *testing.T) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())

	wStream, errW := wAPI.Watch(context.TODO())
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	if err := wStream.Send(&pb.WatchRequest{CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}

	if err := wStream.Send(&pb.WatchRequest{CancelRequest: &pb.WatchCancelRequest{WatchId: wresp.WatchId}}); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}

	// watch got canceled, so this should block
	rCh := make(chan *pb.WatchResponse)
	go func() {
		resp, _ := wStream.Recv()
		rCh <- resp
	}()
	select {
	case nr := <-rCh:
		t.Errorf("unexpected response is received %+v", nr)
	case <-time.After(2 * time.Second):
	}
	wStream.CloseSend()
	rv, ok := <-rCh
	if rv != nil || !ok {
		t.Errorf("rv, ok got = %v %v, want = nil true", rv, ok)
	}

	clus.Terminate(t)
}
Code example #19
File: v3_grpc_test.go Project: rtewalt/etcd
func TestV3TxnTooManyOps(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	defer clus.Terminate(t)

	kvc := pb.NewKVClient(clus.RandConn())

	addCompareOps := func(txn *pb.TxnRequest) {
		txn.Compare = append(txn.Compare,
			&pb.Compare{
				Result: pb.Compare_GREATER,
				Target: pb.Compare_CREATE,
				Key:    []byte("bar"),
			})
	}
	addSuccessOps := func(txn *pb.TxnRequest) {
		txn.Success = append(txn.Success,
			&pb.RequestUnion{
				Request: &pb.RequestUnion_RequestPut{
					RequestPut: &pb.PutRequest{
						Key:   []byte("bar"),
						Value: []byte("bar"),
					},
				},
			})
	}
	addFailureOps := func(txn *pb.TxnRequest) {
		txn.Failure = append(txn.Failure,
			&pb.RequestUnion{
				Request: &pb.RequestUnion_RequestPut{
					RequestPut: &pb.PutRequest{
						Key:   []byte("bar"),
						Value: []byte("bar"),
					},
				},
			})
	}

	tests := []func(txn *pb.TxnRequest){
		addCompareOps,
		addSuccessOps,
		addFailureOps,
	}

	for i, tt := range tests {
		txn := &pb.TxnRequest{}
		for j := 0; j < v3rpc.MaxOpsPerTxn+1; j++ {
			tt(txn)
		}

		_, err := kvc.Txn(context.Background(), txn)
		if err != v3rpc.ErrTooManyOps {
			t.Errorf("#%d: err = %v, want %v", i, err, v3rpc.ErrTooManyOps)
		}
	}
}
Code example #20
File: v3_grpc_test.go Project: rtewalt/etcd
func testV3WatchCancel(t *testing.T, startRev int64) {
	clus := newClusterGRPC(t, &clusterConfig{size: 3})
	wAPI := pb.NewWatchClient(clus.RandConn())

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wStream, errW := wAPI.Watch(ctx)
	if errW != nil {
		t.Fatalf("wAPI.Watch error: %v", errW)
	}

	wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{
			Key: []byte("foo"), StartRevision: startRev}}}
	if err := wStream.Send(wreq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	wresp, errR := wStream.Recv()
	if errR != nil {
		t.Errorf("wStream.Recv error: %v", errR)
	}
	if !wresp.Created {
		t.Errorf("wresp.Created got = %v, want = true", wresp.Created)
	}

	creq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CancelRequest{
		CancelRequest: &pb.WatchCancelRequest{
			WatchId: wresp.WatchId}}}
	if err := wStream.Send(creq); err != nil {
		t.Fatalf("wStream.Send error: %v", err)
	}

	cresp, err := wStream.Recv()
	if err != nil {
		t.Errorf("wStream.Recv error: %v", err)
	}
	if !cresp.Canceled {
		t.Errorf("cresp.Canceled got = %v, want = true", cresp.Canceled)
	}

	kvc := pb.NewKVClient(clus.RandConn())
	if _, err := kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
		t.Errorf("couldn't put key (%v)", err)
	}

	// watch got canceled, so this should block
	rok, nr := WaitResponse(wStream, 1*time.Second)
	if !rok {
		t.Errorf("unexpected pb.WatchResponse is received %+v", nr)
	}

	clus.Terminate(t)
}
Code example #21
File: kv.go Project: khogeland/etcd
func NewKV(c *Client) KV {
	conn := c.ActiveConnection()
	remote := pb.NewKVClient(conn)

	return &kv{
		conn:   c.ActiveConnection(),
		remote: remote,

		c: c,
	}
}
Code example #22
File: stresser.go Project: luxas/flannel
func (s *stresser) Stress() error {
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	defer conn.Close()
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.mu.Unlock()

	kvc := pb.NewKVClient(conn)

	for i := 0; i < s.N; i++ {
		go func(i int) {
			defer wg.Done()
			for {
				// TODO: 10-second is enough timeout to cover leader failure
				// and immediate leader election. Find out what other cases this
				// could be timed out.
				putctx, putcancel := context.WithTimeout(ctx, 10*time.Second)
				_, err := kvc.Put(putctx, &pb.PutRequest{
					Key:   []byte(fmt.Sprintf("foo%d", rand.Intn(s.KeySuffixRange))),
					Value: []byte(randStr(s.KeySize)),
				})
				putcancel()
				if err != nil {
					if grpc.ErrorDesc(err) == context.DeadlineExceeded.Error() {
						// This retries when request is triggered at the same time as
						// leader failure. When we terminate the leader, the request to
						// that leader cannot be processed, and times out. Also requests
						// to followers cannot be forwarded to the old leader, so timing out
						// as well. We want to keep stressing until the cluster elects a
						// new leader and start processing requests again.
						continue
					}
					return
				}
				s.mu.Lock()
				s.success++
				s.mu.Unlock()
			}
		}(i)
	}

	<-ctx.Done()
	return nil
}
Code example #23
File: kv.go Project: khogeland/etcd
func (kv *kv) switchRemote(prevErr error) error {
	newConn, err := kv.c.retryConnection(kv.conn, prevErr)
	if err != nil {
		return err
	}

	kv.mu.Lock()
	defer kv.mu.Unlock()

	kv.conn = newConn
	kv.remote = pb.NewKVClient(kv.conn)
	return nil
}
Code example #24
File: client.go Project: s016374/etcd
func newClient(conn *grpc.ClientConn, cfg *Config) *Client {
	if cfg == nil {
		cfg = &Config{RetryDialer: dialEndpointList}
	}
	return &Client{
		KV:      pb.NewKVClient(conn),
		Lease:   pb.NewLeaseClient(conn),
		Watch:   pb.NewWatchClient(conn),
		Cluster: pb.NewClusterClient(conn),
		conn:    conn,
		cfg:     *cfg,
	}
}
Code example #25
File: get.go Project: navneetk/etcd
func benchGet(conn *grpc.ClientConn, key, rangeEnd []byte, n, c int) {
	wg.Add(c)
	requests := make(chan struct{}, n)

	for i := 0; i < c; i++ {
		go get(etcdserverpb.NewKVClient(conn), key, rangeEnd, requests)
	}

	for i := 0; i < n; i++ {
		requests <- struct{}{}
	}
	close(requests)
}
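As with benchPut above, the get worker spawned here is not part of the snippet. A plausible sketch follows, assuming each value received on the channel triggers one Range RPC and the shared wg is signaled on exit; this is a hypothetical reconstruction, not the project's code.

// get is a hypothetical reconstruction of the worker started by benchGet.
func get(kvc etcdserverpb.KVClient, key, rangeEnd []byte, requests <-chan struct{}) {
	defer wg.Done()
	req := &etcdserverpb.RangeRequest{Key: key, RangeEnd: rangeEnd}
	for range requests {
		if _, err := kvc.Range(context.Background(), req); err != nil {
			fmt.Printf("range failed: %v\n", err)
		}
	}
}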
Code example #26
File: put.go Project: ikatson/etcd
func putFunc(cmd *cobra.Command, args []string) {
	if keySpaceSize <= 0 {
		fmt.Fprintf(os.Stderr, "expected positive --key-space-size, got (%v)", keySpaceSize)
		os.Exit(1)
	}

	results = make(chan result)
	requests := make(chan etcdserverpb.PutRequest, totalClients)
	bar = pb.New(putTotal)

	k, v := make([]byte, keySize), mustRandBytes(valSize)

	conns := make([]*grpc.ClientConn, totalConns)
	for i := range conns {
		conns[i] = mustCreateConn()
	}

	clients := make([]etcdserverpb.KVClient, totalClients)
	for i := range clients {
		clients[i] = etcdserverpb.NewKVClient(conns[i%int(totalConns)])
	}

	bar.Format("Bom !")
	bar.Start()

	for i := range clients {
		wg.Add(1)
		go doPut(context.Background(), clients[i], requests)
	}

	pdoneC := printReport(results)

	go func() {
		for i := 0; i < putTotal; i++ {
			if seqKeys {
				binary.PutVarint(k, int64(i%keySpaceSize))
			} else {
				binary.PutVarint(k, int64(rand.Intn(keySpaceSize)))
			}
			requests <- etcdserverpb.PutRequest{Key: k, Value: v}
		}
		close(requests)
	}()

	wg.Wait()

	bar.Finish()

	close(results)
	<-pdoneC
}
Code example #27
File: range.go Project: ngaut/etcd
func rangeFunc(cmd *cobra.Command, args []string) {
	if len(args) == 0 || len(args) > 2 {
		fmt.Fprintln(os.Stderr, cmd.Usage())
		os.Exit(1)
	}

	k := []byte(args[0])
	var end []byte
	if len(args) == 2 {
		end = []byte(args[1])
	}

	results = make(chan *result, rangeTotal)
	requests := make(chan *etcdserverpb.RangeRequest, rangeTotal)
	bar = pb.New(rangeTotal)

	conns := make([]*grpc.ClientConn, totalConns)
	for i := range conns {
		conns[i] = mustCreateConn()
	}

	clients := make([]etcdserverpb.KVClient, totalClients)
	for i := range clients {
		clients[i] = etcdserverpb.NewKVClient(conns[i%int(totalConns)])
	}

	bar.Format("Bom !")
	bar.Start()

	for i := range clients {
		wg.Add(1)
		go doRange(clients[i], requests)
	}

	start := time.Now()
	for i := 0; i < rangeTotal; i++ {
		r := &etcdserverpb.RangeRequest{
			Key:      k,
			RangeEnd: end,
		}
		requests <- r
	}
	close(requests)

	wg.Wait()

	bar.Finish()
	printReport(rangeTotal, results, time.Now().Sub(start))
}
Code example #28
File: cluster.go Project: salatamartin/etcd
// setHealthKey sets health key on all given urls.
func setHealthKey(us []string) error {
	for _, u := range us {
		conn, err := grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			return fmt.Errorf("%v (%s)", err, u)
		}
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		kvc := pb.NewKVClient(conn)
		_, err = kvc.Put(ctx, &pb.PutRequest{Key: []byte("health"), Value: []byte("good")})
		cancel()
		if err != nil {
			return err
		}
	}
	return nil
}
Code example #29
File: kv.go Project: Clarifai/kubernetes
func (kv *kv) switchRemote(prevErr error) error {
	// Usually it's a bad idea to lock on network i/o but here it's OK
	// since the link is down and new requests can't be processed anyway.
	// Likewise, if connecting stalls, closing the Client can break the
	// lock via context cancelation.
	kv.mu.Lock()
	defer kv.mu.Unlock()

	newConn, err := kv.c.retryConnection(kv.conn, prevErr)
	if err != nil {
		return err
	}

	kv.conn = newConn
	kv.remote = pb.NewKVClient(kv.conn)
	return nil
}
Code example #30
File: put_command.go Project: navneetk/etcd
// putCommandFunc executes the "put" command.
func putCommandFunc(c *cli.Context) {
	if len(c.Args()) != 2 {
		panic("bad arg")
	}

	key := []byte(c.Args()[0])
	value := []byte(c.Args()[1])
	conn, err := grpc.Dial(c.GlobalString("endpoint"))
	if err != nil {
		panic(err)
	}
	kv := pb.NewKVClient(conn)
	req := &pb.PutRequest{Key: key, Value: value}

	if _, err := kv.Put(context.Background(), req); err != nil {
		panic(err)
	}
	fmt.Printf("%s %s\n", key, value)
}