Example #1
// deletePrefix issues a DeleteRangeRequest to remove all keys with a given prefix
func deletePrefix(kv pb.KVClient, prefix string) (*pb.DeleteRangeResponse, error) {
	return kv.DeleteRange(
		context.TODO(),
		&pb.DeleteRangeRequest{
			Key:      []byte(prefix),
			RangeEnd: []byte(prefixEnd(prefix))})
}
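
deletePrefix relies on a prefixEnd helper that this example does not show. A minimal sketch of one plausible implementation, assuming etcd's usual convention for computing a prefix's range end, follows:

// Sketch of the prefixEnd helper assumed by deletePrefix above; this is an
// assumption, not code from the example. It returns the first key that
// sorts after every key sharing the prefix.
func prefixEnd(prefix string) string {
	end := []byte(prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return string(end[:i+1])
		}
	}
	// The prefix is all 0xff bytes; in etcd a RangeEnd of "\x00" means
	// "from the key to the end of the keyspace".
	return "\x00"
}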
Example #2
func newStressRange(kvc pb.KVClient, keySuffixRange int) stressFunc {
	return func(ctx context.Context) error {
		_, err := kvc.Range(ctx, &pb.RangeRequest{
			Key: []byte(fmt.Sprintf("foo%d", rand.Intn(keySuffixRange))),
		}, grpc.FailFast(false))
		return err
	}
}
Example #3
func newStressDelete(kvc pb.KVClient, keySuffixRange int) stressFunc {
	return func(ctx context.Context) (error, int64) {
		_, err := kvc.DeleteRange(ctx, &pb.DeleteRangeRequest{
			Key: []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
		}, grpc.FailFast(false))
		return err, 1
	}
}
Example #4
func newStressPut(kvc pb.KVClient, keySuffixRange, keySize int) stressFunc {
	return func(ctx context.Context) (error, int64) {
		_, err := kvc.Put(ctx, &pb.PutRequest{
			Key:   []byte(fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))),
			Value: randBytes(keySize),
		}, grpc.FailFast(false))
		return err, 1
	}
}
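
Examples 2 through 4 all return closures of a stressFunc type declared elsewhere in etcd's functional tester (note that newStressRange returns only an error, while the other snippets also return a modified-key count; they come from different revisions). A hedged sketch of a driver loop for the two-value form, with runStresser being a hypothetical name and the signature inferred from usage:

// Hypothetical driver for a stressFunc of the form returned by newStressPut;
// the type signature is assumed from usage, not taken from this listing.
func runStresser(ctx context.Context, f func(context.Context) (error, int64)) (int64, error) {
	var modifiedKeys int64
	for ctx.Err() == nil {
		err, n := f(ctx)
		if err != nil {
			return modifiedKeys, err
		}
		modifiedKeys += n
	}
	return modifiedKeys, ctx.Err()
}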
Example #5
File: watch.go Project: vsayer/etcd
func doPutForWatch(ctx context.Context, client etcdserverpb.KVClient, requests <-chan etcdserverpb.PutRequest) {
	for r := range requests {
		_, err := client.Put(ctx, &r)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to Put for watch benchmark: %v\n", err)
			os.Exit(1)
		}
	}
}
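
A caller feeds doPutForWatch through the requests channel and closes it when done. A minimal, hypothetical usage sketch (putForWatchExample and the key/value choices are illustrative, not part of the benchmark):

// Illustrative only: stream a fixed number of PutRequests into doPutForWatch.
func putForWatchExample(ctx context.Context, client etcdserverpb.KVClient) {
	requests := make(chan etcdserverpb.PutRequest, 64)
	go func() {
		for i := 0; i < 1000; i++ {
			requests <- etcdserverpb.PutRequest{
				Key:   []byte(fmt.Sprintf("watch-key-%d", i)),
				Value: []byte("v"),
			}
		}
		close(requests)
	}()
	doPutForWatch(ctx, client, requests) // returns once the channel drains
}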
Example #6
func newStressRangeInterval(kvc pb.KVClient, keySuffixRange int) stressFunc {
	return func(ctx context.Context) (error, int64) {
		start := rand.Intn(keySuffixRange)
		end := start + 500
		_, err := kvc.Range(ctx, &pb.RangeRequest{
			Key:      []byte(fmt.Sprintf("foo%016x", start)),
			RangeEnd: []byte(fmt.Sprintf("foo%016x", end)),
		}, grpc.FailFast(false))
		return err, 0
	}
}
Example #7
func doRange(client etcdserverpb.KVClient, requests <-chan etcdserverpb.RangeRequest) {
	defer wg.Done()

	for req := range requests {
		st := time.Now()
		_, err := client.Range(context.Background(), &req)

		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(st)}
		bar.Increment()
	}
}
Example #8
File: put.go Project: ikatson/etcd
func doPut(ctx context.Context, client etcdserverpb.KVClient, requests <-chan etcdserverpb.PutRequest) {
	defer wg.Done()

	for r := range requests {
		st := time.Now()
		_, err := client.Put(ctx, &r)

		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- result{errStr: errStr, duration: time.Since(st)}
		bar.Increment()
	}
}
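
Both doRange and doPut lean on package-level wg, results, and bar values declared elsewhere in etcd's benchmark tool. The declarations below are a sketch inferred from usage in the examples above, not copied from the source:

// Assumed package-level state shared by doRange and doPut.
var (
	wg      sync.WaitGroup
	results chan result // drained by a separate reporting goroutine
	bar     progressBar // hypothetical stand-in for a progress-bar value
)

// progressBar abstracts the progress-bar dependency used via bar.Increment().
type progressBar interface {
	Increment()
}

type result struct {
	errStr   string
	duration time.Duration
}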
Example #9
File: put.go Project: navneetk/etcd
func put(client etcdserverpb.KVClient, requests <-chan *etcdserverpb.PutRequest) {
	defer wg.Done()

	for r := range requests {
		st := time.Now()
		_, err := client.Put(context.Background(), r)

		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- &result{
			errStr:   errStr,
			duration: time.Since(st),
		}
		bar.Increment()
	}
}
Example #10
File: get.go Project: navneetk/etcd
func get(client etcdserverpb.KVClient, key, end []byte, requests <-chan struct{}) {
	defer wg.Done()
	req := &etcdserverpb.RangeRequest{Key: key, RangeEnd: end}

	for range requests {
		st := time.Now()
		_, err := client.Range(context.Background(), req)

		var errStr string
		if err != nil {
			errStr = err.Error()
		}
		results <- &result{
			errStr:   errStr,
			duration: time.Since(st),
		}
		bar.Increment()
	}
}
Example #11
// deleteRevKey deletes a key at a given mod revision, returning false if the
// key is missing or was modified after that revision
func deleteRevKey(kvc pb.KVClient, key string, rev int64) (bool, error) {
	cmp := &pb.Compare{
		Result:      pb.Compare_EQUAL,
		Target:      pb.Compare_MOD,
		Key:         []byte(key),
		TargetUnion: &pb.Compare_ModRevision{ModRevision: rev},
	}
	req := &pb.RequestUnion{Request: &pb.RequestUnion_RequestDeleteRange{
		RequestDeleteRange: &pb.DeleteRangeRequest{Key: []byte(key)}}}
	txnresp, err := kvc.Txn(
		context.TODO(),
		&pb.TxnRequest{
			Compare: []*pb.Compare{cmp},
			Success: []*pb.RequestUnion{req},
			Failure: nil,
		})
	if err != nil {
		return false, err
	} else if !txnresp.Succeeded {
		return false, nil
	}
	return true, nil
}
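
A hedged usage sketch for deleteRevKey: read the key's current ModRevision with a Range request, then delete only if the key is unchanged since the read (deleteIfUnchanged is a hypothetical name):

// Illustrative only: conditional delete of a key at its observed revision.
func deleteIfUnchanged(kvc pb.KVClient, key string) (bool, error) {
	resp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte(key)})
	if err != nil {
		return false, err
	}
	if len(resp.Kvs) == 0 {
		return false, nil // key does not exist
	}
	return deleteRevKey(kvc, key, resp.Kvs[0].ModRevision)
}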
Example #12
func putEmptyKey(kv pb.KVClient, key string) (*pb.PutResponse, error) {
	return kv.Put(context.TODO(), &pb.PutRequest{Key: []byte(key), Value: []byte{}})
}
Example #13
// waitForRestart tries a range request until the client's server responds.
// This is mainly a stop-gap function until grpcproxy's KVClient adapter
// (and by extension, clientv3) supports grpc.CallOption pass-through so
// FailFast=false works with Put.
func waitForRestart(t *testing.T, kvc pb.KVClient) {
	req := &pb.RangeRequest{Key: []byte("_"), Serializable: true}
	if _, err := kvc.Range(context.TODO(), req, grpc.FailFast(false)); err != nil {
		t.Fatal(err)
	}
}
Example #14
func (s *stresser) run(ctx context.Context, kvc pb.KVClient) {
	defer s.wg.Done()

	for {
		if err := s.rateLimiter.Wait(ctx); err == context.Canceled {
			return
		}

		// TODO: a 10-second timeout is enough to cover a leader failure
		// and an immediate leader election. Find out in what other cases
		// this could time out.
		putctx, putcancel := context.WithTimeout(ctx, 10*time.Second)
		_, err := kvc.Put(putctx, &pb.PutRequest{
			Key:   []byte(fmt.Sprintf("foo%d", rand.Intn(s.KeySuffixRange))),
			Value: []byte(randStr(s.KeySize)),
		},
			grpc.FailFast(false))
		putcancel()
		if err != nil {
			shouldContinue := false
			switch grpc.ErrorDesc(err) {
			case context.DeadlineExceeded.Error():
				// This retries when a request is triggered at the same time
				// as a leader failure. When we terminate the leader, requests
				// to that leader cannot be processed and so time out; requests
				// to followers cannot be forwarded to the old leader, so they
				// time out as well. We want to keep stressing until the
				// cluster elects a new leader and starts processing requests
				// again.
				shouldContinue = true

			case etcdserver.ErrTimeoutDueToLeaderFail.Error(), etcdserver.ErrTimeout.Error():
				// This retries when a request is triggered at the same time
				// as a leader failure, and follower nodes receive timeout
				// errors from losing their leader. Followers should retry
				// connecting to the new leader.
				shouldContinue = true

			case etcdserver.ErrStopped.Error():
				// one of the etcd nodes was stopped by failure injection
				shouldContinue = true

			case transport.ErrConnClosing.Desc:
				// server closed the transport (failure injected node)
				shouldContinue = true

			case rpctypes.ErrNotCapable.Error():
				// the capability check has not yet been done (at startup)
				shouldContinue = true

				// default:
				// errors from stresser.Cancel method:
				// rpc error: code = 1 desc = context canceled (type grpc.rpcError)
				// rpc error: code = 2 desc = grpc: the client connection is closing (type grpc.rpcError)
			}
			if shouldContinue {
				continue
			}
			return
		}
		s.mu.Lock()
		s.success++
		s.mu.Unlock()
	}
}