Example #1
func TestRemoveUnusedSecret(t *testing.T) {
	ts := newTestServer(t)
	defer ts.Stop()

	// removing a secret without providing an ID results in an InvalidArgument
	_, err := ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{})
	assert.Error(t, err)
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err), grpc.ErrorDesc(err))

	// removing a secret that exists succeeds
	secret := secretFromSecretSpec(createSecretSpec("name", []byte("data"), nil))
	err = ts.Store.Update(func(tx store.Tx) error {
		return store.CreateSecret(tx, secret)
	})
	assert.NoError(t, err)

	resp, err := ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: secret.ID})
	assert.NoError(t, err)
	assert.Equal(t, api.RemoveSecretResponse{}, *resp)

	// it was really removed because attempting to remove it again fails with a NotFound
	_, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: secret.ID})
	assert.Error(t, err)
	assert.Equal(t, codes.NotFound, grpc.Code(err), grpc.ErrorDesc(err))
}
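Note: grpc.Code and grpc.ErrorDesc, used throughout these examples, belong to an older grpc-go API and were later deprecated in favor of the status package. A minimal sketch of the modern equivalent; the helper name codeAndDesc is an illustration, not part of any example:

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// codeAndDesc mirrors the grpc.Code(err)/grpc.ErrorDesc(err) pair via the
// status package; both accessors are nil-safe, yielding codes.OK and an
// empty message for a nil error.
func codeAndDesc(err error) (codes.Code, string) {
	st, _ := status.FromError(err)
	return st.Code(), st.Message()
}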
Example #2
// TabletErrorFromGRPC returns a ServerError or an
// OperationalError from the gRPC error.
func TabletErrorFromGRPC(err error) error {
	if err == nil {
		return nil
	}

	// TODO(aaijazi): Unfortunately, there's no better way to check for a gRPC server
	// error (vs a client error).
	// See: https://github.com/grpc/grpc-go/issues/319
	if !strings.Contains(err.Error(), vterrors.GRPCServerErrPrefix) {
		return OperationalError(fmt.Sprintf("vttablet: %v", err))
	}
	// server side error, convert it
	var code int
	switch grpc.Code(err) {
	case codes.Internal:
		code = ERR_FATAL
	case codes.FailedPrecondition:
		code = ERR_RETRY
	case codes.ResourceExhausted:
		code = ERR_TX_POOL_FULL
	case codes.Aborted:
		code = ERR_NOT_IN_TX
	default:
		code = ERR_NORMAL
	}

	return &ServerError{
		Code:       code,
		Err:        fmt.Sprintf("vttablet: %v", err),
		ServerCode: vterrors.GRPCCodeToErrorCode(grpc.Code(err)),
	}
}
Example #3
func doCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v compleled with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}
Example #4
func TestGetSecret(t *testing.T) {
	ts := newTestServer(t)
	defer ts.Stop()

	// ---- getting a secret without providing an ID results in an InvalidArgument ----
	_, err := ts.Client.GetSecret(context.Background(), &api.GetSecretRequest{})
	assert.Error(t, err)
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err), grpc.ErrorDesc(err))

	// ---- getting a non-existent secret fails with NotFound ----
	_, err = ts.Client.GetSecret(context.Background(), &api.GetSecretRequest{SecretID: "12345"})
	assert.Error(t, err)
	assert.Equal(t, codes.NotFound, grpc.Code(err), grpc.ErrorDesc(err))

	// ---- getting an existing secret returns the secret with all the private data cleaned ----
	secret := secretFromSecretSpec(createSecretSpec("name", []byte("data"), nil))
	err = ts.Store.Update(func(tx store.Tx) error {
		return store.CreateSecret(tx, secret)
	})
	assert.NoError(t, err)

	resp, err := ts.Client.GetSecret(context.Background(), &api.GetSecretRequest{SecretID: secret.ID})
	assert.NoError(t, err)
	assert.NotNil(t, resp)
	assert.NotNil(t, resp.Secret)

	// the data should be empty/omitted
	assert.NotEqual(t, secret, resp.Secret)
	secret.Spec.Data = nil
	assert.Equal(t, secret, resp.Secret)
}
Example #5
func TestCreateService(t *testing.T) {
	ts := newTestServer(t)
	defer ts.Stop()
	_, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{})
	assert.Error(t, err)
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err))

	spec := createSpec("name", "image", 1)
	r, err := ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec})
	assert.NoError(t, err)
	assert.NotEmpty(t, r.Service.ID)

	// test port conflicts
	spec = createSpec("name2", "image", 1)
	spec.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{
		{PublishedPort: uint32(9000), TargetPort: uint32(9000), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)},
	}}
	r, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec})
	assert.NoError(t, err)
	assert.NotEmpty(t, r.Service.ID)

	spec2 := createSpec("name3", "image", 1)
	spec2.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{
		{PublishedPort: uint32(9000), TargetPort: uint32(9000), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)},
	}}
	_, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec2})
	assert.Error(t, err)
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err))

	// test no port conflicts when no publish port is specified
	spec3 := createSpec("name4", "image", 1)
	spec3.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{
		{TargetPort: uint32(9000), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)},
	}}
	r, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec3})
	assert.NoError(t, err)
	assert.NotEmpty(t, r.Service.ID)
	spec4 := createSpec("name5", "image", 1)
	spec4.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{
		{TargetPort: uint32(9001), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)},
	}}
	_, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec4})
	assert.NoError(t, err)

	// ensure no port conflict when different protocols are used
	spec = createSpec("name6", "image", 1)
	spec.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{
		{PublishedPort: uint32(9100), TargetPort: uint32(9100), Protocol: api.PortConfig_Protocol(api.ProtocolTCP)},
	}}
	r, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec})
	assert.NoError(t, err)
	assert.NotEmpty(t, r.Service.ID)

	spec2 = createSpec("name7", "image", 1)
	spec2.Endpoint = &api.EndpointSpec{Ports: []*api.PortConfig{
		{PublishedPort: uint32(9100), TargetPort: uint32(9100), Protocol: api.PortConfig_Protocol(api.ProtocolUDP)},
	}}
	_, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec2})
	assert.NoError(t, err)
}
Example #6
func TestGetCluster(t *testing.T) {
	ts := newTestServer(t)
	defer ts.Stop()
	_, err := ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{})
	assert.Error(t, err)
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err))

	_, err = ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{ClusterID: "invalid"})
	assert.Error(t, err)
	assert.Equal(t, codes.NotFound, grpc.Code(err))

	cluster := createCluster(t, ts, "name", "name", api.AcceptancePolicy{}, ts.Server.rootCA)
	r, err := ts.Client.GetCluster(context.Background(), &api.GetClusterRequest{ClusterID: cluster.ID})
	assert.NoError(t, err)
	cluster.Meta.Version = r.Cluster.Meta.Version
	// Only public fields should be available
	assert.Equal(t, cluster.ID, r.Cluster.ID)
	assert.Equal(t, cluster.Meta, r.Cluster.Meta)
	assert.Equal(t, cluster.Spec, r.Cluster.Spec)
	assert.Equal(t, cluster.RootCA.CACert, r.Cluster.RootCA.CACert)
	assert.Equal(t, cluster.RootCA.CACertHash, r.Cluster.RootCA.CACertHash)
	// CAKey and network keys should be nil
	assert.Nil(t, r.Cluster.RootCA.CAKey)
	assert.Nil(t, r.Cluster.NetworkBootstrapKeys)
}
Example #7
// DefaultHTTPError is the default implementation of HTTPError.
// If "err" is an error from the gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// Otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a JSON object,
// which contains a member whose key is "error" and whose value is err.Error().
func DefaultHTTPError(ctx context.Context, w http.ResponseWriter, _ *http.Request, err error) {
	const fallback = `{"error": "failed to marshal error message"}`

	w.Header().Del("Trailer")
	w.Header().Set("Content-Type", "application/json")
	body := errorBody{
		Error: grpc.ErrorDesc(err),
		Code:  int(grpc.Code(err)),
	}
	buf, merr := json.Marshal(body)
	if merr != nil {
		grpclog.Printf("Failed to marshal error message %q: %v", body, merr)
		w.WriteHeader(http.StatusInternalServerError)
		if _, err := io.WriteString(w, fallback); err != nil {
			grpclog.Printf("Failed to write response: %v", err)
		}
		return
	}

	md, ok := ServerMetadataFromContext(ctx)
	if !ok {
		grpclog.Printf("Failed to extract ServerMetadata from context")
	}

	handleForwardResponseServerMetadata(w, md)
	st := HTTPStatusFromCode(grpc.Code(err))
	w.WriteHeader(st)
	if _, err := w.Write(buf); err != nil {
		grpclog.Printf("Failed to write response: %v", err)
	}

	handleForwardResponseTrailer(w, md)
}
Example #8
func checkRPCError(err error, code codes.Code, message string) error {
	if g, e := grpc.Code(err), code; g != e {
		return fmt.Errorf("wrong grpc error code: %v != %v", g, e)
	}
	// TODO https://github.com/grpc/grpc-go/issues/110
	if g, e := err.Error(), fmt.Sprintf("rpc error: code = %d desc = %q", grpc.Code(err), message); g != e {
		return fmt.Errorf("wrong error message: %v != %v", g, e)
	}
	return nil
}
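A hypothetical call site for this helper; the client value, its Lookup RPC, and the expected message are assumptions for illustration, not part of the original example:

_, err := client.Lookup(ctx, &pb.LookupRequest{Key: "missing"})
if err == nil {
	t.Fatal("Lookup succeeded, want NotFound")
}
if cerr := checkRPCError(err, codes.NotFound, "no such entry"); cerr != nil {
	t.Error(cerr)
}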
Example #9
// DoCancelAfterBegin cancels the RPC after metadata has been sent but before payloads are sent.
func DoCancelAfterBegin(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	cancel()
	_, err = stream.CloseAndRecv()
	if grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
}
Example #10
func TestValidateNetworkSpec(t *testing.T) {
	err := validateNetworkSpec(nil)
	assert.Error(t, err)
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err))

	spec := createNetworkSpec("invalid_driver")
	spec.DriverConfig = &api.Driver{
		Name: "external",
	}

	err = validateNetworkSpec(spec)
	assert.Error(t, err)
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err))
}
Example #11
// IsClosedConnection returns true if err is an error produced by gRPC on closed connections.
func IsClosedConnection(err error) bool {
	if err == context.Canceled ||
		grpc.Code(err) == codes.Canceled ||
		grpc.Code(err) == codes.Unavailable ||
		grpc.ErrorDesc(err) == grpc.ErrClientConnClosing.Error() ||
		strings.Contains(err.Error(), "is closing") ||
		strings.Contains(err.Error(), "node unavailable") {
		return true
	}
	if streamErr, ok := err.(transport.StreamError); ok && streamErr.Code == codes.Canceled {
		return true
	}
	return netutil.IsClosedConnection(err)
}
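A minimal sketch of how a caller might use this predicate to suppress expected shutdown noise in a receive loop (a fragment of a hypothetical function returning error; the stream and process names are assumptions for illustration):

for {
	msg, err := stream.Recv()
	if err != nil {
		if IsClosedConnection(err) {
			return nil // orderly shutdown; nothing worth reporting
		}
		return err // a real failure
	}
	process(msg)
}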
Example #12
func TestRemoveUsedSecret(t *testing.T) {
	ts := newTestServer(t)
	defer ts.Stop()

	// Create two secrets
	data := []byte("secret")
	creationSpec := createSecretSpec("secretID1", data, nil)
	resp, err := ts.Client.CreateSecret(context.Background(), &api.CreateSecretRequest{Spec: creationSpec})
	assert.NoError(t, err)
	creationSpec2 := createSecretSpec("secretID2", data, nil)
	resp2, err := ts.Client.CreateSecret(context.Background(), &api.CreateSecretRequest{Spec: creationSpec2})
	assert.NoError(t, err)

	// Create a service that uses a secret
	service := createSpec("service1", "image", 1)
	secretRefs := []*api.SecretReference{
		{
			SecretName: resp.Secret.Spec.Annotations.Name,
			SecretID:   resp.Secret.ID,
			Target: &api.SecretReference_File{
				File: &api.SecretReference_FileTarget{
					Name: "target.txt",
				},
			},
		},
	}
	service.Task.GetContainer().Secrets = secretRefs
	_, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: service})
	assert.NoError(t, err)

	service2 := createSpec("service2", "image", 1)
	service2.Task.GetContainer().Secrets = secretRefs
	_, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: service2})
	assert.NoError(t, err)

	// removing a secret that exists but is in use fails
	_, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: resp.Secret.ID})
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err), grpc.ErrorDesc(err))
	assert.Regexp(t, "service[1-2], service[1-2]", grpc.ErrorDesc(err))

	// removing a secret that exists but is not in use succeeds
	_, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: resp2.Secret.ID})
	assert.NoError(t, err)

	// it was really removed because attempting to remove it again fails with a NotFound
	_, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: resp2.Secret.ID})
	assert.Error(t, err)
	assert.Equal(t, codes.NotFound, grpc.Code(err), grpc.ErrorDesc(err))
}
Example #13
func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) {
	if p.cluster.IsLeader() {
		return p.local.Check(ctx, r)
	}
	ctx, err := p.runCtxMods(ctx)
	if err != nil {
		return nil, err
	}
	conn, err := p.connSelector.Conn()
	if err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			errStr := err.Error()
			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
				strings.Contains(errStr, "connection error") ||
				grpc.Code(err) == codes.Internal {
				p.connSelector.Reset()
			}
		}
	}()

	return NewHealthClient(conn).Check(ctx, r)
}
Example #14
func TestLanguageServiceAnnotateTextError(t *testing.T) {
	errCode := codes.Internal
	mockLanguage.err = grpc.Errorf(errCode, "test error")

	var document *languagepb.Document = &languagepb.Document{}
	var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
	var encodingType languagepb.EncodingType = 0
	var request = &languagepb.AnnotateTextRequest{
		Document:     document,
		Features:     features,
		EncodingType: encodingType,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.AnnotateText(context.Background(), request)

	if c := grpc.Code(err); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
Example #15
func unwrapError(err error) error {
	code := grpc.Code(err)
	errBody := grpc.ErrorDesc(err)
	switch code {
	case InternalServerError:
		return core.InternalServerError(errBody)
	case NotSupportedError:
		return core.NotSupportedError(errBody)
	case MalformedRequestError:
		return core.MalformedRequestError(errBody)
	case UnauthorizedError:
		return core.UnauthorizedError(errBody)
	case NotFoundError:
		return core.NotFoundError(errBody)
	case SignatureValidationError:
		return core.SignatureValidationError(errBody)
	case NoSuchRegistrationError:
		return core.NoSuchRegistrationError(errBody)
	case RateLimitedError:
		return core.RateLimitedError(errBody)
	case LengthRequiredError:
		return core.LengthRequiredError(errBody)
	case BadNonceError:
		return core.BadNonceError(errBody)
	default:
		return err
	}
}
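The decoding above implies a symmetric encoder on the sending side that converts core errors into the same custom gRPC codes. A plausible counterpart, sketched for two of the ten codes; the wrapError name and the type switch are assumptions, not the original code:

func wrapError(err error) error {
	switch err.(type) {
	case core.InternalServerError:
		return grpc.Errorf(InternalServerError, "%s", err)
	case core.NotFoundError:
		return grpc.Errorf(NotFoundError, "%s", err)
	// ... the remaining core error types map symmetrically ...
	default:
		return err
	}
}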
Example #16
func testCancel(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Millisecond, cancel)
	reply, err := tc.UnaryCall(ctx, req)
	if grpc.Code(err) != codes.Canceled {
		t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.Canceled)
	}
}
Example #17
// TODO(zhaoq): Have a better test coverage of timeout and cancellation mechanism.
func testRPCTimeout(t *testing.T, e env) {
	s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e)
	cc := clientSetUp(t, addr, nil, nil, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	argSize := 2718
	respSize := 314

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize))
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      payload,
	}
	for i := -1; i <= 10; i++ {
		ctx, _ := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond)
		reply, err := tc.UnaryCall(ctx, req)
		if grpc.Code(err) != codes.DeadlineExceeded {
			t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.DeadlineExceeded)
		}
	}
}
Example #18
func TestPublisherPublishError(t *testing.T) {
	errCode := codes.Internal
	mockPublisher.err = grpc.Errorf(errCode, "test error")

	var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]")
	var data []byte = []byte("-86")
	var messagesElement = &pubsubpb.PubsubMessage{
		Data: data,
	}
	var messages = []*pubsubpb.PubsubMessage{messagesElement}
	var request = &pubsubpb.PublishRequest{
		Topic:    formattedTopic,
		Messages: messages,
	}

	c, err := NewPublisherClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.Publish(context.Background(), request)

	if c := grpc.Code(err); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
Example #19
func (s *session) heartbeat(ctx context.Context) error {
	log.G(ctx).Debugf("(*session).heartbeat")
	client := api.NewDispatcherClient(s.conn)
	heartbeat := time.NewTimer(1) // send out a heartbeat right away
	defer heartbeat.Stop()

	for {
		select {
		case <-heartbeat.C:
			heartbeatCtx, cancel := context.WithTimeout(ctx, dispatcherRPCTimeout)
			resp, err := client.Heartbeat(heartbeatCtx, &api.HeartbeatRequest{
				SessionID: s.sessionID,
			})
			cancel()
			if err != nil {
				if grpc.Code(err) == codes.NotFound {
					err = errNodeNotRegistered
				}

				return err
			}

			period, err := ptypes.Duration(&resp.Period)
			if err != nil {
				return err
			}

			heartbeat.Reset(period)
		case <-s.closed:
			return errSessionClosed
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
Example #20
func TestValidateDriver(t *testing.T) {
	assert.NoError(t, validateDriver(nil))

	err := validateDriver(&api.Driver{Name: ""})
	assert.Error(t, err)
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err))
}
Example #21
func TestRemoveInternalNetwork(t *testing.T) {
	ts := newTestServer(t)
	defer ts.Stop()

	name := "testnet3"
	spec := createNetworkSpec(name)
	// add label denoting internal network
	spec.Annotations.Labels = map[string]string{"com.docker.swarm.internal": "true"}
	nr, err := ts.Server.createInternalNetwork(context.Background(), &api.CreateNetworkRequest{
		Spec: spec,
	})
	assert.NoError(t, err)
	assert.NotNil(t, nr.Network)
	assert.NotEqual(t, "", nr.Network.ID)

	_, err = ts.Client.RemoveNetwork(context.Background(), &api.RemoveNetworkRequest{NetworkID: nr.Network.ID})
	// this SHOULD fail, because the internal network cannot be removed
	assert.Error(t, err)
	assert.Equal(t, codes.PermissionDenied, grpc.Code(err))
	assert.Contains(t, err.Error(), fmt.Sprintf("%s (%s) is a pre-defined network and cannot be removed", name, nr.Network.ID))

	// then, check to make sure network is still there
	ng, err := ts.Client.GetNetwork(context.Background(), &api.GetNetworkRequest{NetworkID: nr.Network.ID})
	assert.NoError(t, err)
	assert.NotNil(t, ng.Network)
	assert.NotEqual(t, "", ng.Network.ID)
}
Example #22
func translateMandError(err error) error {
	switch grpc.Code(err) {
	case codes.NotFound, codes.PermissionDenied:
		return NewNotFoundError(err)
	}
	return fmt.Errorf("wrapped mand error: %v", err)
}
Example #23
func TestClientStreamRedirect(t *testing.T) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	addr := l.Addr().String()
	conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
	require.NoError(t, err)
	defer conn.Close()

	cluster := &mockCluster{conn: conn}

	forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
	api := NewRaftProxyRouteGuideServer(testRouteGuide{}, cluster, forwardAsOwnRequest)
	srv := grpc.NewServer()
	RegisterRouteGuideServer(srv, api)
	go srv.Serve(l)
	defer srv.Stop()

	client := NewRouteGuideClient(conn)
	stream, err := client.RecordRoute(context.Background())
	// a non-nil err here would indicate a network-level failure
	assert.Nil(t, err)
	// Send succeeds because the message is buffered locally; the server's
	// rejection only surfaces on CloseAndRecv below
	assert.Nil(t, stream.Send(&Point{}))
	_, err = stream.CloseAndRecv()
	assert.NotNil(t, err)
	assert.Equal(t, codes.ResourceExhausted, grpc.Code(err))
}
Example #24
// Commit applies the enqueued operations atomically.
func (t *Transaction) Commit() (*Commit, error) {
	if t.id == nil {
		return nil, errExpiredTransaction
	}
	req := &pb.CommitRequest{
		ProjectId:           t.client.dataset,
		TransactionSelector: &pb.CommitRequest_Transaction{t.id},
		Mutations:           t.mutations,
		Mode:                pb.CommitRequest_TRANSACTIONAL,
	}
	t.id = nil
	resp, err := t.client.client.Commit(t.ctx, req)
	if err != nil {
		if grpc.Code(err) == codes.Aborted {
			return nil, ErrConcurrentTransaction
		}
		return nil, err
	}

	// Copy any newly minted keys into the returned keys.
	commit := &Commit{}
	for i, p := range t.pending {
		if i >= len(resp.MutationResults) || resp.MutationResults[i].Key == nil {
			return nil, errors.New("datastore: internal error: server returned the wrong mutation results")
		}
		key, err := protoToKey(resp.MutationResults[i].Key)
		if err != nil {
			return nil, errors.New("datastore: internal error: server returned an invalid key")
		}
		p.key = key
		p.commit = commit
	}

	return commit, nil
}
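Because Commit translates codes.Aborted into ErrConcurrentTransaction, callers can retry contended transactions. A minimal sketch assuming the package's NewTransaction constructor; the helper name and the retry bound of 3 are arbitrary illustrations:

func commitWithRetry(ctx context.Context, client *Client) (*Commit, error) {
	for attempts := 0; attempts < 3; attempts++ {
		tx, err := client.NewTransaction(ctx)
		if err != nil {
			return nil, err
		}
		// ... enqueue Put/Delete mutations on tx here ...
		c, err := tx.Commit()
		if err != ErrConcurrentTransaction {
			return c, err // success, or a non-retryable failure
		}
	}
	return nil, ErrConcurrentTransaction
}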
Example #25
func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Millisecond, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	s.Stop()
	// Set -1 as the timeout to make sure if transportMonitor gets error
	// notification in time the failure path of the 1st invoke of
	// ClientConn.wait hits the deadline exceeded error.
	ctx, _ := context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	state := cc.State()
	if state != grpc.Connecting && state != grpc.TransientFailure {
		t.Fatalf("cc.State() = %s, want %s or %s", state, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}
Example #26
func (c *Client) newRetryWrapper() retryRpcFunc {
	return func(rpcCtx context.Context, f rpcFunc) {
		for {
			err := f(rpcCtx)
			if err == nil {
				return
			}
			// only retry if unavailable
			if grpc.Code(err) != codes.Unavailable {
				return
			}
			// always stop retry on etcd errors
			eErr := rpctypes.Error(err)
			if _, ok := eErr.(rpctypes.EtcdError); ok {
				return
			}
			select {
			case <-c.balancer.ConnectNotify():
			case <-rpcCtx.Done():
			case <-c.ctx.Done():
				return
			}
		}
	}
}
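Since the returned wrapper itself reports nothing, a caller observes the final error through the closure. A sketch of a plausible call site; the names retryf, remoteKV, and the Put RPC are assumptions for illustration:

retryf := c.newRetryWrapper()
var rerr error
retryf(ctx, func(rctx context.Context) error {
	_, rerr = remoteKV.Put(rctx, req)
	return rerr
})
// rerr holds the last error once retrying stops: nil on success, a
// non-Unavailable code, an etcd-typed error, or context cancellation.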
Example #27
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, cc := setUp(nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	done := make(chan struct{})
	ch := make(chan int)
	go func() {
		// The overall deadline must be created once, outside the loop;
		// otherwise both timers restart on every iteration, the 5ms case
		// always wins, and done would never be closed.
		timeout := time.After(5 * time.Second)
		for {
			select {
			case <-time.After(5 * time.Millisecond):
				ch <- 0
			case <-timeout:
				close(done)
				return
			}
		}
	}()
	// Loop until a stream creation hangs due to the new max stream setting.
	for {
		select {
		case <-ch:
			ctx, _ := context.WithTimeout(context.Background(), time.Second)
			if _, err := tc.StreamingInputCall(ctx); err != nil {
				if grpc.Code(err) == codes.DeadlineExceeded {
					return
				}
				t.Fatalf("%v.StreamingInputCall(_) = %v, want <nil>", tc, err)
			}
		case <-done:
			t.Fatalf("Client has not received the max stream setting in 5 seconds.")
		}
	}
}
Example #28
func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, cc := setUp(nil, 1, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	// Perform a unary RPC to make sure the new settings were propagated to the client.
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", tc, err)
	}
	// Initiate the 1st stream
	if _, err := tc.StreamingInputCall(context.Background()); err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = %v, want <nil>", tc, err)
	}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// The 2nd stream should block until its deadline exceeds.
		ctx, _ := context.WithTimeout(context.Background(), time.Second)
		if _, err := tc.StreamingInputCall(ctx); grpc.Code(err) != codes.DeadlineExceeded {
			t.Errorf("%v.StreamingInputCall(%v) = _, %v, want error code %d", tc, ctx, err, codes.DeadlineExceeded)
		}
	}()
	wg.Wait()
}
Example #29
// ProxyStream performs a forward of a gRPC frontend stream to a backend.
func ProxyStream(director StreamDirector, logger grpclog.Logger, frontTrans transport.ServerTransport, frontStream *transport.Stream) {
	backendTrans, backendStream, err := backendTransportStream(director, frontStream.Context())
	if err != nil {
		frontTrans.WriteStatus(frontStream, grpc.Code(err), grpc.ErrorDesc(err))
		logger.Printf("proxy: Proxy.handleStream %v failed to allocate backend: %v", frontStream.Method(), err)
		return
	}
	defer backendTrans.CloseStream(backendStream, nil)

	// data coming from client call to backend
	ingressPathChan := forwardDataFrames(frontStream, backendStream, backendTrans)

	// custom header handling *must* be after some data is processed by the backend, otherwise there's a deadlock
	headerMd, err := backendStream.Header()
	if err == nil && len(headerMd) > 0 {
		frontTrans.WriteHeader(frontStream, headerMd)
	}
	// data coming from backend back to client call
	egressPathChan := forwardDataFrames(backendStream, frontStream, frontTrans)

	// wait for both data streams to complete.
	egressErr := <-egressPathChan
	ingressErr := <-ingressPathChan
	if egressErr != io.EOF || ingressErr != io.EOF {
		logger.Printf("proxy: Proxy.handleStream %v failure during transfer ingres: %v egress: %v", frontStream.Method(), ingressErr, egressErr)
		frontTrans.WriteStatus(frontStream, codes.Unavailable, fmt.Sprintf("problem in transfer ingress: %v egress: %v", ingressErr, egressErr))
		return
	}
	// handle trailing metadata
	trailingMd := backendStream.Trailer()
	if len(trailingMd) > 0 {
		frontStream.SetTrailer(trailingMd)
	}
	frontTrans.WriteStatus(frontStream, backendStream.StatusCode(), backendStream.StatusDesc())
}
Example #30
func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error {
	md, ok := metadata.FromContext(stream.Context())
	if ok {
		if err := stream.SendHeader(md); err != nil {
			return grpc.Errorf(grpc.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil)
		}
		stream.SetTrailer(testTrailerMetadata)
	}
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			// read done.
			return nil
		}
		if err != nil {
			return err
		}

		if in.Id == errorID {
			return fmt.Errorf("got error id: %v", in.Id)
		}

		if err := stream.Send(&testpb.SimpleResponse{Id: in.Id}); err != nil {
			return err
		}
	}
}