Example #1
func TestRemoveUnusedSecret(t *testing.T) {
	ts := newTestServer(t)
	defer ts.Stop()

	// removing a secret without providing an ID results in an InvalidArgument
	_, err := ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{})
	assert.Error(t, err)
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err), grpc.ErrorDesc(err))

	// removing a secret that exists succeeds
	secret := secretFromSecretSpec(createSecretSpec("name", []byte("data"), nil))
	err = ts.Store.Update(func(tx store.Tx) error {
		return store.CreateSecret(tx, secret)
	})
	assert.NoError(t, err)

	resp, err := ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: secret.ID})
	assert.NoError(t, err)
	assert.Equal(t, api.RemoveSecretResponse{}, *resp)

	// it was really removed because attempting to remove it again fails with a NotFound
	_, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: secret.ID})
	assert.Error(t, err)
	assert.Equal(t, codes.NotFound, grpc.Code(err), grpc.ErrorDesc(err))
}
Example #2
// NewContextErrorGeneric returns a log context annotated with the gRPC error description, and with the status code when it is known (i.e. not codes.Unknown).
func NewContextErrorGeneric(logger log.Logger, err error) *log.Context {
	if code := grpc.Code(err); code != codes.Unknown {
		return log.NewContext(logger).With(sklog.KeyMessage, grpc.ErrorDesc(err), "code", code.String())
	}

	return log.NewContext(logger).With(sklog.KeyMessage, grpc.ErrorDesc(err))
}
Example #3
File: invoker.go Project: sr/operator
func (i *invoker) Invoke(ctx context.Context, msg *Message, req *Request) {
	ctx, cancel := context.WithTimeout(ctx, i.timeout)
	defer cancel()
	errC := make(chan error, 1)
	go func() {
		errC <- i.f(ctx, i.conn, req, i.pkg)
	}()
	event := &Event{Key: "invoker", Request: req, Message: msg}
	select {
	case <-ctx.Done():
		event.Error = fmt.Errorf("RPC request failed to complete within %s", i.timeout)
	case err := <-errC:
		event.Error = err
	}
	if event.Error != nil &&
		i.sender != nil &&
		req != nil &&
		!strings.Contains(event.Error.Error(), "no such service:") {
		if err := i.sender.Send(ctx, req.GetSource(), req.SenderId, &Message{
			Text:    grpc.ErrorDesc(event.Error),
			HTML:    fmt.Sprintf("Request failed: <code>%s</code>", grpc.ErrorDesc(event.Error)),
			Options: i.msgOpts,
		}); err != nil {
			i.inst.Instrument(&Event{
				Key:     "invoker_sender_error",
				Request: req,
				Message: msg,
				Error:   err,
			})
		}
	}
	if i.inst != nil {
		i.inst.Instrument(event)
	}
}
Example #4
func TestGetSecret(t *testing.T) {
	ts := newTestServer(t)
	defer ts.Stop()

	// ---- getting a secret without providing an ID results in an InvalidArgument ----
	_, err := ts.Client.GetSecret(context.Background(), &api.GetSecretRequest{})
	assert.Error(t, err)
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err), grpc.ErrorDesc(err))

	// ---- getting a non-existent secret fails with NotFound ----
	_, err = ts.Client.GetSecret(context.Background(), &api.GetSecretRequest{SecretID: "12345"})
	assert.Error(t, err)
	assert.Equal(t, codes.NotFound, grpc.Code(err), grpc.ErrorDesc(err))

	// ---- getting an existing secret returns the secret with all the private data cleaned ----
	secret := secretFromSecretSpec(createSecretSpec("name", []byte("data"), nil))
	err = ts.Store.Update(func(tx store.Tx) error {
		return store.CreateSecret(tx, secret)
	})
	assert.NoError(t, err)

	resp, err := ts.Client.GetSecret(context.Background(), &api.GetSecretRequest{SecretID: secret.ID})
	assert.NoError(t, err)
	assert.NotNil(t, resp)
	assert.NotNil(t, resp.Secret)

	// the data should be empty/omitted
	assert.NotEqual(t, secret, resp.Secret)
	secret.Spec.Data = nil
	assert.Equal(t, secret, resp.Secret)
}
Example #5
func TestWatchErrConnClosed(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	defer cli.Close()
	wc := clientv3.NewWatcher(cli)

	donec := make(chan struct{})
	go func() {
		defer close(donec)
		ch := wc.Watch(context.TODO(), "foo")
		if wr := <-ch; grpc.ErrorDesc(wr.Err()) != grpc.ErrClientConnClosing.Error() {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, grpc.ErrorDesc(wr.Err()))
		}
	}()

	if err := cli.ActiveConnection().Close(); err != nil {
		t.Fatal(err)
	}
	clus.TakeClient(0)

	select {
	case <-time.After(3 * time.Second):
		t.Fatal("wc.Watch took too long")
	case <-donec:
	}
}
Example #6
func (h *handler) milesGet(responseWriter http.ResponseWriter, request *http.Request) {
	route := pkghttp.QueryGet(request, "route")
	minMiles, err := pkghttp.QueryGetUint32(request, "min_miles")
	if err != nil {
		h.handleError(responseWriter, err)
		return
	}
	percentage, err := pkghttp.QueryGetUint32(request, "percentage")
	if err != nil {
		h.handleError(responseWriter, err)
		return
	}
	if percentage == 0 {
		percentage = 100
	}
	getMilesRequest := &openflights.GetMilesRequest{
		Route:      route,
		MinMiles:   minMiles,
		Percentage: percentage,
	}
	getMilesResponse, err := h.client.GetMiles(getMilesRequest)
	if err != nil {
		// An unknown airport code is the only NotFound we can get right now, but this relies on the server implementation; TODO: make this check more robust.
		if grpc.Code(err) == codes.NotFound && grpc.ErrorDesc(err) != "" {
			h.handleError(responseWriter, fmt.Errorf("openflights: unknown airport code: %s", grpc.ErrorDesc(err)))
		} else {
			h.handleError(responseWriter, err)
		}
		return
	}
	if err := h.Execute(responseWriter, "miles.html", getMilesResponse); err != nil {
		h.handleError(responseWriter, err)
	}
}
Example #7
File: error.go Project: mgurevin/etcd
func Error(err error) error {
	if err == nil {
		return nil
	}
	verr, ok := errStringToError[grpc.ErrorDesc(err)]
	if !ok { // not gRPC error
		return err
	}
	return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)}
}
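
The lookup above is keyed by the server-side error text, so the description recovered with grpc.ErrorDesc can be matched back to a client-side sentinel. A minimal sketch of how such a table is typically populated, using a hypothetical sentinel rather than etcd's real ones:

// Hypothetical sentinel for illustration only; etcd defines its own set elsewhere.
var errEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")

var errStringToError = map[string]error{
	grpc.ErrorDesc(errEmptyKey): errEmptyKey,
}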
Example #8
func TestRemoveUsedSecret(t *testing.T) {
	ts := newTestServer(t)
	defer ts.Stop()

	// Create two secrets
	data := []byte("secret")
	creationSpec := createSecretSpec("secretID1", data, nil)
	resp, err := ts.Client.CreateSecret(context.Background(), &api.CreateSecretRequest{Spec: creationSpec})
	assert.NoError(t, err)
	creationSpec2 := createSecretSpec("secretID2", data, nil)
	resp2, err := ts.Client.CreateSecret(context.Background(), &api.CreateSecretRequest{Spec: creationSpec2})
	assert.NoError(t, err)

	// Create a service that uses a secret
	service := createSpec("service1", "image", 1)
	secretRefs := []*api.SecretReference{
		{
			SecretName: resp.Secret.Spec.Annotations.Name,
			SecretID:   resp.Secret.ID,
			Target: &api.SecretReference_File{
				File: &api.SecretReference_FileTarget{
					Name: "target.txt",
				},
			},
		},
	}
	service.Task.GetContainer().Secrets = secretRefs
	_, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: service})
	assert.NoError(t, err)

	service2 := createSpec("service2", "image", 1)
	service2.Task.GetContainer().Secrets = secretRefs
	_, err = ts.Client.CreateService(context.Background(), &api.CreateServiceRequest{Spec: service2})
	assert.NoError(t, err)

	// removing a secret that exists but is in use fails
	_, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: resp.Secret.ID})
	assert.Equal(t, codes.InvalidArgument, grpc.Code(err), grpc.ErrorDesc(err))
	assert.Regexp(t, "service[1-2], service[1-2]", grpc.ErrorDesc(err))

	// removing a secret that exists but is not in use succeeds
	_, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: resp2.Secret.ID})
	assert.NoError(t, err)

	// it was really removed because attempting to remove it again fails with a NotFound
	_, err = ts.Client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: resp2.Secret.ID})
	assert.Error(t, err)
	assert.Equal(t, codes.NotFound, grpc.Code(err), grpc.ErrorDesc(err))
}
Example #9
func run(appEnv *appEnv, dirPath string, outDirPath string, compileOptions *protoeasy.CompileOptions) error {
	compiler := protoeasy.DefaultClientCompiler
	if appEnv.Address != "" {
		clientConn, err := grpc.Dial(appEnv.Address, grpc.WithInsecure())
		if err != nil {
			return err
		}
		compiler = protoeasy.NewClientCompiler(
			protoeasy.NewAPIClient(
				clientConn,
			),
			protoeasy.CompilerOptions{},
		)
	}

	commands, err := compiler.Compile(dirPath, outDirPath, compileOptions)
	if err != nil {
		if desc := grpc.ErrorDesc(err); desc != "" {
			err = errors.New(desc)
		}
		if errString := strings.TrimSpace(err.Error()); errString != "" {
			protolion.Errorln(errString)
		}
		return errors.New("")
	}
	for _, command := range commands {
		if len(command.Arg) > 0 {
			protolion.Infof("\n%s\n", strings.Join(command.Arg, " \\\n\t"))
		}
	}
	return nil
}
Example #10
func sanitizeErr(err error) error {
	if err == nil {
		return nil
	}

	return errors.New(grpc.ErrorDesc(err))
}
Example #11
func TestHeartbeatTimeout(t *testing.T) {
	cfg := DefaultConfig()
	cfg.HeartbeatPeriod = 100 * time.Millisecond
	cfg.HeartbeatEpsilon = 0
	gd, err := startDispatcher(cfg)
	assert.NoError(t, err)
	defer gd.Close()

	var expectedSessionID string
	{
		stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{})
		assert.NoError(t, err)
		resp, err := stream.Recv()
		assert.NoError(t, err)
		assert.NotEmpty(t, resp.SessionID)
		expectedSessionID = resp.SessionID
	}
	time.Sleep(500 * time.Millisecond)

	gd.Store.View(func(readTx store.ReadTx) {
		storeNodes, err := store.FindNodes(readTx, store.All)
		assert.NoError(t, err)
		assert.NotEmpty(t, storeNodes)
		assert.Equal(t, api.NodeStatus_DOWN, storeNodes[0].Status.State)
	})

	// check that node is deregistered
	resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{SessionID: expectedSessionID})
	assert.Nil(t, resp)
	assert.Error(t, err)
	assert.Equal(t, grpc.ErrorDesc(err), ErrNodeNotRegistered.Error())
}
Example #12
// FromGRPC converts error from GRPC error back to trace.Error
// Debug information will be retrieved from the metadata if specified in args
func FromGRPC(err error, args ...interface{}) error {
	if err == nil {
		return nil
	}
	code := grpc.Code(err)
	message := grpc.ErrorDesc(err)
	var e error
	switch code {
	case codes.OK:
		return nil
	case codes.NotFound:
		e = &trace.NotFoundError{Message: message}
	case codes.AlreadyExists:
		e = &trace.AlreadyExistsError{Message: message}
	case codes.PermissionDenied:
		e = &trace.AccessDeniedError{Message: message}
	case codes.FailedPrecondition:
		e = &trace.CompareFailedError{Message: message}
	case codes.InvalidArgument:
		e = &trace.BadParameterError{Message: message}
	case codes.ResourceExhausted:
		e = &trace.LimitExceededError{Message: message}
	case codes.Unavailable:
		e = &trace.ConnectionProblemError{Message: message}
	default:
		e = errors.New(message)
	}
	if len(args) != 0 {
		if meta, ok := args[0].(metadata.MD); ok {
			e = DecodeDebugInfo(e, meta)
		}
	}
	return e
}
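
A hedged usage sketch, reusing only names visible in the example above (FromGRPC and the trace error types): a NotFound status received over gRPC comes back as a typed error the caller can assert on.

// Hypothetical round trip, assumed to live in the same package as FromGRPC.
grpcErr := grpc.Errorf(codes.NotFound, "cluster %q not found", "east")
if _, ok := FromGRPC(grpcErr).(*trace.NotFoundError); !ok {
	// unexpected: codes.NotFound should map to *trace.NotFoundError
}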
Example #13
func TestPollErrorResult(t *testing.T) {
	const (
		errCode = codes.NotFound
		errMsg  = "my error"
	)
	op := &Operation{
		proto: &pb.Operation{
			Name: "foo",
			Done: true,
			Result: &pb.Operation_Error{
				Error: &status.Status{
					Code:    int32(errCode),
					Message: errMsg,
				},
			},
		},
	}
	err := op.Poll(context.Background(), nil)
	if got := grpc.Code(err); got != errCode {
		t.Errorf("error code, want %s, got %s", errCode, got)
	}
	if got := grpc.ErrorDesc(err); got != errMsg {
		t.Errorf("error code, want %s, got %s", errMsg, got)
	}
	if !op.Done() {
		t.Errorf("operation should have completed")
	}
}
Example #14
func respondWithGRPCError(w http.ResponseWriter, err error) {
	const fallback = `{"error": "failed to marshal error message"}`

	var (
		code   = grpc.Code(err)
		desc   = grpc.ErrorDesc(err)
		status = httpStatusFromCode(code)
		msg    struct {
			Error string `json:"error"`
		}
	)

	msg.Error = desc

	data, err := json.Marshal(&msg)
	if err != nil {
		log.WithError(err).Errorf("failed to marshal error message")
		status = http.StatusInternalServerError
		data = []byte(fallback)
	}

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(status)
	w.Write(data)
}
Example #15
// FromGRPCError creates a regular error with the same type as the gRPC error
func FromGRPCError(err error) error {
	if err == nil {
		return nil
	}
	code := grpc.Code(err)
	desc := grpc.ErrorDesc(err)
	switch code {
	case codes.AlreadyExists:
		return NewErrAlreadyExists(strings.TrimSuffix(desc, " already exists"))
	case codes.Internal:
		return NewErrInternal(strings.TrimPrefix(desc, "Internal error: "))
	case codes.InvalidArgument:
		if split := strings.Split(desc, " not valid: "); len(split) == 2 {
			return NewErrInvalidArgument(split[0], split[1])
		}
		return NewErrInvalidArgument("Argument", desc)
	case codes.NotFound:
		return NewErrNotFound(strings.TrimSuffix(desc, " not found"))
	case codes.PermissionDenied:
		return NewErrPermissionDenied(strings.TrimPrefix(desc, "permission denied: "))
	case codes.Unknown: // This also includes all non-gRPC errors
		if desc == "EOF" {
			return io.EOF
		}
		return errs.New(desc)
	}
	return NewErrInternal(fmt.Sprintf("[%s] %s", code, desc))
}
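
One branch above is easy to miss, so here is a hedged check, assuming it sits in the same package as FromGRPCError with the standard errors and io packages imported: a plain (non-gRPC) error reports codes.Unknown with its own message as the description, so the literal string "EOF" is restored to io.EOF.

// Hypothetical check grounded in the codes.Unknown branch of FromGRPCError.
if FromGRPCError(errors.New("EOF")) != io.EOF {
	// not reached: grpc.Code on a plain error is Unknown and ErrorDesc returns err.Error()
}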
Example #16
// ProxyStream performs a forward of a gRPC frontend stream to a backend.
func ProxyStream(director StreamDirector, logger grpclog.Logger, frontTrans transport.ServerTransport, frontStream *transport.Stream) {
	backendTrans, backendStream, err := backendTransportStream(director, frontStream.Context())
	if err != nil {
		frontTrans.WriteStatus(frontStream, grpc.Code(err), grpc.ErrorDesc(err))
		logger.Printf("proxy: Proxy.handleStream %v failed to allocate backend: %v", frontStream.Method(), err)
		return
	}
	defer backendTrans.CloseStream(backendStream, nil)

	// data coming from client call to backend
	ingressPathChan := forwardDataFrames(frontStream, backendStream, backendTrans)

	// custom header handling *must* be after some data is processed by the backend, otherwise there's a deadlock
	headerMd, err := backendStream.Header()
	if err == nil && len(headerMd) > 0 {
		frontTrans.WriteHeader(frontStream, headerMd)
	}
	// data coming from backend back to client call
	egressPathChan := forwardDataFrames(backendStream, frontStream, frontTrans)

	// wait for both data streams to complete.
	egressErr := <-egressPathChan
	ingressErr := <-ingressPathChan
	if egressErr != io.EOF || ingressErr != io.EOF {
		logger.Printf("proxy: Proxy.handleStream %v failure during transfer ingres: %v egress: %v", frontStream.Method(), ingressErr, egressErr)
		frontTrans.WriteStatus(frontStream, codes.Unavailable, fmt.Sprintf("problem in transfer ingress: %v egress: %v", ingressErr, egressErr))
		return
	}
	// handle trailing metadata
	trailingMd := backendStream.Trailer()
	if len(trailingMd) > 0 {
		frontStream.SetTrailer(trailingMd)
	}
	frontTrans.WriteStatus(frontStream, backendStream.StatusCode(), backendStream.StatusDesc())
}
Example #17
func waitForExit(c types.APIClient, events types.API_EventsClient, id, pid string, closer func()) {
	timestamp := time.Now()
	for {
		e, err := events.Recv()
		if err != nil {
			if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc {
				closer()
				os.Exit(128 + int(syscall.SIGHUP))
			}
			time.Sleep(1 * time.Second)
			tsp, err := ptypes.TimestampProto(timestamp)
			if err != nil {
				closer()
				fmt.Fprintf(os.Stderr, "%s", err.Error())
				os.Exit(1)
			}
			events, _ = c.Events(netcontext.Background(), &types.EventsRequest{Timestamp: tsp})
			continue
		}
		timestamp, err = ptypes.Timestamp(e.Timestamp)
		if e.Id == id && e.Type == "exit" && e.Pid == pid {
			closer()
			os.Exit(int(e.Status))
		}
	}
}
Example #18
func TestGRPCRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	cfg := ClusterConfig{Size: 3}
	clus := newClusterV3NoClients(t, &cfg)
	defer clus.Terminate(t)

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)

	client, err := NewClientV3(clus.Members[0])
	if err != nil {
		t.Fatalf("cannot create client: %v", err)
	}
	defer client.Close()

	// wait for election timeout, then member[0] will not have a leader.
	time.Sleep(time.Duration(3*electionTicks) * tickDuration)

	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	ctx := metadata.NewContext(context.Background(), md)
	reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}
	if _, err := toGRPC(client).KV.Put(ctx, reqput); grpc.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() {
		t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader)
	}
}
Example #19
func unwrapError(err error) error {
	code := grpc.Code(err)
	errBody := grpc.ErrorDesc(err)
	switch code {
	case InternalServerError:
		return core.InternalServerError(errBody)
	case NotSupportedError:
		return core.NotSupportedError(errBody)
	case MalformedRequestError:
		return core.MalformedRequestError(errBody)
	case UnauthorizedError:
		return core.UnauthorizedError(errBody)
	case NotFoundError:
		return core.NotFoundError(errBody)
	case SignatureValidationError:
		return core.SignatureValidationError(errBody)
	case NoSuchRegistrationError:
		return core.NoSuchRegistrationError(errBody)
	case RateLimitedError:
		return core.RateLimitedError(errBody)
	case LengthRequiredError:
		return core.LengthRequiredError(errBody)
	case BadNonceError:
		return core.BadNonceError(errBody)
	default:
		return err
	}
}
Example #20
// DefaultHTTPError is the default implementation of HTTPError.
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// If otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a JSON object,
// which contains a member whose key is "error" and whose value is err.Error().
func DefaultHTTPError(ctx context.Context, w http.ResponseWriter, _ *http.Request, err error) {
	const fallback = `{"error": "failed to marshal error message"}`

	w.Header().Del("Trailer")
	w.Header().Set("Content-Type", "application/json")
	body := errorBody{
		Error: grpc.ErrorDesc(err),
		Code:  int(grpc.Code(err)),
	}
	buf, merr := json.Marshal(body)
	if merr != nil {
		grpclog.Printf("Failed to marshal error message %q: %v", body, merr)
		w.WriteHeader(http.StatusInternalServerError)
		if _, err := io.WriteString(w, fallback); err != nil {
			grpclog.Printf("Failed to write response: %v", err)
		}
		return
	}

	md, ok := ServerMetadataFromContext(ctx)
	if !ok {
		grpclog.Printf("Failed to extract ServerMetadata from context")
	}

	handleForwardResponseServerMetadata(w, md)
	st := HTTPStatusFromCode(grpc.Code(err))
	w.WriteHeader(st)
	if _, err := w.Write(buf); err != nil {
		grpclog.Printf("Failed to write response: %v", err)
	}

	handleForwardResponseTrailer(w, md)
}
Example #21
func (s *ProxyHappySuite) TestDirectorErrorIsPropagated() {
	// See SetupSuite where the StreamDirector has a special case.
	ctx := metadata.NewContext(s.ctx, metadata.Pairs(rejectingMdKey, "true"))
	_, err := s.testClient.Ping(ctx, &pb.PingRequest{Value: "foo"})
	require.Error(s.T(), err, "Director should reject this RPC")
	assert.Equal(s.T(), codes.PermissionDenied, grpc.Code(err))
	assert.Equal(s.T(), "testing rejection", grpc.ErrorDesc(err))
}
Example #22
File: cli-client.go Project: otm/limes
func lookupCorrection(err error) string {
	switch grpc.Code(err) {
	case codes.FailedPrecondition:
		switch grpc.ErrorDesc(err) {
		case errMFANeeded.Error(), errUnknownProfile.Error():
			return fmt.Sprintf("%v: run 'limes assume <profile>'\n", grpc.ErrorDesc(err))
		}
	case codes.Unknown:
		switch grpc.ErrorDesc(err) {
		case grpc.ErrClientConnClosing.Error(), grpc.ErrClientConnTimeout.Error():
			return "service down: run 'limes start'\n"
		}
	}
	return fmt.Sprintf("%s\n", err)
}
Example #23
File: helm.go Project: runseb/helm
// prettyError unwraps or rewrites certain errors to make them more user-friendly.
func prettyError(err error) error {
	if err == nil {
		return nil
	}
	// This is ridiculous. Why is 'grpc.rpcError' not exported? The least they
	// could do is throw an interface on the lib that would let us get back
	// the desc. Instead, we have to pass ALL errors through this.
	return errors.New(grpc.ErrorDesc(err))
}
Example #24
func TestHeartbeatUnregistered(t *testing.T) {
	gd, err := startDispatcher(DefaultConfig())
	assert.NoError(t, err)
	defer gd.Close()
	resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{})
	assert.Nil(t, resp)
	assert.Error(t, err)
	assert.Equal(t, ErrSessionInvalid.Error(), grpc.ErrorDesc(err))
}
Example #25
func checkEnd(t *testing.T, d *gotData, e *expectedData) {
	var (
		ok bool
		st *stats.End
	)
	if st, ok = d.s.(*stats.End); !ok {
		t.Fatalf("got %T, want End", d.s)
	}
	if d.ctx == nil {
		t.Fatalf("d.ctx = nil, want <non-nil>")
	}
	if st.EndTime.IsZero() {
		t.Fatalf("st.EndTime = %v, want <non-zero>", st.EndTime)
	}
	if grpc.Code(st.Error) != grpc.Code(e.err) || grpc.ErrorDesc(st.Error) != grpc.ErrorDesc(e.err) {
		t.Fatalf("st.Error = %v, want %v", st.Error, e.err)
	}
}
Example #26
func (r *remote) handleEventStream(events containerd.API_EventsClient) {
	live := false
	for {
		e, err := events.Recv()
		if err != nil {
			if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc &&
				r.closeManually {
				// ignore error if grpc remote connection is closed manually
				return
			}
			logrus.Errorf("failed to receive event from containerd: %v", err)
			go r.startEventsMonitor()
			return
		}

		if !live {
			logrus.Debugf("received past containerd event: %#v", e)

			// Pause/Resume events should never happen after an exit event
			switch e.Type {
			case StateExit, StatePause, StateResume:
				r.pastEvents[e.Id] = e
			case stateLive:
				live = true
				r.updateEventTimestamp(time.Unix(int64(e.Timestamp), 0))
			}
		} else {
			logrus.Debugf("received containerd event: %#v", e)

			var container *container
			var c *client
			r.RLock()
			for _, c = range r.clients {
				container, err = c.getContainer(e.Id)
				if err == nil {
					break
				}
			}
			r.RUnlock()
			if container == nil {
				logrus.Errorf("no state for container: %q", err)
				continue
			}

			if err := container.handleEvent(e); err != nil {
				logrus.Errorf("error processing state change for %s: %v", e.Id, err)
			}

			r.updateEventTimestamp(time.Unix(int64(e.Timestamp), 0))
		}
	}
}
Example #27
func (cs *ContainerdSuite) TestStartBusyboxNoSuchFile(t *check.C) {
	expectedOutput := `oci runtime error: exec: "NoSuchFile": executable file not found in $PATH`

	if err := CreateBusyboxBundle("busybox-no-such-file", []string{"NoSuchFile"}); err != nil {
		t.Fatal(err)
	}

	_, err := cs.RunContainer("NoSuchFile", "busybox-no-such-file")
	t.Assert(grpc.ErrorDesc(err), checker.Contains, expectedOutput)
}
Example #28
File: client.go Project: XuHuaiyu/tidb
// isHaltErr returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHaltErr(ctx context.Context, err error) bool {
	if ctx != nil && ctx.Err() != nil {
		return true
	}
	if err == nil {
		return false
	}
	return strings.HasPrefix(grpc.ErrorDesc(err), "etcdserver: ") ||
		strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error())
}
Example #29
File: stresser.go Project: luxas/flannel
func (s *stresser) Stress() error {
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	defer conn.Close()
	ctx, cancel := context.WithCancel(context.Background())

	wg := &sync.WaitGroup{}
	wg.Add(s.N)

	s.mu.Lock()
	s.conn = conn
	s.cancel = cancel
	s.wg = wg
	s.mu.Unlock()

	kvc := pb.NewKVClient(conn)

	for i := 0; i < s.N; i++ {
		go func(i int) {
			defer wg.Done()
			for {
				// TODO: a 10-second timeout is enough to cover leader failure and
				// immediate leader election. Find out in what other cases this
				// could time out.
				putctx, putcancel := context.WithTimeout(ctx, 10*time.Second)
				_, err := kvc.Put(putctx, &pb.PutRequest{
					Key:   []byte(fmt.Sprintf("foo%d", rand.Intn(s.KeySuffixRange))),
					Value: []byte(randStr(s.KeySize)),
				})
				putcancel()
				if err != nil {
					if grpc.ErrorDesc(err) == context.DeadlineExceeded.Error() {
						// This retries when a request is triggered at the same time as
						// a leader failure. When we terminate the leader, the request to
						// that leader cannot be processed and times out; requests to
						// followers cannot be forwarded to the old leader, so they time
						// out as well. We want to keep stressing until the cluster elects
						// a new leader and starts processing requests again.
						continue
					}
					return
				}
				s.mu.Lock()
				s.success++
				s.mu.Unlock()
			}
		}(i)
	}

	<-ctx.Done()
	return nil
}
Example #30
// handleRPCError handles an RPC error reported for the given peer
func (trans *GrpcTransport) handleRPCError(peerURL string, err error) {
	if (err == gpt.ErrConnClosing) || (grpc.ErrorDesc(err) == grpc.ErrClientConnClosing.Error()) {
		log.Infof("Node %s disconnected. Err: %v", peerURL, err)

		// remove the Peer
		trans.RemovePeer(peerURL)

		// make callbacks
		trans.handler.NodeError(peerURL)
	}
}