Пример #1
0
// cleanup releases everything acquired during a test run: the image file
// on disk, the gRPC client connection, the API service subprocess, and
// the rkt run context, in that order.
func cleanup(ctx *testutils.RktRunCtx, svc *gexpect.ExpectSubprocess, conn *grpc.ClientConn, imagePath string) {
	// A throwaway testing.T keeps helper calls from printing messages.
	quietT := new(testing.T)
	os.Remove(imagePath)
	conn.Close()
	stopAPIService(quietT, svc)
	ctx.Cleanup()
}
Пример #2
0
// ExecuteFetchAsApp is part of the tmclient.TabletManagerClient interface.
func (client *Client) ExecuteFetchAsApp(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, query []byte, maxRows int) (*querypb.QueryResult, error) {
	// Pick a pooled or a one-shot connection depending on usePool; the
	// one-shot connection is closed when this call returns.
	var c tabletmanagerservicepb.TabletManagerClient
	if usePool {
		pooled, err := client.dialPool(tablet)
		if err != nil {
			return nil, err
		}
		c = pooled
	} else {
		cc, dialed, err := client.dial(tablet)
		if err != nil {
			return nil, err
		}
		defer cc.Close()
		c = dialed
	}

	req := &tabletmanagerdatapb.ExecuteFetchAsAppRequest{
		Query:   query,
		MaxRows: uint64(maxRows),
	}
	response, err := c.ExecuteFetchAsApp(ctx, req)
	if err != nil {
		return nil, err
	}
	return response.Result, nil
}
Пример #3
0
// benchmarkEchoGRPC benchmarks round-trip Echo calls of the given payload
// size over a TLS gRPC connection.
func benchmarkEchoGRPC(b *testing.B, size int) {
	var (
		conn   *grpc.ClientConn
		client EchoClient
	)
	// Dial the benchmark server and build the Echo client.
	setup := func(addr net.Addr) {
		c, err := grpc.Dial(addr.String(), grpc.WithTransportCredentials(credentials.NewTLS(clientTLSConfig)))
		if err != nil {
			b.Fatal(err)
		}
		conn = c
		client = NewEchoClient(conn)
	}
	// Tear the connection down once the benchmark finishes.
	teardown := func() {
		if err := conn.Close(); err != nil {
			b.Fatal(err)
		}
	}
	// One round trip: send the message and return the echoed reply.
	echo := func(echoMsg string) string {
		resp, err := client.Echo(context.Background(), &EchoRequest{Msg: echoMsg})
		if err != nil {
			b.Fatal(err)
		}
		return resp.Msg
	}
	benchmarkEcho(b, size, listenAndServeGRPC, setup, teardown, echo)
}
Пример #4
0
// compactKV asks every cluster member to physically compact the key-value
// store up to revision rev. A rev of zero or less is a no-op. The error
// returned is whatever the last iteration left in err, so a later success
// or "already compacted" response clears errors from earlier members.
func (c *cluster) compactKV(rev int64) error {
	var (
		conn *grpc.ClientConn
		err  error
	)

	if rev <= 0 {
		return nil
	}

	for i, u := range c.GRPCURLs {
		// Dial with a bounded timeout; members we cannot reach are skipped.
		conn, err = grpc.Dial(u, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if err != nil {
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		_, err = kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev, Physical: true})
		cancel()
		conn.Close()
		if err != nil {
			// After the first member compacts, the rest may report the
			// revision as already compacted; treat that as success
			// (only for i > 0, the first member must truly compact).
			if strings.Contains(err.Error(), "required revision has been compacted") && i > 0 {
				plog.Printf("%s is already compacted with %d (%v)", u, rev, err)
				err = nil // in case compact was requested more than once
			}
		}
	}
	return err
}
Пример #5
0
// ExecuteFetchAsDba is part of the tmclient.TabletManagerClient interface.
func (client *Client) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, query []byte, maxRows int, disableBinlogs, reloadSchema bool) (*querypb.QueryResult, error) {
	// Pick a pooled or a one-shot connection depending on usePool; the
	// one-shot connection is closed when this call returns.
	var c tabletmanagerservicepb.TabletManagerClient
	if usePool {
		pooled, err := client.dialPool(tablet)
		if err != nil {
			return nil, err
		}
		c = pooled
	} else {
		cc, dialed, err := client.dial(tablet)
		if err != nil {
			return nil, err
		}
		defer cc.Close()
		c = dialed
	}

	req := &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{
		Query:          query,
		DbName:         topoproto.TabletDbName(tablet),
		MaxRows:        uint64(maxRows),
		DisableBinlogs: disableBinlogs,
		ReloadSchema:   reloadSchema,
	}
	response, err := c.ExecuteFetchAsDba(ctx, req)
	if err != nil {
		return nil, err
	}
	return response.Result, nil
}
Пример #6
0
// Put returns a connection to the pool or closes and discards the connection
// incase the pool channel is at capacity.
func (p *pool) Put(conn *grpc.ClientConn) error {
	select {
	case p.conns <- conn:
		// The pool had room; keep the connection for reuse.
	default:
		// The pool is full; dispose of the surplus connection.
		return conn.Close()
	}
	return nil
}
Пример #7
0
// removeConn closes conn and evicts its cache entry under key. Close
// failures are logged at verbosity 1 unless the connection was already
// closed.
func (ctx *Context) removeConn(key string, conn *grpc.ClientConn) {
	err := conn.Close()
	if err != nil && !grpcutil.IsClosedConnection(err) && log.V(1) {
		log.Errorf("failed to close client connection: %s", err)
	}
	delete(ctx.conns.cache, key)
}
Пример #8
0
// createConnection dials endpoint, pings it, opens a gossip stream, and
// authenticates the remote peer. On success it returns a ready connection
// whose handler demultiplexes incoming messages to the communication
// module; on every failure path the dialed grpc connection is closed.
func (c *commImpl) createConnection(endpoint string, expectedPKIID common.PKIidType) (*connection, error) {
	var err error
	var cc *grpc.ClientConn
	var stream proto.Gossip_GossipStreamClient
	var pkiID common.PKIidType

	c.logger.Debug("Entering", endpoint, expectedPKIID)
	defer c.logger.Debug("Exiting")

	if c.isStopping() {
		return nil, fmt.Errorf("Stopping")
	}
	cc, err = grpc.Dial(endpoint, append(c.opts, grpc.WithBlock())...)
	if err != nil {
		return nil, err
	}

	cl := proto.NewGossipClient(cc)

	// Verify the endpoint is reachable before investing in a stream.
	if _, err = cl.Ping(context.Background(), &proto.Empty{}); err != nil {
		cc.Close()
		return nil, err
	}

	if stream, err = cl.GossipStream(context.Background()); err == nil {
		pkiID, err = c.authenticateRemotePeer(stream)
		if err == nil {
			if expectedPKIID != nil && !bytes.Equal(pkiID, expectedPKIID) {
				// PKIID is nil when we don't know the remote PKI id's
				c.logger.Warning("Remote endpoint claims to be a different peer, expected", expectedPKIID, "but got", pkiID)
				// Fix: close the dialed connection before bailing out;
				// previously cc leaked on this path.
				cc.Close()
				return nil, fmt.Errorf("Authentication failure")
			}
			conn := newConnection(cl, cc, stream, nil)
			conn.pkiID = pkiID
			conn.logger = c.logger

			// Incoming messages are published to the demultiplexer,
			// tagged with this connection.
			h := func(m *proto.GossipMessage) {
				c.logger.Debug("Got message:", m)
				c.msgPublisher.DeMultiplex(&ReceivedMessageImpl{
					conn:          conn,
					lock:          conn,
					GossipMessage: m,
				})
			}
			conn.handler = h
			return conn, nil
		}
	}
	cc.Close()
	return nil, err
}
Пример #9
0
// TestConnection_Correct verifies a client connection to the configured
// peer address can be established (both with and without TLS when TLS is
// enabled).
func TestConnection_Correct(t *testing.T) {
	config.SetupTestConfig("./../../peer")
	viper.Set("ledger.blockchain.deploy-system-chaincode", "false")
	var tmpConn *grpc.ClientConn
	var err error
	if TLSEnabled() {
		tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, true, InitTLSForPeer())
		// Fix: the TLS connection's error was previously discarded and the
		// connection leaked when tmpConn was overwritten below.
		if err != nil {
			t.Fatalf("error connection to server at host:port = %s\n", viper.GetString("peer.address"))
		}
		tmpConn.Close()
	}
	tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, false, nil)
	if err != nil {
		t.Fatalf("error connection to server at host:port = %s\n", viper.GetString("peer.address"))
	}

	tmpConn.Close()
}
Пример #10
0
// TestConnection_WrongAddress verifies that connecting to a non-listening
// address fails.
func TestConnection_WrongAddress(t *testing.T) {
	config.SetupTestConfig("./../../peer")
	viper.Set("ledger.blockchain.deploy-system-chaincode", "false")
	viper.Set("peer.address", "0.0.0.0:30304")
	var tmpConn *grpc.ClientConn
	var err error
	if TLSEnabled() {
		tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, true, InitTLSForPeer())
		// Fix: close an unexpectedly successful TLS connection before it is
		// overwritten below; previously it leaked.
		if err == nil && tmpConn != nil {
			tmpConn.Close()
		}
	}
	tmpConn, err = NewClientConnectionWithAddress(viper.GetString("peer.address"), true, false, nil)
	if err == nil {
		fmt.Printf("error connection to server -  at host:port = %s\n", viper.GetString("peer.address"))
		t.Error("error connection to server - connection should fail")
		tmpConn.Close()
	}
}
Пример #11
0
// compactKV asks cluster members, one at a time, to compact the key-value
// store up to revision rev. It returns nil as soon as one member succeeds,
// otherwise the error from the last attempt (nil if no URLs are configured).
func (c *cluster) compactKV(rev int64) error {
	var lastErr error
	for _, endpoint := range c.GRPCURLs {
		// Members we cannot reach within the dial timeout are skipped.
		conn, dialErr := grpc.Dial(endpoint, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second))
		if dialErr != nil {
			lastErr = dialErr
			continue
		}
		kvc := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		_, lastErr = kvc.Compact(ctx, &pb.CompactionRequest{Revision: rev})
		cancel()
		conn.Close()
		if lastErr == nil {
			return nil
		}
	}
	return lastErr
}
Пример #12
0
// retryConnection establishes a new connection
// to replace oldConn after err. If a concurrent caller already replaced
// the client's connection, the current one is returned without redialing.
// err, when non-nil, is recorded in the client's error history.
func (c *Client) retryConnection(oldConn *grpc.ClientConn, err error) (*grpc.ClientConn, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if err != nil {
		c.errors = append(c.errors, err)
	}
	// A nil cancel means the client has been closed; surface the
	// context's error instead of dialing.
	if c.cancel == nil {
		return nil, c.ctx.Err()
	}
	if oldConn != c.conn {
		// conn has already been updated
		return c.conn, nil
	}

	oldConn.Close()
	if st, _ := oldConn.State(); st != grpc.Shutdown {
		// wait for shutdown so grpc doesn't leak sleeping goroutines
		oldConn.WaitForStateChange(c.ctx, st)
	}

	// Dial a replacement via the configured dialer and record any failure.
	conn, dialErr := c.cfg.RetryDialer(c)
	if dialErr != nil {
		c.errors = append(c.errors, dialErr)
		return nil, dialErr
	}
	c.conn = conn
	return c.conn, nil
}
Пример #13
0
// invokeOnWithTimeout invokes callBack against an endorser for each compose
// service in parallel, bounded by the given timeout (in seconds), and
// collects the proposal responses per service. It waits for every
// invocation to finish before returning; err holds the last failure seen.
func (b *BDDContext) invokeOnWithTimeout(composeServices []string, timeoutInSecs string, callBack func(context.Context, pb.EndorserClient) (proposalResponse *pb.ProposalResponse, err error)) (map[string]*pb.ProposalResponse, error) {
	var err error
	resultsMap := make(map[string]*pb.ProposalResponse)
	errRetFunc := func() error {
		return fmt.Errorf("Error when invoking endorser(s) on '%s':  %s", composeServices, err)
	}
	var (
		durationToWait time.Duration
		ctx            context.Context
		cancel         context.CancelFunc
	)
	if durationToWait, err = time.ParseDuration(fmt.Sprintf("%ss", timeoutInSecs)); err != nil {
		return nil, errRetFunc()
	}
	ctx, cancel = context.WithTimeout(context.Background(), durationToWait)
	defer cancel()
	// Fix: the context was previously canceled immediately after creation,
	// so every endorser call ran against an already-dead context. Results
	// are now funneled through a channel so the map is written from a
	// single goroutine and the function waits for all invocations.
	type invokeResult struct {
		service string
		resp    *pb.ProposalResponse
		err     error
	}
	resCh := make(chan invokeResult, len(composeServices))
	for _, cs := range composeServices {
		go func(composeService string) {
			// Now use the endorser client to send the proposal.
			println("Calling endorser for compose service:", composeService)
			grpcClient, localErr := NewGrpcClient("172.17.0.4:7051")
			if localErr != nil {
				resCh <- invokeResult{service: composeService, err: localErr}
				return
			}
			defer grpcClient.Close()
			endorserClient := pb.NewEndorserClient(grpcClient)
			proposalResponse, localErr := callBack(ctx, endorserClient)
			resCh <- invokeResult{service: composeService, resp: proposalResponse, err: localErr}
		}(cs)
	}
	for range composeServices {
		r := <-resCh
		if r.err != nil {
			err = r.err
			continue
		}
		resultsMap[r.service] = r.resp
	}
	return resultsMap, err
}
Пример #14
0
// userSendsProposalToEndorsersWithTimeoutOfSeconds sends the named proposal
// on behalf of enrollID to every endorser in endorsersTable concurrently,
// then waits until all responses are collected or the timeout elapses.
// On success the keyed responses are stored on the user registration.
func (b *BDDContext) userSendsProposalToEndorsersWithTimeoutOfSeconds(enrollID, proposalAlias, timeoutInSecs string, endorsersTable *gherkin.DataTable) (err error) {
	var proposal *pb.Proposal
	var keyedProposalResponsesMap KeyedProposalResponseMap
	keyedProposalResponsesMap = make(KeyedProposalResponseMap)
	// errRetFunc wraps the current err with call context for return paths.
	errRetFunc := func() error {
		return fmt.Errorf("Error sending proposal '%s' for user '%s':  %s", proposalAlias, enrollID, err)
	}
	var userRegistration *UserRegistration
	if userRegistration, err = b.GetUserRegistration(enrollID); err != nil {
		return errRetFunc()
	}
	// Get the proposal from the user
	if proposal, err = userRegistration.GetProposal(proposalAlias); err != nil {
		return errRetFunc()
	}

	// The context doubles as the timeout and, via cancel below, as the
	// "all responses collected" signal.
	var ctx context.Context
	var cancel context.CancelFunc
	if ctx, cancel, err = getContextAndCancelForTimeoutInSecs(context.Background(), timeoutInSecs); err != nil {
		return errRetFunc()
	}
	defer cancel()
	// Loop through endorsers and send proposals
	var endorsers []string
	if endorsers, err = b.GetArgsForUser(endorsersTable.Rows[0].Cells, userRegistration); err != nil {
		return errRetFunc()
	}
	// Each sender goroutine delivers exactly one KeyedProposalResponse here.
	respQueue := make(chan *KeyedProposalResponse)
	for _, e := range endorsers {
		go func(endorser string) {
			var localErr error
			var proposalResponse *pb.ProposalResponse
			// Now use the endorser client to send the proposal
			var grpcClient *grpc.ClientConn
			if grpcClient, localErr = b.getGrpcClientForComposeService(endorser); localErr != nil {
				respQueue <- &KeyedProposalResponse{endorser, nil, fmt.Errorf("Error calling endorser '%s': %s", endorser, localErr)}
				return
			}
			defer grpcClient.Close()

			proposalBytes, err := utils.GetBytesProposal(proposal)
			if err != nil {
				respQueue <- &KeyedProposalResponse{endorser, nil, fmt.Errorf("Error serializing proposal bytes")}
				return
			}
			// FIXME: the endorser needs to be given a signed proposal - who should sign?
			signedProposal := &pb.SignedProposal{ProposalBytes: proposalBytes, Signature: []byte("signature")}

			endorserClient := pb.NewEndorserClient(grpcClient)
			if proposalResponse, localErr = endorserClient.ProcessProposal(ctx, signedProposal); localErr != nil {
				respQueue <- &KeyedProposalResponse{endorser, nil, fmt.Errorf("Error calling endorser '%s':  %s", endorser, localErr)}
				return
			}
			respQueue <- &KeyedProposalResponse{endorser, proposalResponse, nil}
		}(e)
	}
	// Collector: drain one response per endorser, then cancel the context
	// to unblock the wait below.
	go func() {
		for i := 0; i < len(endorsers); i++ {
			result := <-respQueue
			keyedProposalResponsesMap[result.endorser] = result
			if result.err != nil {
				// TODO: think about whether to break on first failure, or allow to collect
			}
		}
		cancel()
	}()
	// context.Canceled means the collector finished; any other error
	// (e.g. deadline exceeded) is a timeout and is reported to the caller.
	<-ctx.Done()
	if ctx.Err() != context.Canceled {
		err = ctx.Err()
		return errRetFunc()
	}
	userRegistration.lastResult = keyedProposalResponsesMap
	return nil
}
Пример #15
0
// Start begins name resolution for target and spawns the goroutines that
// track remote load-balancer addresses and maintain a connection to the
// current balancer. It is the grpc load-balancing entry point for this
// balancer; it fails if no name resolver is installed or the balancer is
// already closed.
func (b *balancer) Start(target string, config grpc.BalancerConfig) error {
	// TODO: Fall back to the basic direct connection if there is no name resolver.
	if b.r == nil {
		return errors.New("there is no name resolver installed")
	}
	b.mu.Lock()
	if b.done {
		b.mu.Unlock()
		return grpc.ErrClientConnClosing
	}
	b.addrCh = make(chan []grpc.Address)
	w, err := b.r.Resolve(target)
	if err != nil {
		b.mu.Unlock()
		return err
	}
	b.w = w
	b.mu.Unlock()
	balancerAddrCh := make(chan remoteBalancerInfo, 1)
	// Spawn a goroutine to monitor the name resolution of remote load balancer.
	go func() {
		for {
			if err := b.watchAddrUpdates(w, balancerAddrCh); err != nil {
				grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err)
				// Closing balancerAddrCh tells the dialer goroutine below
				// to shut down.
				close(balancerAddrCh)
				return
			}
		}
	}()
	// Spawn a goroutine to talk to the remote load balancer.
	go func() {
		var cc *grpc.ClientConn
		for {
			rb, ok := <-balancerAddrCh
			// Drop the connection to the previous balancer, if any,
			// before dialing the new address (or exiting).
			if cc != nil {
				cc.Close()
			}
			if !ok {
				// b is closing.
				return
			}
			// Talk to the remote load balancer to get the server list.
			var err error
			creds := config.DialCreds
			if creds == nil {
				cc, err = grpc.Dial(rb.addr, grpc.WithInsecure())
			} else {
				if rb.name != "" {
					if err := creds.OverrideServerName(rb.name); err != nil {
						grpclog.Printf("Failed to override the server name in the credentials: %v", err)
						continue
					}
				}
				cc, err = grpc.Dial(rb.addr, grpc.WithTransportCredentials(creds))
			}
			if err != nil {
				grpclog.Printf("Failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
				return
			}
			b.mu.Lock()
			b.seq++ // tick when getting a new balancer address
			seq := b.seq
			b.next = 0
			b.mu.Unlock()
			// Stream from the new balancer until a non-retriable failure,
			// then close this connection.
			go func(cc *grpc.ClientConn) {
				lbc := lbpb.NewLoadBalancerClient(cc)
				for {
					if retry := b.callRemoteBalancer(lbc, seq); !retry {
						cc.Close()
						return
					}
				}
			}(cc)
		}
	}()
	return nil
}
	// connectoToStream dials the local doppler gRPC endpoint and opens a
	// Stream, returning both the connection and the stream so the caller
	// can close them later.
	var connectoToStream = func() (*grpc.ClientConn, plumbing.Doppler_StreamClient) {
		out, err := grpc.Dial(localIPAddress+":5678", grpc.WithInsecure())
		Expect(err).ToNot(HaveOccurred())
		client := plumbing.NewDopplerClient(out)
		// NOTE(review): the CancelFunc is discarded (flagged by `go vet`
		// lostcancel); the context simply expires after 10s. Confirm the
		// stream is meant to die with the deadline rather than be canceled.
		ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
		stream, err := client.Stream(ctx, &plumbing.StreamRequest{})
		Expect(err).ToNot(HaveOccurred())

		return out, stream
	}

	Context("with a stream connection established", func() {
		var (
			in     net.Conn
			out    *grpc.ClientConn
			stream plumbing.Doppler_StreamClient
		)

		BeforeEach(func() {
			in = connectToDoppler()
			out, stream = connectoToStream()

			primePump(in)
			waitForPrimer(stream)
		})

		AfterEach(func() {
			in.Close()
			out.Close()
		})
Пример #17
0
// RunStepOn will run the step on the specified node
func RunStepOn(step string, node uuid.UUID, c TxnCtx) (TxnCtx, error) {
	// TODO: I'm creating connections on demand. This should be changed so that
	// we have long term connections.
	p, err := peer.GetPeerF(node.String())
	if err != nil {
		c.Logger().WithFields(log.Fields{
			"peerid": node.String(),
			"error":  err,
		}).Error("peer not found")
		return nil, err
	}

	logger := c.Logger().WithField("remotepeer", p.ID.String()+"("+p.Name+")")

	remote, err := utils.FormRemotePeerAddress(p.Addresses[0])
	if err != nil {
		return nil, err
	}

	conn, err := grpc.Dial(remote, grpc.WithInsecure())
	if err == nil && conn != nil {
		logger.WithFields(log.Fields{
			"remote": remote,
		}).Debug("connected to remote")
	}

	if conn == nil {
		logger.WithFields(log.Fields{
			"error":  err,
			"remote": p.Addresses,
		}).Error("failed to grpc.Dial remote")
		return nil, err
	}
	defer conn.Close()

	// Serialize the transaction context so it can travel with the request.
	data, err := json.Marshal(c)
	if err != nil {
		logger.WithError(err).Error("failed to JSON marshal transaction context")
		return nil, err
	}
	req := &TxnStepReq{
		StepFunc: step,
	}
	req.Context = data

	// Run the step remotely and surface transport or application errors.
	client := NewTxnSvcClient(conn)
	rsp, err := client.RunStep(netctx.TODO(), req)
	if err != nil {
		logger.WithFields(log.Fields{
			"error": err,
			"rpc":   "TxnSvc.RunStep",
		}).Error("failed RPC call")
		return nil, err
	}

	if rsp.Error != "" {
		logger.WithError(errors.New(rsp.Error)).Error("TxnSvc.Runstep failed on peer")
		return nil, errors.New(rsp.Error)
	}

	// Decode the updated transaction context returned by the peer.
	rspCtx := new(Tctx)
	err = json.Unmarshal(rsp.Resp, rspCtx)
	if err != nil {
		logger.WithError(err).Error("failed to JSON unmarhsal transaction context")
	}

	return rspCtx, err
}
Пример #18
0
// tearDownClient closes the client connection (best effort; the error is
// deliberately ignored) and then stops the service.
func tearDownClient(conn *grpc.ClientConn) {
	conn.Close() //nolint:errcheck // teardown is best-effort
	Stop()
}
Пример #19
0
// tearDown closes the client connection first, then stops the server.
func tearDown(s *grpc.Server, cc *grpc.ClientConn) {
	_ = cc.Close()
	s.Stop()
}
Пример #20
0
// tearDownClient closes the client connection (best effort; the error is
// deliberately ignored), stops the service, and tears down the test
// fixtures.
func tearDownClient(conn *grpc.ClientConn) {
	conn.Close() //nolint:errcheck // teardown is best-effort
	Stop()
	testutils.TearDownTests()
}