func TestClientStreamRedirect(t *testing.T) { l, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) addr := l.Addr().String() conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second)) require.NoError(t, err) defer conn.Close() cluster := &mockCluster{addr: addr} cs := raftpicker.NewConnSelector(cluster, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second)) forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil } api := NewRaftProxyRouteGuideServer(testRouteGuide{}, cs, cluster, forwardAsOwnRequest) srv := grpc.NewServer() RegisterRouteGuideServer(srv, api) go srv.Serve(l) defer srv.Stop() client := NewRouteGuideClient(conn) stream, err := client.RecordRoute(context.Background()) // err not nil is only on network issues assert.Nil(t, err) // any Send will be ok, I don't know why assert.Nil(t, stream.Send(&Point{})) _, err = stream.CloseAndRecv() assert.NotNil(t, err) assert.Equal(t, codes.ResourceExhausted, grpc.Code(err)) }
// This is just a test that shows how to instantiate a gossip component func TestNewGossipCryptoService(t *testing.T) { s1 := grpc.NewServer() s2 := grpc.NewServer() s3 := grpc.NewServer() ll1, _ := net.Listen("tcp", fmt.Sprintf("%s:%d", "", 5611)) ll2, _ := net.Listen("tcp", fmt.Sprintf("%s:%d", "", 5612)) ll3, _ := net.Listen("tcp", fmt.Sprintf("%s:%d", "", 5613)) endpoint1 := "localhost:5611" endpoint2 := "localhost:5612" endpoint3 := "localhost:5613" g1 := NewGossipComponent(endpoint1, s1, []grpc.DialOption{grpc.WithInsecure()}) g2 := NewGossipComponent(endpoint2, s2, []grpc.DialOption{grpc.WithInsecure()}, endpoint1) g3 := NewGossipComponent(endpoint3, s3, []grpc.DialOption{grpc.WithInsecure()}, endpoint1) go s1.Serve(ll1) go s2.Serve(ll2) go s3.Serve(ll3) time.Sleep(time.Second * 5) fmt.Println(g1.GetPeers()) fmt.Println(g2.GetPeers()) fmt.Println(g3.GetPeers()) time.Sleep(time.Second) }
// newCluster wires up a test Cluster: for every provided grpc.Server it
// registers a public API server and an internal API server, then starts
// background registration and role-assignment goroutines that run until
// their cancel channels fire (each is expected to terminate with
// route.ErrCancelled).
func newCluster(tb testing.TB, discoveryClient discovery.Client, servers map[string]*grpc.Server) Cluster {
	sharder := route.NewSharder(
		testShardsPerServer*testNumServers,
		testNumReplicas,
	)
	addresser := route.NewDiscoveryTestAddresser(discoveryClient, sharder, testNamespace())
	cluster := cluster{
		servers:         make(map[string]server.APIServer),
		internalServers: make(map[string]server.InternalAPIServer),
		cancels:         make(map[string]chan bool),
		cancel:          make(chan bool),
		addresser:       addresser,
		sharder:         sharder,
		tb:              tb,
	}
	for address, s := range servers {
		// Public API server: routes requests across the cluster via an
		// insecure dialer rooted at this address.
		apiServer := server.NewAPIServer(
			cluster.sharder,
			route.NewRouter(
				cluster.addresser,
				grpcutil.NewDialer(
					grpc.WithInsecure(),
				),
				address,
			),
		)
		pfs.RegisterApiServer(s, apiServer)
		// Internal API server: same routing setup, plus a storage driver
		// bound to this address.
		internalAPIServer := server.NewInternalAPIServer(
			cluster.sharder,
			route.NewRouter(
				cluster.addresser,
				grpcutil.NewDialer(
					grpc.WithInsecure(),
				),
				address,
			),
			getDriver(tb, address),
		)
		pfs.RegisterInternalApiServer(s, internalAPIServer)
		cluster.addresses = append(cluster.addresses, address)
		cluster.servers[address] = apiServer
		cluster.internalServers[address] = internalAPIServer
		cluster.cancels[address] = make(chan bool)
		// Register runs until its cancel channel fires; address is passed
		// as a parameter so each goroutine sees its own value (avoids the
		// pre-Go-1.22 loop-variable capture pitfall).
		go func(address string) {
			require.Equal(tb, cluster.addresser.Register(cluster.cancels[address], address, address, cluster.internalServers[address]), route.ErrCancelled)
		}(address)
	}
	// Role assignment runs until the cluster-wide cancel channel fires.
	go func() {
		require.Equal(tb, cluster.addresser.AssignRoles(cluster.cancel), route.ErrCancelled)
	}()
	return &cluster
}
// main simulates *count doorman clients. Each client acquires capacity for
// the shared resource and spawns *workers goroutines issuing rate-limited
// SayHello RPCs against *target. Prometheus metrics are served on /metrics.
func main() {
	flag.Parse()
	log.Infof("Simulating %v clients.", *count)
	for i := 0; i < *count; i++ {
		id := uuid.New()
		log.Infof("client %v with id %v", i, id)
		client, err := doorman.NewWithID(*addr, id, doorman.DialOpts(grpc.WithInsecure()))
		if err != nil {
			log.Exit(err)
		}
		// NOTE(review): these defers run at main's return, not per
		// iteration, so every client/connection stays open for the
		// process lifetime — presumably intentional for a long-running
		// simulator; confirm before refactoring.
		defer client.Close()
		res, err := client.Resource(*resource, *initialCapacity)
		if err != nil {
			log.Exit(err)
		}
		// Continuously varies the requested capacity in the background.
		go manipulateCapacity(res, *initialCapacity, id)
		conn, err := grpc.Dial(*target, grpc.WithInsecure())
		if err != nil {
			log.Exitf("did not connect: %v", err)
		}
		defer conn.Close()
		c := pb.NewGreeterClient(conn)
		// Rate limiter driven by the capacity granted for the resource.
		rl := ratelimiter.NewQPS(res)
		for i := 0; i < *workers; i++ {
			go func() {
				ctx := context.Background()
				for {
					// Block until the limiter grants a slot.
					if err := rl.Wait(ctx); err != nil {
						log.Exitf("rl.Wait: %v", err)
					}
					// Per-request 30s timeout; the derived ctx shadows the
					// outer one only within this iteration's scope.
					ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
					if _, err := c.SayHello(ctx, &pb.HelloRequest{Name: *resource}); err != nil {
						log.Error(err)
					}
					cancel()
				}
			}()
		}
	}
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(fmt.Sprintf(":%v", *port), nil)
}
// OnStart dials the TMSP application server, retrying every 3 seconds unless
// mustConnect is set (in which case the first dial error is returned), then
// blocks until an Echo round-trip succeeds before recording the client.
func (cli *grpcClient) OnStart() error {
	cli.QuitService.OnStart()
RETRY_LOOP:
	for {
		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
		if err != nil {
			if cli.mustConnect {
				// Caller demanded a connection; surface the failure.
				return err
			} else {
				log.Warn(Fmt("tmsp.grpcClient failed to connect to %v. Retrying...\n", cli.addr))
				time.Sleep(time.Second * 3)
				continue RETRY_LOOP
			}
		}
		client := types.NewTMSPApplicationClient(conn)
	ENSURE_CONNECTED:
		for {
			// Probe with FailFast(true) so a dead connection errors
			// immediately instead of queueing; retry once per second
			// until the server answers.
			_, err := client.Echo(context.Background(), &types.RequestEcho{"hello"}, grpc.FailFast(true))
			if err == nil {
				break ENSURE_CONNECTED
			}
			time.Sleep(time.Second)
		}
		cli.client = client
		return nil
	}
}
func init() { pr = func(string, string, bool, int) (string, bool, error) { return "passphrase", false, nil } keyStore := trustmanager.NewKeyMemoryStore(pr) cryptoService := cryptoservice.NewCryptoService("", keyStore) cryptoServices := signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService} void = &pb.Void{} fakeHealth := func() map[string]string { return health } //server setup kms := &api.KeyManagementServer{CryptoServices: cryptoServices, HealthChecker: fakeHealth} ss := &api.SignerServer{CryptoServices: cryptoServices, HealthChecker: fakeHealth} grpcServer = grpc.NewServer() pb.RegisterKeyManagementServer(grpcServer, kms) pb.RegisterSignerServer(grpcServer, ss) lis, err := net.Listen("tcp", "127.0.0.1:7899") if err != nil { log.Fatalf("failed to listen %v", err) } go grpcServer.Serve(lis) //client setup conn, err := grpc.Dial("127.0.0.1:7899", grpc.WithInsecure()) if err != nil { log.Fatalf("fail to dial: %v", err) } kmClient = pb.NewKeyManagementClient(conn) sClient = pb.NewSignerClient(conn) }
// ConfigureRemoteETCD will reconfigure etcd server on remote node to either // join or remove itself from an etcd cluster. func ConfigureRemoteETCD(remoteAddress string, args *EtcdConfigReq) (*PeerGenericResp, error) { rpcConn, e := grpc.Dial(remoteAddress, grpc.WithInsecure()) if e != nil { log.WithFields(log.Fields{ "error": e, "remote": remoteAddress, }).Error("failed to grpc.Dial remote") rsp := &PeerGenericResp{ OpRet: -1, OpError: e.Error(), } return rsp, e } defer rpcConn.Close() client := NewPeerServiceClient(rpcConn) rsp, e := client.ExportAndStoreETCDConfig(netctx.TODO(), args) if e != nil { log.WithFields(log.Fields{ "error": e, "rpc": "PeerService.ExportAndStoreETCDConfig", "remote": remoteAddress, }).Error("failed RPC call") rsp := &PeerGenericResp{ OpRet: -1, OpError: e.Error(), } return rsp, e } return rsp, nil }
//newEventsClientConnectionWithAddress Returns a new grpc.ClientConn to the configured local PEER. func newEventsClientConnectionWithAddress(peerAddress string) (*grpc.ClientConn, error) { var opts []grpc.DialOption if peer.TLSEnabled() { var sn string if viper.GetString("peer.tls.serverhostoverride") != "" { sn = viper.GetString("peer.tls.serverhostoverride") } var creds credentials.TransportAuthenticator if viper.GetString("peer.tls.cert.file") != "" { var err error creds, err = credentials.NewClientTLSFromFile(viper.GetString("peer.tls.cert.file"), sn) if err != nil { grpclog.Fatalf("Failed to create TLS credentials %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) } opts = append(opts, grpc.WithTransportCredentials(creds)) } opts = append(opts, grpc.WithTimeout(defaultTimeout)) opts = append(opts, grpc.WithBlock()) opts = append(opts, grpc.WithInsecure()) return grpc.Dial(peerAddress, opts...) }
func getClusterAPIClient(address string) (pfs.ClusterAPIClient, error) { clientConn, err := grpc.Dial(address, grpc.WithInsecure()) if err != nil { return nil, err } return pfs.NewClusterAPIClient(clientConn), nil }
func TestServerStreamRedirect(t *testing.T) { l, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) addr := l.Addr().String() conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second)) require.NoError(t, err) defer conn.Close() cluster := &mockCluster{conn: conn} forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil } api := NewRaftProxyRouteGuideServer(testRouteGuide{}, cluster, forwardAsOwnRequest) srv := grpc.NewServer() RegisterRouteGuideServer(srv, api) go srv.Serve(l) defer srv.Stop() client := NewRouteGuideClient(conn) stream, err := client.ListFeatures(context.Background(), &Rectangle{}) // err not nil is only on network issues assert.Nil(t, err) _, err = stream.Recv() assert.NotNil(t, err) assert.Equal(t, codes.ResourceExhausted, grpc.Code(err)) }
// RegisterGRPCGateway starts the gateway (i.e. reverse proxy) that proxies // HTTP requests to the appropriate gRPC endpoints. func (s *adminServer) RegisterGRPCGateway(serverCtx *Context) error { // Setup HTTP<->gRPC handlers. var opts []grpc.DialOption if serverCtx.Insecure { opts = append(opts, grpc.WithInsecure()) } else { tlsConfig, err := serverCtx.GetClientTLSConfig() if err != nil { return err } opts = append( opts, // TODO(tamird): remove this timeout. It is currently necessary because // GRPC will not actually bail on a bad certificate error - it will just // retry indefinitely. See https://github.com/grpc/grpc-go/issues/622. grpc.WithTimeout(time.Second), grpc.WithBlock(), grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), ) } if err := RegisterAdminHandlerFromEndpoint(s.gwCtx, s.gwMux, serverCtx.Addr, opts); err != nil { return util.Errorf("error constructing grpc-gateway: %s. are your certificates valid?", err) } // Pass all requests for gRPC-based API endpoints to the gateway mux. s.ServeMux.Handle(apiEndpoint, s.gwMux) return nil }
func main() { // Set up a connection to the server. // 1. 创建一个Connection conn, err := grpc.Dial(address, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() // 2. 创建一个Client c := pb.NewGreeterClient(conn) // Contact the server and print out its response. name := defaultName if len(os.Args) > 1 { name = os.Args[1] } // 输入: Name // 返回: Hello + Name r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name}) if err != nil { log.Fatalf("could not greet: %v", err) } log.Printf("Greeting: %s", r.Message) }
func main() { if len(os.Args) != 2 { fmt.Println("Usage: %s <mountpath>") os.Exit(1) } root := os.Args[1] dialOpts := []grpc.DialOption{grpc.WithInsecure()} conn, err := grpc.Dial("127.0.0.1:50000", dialOpts...) if err != nil { log.Fatal(err) } cli := pb.NewPathFSClient(conn) fs := grpcfs.New(cli) nfs := pathfs.NewPathNodeFs(fs, nil) server, _, err := nodefs.MountRoot(root, nfs.Root(), nil) if err != nil { log.Fatal(err) } go server.Serve() log.Printf("Fs mounted to root %s", root) sigCh := make(chan os.Signal) signal.Notify(sigCh, os.Interrupt) for range sigCh { server.Unmount() os.Exit(0) } }
func TestReflectionEnd2end(t *testing.T) { // Start server. lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() pb.RegisterSearchServiceServer(s, &server{}) // Register reflection service on s. Register(s) go s.Serve(lis) // Create client. conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { t.Fatalf("cannot connect to server: %v", err) } defer conn.Close() c := rpb.NewServerReflectionClient(conn) stream, err := c.ServerReflectionInfo(context.Background()) testFileByFilename(t, stream) testFileByFilenameError(t, stream) testFileContainingSymbol(t, stream) testFileContainingSymbolError(t, stream) testFileContainingExtension(t, stream) testFileContainingExtensionError(t, stream) testAllExtensionNumbersOfType(t, stream) testAllExtensionNumbersOfTypeError(t, stream) testListServices(t, stream) s.Stop() }
// Stress opens a connection to the stresser's endpoint and launches s.N
// goroutines that repeatedly execute weighted random KV operations (puts,
// ranges, deletes) until the stresser is cancelled.
func (s *keyStresser) Stress() error {
	// TODO: add backoff option
	conn, err := grpc.Dial(s.Endpoint, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("%v (%s)", err, s.Endpoint)
	}
	ctx, cancel := context.WithCancel(context.Background())
	s.wg.Add(s.N)
	s.conn = conn
	s.cancel = cancel
	kvc := pb.NewKVClient(conn)
	// Weighted operation mix. The large-put weight is scaled down by the
	// key-size ratio so the total bytes written by large puts stays
	// comparable to regular puts.
	var stressEntries = []stressEntry{
		{weight: 0.7, f: newStressPut(kvc, s.keySuffixRange, s.keySize)},
		{
			weight: 0.7 * float32(s.keySize) / float32(s.keyLargeSize),
			f:      newStressPut(kvc, s.keySuffixRange, s.keyLargeSize),
		},
		{weight: 0.07, f: newStressRange(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressRangeInterval(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressDelete(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressDeleteInterval(kvc, s.keySuffixRange)},
	}
	s.stressTable = createStressTable(stressEntries)
	// Each worker loops in run(ctx) until cancel() fires.
	for i := 0; i < s.N; i++ {
		go s.run(ctx)
	}
	plog.Infof("keyStresser %q is started", s.Endpoint)
	return nil
}
func discovery() { var err error var opts []grpc.DialOption if insecureDiscovery { opts = append(opts, grpc.WithInsecure()) } else { auth := credentials.NewClientTLSFromCert(nil, getServerName(discoveryServiceURL)) opts = append(opts, grpc.WithTransportCredentials(auth)) } discoveryConn, err = grpc.Dial(discoveryServiceURL, opts...) if err != nil { logrus.Fatalf("grpc.go: error while connection to discovery service %v", err) } discoveryClient = pb.NewDiscoveryClient(discoveryConn) ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) res, err := discoveryClient.Get(ctx, &pb.DiscoveryRequest{ Environment: discoveryEnv, SdkVersion: Version + "-otsimoctl", OsName: runtime.GOOS, }) if err != nil { logrus.Fatalf("grpc:go: failed to get discovery config err=%v", err) } discoveryServices = res }
func getDriveAPIClient(address string) (drive.APIClient, error) { clientConn, err := grpc.Dial(address, grpc.WithInsecure()) if err != nil { return nil, err } return drive.NewAPIClient(clientConn), nil }
func newServer(p *newServerParams) *server { poolOptions := resource_pool.Options{} poolOptions.MaxActiveHandles = int32(p.propMaxActive) poolOptions.MaxIdleHandles = uint32(p.propMaxIdle) poolOptions.OpenMaxConcurrency = p.propMaxConcurrency poolOptions.Open = func(resourceLocation string) (interface{}, error) { con, err := grpc.Dial(resourceLocation, grpc.WithInsecure()) if err != nil { rus.Error(err) return nil, err } return con, nil } poolOptions.Close = func(handle interface{}) error { con, ok := handle.(*grpc.ClientConn) if !ok { err := fmt.Errorf("connection handle is %+v but expected %+v", handle, "*grpc.ClientConn") rus.Error(err) return err } err := con.Close() if err != nil { rus.Error(err) return err } return nil } pool := resource_pool.NewSimpleResourcePool(poolOptions) pool.Register(p.prop) s := &server{} s.p = p s.grpcPool = pool return s }
// dialSetupOpts gives the dial opts prior to any authentication
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
	if c.cfg.DialTimeout > 0 {
		opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
	}
	opts = append(opts, dopts...)
	// Custom dialer: resolve the balancer-selected endpoint for the given
	// host and honor client-context cancellation before dialing.
	f := func(host string, t time.Duration) (net.Conn, error) {
		proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
		if proto == "" {
			return nil, fmt.Errorf("unknown scheme for %q", host)
		}
		select {
		case <-c.ctx.Done():
			// Client already closed/cancelled; don't open a new conn.
			return nil, c.ctx.Err()
		default:
		}
		return net.DialTimeout(proto, host, t)
	}
	opts = append(opts, grpc.WithDialer(f))
	// The endpoint's scheme (if any) may override the configured
	// credentials; otherwise fall back to an insecure transport.
	creds := c.creds
	if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
		creds = c.processCreds(scheme)
	}
	if creds != nil {
		opts = append(opts, grpc.WithTransportCredentials(*creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}
	return opts
}
func newPeerClientConnection() (*grpc.ClientConn, error) { var opts []grpc.DialOption if viper.GetBool("peer.tls.enabled") { var sn string if viper.GetString("peer.tls.serverhostoverride") != "" { sn = viper.GetString("peer.tls.serverhostoverride") } var creds credentials.TransportAuthenticator if viper.GetString("peer.tls.cert.file") != "" { var err error creds, err = credentials.NewClientTLSFromFile(viper.GetString("peer.tls.cert.file"), sn) if err != nil { grpclog.Fatalf("Failed to create TLS credentials %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) } opts = append(opts, grpc.WithTransportCredentials(creds)) } opts = append(opts, grpc.WithTimeout(1*time.Second)) opts = append(opts, grpc.WithBlock()) opts = append(opts, grpc.WithInsecure()) conn, err := grpc.Dial(getPeerAddress(), opts...) if err != nil { return nil, err } return conn, err }
func main() { flag.Parse() args := flag.Args() var values []int32 for _, arg := range args[1:] { value, _ := strconv.Atoi(arg) values = append(values, int32(value)) } arg, _ := strconv.Atoi(args[0]) testType := TestType(arg) // Accumulate gRPC options var opts []grpc.DialOption // Connect via insecure opts = append(opts, grpc.WithInsecure()) // Dial the server conn, err := grpc.Dial("127.0.0.1:1234", opts...) if err != nil { log.Fatalln("fail to dial:", err) } defer conn.Close() // Get a client using the connection client := NewBetterTestClient(conn) resp, err := client.Add(context.Background(), &BetterNumericRequest{Type: testType, Values: values}) if err != nil { log.Fatalln("failed server call:", err) } log.Println("resp:", resp) }
func NewContext() *Context { log := log.New(os.Stderr, "QPM: ", log.LstdFlags) address := os.Getenv("SERVER") if address == "" { address = Address } noTls := os.Getenv("NO_TLS") == "1" var tlsOption grpc.DialOption if noTls { tlsOption = grpc.WithInsecure() } else { tlsOption = grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")) } conn, err := grpc.Dial(address, tlsOption, grpc.WithUserAgent(UA)) if err != nil { log.Fatalf("did not connect: %v", err) } return &Context{ Log: log, Client: msg.NewQpmClient(conn), } }
// ValidateDeletePeer is the validation function for DeletePeer to invoke the rpc // server call func ValidateDeletePeer(remoteAddress string, id string) (*PeerGenericResp, error) { args := &PeerDeleteReq{ID: id} rpcConn, e := grpc.Dial(remoteAddress, grpc.WithInsecure()) if e != nil { log.WithFields(log.Fields{ "error": e, "remote": remoteAddress, }).Error("failed to grpc.Dial remote") rsp := &PeerGenericResp{ OpRet: -1, OpError: e.Error(), } return rsp, e } defer rpcConn.Close() client := NewPeerServiceClient(rpcConn) rsp, e := client.ValidateDelete(netctx.TODO(), args) if e != nil { log.WithFields(log.Fields{ "error": e, "rpc": "PeerService.ValidateDelete", "remote": remoteAddress, }).Error("failed RPC call") rsp := &PeerGenericResp{ OpRet: -1, OpError: e.Error(), } return rsp, e } return rsp, nil }
// GetClient attempts to dial the specified address flag and returns a service // client and its underlying connection. If it is unable to make a connection, // it dies. func GetClient() (*grpc.ClientConn, pb.BookServiceClient) { conn, err := grpc.Dial(*address, grpc.WithTimeout(5*time.Second), grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } return conn, pb.NewBookServiceClient(conn) }
func main() { config := config.Load() var chainID string var serverAddr string var windowSize uint64 flag.StringVar(&serverAddr, "server", fmt.Sprintf("%s:%d", config.General.ListenAddress, config.General.ListenPort), "The RPC server to connect to.") flag.StringVar(&chainID, "chainID", provisional.TestChainID, "The chain ID to deliver from.") flag.Uint64Var(&windowSize, "windowSize", 10, "The window size for the deliver.") flag.Parse() conn, err := grpc.Dial(serverAddr, grpc.WithInsecure()) if err != nil { fmt.Println("Error connecting:", err) return } client, err := ab.NewAtomicBroadcastClient(conn).Deliver(context.TODO()) if err != nil { fmt.Println("Error connecting:", err) return } s := newDeliverClient(client, chainID, windowSize) s.seekOldest() s.readUntilClose() }
func main() { go startDebug() addr := os.Args[1] conn, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { log.Fatal(err) } defer conn.Close() log.Print("connected") cl := test.NewFooClient(conn) barClient, err := cl.Bar(context.Background()) if err != nil { log.Fatalf("calling Bar: %v", err) } // Comment this out and watch it periodically fail. if err := barClient.Send(&test.Outgoing{}); err != nil { log.Fatalf("sending: %v", err) } _, err = barClient.Recv() if err != nil { log.Fatalf("receiving: %v", err) } }
func TestReplicationQuery(t *testing.T) { t.Parallel() backend := crdtBackend{} request := crdtRequest{} testServers := make([]*ReplicationTestServer, 0, NoOfPeers) for i := 0; i < NoOfPeers; i++ { srv, err := NewTestReplicationServer() if err != nil { t.Fatalf("cannot create test server: %s", err) } rsrv := NewReplicationServer(strconv.Itoa(i+1), 3*time.Second) rsrv.Backend = backend srv.srv.RegisterService(&_ReplicationTransport_serviceDesc, rsrv) go func() { if err := srv.srv.Serve(srv.listener); err != nil { t.Errorf("cannot start server:%d %q", i+1, err) } }() testServers = append(testServers, srv) } defer func() { for _, srv := range testServers { srv.srv.Stop() } }() clients := make([]*ReplicationClient, 0, len(testServers)) for i, srv := range testServers { conn, err := grpc.Dial(srv.listener.Addr().String(), grpc.WithInsecure()) if err != nil { t.Fatalf("cannot create client[%d]: %s", i, err) } // These defers will not be called until the entire test // completes running, which is exactly what we want. defer conn.Close() clients = append(clients, NewReplicationClient(conn, strconv.Itoa(i+1))) } for _, client := range clients { dChan, eChan := client.Query(request) for d := range dChan { t.Logf("retrieved: %q", d) } for e := range eChan { if e != nil { t.Errorf("error occurred while querying: %s", e) } } } }
func getVersionAPIClient(address string) (protoversion.APIClient, error) { clientConn, err := grpc.Dial(address, grpc.WithInsecure()) if err != nil { return nil, err } return protoversion.NewAPIClient(clientConn), nil }
// GetBroadcastClient creates a simple instance of the BroadcastClient interface func GetBroadcastClient() (BroadcastClient, error) { var orderer string if viper.GetBool("peer.committer.enabled") { orderer = viper.GetString("peer.committer.ledger.orderer") } if orderer == "" { return nil, fmt.Errorf("Can't get orderer address") } var opts []grpc.DialOption opts = append(opts, grpc.WithInsecure()) opts = append(opts, grpc.WithTimeout(3*time.Second)) opts = append(opts, grpc.WithBlock()) conn, err := grpc.Dial(orderer, opts...) if err != nil { return nil, fmt.Errorf("Error connecting to %s due to %s", orderer, err) } client, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.TODO()) if err != nil { conn.Close() return nil, fmt.Errorf("Error connecting to %s due to %s", orderer, err) } return &broadcastClient{conn: conn, client: client}, nil }
func ExampleNewWriter(serverPort int, resourceName string) { ctx := context.Background() conn, err := grpc.Dial(fmt.Sprintf("localhost:%d", serverPort), grpc.WithInsecure()) if err != nil { log.Printf("grpc.Dial: %v", err) return } client := NewClient(conn) w, err := client.NewWriter(ctx, resourceName) if err != nil { log.Printf("NewWriter: %v", err) return } defer func() { err := w.Close() if err != nil { log.Printf("Close: %v", err) } }() buf := []byte("hello world") n, err := w.Write(buf) if err != nil { log.Printf("Write: %v", err) } log.Printf("Wrote %d bytes", n) }