Example #1
func (b *balancer) callRemoteBalancer(lbc lbpb.LoadBalancerClient, seq int) (retry bool) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := lbc.BalanceLoad(ctx, grpc.FailFast(false))
	if err != nil {
		grpclog.Printf("Failed to perform RPC to the remote balancer %v", err)
		return
	}
	b.mu.Lock()
	if b.done {
		b.mu.Unlock()
		return
	}
	b.mu.Unlock()
	initReq := &lbpb.LoadBalanceRequest{
		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
			InitialRequest: new(lbpb.InitialLoadBalanceRequest),
		},
	}
	if err := stream.Send(initReq); err != nil {
		// TODO: backoff on retry?
		return true
	}
	reply, err := stream.Recv()
	if err != nil {
		// TODO: backoff on retry?
		return true
	}
	initResp := reply.GetInitialResponse()
	if initResp == nil {
		grpclog.Println("Failed to receive the initial response from the remote balancer.")
		return
	}
	// TODO: Support delegation.
	if initResp.LoadBalancerDelegate != "" {
		// delegation
		grpclog.Println("TODO: Delegation is not supported yet.")
		return
	}
	// Retrieve the server list.
	for {
		reply, err := stream.Recv()
		if err != nil {
			break
		}
		b.mu.Lock()
		if b.done || seq < b.seq {
			b.mu.Unlock()
			return
		}
		b.seq++ // tick when receiving a new list of servers.
		seq = b.seq
		b.mu.Unlock()
		if serverList := reply.GetServerList(); serverList != nil {
			b.processServerList(serverList, seq)
		}
	}
	return true
}
Example #2
// Serve accepts incoming connections on the listener lis, creating a new
// ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails.
func (s *Server) Serve(lis net.Listener) error {
	s.mu.Lock()
	if s.lis == nil {
		s.mu.Unlock()
		return ErrServerStopped
	}
	s.lis[lis] = true
	s.mu.Unlock()
	defer func() {
		lis.Close()
		s.mu.Lock()
		delete(s.lis, lis)
		s.mu.Unlock()
	}()
	for {
		c, err := lis.Accept()
		if err != nil {
			return err
		}
		var authInfo credentials.AuthInfo
		if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok {
			c, authInfo, err = creds.ServerHandshake(c)
			if err != nil {
				grpclog.Println("grpc: Server.Serve failed to complete security handshake.")
				continue
			}
		}
		s.mu.Lock()
		if s.conns == nil {
			s.mu.Unlock()
			c.Close()
			return nil
		}
		st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
		if err != nil {
			s.mu.Unlock()
			c.Close()
			grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
			continue
		}
		s.conns[st] = true
		s.mu.Unlock()

		go func() {
			st.HandleStreams(func(stream *transport.Stream) {
				s.handleStream(st, stream)
			})
			s.mu.Lock()
			delete(s.conns, st)
			s.mu.Unlock()
		}()
	}
}
Example #3
func main() {
	grpclog.Println("start server...")
	flag.Parse()
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
	if err != nil {
		grpclog.Fatal("failed to listen: %v", err)
	}
	grpcServer := grpc.NewServer()
	pb.RegisterTalkMessageServer(grpcServer, newServer())
	if err := grpcServer.Serve(lis); err != nil {
		grpclog.Fatalf("failed to serve: %v", err)
	}
	grpclog.Println("server shutdown...")
}
Example #4
func (rr *roundRobin) watchAddrUpdates() error {
	updates, err := rr.w.Next()
	if err != nil {
		grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err)
		return err
	}
	rr.mu.Lock()
	defer rr.mu.Unlock()
	for _, update := range updates {
		addr := Address{
			Addr:     update.Addr,
			Metadata: update.Metadata,
		}
		switch update.Op {
		case naming.Add:
			var exist bool
			for _, v := range rr.addrs {
				if addr == v.addr {
					exist = true
					grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr)
					break
				}
			}
			if exist {
				continue
			}
			rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
		case naming.Delete:
			for i, v := range rr.addrs {
				if addr == v.addr {
					copy(rr.addrs[i:], rr.addrs[i+1:])
					rr.addrs = rr.addrs[:len(rr.addrs)-1]
					break
				}
			}
		default:
			grpclog.Println("Unknown update.Op ", update.Op)
		}
	}
	// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals get notified.
	open := make([]Address, len(rr.addrs))
	for i, v := range rr.addrs {
		open[i] = v.addr
	}
	if rr.done {
		return ErrClientConnClosing
	}
	rr.addrCh <- open
	return nil
}
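watchAddrUpdates handles one batch of naming updates per call, so a caller has to drive it in a loop. A minimal sketch of such a driver, assuming it is spawned from the balancer's Start (the loop shape mirrors how grpc-go's roundRobin drives this method; the surrounding setup is not shown in this example):

// Sketch: poll the watcher until it fails or the balancer shuts down.
go func() {
	for {
		if err := rr.watchAddrUpdates(); err != nil {
			return
		}
	}
}()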
Example #5
// Start starts the stats collection and reporting if there is a registered stats handler.
func Start() {
	if handler == nil {
		grpclog.Println("handler is nil when starting stats. Stats is not started")
		return
	}
	atomic.StoreInt32(on, 1)
}
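A matching Stop would clear the flag set here; a minimal sketch of an assumed counterpart (not shown in this example):

// Sketch (assumed): disable stats collection by clearing the flag.
func Stop() {
	atomic.StoreInt32(on, 0)
}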
Example #6
File: main.go Project: polvi/rolo
func main() {
	flag.Parse()
	lis, err := net.Listen("tcp", *serverAddr)
	if err != nil {
		grpclog.Fatalf("failed to listen: %v", err)
	}
	var opts []grpc.ServerOption
	if *tls {
		creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
		if err != nil {
			grpclog.Fatalf("Failed to generate credentials %v", err)
		}
		opts = append(opts, grpc.Creds(creds))
	}
	grpcServer := grpc.NewServer(opts...)
	oidcClient, err := util.GetOIDCClient(*clientID, *clientSecret, *discovery, *redirectURL)
	if err != nil {
		grpclog.Fatalf("unable to get oidc client: %s", err)
	}
	s, err := server.NewRoloServer(oidcClient, *policyFile)
	if err != nil {
		grpclog.Fatalln("unable to create ca from parent:", err)
	}
	pb.RegisterRoloServer(grpcServer, s)
	grpclog.Println("serving at", *serverAddr)
	grpcServer.Serve(lis)
}
Example #7
func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
	s, ok := t.getStream(f)
	if !ok {
		return
	}
	s.mu.Lock()
	if s.state == streamDone {
		s.mu.Unlock()
		return
	}
	s.state = streamDone
	if !s.headerDone {
		close(s.headerChan)
		s.headerDone = true
	}
	s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)]
	if !ok {
		grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
		s.statusCode = codes.Unknown
	}
	s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode)
	close(s.done)
	s.mu.Unlock()
	s.write(recvMsg{err: io.EOF})
}
Example #8
func doPerRPCCreds(tc testpb.TestServiceClient) {
	jsonKey := getServiceAccountJSONKey()
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
	req := &testpb.SimpleRequest{
		ResponseType:   testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize:   proto.Int32(int32(largeRespSize)),
		Payload:        pl,
		FillUsername:   proto.Bool(true),
		FillOauthScope: proto.Bool(true),
	}
	token := getToken()
	kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken}
	ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}})
	reply, err := tc.UnaryCall(ctx, req)
	if err != nil {
		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
	}
	user := reply.GetUsername()
	scope := reply.GetOauthScope()
	if !strings.Contains(string(jsonKey), user) {
		grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
	}
	if !strings.Contains(*oauthScope, scope) {
		grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope)
	}
	grpclog.Println("PerRPCCreds done")
}
Example #9
func doCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v compleled with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}
Example #10
func doClientStreaming(tc testpb.TestServiceClient) {
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, s)
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
		}
		sum += s
		grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum)

	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		grpclog.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
	grpclog.Println("ClientStreaming done")
}
Example #11
// Start starts the stats collection and processing if there is a registered stats handler.
func Start() {
	if rpcHandler == nil && connHandler == nil {
		grpclog.Println("rpcHandler and connHandler are both nil when starting stats. Stats is not started")
		return
	}
	atomic.StoreInt32(on, 1)
}
Example #12
func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan remoteBalancerInfo) error {
	updates, err := w.Next()
	if err != nil {
		return err
	}
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.done {
		return grpc.ErrClientConnClosing
	}
	var bAddr remoteBalancerInfo
	if len(b.rbs) > 0 {
		bAddr = b.rbs[0]
	}
	for _, update := range updates {
		addr := grpc.Address{
			Addr:     update.Addr,
			Metadata: update.Metadata,
		}
		switch update.Op {
		case naming.Add:
			var exist bool
			for _, v := range b.rbs {
				// TODO: Is the same addr with different server name a different balancer?
				if addr == v.addr {
					exist = true
					break
				}
			}
			if exist {
				continue
			}
			b.rbs = append(b.rbs, remoteBalancerInfo{addr: addr})
		case naming.Delete:
			for i, v := range b.rbs {
				if addr == v.addr {
					copy(b.rbs[i:], b.rbs[i+1:])
					b.rbs = b.rbs[:len(b.rbs)-1]
					break
				}
			}
		default:
			grpclog.Println("Unknown update.Op ", update.Op)
		}
	}
	// TODO: Fall back to the basic round-robin load balancing if the resulting address is
	// not a load balancer.
	if len(b.rbs) > 0 {
		// For simplicity, always use the first one now. May revisit this decision later.
		if b.rbs[0] != bAddr {
			select {
			case <-ch:
			default:
			}
			ch <- b.rbs[0]
		}
	}
	return nil
}
Example #13
func getHelloWorld(client pb.TalkMessageClient, content string) {
	msg, err := client.GetHelloWorld(context.Background(), &pb.Msg{Content: content})
	if err != nil {
		grpclog.Fatalf("fail to call getHelloWorld: %v", err)
	}

	grpclog.Println("get Message from Server: ", msg.Content)
}
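A hedged usage sketch, assuming the generated pb.NewTalkMessageClient constructor (implied by RegisterTalkMessageServer in Example #3) and the connect helper from Example #18:

// Sketch: dial, build a client, and call the helper above.
conn := connect(serverAddr)
defer conn.Close()
getHelloWorld(pb.NewTalkMessageClient(conn), "hello world")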
Example #14
func main() {
	flag.Parse()
	go func() {
		lis, err := net.Listen("tcp", ":0")
		if err != nil {
			grpclog.Fatalf("Failed to listen: %v", err)
		}
		grpclog.Println("Server profiling address: ", lis.Addr().String())
		if err := http.Serve(lis, nil); err != nil {
			grpclog.Fatalf("Failed to serve: %v", err)
		}
	}()
	addr, stopper := benchmark.StartServer()
	grpclog.Println("Server Address: ", addr)
	<-time.After(time.Duration(*duration) * time.Second)
	stopper()
}
Example #15
func logParameterInfo(addresses []string, tests []testCaseWithWeight) {
	grpclog.Printf("server_addresses: %s", *serverAddresses)
	grpclog.Printf("test_cases: %s", *testCases)
	grpclog.Printf("test_duration-secs: %d", *testDurationSecs)
	grpclog.Printf("num_channels_per_server: %d", *numChannelsPerServer)
	grpclog.Printf("num_stubs_per_channel: %d", *numStubsPerChannel)
	grpclog.Printf("metrics_port: %d", *metricsPort)

	grpclog.Println("addresses:")
	for i, addr := range addresses {
		grpclog.Printf("%d. %s\n", i+1, addr)
	}
	grpclog.Println("tests:")
	for i, test := range tests {
		grpclog.Printf("%d. %v\n", i+1, test)
	}
}
Example #16
// printFeature gets the feature for the given point.
func printFeature(client pb.RouteGuideClient, point *pb.Point) {
	grpclog.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude)
	feature, err := client.GetFeature(context.Background(), point)
	if err != nil {
		grpclog.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err)
	}
	grpclog.Println(feature)
}
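A hedged usage sketch (the coordinate is the known-valid point used by the route_guide demo; client setup is assumed):

// Sketch: look up a feature at a known coordinate.
printFeature(client, &pb.Point{Latitude: 409146138, Longitude: -746188906})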
Example #17
func doEmptyUnaryCall(tc testpb.TestServiceClient) {
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{})
	if err != nil {
		grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err)
	}
	if !proto.Equal(&testpb.Empty{}, reply) {
		grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{})
	}
	grpclog.Println("EmptyUnaryCall done")
}
Example #18
func connect(srvAddr *string) *grpc.ClientConn {
	conn, err := grpc.Dial(*srvAddr, grpc.WithInsecure())
	if err != nil {
		grpclog.Fatalf("failed to dial: %v", err)
	}
	grpclog.Println("client started...")
	return conn
}
Example #19
func doEmptyStream(tc testpb.TestServiceClient) {
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	if err := stream.CloseSend(); err != nil {
		grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
	}
	if _, err := stream.Recv(); err != io.EOF {
		grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err)
	}
	grpclog.Println("Emptystream done")
}
Example #20
func doCancelAfterBegin(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	cancel()
	_, err = stream.CloseAndRecv()
	if grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterBegin done")
}
Example #21
func doTimeoutOnSleepingServer(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		if grpc.Code(err) == codes.DeadlineExceeded {
			grpclog.Println("TimeoutOnSleepingServer done")
			return
		}
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		Payload:      pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded {
		grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded)
	}
	grpclog.Println("TimeoutOnSleepingServer done")
}
Example #22
func Chat(letters ...string) error {
	// get connection for chat
	conn := connect(serverAddr)
	defer conn.Close()
	client := pb.NewChatClient(conn)

	stream, err := client.Chat(context.Background())
	if err != nil {
		grpclog.Println("%v.Chat(_) = _, %v", client, err) // better logging
		return err
	}

	// receive msgs in the background; waitc closes when the stream ends.
	waitc := make(chan struct{})
	var receiveErr error
	go func() {
		defer close(waitc)
		for {
			in, err := stream.Recv()
			if err == io.EOF {
				// read done
				return
			}
			if err != nil {
				grpclog.Printf("Failed to receive a msg: %v", err)
				receiveErr = err
				return
			}
			grpclog.Printf("client -- server status: %s", in.Content)
		}
	}()

	// send msg
	for _, str := range letters {
		grpclog.Printf("client -- send msg: %v", str)
		if err := stream.Send(&pb.Msg{Content: str, Title: title}); err != nil {
			grpclog.Printf("%v.Send(%v) = %v", stream, str, err) // need better logging
			return err
		}
	}

	// close the send direction, then wait for the receiver to drain.
	stream.CloseSend()
	<-waitc
	return receiveErr
}
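A minimal usage sketch (the letters are arbitrary):

// Sketch: run a short chat session and surface any stream error.
if err := Chat("hello", "world"); err != nil {
	grpclog.Fatalf("chat failed: %v", err)
}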
Example #23
func main() {
	flag.Parse()
	go func() {
		lis, err := net.Listen("tcp", ":0")
		if err != nil {
			grpclog.Fatalf("Failed to listen: %v", err)
		}
		grpclog.Println("Client profiling address: ", lis.Addr().String())
		if err := http.Serve(lis, nil); err != nil {
			grpclog.Fatalf("Failed to serve: %v", err)
		}
	}()
	closeLoop()
}
Example #24
// 	LetsStreaming(TalkMessage_LetsStreamingServer) error
func (*server) LetsStreaming(stream pb.TalkMessage_LetsStreamingServer) error {
	for {
		in, err := stream.Recv()
		// end of the streaming
		if err == io.EOF {
			grpclog.Println("finished stream")
			return nil
		}
		if err != nil {
			grpclog.Printf("returned with error %v", err)
			return err
		}
		content := in.Content
		revMsg := "received message from streaming: " + content + "->" + strings.ToUpper(content)
		grpclog.Println(revMsg)
		sleep := 5
		grpclog.Printf("wait for %v seconds for response", sleep)
		time.Sleep(time.Duration(sleep) * time.Second)

		if err := stream.Send(&pb.Msg{Content: revMsg}); err != nil {
			return err
		}
	}
}
Example #25
func closeLoopStream() {
	s, conn, tc := buildConnection()
	ch := make(chan int, *maxConcurrentRPCs*4)
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(*maxConcurrentRPCs)
	// Distribute RPCs over maxConcurrentRPCs workers.
	for i := 0; i < *maxConcurrentRPCs; i++ {
		go func() {
			stream, err := tc.StreamingCall(context.Background())
			if err != nil {
				grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
			}
			// Do some warm up.
			for i := 0; i < 100; i++ {
				streamCaller(stream)
			}
			for range ch {
				start := time.Now()
				streamCaller(stream)
				elapse := time.Since(start)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	// Stop the client when time is up.
	done := make(chan struct{})
	go func() {
		<-time.After(time.Duration(*duration) * time.Second)
		close(done)
	}()
	ok := true
	for ok {
		select {
		case ch <- 0:
		case <-done:
			ok = false
		}
	}
	close(ch)
	wg.Wait()
	conn.Close()
	grpclog.Println(s.String())
}
Example #26
// processUpdates calls Watcher.Next() once and processes the obtained updates.
func (p *unicastNamingPicker) processUpdates() error {
	// 1. Wait for the next batch of updates.
	updates, err := p.watcher.Next()
	if err != nil {
		return err
	}

	// 2. Apply the updates.
	for _, update := range updates {
		switch update.Op {
		case naming.Add:
			p.mu.Lock()
			p.addrs.PushBack(&addrInfo{
				addr: update.Addr,
			})
			p.mu.Unlock()
			// Initial connection setup
			if p.conn == nil {
				conn, err := NewConn(p.cc)
				if err != nil {
					return err
				}
				p.conn = conn
			}
		case naming.Delete:
			p.mu.Lock()
			// Remove the address from addrs.
			for e := p.addrs.Front(); e != nil; e = e.Next() {
				if update.Addr == e.Value.(*addrInfo).addr {
					if e == p.pickedAddr {
						// Do not remove the element now if it is the current picked
						// one. We leave the deletion to the next PickAddr() call.
						e.Value.(*addrInfo).deleting = true
						// Notify Conn to close it. All the live RPCs on this connection
						// will be aborted.
						p.conn.NotifyReset()
					} else {
						p.addrs.Remove(e)
					}
				}
			}
			p.mu.Unlock()
		default:
			grpclog.Println("Unknown update.Op ", update.Op)
		}
	}
	return nil
}
Example #27
// serveNewHTTP2Transport sets up a new http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go) and
// serves streams on it.
// This is run in its own goroutine (it does network I/O in
// transport.NewServerTransport).
func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
	st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
	if err != nil {
		s.mu.Lock()
		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
		s.mu.Unlock()
		c.Close()
		grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
		return
	}
	if !s.addConn(st) {
		st.Close()
		return
	}
	s.serveStreams(st)
}
Example #28
func closeLoop() {
	s := stats.NewStats(256)
	conn := benchmark.NewClientConn(*server)
	tc := testpb.NewTestServiceClient(conn)
	// Warm up connection.
	for i := 0; i < 100; i++ {
		caller(tc)
	}
	ch := make(chan int, *maxConcurrentRPCs*4)
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(*maxConcurrentRPCs)
	// Distribute RPCs over maxConcurrentRPCs workers.
	for i := 0; i < *maxConcurrentRPCs; i++ {
		go func() {
			for range ch {
				start := time.Now()
				caller(tc)
				elapse := time.Since(start)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	// Stop the client when time is up.
	done := make(chan struct{})
	go func() {
		<-time.After(time.Duration(*duration) * time.Second)
		close(done)
	}()
	ok := true
	for ok {
		select {
		case ch <- 0:
		case <-done:
			ok = false
		}
	}
	close(ch)
	wg.Wait()
	conn.Close()
	grpclog.Println(s.String())
}
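The caller helper is not shown; a plausible sketch, assuming grpc-go's benchmark.DoUnaryCall helper with one-byte request and response payloads:

// Sketch (assumed): issue one unary benchmark RPC per invocation.
func caller(tc testpb.TestServiceClient) {
	benchmark.DoUnaryCall(tc, 1, 1)
}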
Example #29
// Serve accepts incoming connections on the listener lis, creating a new
// ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails.
func (s *Server) Serve(lis net.Listener) error {
	s.mu.Lock()
	s.printf("serving")
	if s.lis == nil {
		s.mu.Unlock()
		return ErrServerStopped
	}
	s.lis[lis] = true
	s.mu.Unlock()
	defer func() {
		lis.Close()
		s.mu.Lock()
		delete(s.lis, lis)
		s.mu.Unlock()
	}()
	for {
		c, err := lis.Accept()
		if err != nil {
			s.mu.Lock()
			s.printf("done serving; Accept = %v", err)
			s.mu.Unlock()
			return err
		}
		var authInfo credentials.AuthInfo
		if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok {
			var conn net.Conn
			conn, authInfo, err = creds.ServerHandshake(c)
			if err != nil {
				s.mu.Lock()
				s.errorf("ServerHandshake(%q) failed: %v", c.RemoteAddr(), err)
				s.mu.Unlock()
				grpclog.Println("grpc: Server.Serve failed to complete security handshake.")
				continue
			}
			c = conn
		}
		s.mu.Lock()
		if s.conns == nil {
			s.mu.Unlock()
			c.Close()
			return nil
		}
		s.mu.Unlock()

		go s.serveNewHTTP2Transport(c, authInfo)
	}
}
Example #30
func closeLoopUnary() {
	s, conn, tc := buildConnection()
	// Warm up connection.
	for i := 0; i < 100; i++ {
		unaryCaller(tc)
	}
	ch := make(chan int, *maxConcurrentRPCs*4)
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(*maxConcurrentRPCs)
	// Distribute RPCs over maxConcurrentRPCs workers.
	for i := 0; i < *maxConcurrentRPCs; i++ {
		go func() {
			for range ch {
				start := time.Now()
				unaryCaller(tc)
				elapse := time.Since(start)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	// Stop the client when time is up.
	done := make(chan struct{})
	go func() {
		<-time.After(time.Duration(*duration) * time.Second)
		close(done)
	}()
	ok := true
	for ok {
		select {
		case ch <- 0:
		case <-done:
			ok = false
		}
	}
	close(ch)
	wg.Wait()
	conn.Close()
	grpclog.Println(s.String())
}