func TestValidateTTNAuthContext(t *testing.T) { for _, env := range strings.Split("ACCOUNT_SERVER_PROTO ACCOUNT_SERVER_URL", " ") { if os.Getenv(env) == "" { t.Skipf("Skipping auth server test: %s configured", env) } } accountServer := fmt.Sprintf("%s://%s", os.Getenv("ACCOUNT_SERVER_PROTO"), os.Getenv("ACCOUNT_SERVER_URL"), ) a := assertions.New(t) c := new(Component) c.Config.KeyDir = os.TempDir() c.Config.AuthServers = map[string]string{ "ttn-account-preview": accountServer, } err := c.initAuthServers() a.So(err, assertions.ShouldBeNil) { ctx := context.Background() _, err = c.ValidateTTNAuthContext(ctx) a.So(err, assertions.ShouldNotBeNil) } { md := metadata.Pairs() ctx := metadata.NewContext(context.Background(), md) _, err = c.ValidateTTNAuthContext(ctx) a.So(err, assertions.ShouldNotBeNil) } { md := metadata.Pairs( "id", "dev", ) ctx := metadata.NewContext(context.Background(), md) _, err = c.ValidateTTNAuthContext(ctx) a.So(err, assertions.ShouldNotBeNil) } { md := metadata.Pairs( "id", "dev", "token", "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJ0dG4tYWNjb3VudC1wcmV2aWV3Iiwic3ViIjoiZGV2IiwidHlwZSI6InJvdXRlciIsImlhdCI6MTQ3NjQzOTQzOH0.Duz-E5aMYEPY_Nf5Pky7Qmjbs1dMp9PN9nMqbSzoU079b8TPL4DH2SKcRHrrMqieB3yhJb3YaQBfY6dKWfgVz8BmTeKlGXfFrqEj91y30J7r9_VsHRzgDMJedlqXryvf0S_yD27TsJ7TMbGYyE00T4tAX3Uf6wQZDhdyHNGtdf4jtoAjzOxVAodNtXZp26LR7fFk56UstBxOxztBMzyzmAdiTG4lSyEqq7zsuJcFjmHB9MfEoD4ZT-iTRL1ohFjGuj2HN49oPyYlZAVPP7QajLyNsLnv-nDqXE_QecOjAcEq4PLNJ3DpXtX-lo8I_F1eV9yQnDdQQi4EUvxmxZWeBA", ) ctx := metadata.NewContext(context.Background(), md) _, err = c.ValidateTTNAuthContext(ctx) a.So(err, assertions.ShouldBeNil) } }
func (m authMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) { // get token from Authorization header authToken, err := authtoken.FromRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusForbidden) return } // context and metadata md := metadata.Pairs("traceID", "ABC", "fromName", "api") ctx := context.Background() ctx = metadata.NewContext(ctx, md) // verify token w/ auth service _, err = m.VerifyToken(ctx, &auth.AuthRequest{ AuthToken: authToken, }) if err != nil { http.Error(w, "Unauthorized", http.StatusForbidden) return } // Call the next handler on success. m.next.ServeHTTP(w, r) }
func (spider *spider) parseList(ctx context.Context, resp *crawl.Response) error { defer spider.c.Close() var currentTitle string resp.Find("div#unterMenu a").Each(func(_ int, s *goquery.Selection) { c, _ := s.Attr("class") switch c { case "unterMenuTitel": currentTitle = strings.ToLower(s.Text()) case "unterMenuName": ctx = metadata.NewContext(ctx, metadata.Pairs( "type", currentTitle, "title", s.Text(), )) href, _ := s.Attr("href") spider.c.Execute(ctx, &crawl.Request{ URL: strings.TrimSpace(href), Referer: resp.URL().String(), Callbacks: crawl.Callbacks("user-agents"), }) } }) close(spider.results) return nil }
func (ac *Application) Ping(c *cli.Context) { message := c.String("message") host := vapi.HostDefaultPort(c.String("host"), vapi.DefaultPortString) fmt.Printf("ping: host %s: message: %s\n", host, message) log.Tracef("request-secret:%s", ac.Cfg.Rpc.Secret) md := metadata.Pairs("request-secret", ac.Cfg.Rpc.Secret) ctx := context.Background() ctx = metadata.NewContext(ctx, md) dopts := vapi.DefaultDialOptions() con, err := vapi.Connect(host, dopts) if err != nil { log.Warnf("Connect %s: %s", host, err) } ping := &vapi.PingRequest{ Message: message, } res, err := con.Client.Ping(ctx, ping) if err != nil { log.Warnf("Ping %s: %s", host, err) } fmt.Printf("%#v\n", res) }
// ListLogMetrics lists logs-based metrics. func (c *Client) ListLogMetrics(ctx context.Context, req *google_logging_v2.ListLogMetricsRequest) *LogMetricIterator { ctx = metadata.NewContext(ctx, c.metadata) it := &LogMetricIterator{} it.apiCall = func() error { if it.atLastPage { return Done } var resp *google_logging_v2.ListLogMetricsResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error req.PageToken = it.nextPageToken req.PageSize = it.pageSize resp, err = c.client.ListLogMetrics(ctx, req) return err }, c.callOptions["ListLogMetrics"]...) if err != nil { return err } if resp.NextPageToken == "" { it.atLastPage = true } else { it.nextPageToken = resp.NextPageToken } it.items = resp.Metrics return nil } return it }
// Endpoint returns a usable endpoint that will invoke the gRPC specified by the // client. func (c Client) Endpoint() endpoint.Endpoint { return func(ctx context.Context, request interface{}) (interface{}, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() req, err := c.enc(ctx, request) if err != nil { return nil, fmt.Errorf("Encode: %v", err) } md := &metadata.MD{} for _, f := range c.before { ctx = f(ctx, md) } ctx = metadata.NewContext(ctx, *md) if err = grpc.Invoke(ctx, c.method, req, c.grpcReply, c.client); err != nil { return nil, fmt.Errorf("Invoke: %v", err) } response, err := c.dec(ctx, c.grpcReply) if err != nil { return nil, fmt.Errorf("Decode: %v", err) } return response, nil } }
// DoCustomMetadata checks that metadata is echoed back to the client.
func DoCustomMetadata(tc testpb.TestServiceClient) {
	// Testing with UnaryCall.
	pl := clientNewPayload(testpb.PayloadType_COMPRESSABLE, 1)
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(1)),
		Payload:      pl,
	}
	// Attach the custom metadata; the server is expected to echo it back in
	// both the header and the trailer, checked by validateMetadata below.
	ctx := metadata.NewContext(context.Background(), customMetadata)
	var header, trailer metadata.MD
	reply, err := tc.UnaryCall(
		ctx,
		req,
		grpc.Header(&header),
		grpc.Trailer(&trailer),
	)
	if err != nil {
		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
	}
	// Sanity-check the echoed payload before validating the metadata.
	t := reply.GetPayload().GetType()
	s := len(reply.GetPayload().GetBody())
	if t != testpb.PayloadType_COMPRESSABLE || s != 1 {
		grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, 1)
	}
	validateMetadata(header, trailer)

	// Testing with FullDuplex.
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(1),
		},
	}
	streamReq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(streamReq); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, streamReq, err)
	}
	// The header is only available once the server has started responding.
	streamHeader, err := stream.Header()
	if err != nil {
		grpclog.Fatalf("%v.Header() = %v", stream, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	if err := stream.CloseSend(); err != nil {
		grpclog.Fatalf("%v.CloseSend() = %v, want <nil>", stream, err)
	}
	// Drain the stream: a clean close surfaces as io.EOF.
	if _, err := stream.Recv(); err != io.EOF {
		grpclog.Fatalf("%v failed to complete the custom metadata test: %v", stream, err)
	}
	// The trailer is only valid after the stream has terminated.
	streamTrailer := stream.Trailer()
	validateMetadata(streamHeader, streamTrailer)
}
func (b *brokerRPC) associateRouter(md metadata.MD) (chan *pb.UplinkMessage, <-chan *pb.DownlinkMessage, func(), error) { ctx := metadata.NewContext(context.Background(), md) router, err := b.broker.ValidateNetworkContext(ctx) if err != nil { return nil, nil, nil, err } down, err := b.broker.ActivateRouter(router.Id) if err != nil { return nil, nil, nil, err } up := make(chan *pb.UplinkMessage, 1) cancel := func() { b.broker.DeactivateRouter(router.Id) } go func() { for message := range up { if waitTime := b.routerUpRate.Wait(router.Id); waitTime != 0 { b.broker.Ctx.WithField("RouterID", router.Id).WithField("Wait", waitTime).Warn("Router reached uplink rate limit") time.Sleep(waitTime) } go b.broker.HandleUplink(message) } }() return up, down, cancel, nil }
func main() { conn, err := grpc.Dial("localhost:4001", grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() c := projects.NewProjectsServiceClient(conn) if len(os.Args) < 3 { log.Fatal("You need to pass me a known command!") } ctx := metadata.NewContext(context.Background(), metadata.Pairs("client", "test-client")) switch os.Args[1] { case "search": Search(c, ctx, strings.Join(os.Args[2:], " ")) case "ssearch": SlowSearch(c, ctx, strings.Join(os.Args[2:], " ")) case "find": Find(c, ctx, os.Args[2]) case "create": if len(os.Args) < 4 { log.Fatal("pass me an id and a name") } Create(c, ctx, os.Args[2], strings.Join(os.Args[3:], " ")) default: log.Fatal("unknown command!") } }
func NewRaftProxyHealthServer(local HealthServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) HealthServer { redirectChecker := func(ctx context.Context) (context.Context, error) { s, ok := transport.StreamFromContext(ctx) if !ok { return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context") } addr := s.ServerTransport().RemoteAddr().String() md, ok := metadata.FromContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } if !ok { md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) return metadata.NewContext(ctx, md), nil } mods := []func(context.Context) (context.Context, error){redirectChecker} mods = append(mods, ctxMod) return &raftProxyHealthServer{ local: local, cluster: cluster, connSelector: connSelector, ctxMods: mods, } }
func testMetadataStreamingRPC(t *testing.T, e env) { s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e) cc := clientSetUp(t, addr, nil, nil, "", e) tc := testpb.NewTestServiceClient(cc) defer tearDown(s, cc) ctx := metadata.NewContext(context.Background(), testMetadata) stream, err := tc.FullDuplexCall(ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want <nil>", tc, err) } go func() { headerMD, err := stream.Header() if e.security == "tls" { delete(headerMD, "transport_security_type") } if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { t.Errorf("#1 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata) } // test the cached value. headerMD, err = stream.Header() if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { t.Errorf("#2 %v.Header() = %v, %v, want %v, <nil>", stream, headerMD, err, testMetadata) } var index int for index < len(reqSizes) { respParam := []*testpb.ResponseParameters{ { Size: proto.Int32(int32(respSizes[index])), }, } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])) if err != nil { t.Fatal(err) } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseParameters: respParam, Payload: payload, } if err := stream.Send(req); err != nil { t.Errorf("%v.Send(%v) = %v, want <nil>", stream, req, err) return } index++ } // Tell the server we're done sending args. stream.CloseSend() }() for { if _, err := stream.Recv(); err != nil { break } } trailerMD := stream.Trailer() if !reflect.DeepEqual(testMetadata, trailerMD) { t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testMetadata) } }
// ListSubscriptions lists matching subscriptions. func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest) *SubscriptionIterator { md, _ := metadata.FromContext(ctx) ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) it := &SubscriptionIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) { var resp *pubsubpb.ListSubscriptionsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context) error { var err error resp, err = c.subscriberClient.ListSubscriptions(ctx, req) return err }, c.CallOptions.ListSubscriptions...) if err != nil { return nil, "", err } return resp.Subscriptions, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it }
func testMetadataUnaryRPC(t *testing.T, e env) { s, addr := serverSetUp(t, nil, math.MaxUint32, nil, nil, e) cc := clientSetUp(t, addr, nil, nil, "", e) tc := testpb.NewTestServiceClient(cc) defer tearDown(s, cc) argSize := 2718 respSize := 314 payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(respSize)), Payload: payload, } var header, trailer metadata.MD ctx := metadata.NewContext(context.Background(), testMetadata) if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil { t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, <nil>", ctx, err) } if !reflect.DeepEqual(testMetadata, header) { t.Fatalf("Received header metadata %v, want %v", header, testMetadata) } if !reflect.DeepEqual(testMetadata, trailer) { t.Fatalf("Received trailer metadata %v, want %v", trailer, testMetadata) } }
// WithMetadataForwardTLSInfo reads certificate from context and returns context where // ForwardCert is set based on original certificate. func WithMetadataForwardTLSInfo(ctx context.Context) (context.Context, error) { md, ok := metadata.FromContext(ctx) if !ok { md = metadata.MD{} } ous := []string{} org := "" cn := "" certSubj, err := certSubjectFromContext(ctx) if err == nil { cn = certSubj.CommonName ous = certSubj.OrganizationalUnit if len(certSubj.Organization) > 0 { org = certSubj.Organization[0] } } // If there's no TLS cert, forward with blank TLS metadata. // Note that the presence of this blank metadata is extremely // important. Without it, it would look like manager is making // the request directly. md[certForwardedKey] = []string{"true"} md[certCNKey] = []string{cn} md[certOrgKey] = []string{org} md[certOUKey] = ous peer, ok := peer.FromContext(ctx) if ok { md[remoteAddrKey] = []string{peer.Addr.String()} } return metadata.NewContext(ctx, md), nil }
// encode metadata to context in grpc specific way func (f *framework) makeGRPCContext(ctx context.Context) context.Context { md := metadata.MD{ "taskID": strconv.FormatUint(f.taskID, 10), "epoch": strconv.FormatUint(f.epoch, 10), } return metadata.NewContext(ctx, md) }
func (h *handlerManager) validateTTNAuthAppContext(ctx context.Context, appID string) (context.Context, *claims.Claims, error) { md, err := api.MetadataFromContext(ctx) if err != nil { return ctx, nil, err } // If token is empty, try to get the access key and convert it into a token token, err := api.TokenFromMetadata(md) if err != nil || token == "" { key, err := api.KeyFromMetadata(md) if err != nil { return ctx, nil, errors.NewErrInvalidArgument("Metadata", "neither token nor key present") } token, err := h.handler.Component.ExchangeAppKeyForToken(appID, key) if err != nil { return ctx, nil, err } md = metadata.Join(md, metadata.Pairs("token", token)) ctx = metadata.NewContext(ctx, md) } claims, err := h.handler.Component.ValidateTTNAuthContext(ctx) if err != nil { return ctx, nil, err } if h.clientRate.Limit(claims.Subject) { return ctx, claims, grpc.Errorf(codes.ResourceExhausted, "Rate limit for client reached") } if h.applicationRate.Limit(appID) { return ctx, claims, grpc.Errorf(codes.ResourceExhausted, "Rate limit for application reached") } return ctx, claims, nil }
func (b *brokerRPC) getHandlerPublish(md metadata.MD) (chan *pb.DownlinkMessage, error) { ctx := metadata.NewContext(context.Background(), md) handler, err := b.broker.ValidateNetworkContext(ctx) if err != nil { return nil, err } ch := make(chan *pb.DownlinkMessage, 1) go func() { for message := range ch { go func(downlink *pb.DownlinkMessage) { // Get latest Handler metadata handler, err := b.broker.Component.Discover("handler", handler.Id) if err != nil { return } for _, announcedID := range handler.AppIDs() { if announcedID == downlink.AppId { if waitTime := b.handlerDownRate.Wait(handler.Id); waitTime != 0 { b.broker.Ctx.WithField("HandlerID", handler.Id).WithField("Wait", waitTime).Warn("Handler reached downlink rate limit") time.Sleep(waitTime) } b.broker.HandleDownlink(downlink) return } } }(message) } }() return ch, nil }
func main() { fs := flag.NewFlagSet("", flag.ExitOnError) var ( grpcAddr = fs.String("grpc.addr", ":8002", "Address for gRPC server") accessToken = fs.String("grpc.token", "test", "JWT used to gRPC calls") ) flag.Usage = fs.Usage // only show our flags fs.Parse(os.Args[1:]) conn, err := grpc.Dial(*grpcAddr) if err != nil { fmt.Printf("Error: %v", err) } // create client and call client := protobuf.NewAddClient(conn) // create context with JWT md := metadata.Pairs("token", *accessToken) ctx := context.Background() ctx = metadata.NewContext(ctx, md) var header, trailer metadata.MD result, err := client.Add(ctx, &protobuf.AddRequest{A: 1, B: 2}, grpc.Header(&header), grpc.Trailer(&trailer)) if err != nil { fmt.Printf("Error: %v", err) } fmt.Printf("Result: %v\n", result.V) conn.Close() }
func NewG5AuthenticatorContext() *G5AuthenticatorContext { mux := http.NewServeMux() srv := httptest.NewServer(mux) authCtx := metadata.NewContext( context.Background(), map[string][]string{"authorization": []string{"bearer 12345"}}, ) config := G5AuthenticatorConfig{ TimeoutDuration: 100 * time.Millisecond, MagicalTokenOfSupremePower: "bacon", AuthHostname: strings.TrimLeft(srv.URL, "http://"), } ctx := &G5AuthenticatorContext{ Context: authCtx, Authenticator: NewG5Authenticator(config), Server: srv, MeStatus: http.StatusOK, MeJSON: `{"email":"*****@*****.**"}`, } mux.HandleFunc("/v1/me", func(w http.ResponseWriter, r *http.Request) { ctx.AuthCalled = true ctx.PassedHeader = r.Header.Get("Authorization") if ctx.MeStatus != http.StatusOK { w.WriteHeader(ctx.MeStatus) return } fmt.Fprintf(w, ctx.MeJSON) }) return ctx }
// ListMonitoredResourceDescriptors lists the monitored resource descriptors used by Stackdriver Logging. func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator { md, _ := metadata.FromContext(ctx) ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata)) it := &MonitoredResourceDescriptorIterator{} fetch := func(pageSize int, pageToken string) (string, error) { var resp *loggingpb.ListMonitoredResourceDescriptorsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context) error { var err error resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req) return err }, c.CallOptions.ListMonitoredResourceDescriptors...) if err != nil { return "", err } it.items = append(it.items, resp.ResourceDescriptors...) return resp.NextPageToken, nil } bufLen := func() int { return len(it.items) } takeBuf := func() interface{} { b := it.items it.items = nil return b } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, bufLen, takeBuf) return it }
func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ResourceAllocatorServer { redirectChecker := func(ctx context.Context) (context.Context, error) { s, ok := transport.StreamFromContext(ctx) if !ok { return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context") } addr := s.ServerTransport().RemoteAddr().String() md, ok := metadata.FromContext(ctx) if ok && len(md["redirect"]) != 0 { return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) } if !ok { md = metadata.New(map[string]string{}) } md["redirect"] = append(md["redirect"], addr) return metadata.NewContext(ctx, md), nil } remoteMods := []func(context.Context) (context.Context, error){redirectChecker} remoteMods = append(remoteMods, remoteCtxMod) var localMods []func(context.Context) (context.Context, error) if localCtxMod != nil { localMods = []func(context.Context) (context.Context, error){localCtxMod} } return &raftProxyResourceAllocatorServer{ local: local, connSelector: connSelector, localCtxMods: localMods, remoteCtxMods: remoteMods, } }
func doPerRPCCreds(tc testpb.TestServiceClient) { jsonKey := getServiceAccountJSONKey() pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(), ResponseSize: proto.Int32(int32(largeRespSize)), Payload: pl, FillUsername: proto.Bool(true), FillOauthScope: proto.Bool(true), } token := getToken() kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken} ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) reply, err := tc.UnaryCall(ctx, req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } user := reply.GetUsername() scope := reply.GetOauthScope() if !strings.Contains(string(jsonKey), user) { grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) } if !strings.Contains(*oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope) } grpclog.Println("PerRPCCreds done") }
func TestGRPCRequireLeader(t *testing.T) { defer testutil.AfterTest(t) cfg := ClusterConfig{Size: 3} clus := newClusterV3NoClients(t, &cfg) defer clus.Terminate(t) clus.Members[1].Stop(t) clus.Members[2].Stop(t) client, err := NewClientV3(clus.Members[0]) if err != nil { t.Fatalf("cannot create client: %v", err) } defer client.Close() // wait for election timeout, then member[0] will not have a leader. time.Sleep(time.Duration(3*electionTicks) * tickDuration) md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) ctx := metadata.NewContext(context.Background(), md) reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} if _, err := toGRPC(client).KV.Put(ctx, reqput); grpc.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader) } }
func (s *ProxyHappySuite) TestDirectorErrorIsPropagated() { // See SetupSuite where the StreamDirector has a special case. ctx := metadata.NewContext(s.ctx, metadata.Pairs(rejectingMdKey, "true")) _, err := s.testClient.Ping(ctx, &pb.PingRequest{Value: "foo"}) require.Error(s.T(), err, "Director should reject this RPC") assert.Equal(s.T(), codes.PermissionDenied, grpc.Code(err)) assert.Equal(s.T(), "testing rejection", grpc.ErrorDesc(err)) }
// Context returns monitor connection context for gateway func (cl *gatewayClient) Context() (monitorContext context.Context) { cl.RLock() defer cl.RUnlock() return metadata.NewContext(context.Background(), metadata.Pairs( "id", cl.id, "token", cl.token, )) }
func (client *BfTrderClient) CancleOrder(req *BfCancelOrderReq) { ctx := context.Background() ctx = metadata.NewContext(ctx, metadata.Pairs("clientid", clientId_)) ctx, cancel := context.WithDeadline(ctx, time.Now().Add(deadline*time.Second)) defer cancel() client.Gateway.CancelOrder(ctx, req) }
//===datafeed api=== func (client *BfTrderClient) InsertContract(req *BfContractData) { ctx := context.Background() ctx = metadata.NewContext(ctx, metadata.Pairs("clientid", clientId_)) ctx, cancel := context.WithDeadline(ctx, time.Now().Add(deadline*time.Second)) defer cancel() client.Datafeed.InsertContract(ctx, req) }
func (client *BfTrderClient) DeleteTick(req *BfDeleteTickReq) { ctx := context.Background() ctx = metadata.NewContext(ctx, metadata.Pairs("clientid", clientId_)) ctx, cancel := context.WithDeadline(ctx, time.Now().Add(deadline*time.Second)) defer cancel() client.Datafeed.DeleteTick(ctx, req) }
func (client *BfTrderClient) QueryOrders() { ctx := context.Background() ctx = metadata.NewContext(ctx, metadata.Pairs("clientid", clientId_)) ctx, cancel := context.WithDeadline(ctx, time.Now().Add(deadline*time.Second)) defer cancel() client.Gateway.QueryOrders(ctx, &BfVoid{}) }
func TestPerRPCCredentials(t *testing.T) { l, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatal(err) } s := grpc.NewServer() go func() { if err := s.Serve(l); err != nil { t.Fatal(err) } }() defer s.TestingCloseConns() var ms testMetaServer RegisterMetaServer(s, &ms) var wg sync.WaitGroup for i := 0; i < 50; i++ { wg.Add(1) go func(i int) { defer wg.Done() time.Sleep(time.Duration(i%10) * 5 * time.Millisecond) key := fmt.Sprintf("key%d", i) ctx := context.Background() ctx = WithGRPCEndpoint(ctx, &url.URL{Host: l.Addr().String()}) ctx = WithCredentials(ctx, oauth2.StaticTokenSource(&oauth2.Token{TokenType: "x", AccessToken: key})) ctx = metadata.NewContext(ctx, metadata.MD{"want-access-token": "x " + key}) c := NewClientFromContext(ctx) if _, err := c.Meta.Status(ctx, &pbtypes.Void{}); err != nil { t.Fatal(err) } }(i) } wg.Wait() out, err := exec.Command("netstat", "-ntap").CombinedOutput() if err == nil { lines := bytes.Split(out, []byte("\n")) var conns, timeWaits int addr := strings.Replace(l.Addr().String(), "[::]", "::1", 1) for _, line := range lines { if bytes.Contains(line, []byte(addr)) { conns++ if bytes.Contains(line, []byte("TIME_WAIT")) { timeWaits++ } } } t.Logf("lingering connections count: %d", conns) t.Logf(" in TIME_WAIT state: %d", timeWaits) t.Log("(ideally, there should be 0 lingering connections)") } else { t.Logf("warning: error running `netstat -ntap` to check # of TIME_WAIT conns: %s", err) } }