// New returns an AddService backed by an HTTP server living at the remote
// instance. We expect instance to come from a service discovery system, so
// likely of the form "host:port".
func New(instance string, tracer stdopentracing.Tracer, logger log.Logger) (addsvc.Service, error) {
	if !strings.HasPrefix(instance, "http") {
		instance = "http://" + instance
	}
	u, err := url.Parse(instance)
	if err != nil {
		return nil, err
	}

	// We construct a single ratelimiter middleware, to limit the total outgoing
	// QPS from this client to all methods on the remote instance. We also
	// construct per-endpoint circuitbreaker middlewares to demonstrate how
	// that's done, although they could easily be combined into a single breaker
	// for the entire remote instance, too.
	limiter := ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(100, 100))

	var sumEndpoint endpoint.Endpoint
	{
		sumEndpoint = httptransport.NewClient(
			"POST",
			copyURL(u, "/sum"),
			addsvc.EncodeHTTPGenericRequest,
			addsvc.DecodeHTTPSumResponse,
			httptransport.ClientBefore(opentracing.ToHTTPRequest(tracer, logger)),
		).Endpoint()
		sumEndpoint = opentracing.TraceClient(tracer, "Sum")(sumEndpoint)
		sumEndpoint = limiter(sumEndpoint)
		sumEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{
			Name:    "Sum",
			Timeout: 30 * time.Second,
		}))(sumEndpoint)
	}

	var concatEndpoint endpoint.Endpoint
	{
		concatEndpoint = httptransport.NewClient(
			"POST",
			copyURL(u, "/concat"),
			addsvc.EncodeHTTPGenericRequest,
			addsvc.DecodeHTTPConcatResponse,
			httptransport.ClientBefore(opentracing.ToHTTPRequest(tracer, logger)),
		).Endpoint()
		concatEndpoint = opentracing.TraceClient(tracer, "Concat")(concatEndpoint)
		concatEndpoint = limiter(concatEndpoint)
		concatEndpoint = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{
			Name:    "Concat",
			Timeout: 30 * time.Second,
		}))(concatEndpoint)
	}

	return addsvc.Endpoints{
		SumEndpoint:    sumEndpoint,
		ConcatEndpoint: concatEndpoint,
	}, nil
}
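The constructor calls a copyURL helper that isn't shown in this excerpt. A minimal sketch of what it could look like, assuming all it needs to do is clone the parsed base URL and swap in the per-method path:

// copyURL is assumed to return a copy of the base URL with only the path
// replaced, so each endpoint client gets its own *url.URL value.
func copyURL(base *url.URL, path string) *url.URL {
	next := *base // shallow copy is enough here; only Path changes
	next.Path = path
	return &next
}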
func main() {
	// Flag domain. Note that gRPC transitively registers flags via its import
	// of glog. So, we define a new flag set, to keep those domains distinct.
	fs := flag.NewFlagSet("", flag.ExitOnError)
	var (
		debugAddr        = fs.String("debug.addr", ":8000", "Address for HTTP debug/instrumentation server")
		httpAddr         = fs.String("http.addr", ":8001", "Address for HTTP (JSON) server")
		grpcAddr         = fs.String("grpc.addr", ":8002", "Address for gRPC server")
		thriftAddr       = fs.String("thrift.addr", ":8003", "Address for Thrift server")
		thriftProtocol   = fs.String("thrift.protocol", "binary", "binary, compact, json, simplejson")
		thriftBufferSize = fs.Int("thrift.buffer.size", 0, "0 for unbuffered")
		thriftFramed     = fs.Bool("thrift.framed", false, "true to enable framing")

		proxyHTTPAddr = fs.String("proxy.http.url", "", "if set, proxy requests over HTTP to this addsvc")

		zipkinServiceName            = fs.String("zipkin.service.name", "addsvc", "Zipkin service name")
		zipkinCollectorAddr          = fs.String("zipkin.collector.addr", "", "Zipkin Scribe collector address (empty will log spans)")
		zipkinCollectorTimeout       = fs.Duration("zipkin.collector.timeout", time.Second, "Zipkin collector timeout")
		zipkinCollectorBatchSize     = fs.Int("zipkin.collector.batch.size", 100, "Zipkin collector batch size")
		zipkinCollectorBatchInterval = fs.Duration("zipkin.collector.batch.interval", time.Second, "Zipkin collector batch interval")
	)
	flag.Usage = fs.Usage // only show our flags
	fs.Parse(os.Args[1:])

	// `package log` domain
	var logger kitlog.Logger
	logger = kitlog.NewLogfmtLogger(os.Stderr)
	logger = kitlog.With(logger, "ts", kitlog.DefaultTimestampUTC, "caller", kitlog.DefaultCaller)
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) // redirect stdlib logging to us
	stdlog.SetFlags(0)                                // flags are handled in our logger

	// `package metrics` domain
	requests := metrics.NewMultiCounter(
		expvar.NewCounter("requests"),
		statsd.NewCounter(ioutil.Discard, "requests_total", time.Second),
		prometheus.NewCounter(stdprometheus.CounterOpts{
			Namespace: "addsvc",
			Subsystem: "add",
			Name:      "requests_total",
			Help:      "Total number of received requests.",
		}, []string{}),
	)
	duration := metrics.NewMultiHistogram(
		expvar.NewHistogram("duration_nanoseconds_total", 0, 100000000, 3),
		statsd.NewHistogram(ioutil.Discard, "duration_nanoseconds_total", time.Second),
		prometheus.NewSummary(stdprometheus.SummaryOpts{
			Namespace: "addsvc",
			Subsystem: "add",
			Name:      "duration_nanoseconds_total",
			Help:      "Total nanoseconds spent serving requests.",
		}, []string{}),
	)

	// `package tracing` domain
	zipkinHostPort := "localhost:1234" // TODO Zipkin makes overly simple assumptions about services
	var zipkinCollector zipkin.Collector = loggingCollector{logger}
	if *zipkinCollectorAddr != "" {
		var err error
		if zipkinCollector, err = zipkin.NewScribeCollector(
			*zipkinCollectorAddr,
			*zipkinCollectorTimeout,
			*zipkinCollectorBatchSize,
			*zipkinCollectorBatchInterval,
		); err != nil {
			logger.Log("err", err)
			os.Exit(1)
		}
	}
	zipkinMethodName := "add"
	zipkinSpanFunc := zipkin.MakeNewSpanFunc(zipkinHostPort, *zipkinServiceName, zipkinMethodName)
	zipkin.Log.Swap(logger) // log diagnostic/error details

	// Our business and operational domain
	var a Add = pureAdd
	if *proxyHTTPAddr != "" {
		codec := jsoncodec.New()
		makeResponse := func() interface{} { return &addResponse{} }

		var e endpoint.Endpoint
		e = httptransport.NewClient(*proxyHTTPAddr, codec, makeResponse, httptransport.ClientBefore(zipkin.ToRequest(zipkinSpanFunc)))
		e = zipkin.AnnotateClient(zipkinSpanFunc, zipkinCollector)(e)

		a = proxyAdd(e, logger)
	}
	a = logging(logger)(a)

	// Server domain
	var e endpoint.Endpoint
	e = makeEndpoint(a)
	e = zipkin.AnnotateServer(zipkinSpanFunc, zipkinCollector)(e)

	// Mechanical stuff
	rand.Seed(time.Now().UnixNano())
	root := context.Background()
	errc := make(chan error)

	go func() {
		errc <- interrupt()
	}()

	// Transport: HTTP (debug/instrumentation)
	go func() {
		logger.Log("addr", *debugAddr, "transport", "debug")
		errc <- http.ListenAndServe(*debugAddr, nil)
	}()

	// Transport: HTTP (JSON)
	go func() {
		ctx, cancel := context.WithCancel(root)
		defer cancel()

		field := metrics.Field{Key: "transport", Value: "http"}
		before := httptransport.BindingBefore(zipkin.ToContext(zipkinSpanFunc))
		after := httptransport.BindingAfter(httptransport.SetContentType("application/json"))
		makeRequest := func() interface{} { return &addRequest{} }

		var handler http.Handler
		handler = httptransport.NewBinding(ctx, makeRequest, jsoncodec.New(), e, before, after)
		handler = encoding.Gzip(handler)
		handler = cors.Middleware(cors.Config{})(handler)
		handler = httpInstrument(requests.With(field), duration.With(field))(handler)

		mux := http.NewServeMux()
		mux.Handle("/add", handler)

		logger.Log("addr", *httpAddr, "transport", "HTTP")
		errc <- http.ListenAndServe(*httpAddr, mux)
	}()

	// Transport: gRPC
	go func() {
		ln, err := net.Listen("tcp", *grpcAddr)
		if err != nil {
			errc <- err
			return
		}
		s := grpc.NewServer() // uses its own context?
		field := metrics.Field{Key: "transport", Value: "grpc"}

		var addServer pb.AddServer
		addServer = grpcBinding{e}
		addServer = grpcInstrument(requests.With(field), duration.With(field))(addServer)
		pb.RegisterAddServer(s, addServer)

		logger.Log("addr", *grpcAddr, "transport", "gRPC")
		errc <- s.Serve(ln)
	}()

	// Transport: Thrift
	go func() {
		ctx, cancel := context.WithCancel(root)
		defer cancel()

		var protocolFactory thrift.TProtocolFactory
		switch *thriftProtocol {
		case "binary":
			protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
		case "compact":
			protocolFactory = thrift.NewTCompactProtocolFactory()
		case "json":
			protocolFactory = thrift.NewTJSONProtocolFactory()
		case "simplejson":
			protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
		default:
			errc <- fmt.Errorf("invalid Thrift protocol %q", *thriftProtocol)
			return
		}

		var transportFactory thrift.TTransportFactory
		if *thriftBufferSize > 0 {
			transportFactory = thrift.NewTBufferedTransportFactory(*thriftBufferSize)
		} else {
			transportFactory = thrift.NewTTransportFactory()
		}
		if *thriftFramed {
			transportFactory = thrift.NewTFramedTransportFactory(transportFactory)
		}

		transport, err := thrift.NewTServerSocket(*thriftAddr)
		if err != nil {
			errc <- err
			return
		}

		field := metrics.Field{Key: "transport", Value: "thrift"}

		var service thriftadd.AddService
		service = thriftBinding{ctx, e}
		service = thriftInstrument(requests.With(field), duration.With(field))(service)

		logger.Log("addr", *thriftAddr, "transport", "Thrift")
		errc <- thrift.NewTSimpleServer4(
			thriftadd.NewAddServiceProcessor(service),
			transport,
			transportFactory,
			protocolFactory,
		).Serve()
	}()

	logger.Log("fatal", <-errc)
}
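main sends the result of an interrupt() helper onto errc, but its definition isn't part of this excerpt. A minimal sketch, assuming it should simply block until SIGINT or SIGTERM and report the received signal as an error (this version needs the os/signal and syscall imports):

// interrupt blocks until SIGINT or SIGTERM arrives, then returns the signal
// wrapped as an error so the main goroutine can log it and exit.
func interrupt() error {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	return fmt.Errorf("%s", <-c)
}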
func TestHTTPClient(t *testing.T) {
	var (
		testbody = "testbody"
		encode   = func(context.Context, *http.Request, interface{}) error { return nil }
		decode   = func(_ context.Context, r *http.Response) (interface{}, error) {
			buffer := make([]byte, len(testbody))
			r.Body.Read(buffer)
			return TestResponse{r.Body, string(buffer)}, nil
		}
		headers        = make(chan string, 1)
		headerKey      = "X-Foo"
		headerVal      = "abcde"
		afterHeaderKey = "X-The-Dude"
		afterHeaderVal = "Abides"
		afterVal       = ""
		afterFunc      = func(ctx context.Context, r *http.Response) context.Context {
			afterVal = r.Header.Get(afterHeaderKey)
			return ctx
		}
	)

	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		headers <- r.Header.Get(headerKey)
		w.Header().Set(afterHeaderKey, afterHeaderVal)
		w.WriteHeader(http.StatusOK)
		w.Write([]byte(testbody))
	}))

	client := httptransport.NewClient(
		"GET",
		mustParse(server.URL),
		encode,
		decode,
		httptransport.ClientBefore(httptransport.SetRequestHeader(headerKey, headerVal)),
		httptransport.ClientAfter(afterFunc),
	)

	res, err := client.Endpoint()(context.Background(), struct{}{})
	if err != nil {
		t.Fatal(err)
	}

	var have string
	select {
	case have = <-headers:
	case <-time.After(time.Millisecond):
		t.Fatalf("timeout waiting for %s", headerKey)
	}
	// Check that the request header was received by the server.
	if want := headerVal; want != have {
		t.Errorf("want %q, have %q", want, have)
	}

	// Check that the response header set by the server was seen by the ClientAfter func.
	if want, have := afterHeaderVal, afterVal; want != have {
		t.Errorf("want %q, have %q", want, have)
	}

	// Check that the response was successfully decoded.
	response, ok := res.(TestResponse)
	if !ok {
		t.Fatal("response should be TestResponse")
	}
	if want, have := testbody, response.String; want != have {
		t.Errorf("want %q, have %q", want, have)
	}

	// Check that the response body was closed.
	b := make([]byte, 1)
	_, err = response.Body.Read(b)
	if err == nil {
		t.Fatal("wanted error, got none")
	}
	if doNotWant, have := io.EOF, err; doNotWant == have {
		t.Errorf("do not want %q, have %q", doNotWant, have)
	}
}
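The test refers to a TestResponse type and a mustParse helper that aren't defined above. Plausible minimal definitions, assuming TestResponse only needs the two fields the assertions touch and that panicking on a bad URL is acceptable in test setup:

// TestResponse carries the raw body (so the test can verify it was closed)
// alongside the decoded string.
type TestResponse struct {
	Body   io.ReadCloser
	String string
}

// mustParse wraps url.Parse and panics on error.
func mustParse(s string) *url.URL {
	u, err := url.Parse(s)
	if err != nil {
		panic(err)
	}
	return u
}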