func TestClientWithMisbehavedServer(t *testing.T) {
	server, ct := setUp(t, 0, math.MaxUint32, misbehaved)
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo",
	}
	conn, ok := ct.(*http2Client)
	if !ok {
		t.Fatalf("Failed to convert %v to *http2Client", ct)
	}
	// Test the logic for the violation of stream flow control window size restriction.
	s, err := ct.NewStream(context.Background(), callHdr)
	if err != nil {
		t.Fatalf("Failed to open stream: %v", err)
	}
	if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
		t.Fatalf("Failed to write: %v", err)
	}
	// Read without window update.
	for {
		p := make([]byte, http2MaxFrameLen)
		if _, err = s.dec.Read(p); err != nil {
			break
		}
	}
	if s.fc.pendingData != initialWindowSize || s.fc.pendingUpdate != 0 || conn.fc.pendingData != initialWindowSize || conn.fc.pendingUpdate != 0 {
		t.Fatalf("Client mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d",
			s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize, 0, initialWindowSize, 0)
	}
	if err != io.EOF || s.statusCode != codes.Internal {
		t.Fatalf("Got err %v and the status code %d, want <EOF> and the code %d", err, s.statusCode, codes.Internal)
	}
	conn.CloseStream(s, err)
	if s.fc.pendingData != 0 || s.fc.pendingUpdate != 0 || conn.fc.pendingData != 0 || conn.fc.pendingUpdate != initialWindowSize {
		t.Fatalf("Client mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, %d",
			s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize)
	}
	// Test the logic for the violation of the connection flow control window size restriction.
	//
	// Generate enough streams to drain the connection window.
	callHdr = &CallHdr{
		Host:   "localhost",
		Method: "foo.MaxFrame",
	}
	for i := 0; i < int(initialConnWindowSize/initialWindowSize+10); i++ {
		s, err := ct.NewStream(context.Background(), callHdr)
		if err != nil {
			t.Fatalf("Failed to open stream: %v", err)
		}
		if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
			break
		}
	}
	// http2Client.errChan is closed due to connection flow control window size violation.
	<-conn.Error()
	ct.Close()
	server.stop()
}
func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
	configKey := path.Join("/", d.cluster, "_config")
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	// find cluster size
	resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
	cancel()
	if err != nil {
		if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound {
			return nil, 0, 0, ErrSizeNotFound
		}
		if err == client.ErrInvalidJSON {
			return nil, 0, 0, ErrBadDiscoveryEndpoint
		}
		if ce, ok := err.(*client.ClusterError); ok {
			plog.Error(ce.Detail())
			return d.checkClusterRetry()
		}
		return nil, 0, 0, err
	}
	size, err := strconv.Atoi(resp.Node.Value)
	if err != nil {
		return nil, 0, 0, ErrBadSizeKey
	}

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err = d.c.Get(ctx, d.cluster, nil)
	cancel()
	if err != nil {
		if ce, ok := err.(*client.ClusterError); ok {
			plog.Error(ce.Detail())
			return d.checkClusterRetry()
		}
		return nil, 0, 0, err
	}
	nodes := make([]*client.Node, 0)
	// append non-config keys to nodes
	for _, n := range resp.Node.Nodes {
		if path.Base(n.Key) != path.Base(configKey) {
			nodes = append(nodes, n)
		}
	}

	snodes := sortableNodes{nodes}
	sort.Sort(snodes)

	// find self position
	for i := range nodes {
		if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
			break
		}
		if i >= size-1 {
			return nodes[:size], size, resp.Index, ErrFullCluster
		}
	}
	return nodes, size, resp.Index, nil
}
func BenchmarkOneNode(b *testing.B) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n := newNode()
	s := NewMemoryStorage()
	r := newTestRaft(1, []uint64{1}, 10, 1, s)
	go n.run(r)
	defer n.Stop()

	n.Campaign(ctx)
	go func() {
		for i := 0; i < b.N; i++ {
			n.Propose(ctx, []byte("foo"))
		}
	}()

	for {
		rd := <-n.Ready()
		s.Append(rd.Entries)
		// a reasonable disk sync latency
		time.Sleep(1 * time.Millisecond)
		n.Advance()
		if rd.HardState.Commit == uint64(b.N+1) {
			return
		}
	}
}
// deleteRangeCommandFunc executes the "deleteRange" command.
func deleteRangeCommandFunc(c *cli.Context) {
	if len(c.Args()) == 0 {
		panic("bad arg")
	}

	var rangeEnd []byte
	key := []byte(c.Args()[0])
	if len(c.Args()) > 1 {
		rangeEnd = []byte(c.Args()[1])
	}
	conn, err := grpc.Dial(c.GlobalString("endpoint"))
	if err != nil {
		panic(err)
	}
	etcd := pb.NewEtcdClient(conn)
	req := &pb.DeleteRangeRequest{Key: key, RangeEnd: rangeEnd}

	etcd.DeleteRange(context.Background(), req)

	if rangeEnd != nil {
		fmt.Printf("range [%s, %s) is deleted\n", string(key), string(rangeEnd))
	} else {
		fmt.Printf("key %s is deleted\n", string(key))
	}
}
// txnCommandFunc executes the "txn" command.
func txnCommandFunc(c *cli.Context) {
	if len(c.Args()) != 0 {
		panic("unexpected args")
	}

	reader := bufio.NewReader(os.Stdin)

	next := compareState
	txn := &pb.TxnRequest{}
	for next != nil {
		next = next(txn, reader)
	}

	conn, err := grpc.Dial(c.GlobalString("endpoint"))
	if err != nil {
		panic(err)
	}
	etcd := pb.NewEtcdClient(conn)

	resp, err := etcd.Txn(context.Background(), txn)
	if err != nil {
		fmt.Println(err)
	}
	if resp.Succeeded {
		fmt.Println("executed success request list")
	} else {
		fmt.Println("executed failure request list")
	}
}
func doCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v completed with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}
func doClientStreaming(tc testpb.TestServiceClient) {
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, s)
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
		}
		sum += s
		grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum)
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		grpclog.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
	grpclog.Println("ClientStreaming done")
}
func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*client.Node, error) {
	if len(nodes) > size {
		nodes = nodes[:size]
	}
	// watch from the next index
	w := d.c.Watcher(d.cluster, &client.WatcherOptions{AfterIndex: index, Recursive: true})
	all := make([]*client.Node, len(nodes))
	copy(all, nodes)
	for _, n := range all {
		if path.Base(n.Key) == path.Base(d.selfKey()) {
			plog.Noticef("found self %s in the cluster", path.Base(d.selfKey()))
		} else {
			plog.Noticef("found peer %s in the cluster", path.Base(n.Key))
		}
	}

	// wait for others
	for len(all) < size {
		plog.Noticef("found %d peer(s), waiting for %d more", len(all), size-len(all))
		resp, err := w.Next(context.Background())
		if err != nil {
			if ce, ok := err.(*client.ClusterError); ok {
				plog.Error(ce.Detail())
				return d.waitNodesRetry()
			}
			return nil, err
		}
		plog.Noticef("found peer %s in the cluster", path.Base(resp.Node.Key))
		all = append(all, resp.Node)
	}
	plog.Noticef("found %d needed peer(s)", len(all))
	return all, nil
}
func (c *cluster) RemoveMember(t *testing.T, id uint64) {
	// send remove request to the cluster
	cc := mustNewHTTPClient(t, c.URLs())
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	if err := ma.Remove(ctx, types.ID(id).String()); err != nil {
		t.Fatalf("unexpected remove error %v", err)
	}
	cancel()

	newMembers := make([]*member, 0)
	for _, m := range c.Members {
		if uint64(m.s.ID()) != id {
			newMembers = append(newMembers, m)
		} else {
			select {
			case <-m.s.StopNotify():
				m.Terminate(t)
			// 1s stop delay + election timeout + 1s disk and network delay + connection write timeout
			// TODO: remove connection write timeout by selecting on http response closeNotifier
			// blocking on https://github.com/golang/go/issues/9524
			case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout):
				t.Fatalf("failed to remove member %s in time", m.s.ID())
			}
		}
	}
	c.Members = newMembers
	c.waitMembersMatch(t, c.HTTPMembers())
}
func (d *discovery) createSelf(contents string) error {
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err := d.c.Create(ctx, d.selfKey(), contents)
	cancel()
	if err != nil {
		if eerr, ok := err.(client.Error); ok && eerr.Code == client.ErrorCodeNodeExist {
			return ErrDuplicateID
		}
		return err
	}

	// ensure self appears on the server we connected to
	w := d.c.Watcher(d.selfKey(), &client.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1})
	_, err = w.Next(context.Background())
	return err
}
func TestHTTPClusterClientDoDeadlineExceedContext(t *testing.T) {
	fakeURL := url.URL{}
	tr := newFakeTransport()
	tr.finishCancel <- struct{}{}
	c := &httpClusterClient{
		clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0),
		endpoints:     []url.URL{fakeURL},
	}

	errc := make(chan error)
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
		defer cancel()
		_, _, err := c.Do(ctx, &fakeAction{})
		errc <- err
	}()

	select {
	case err := <-errc:
		if err != context.DeadlineExceeded {
			t.Errorf("err = %+v, want %+v", err, context.DeadlineExceeded)
		}
	case <-time.After(time.Second):
		t.Fatalf("unexpected timeout when waiting for request to exceed its deadline")
	}
}
func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	// create an already-cancelled context
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	body := &checkableReadCloser{ReadCloser: ioutil.NopCloser(strings.NewReader("foo"))}
	go func() {
		// wait until simpleHTTPClient sees that the context is already
		// cancelled and calls CancelRequest
		testutil.WaitSchedule()

		// response is returned before the cancellation takes effect
		tr.respchan <- &http.Response{Body: body}
	}()

	_, _, err := c.Do(ctx, &fakeAction{})
	if err == nil {
		t.Fatalf("expected non-nil error, got nil")
	}

	if !body.closed {
		t.Fatalf("expected closed body")
	}
}
func testClientStreaming(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		sum += s
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		t.Fatalf("%v.CloseAndRecv().GetAggregatedPayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
}
func TestHTTPMembersAPIListSuccess(t *testing.T) {
	wantAction := &membersAPIActionList{}
	mAPI := &httpMembersAPI{
		client: &actionAssertingHTTPClient{
			t:   t,
			act: wantAction,
			resp: http.Response{
				StatusCode: http.StatusOK,
			},
			body: []byte(`{"members":[{"id":"94088180e21eb87b","name":"node2","peerURLs":["http://127.0.0.1:7002"],"clientURLs":["http://127.0.0.1:4002"]}]}`),
		},
	}

	wantResponseMembers := []Member{
		{
			ID:         "94088180e21eb87b",
			Name:       "node2",
			PeerURLs:   []string{"http://127.0.0.1:7002"},
			ClientURLs: []string{"http://127.0.0.1:4002"},
		},
	}

	m, err := mAPI.List(context.Background())
	if err != nil {
		t.Errorf("got non-nil err: %#v", err)
	}
	if !reflect.DeepEqual(wantResponseMembers, m) {
		t.Errorf("incorrect Members: want=%#v got=%#v", wantResponseMembers, m)
	}
}
func TestHTTPMembersAPIListError(t *testing.T) {
	tests := []httpClient{
		// generic httpClient failure
		&staticHTTPClient{err: errors.New("fail!")},

		// unrecognized HTTP status code
		&staticHTTPClient{
			resp: http.Response{StatusCode: http.StatusTeapot},
		},

		// fail to unmarshal body on StatusOK
		&staticHTTPClient{
			resp: http.Response{
				StatusCode: http.StatusOK,
			},
			body: []byte(`[{"id":"XX`),
		},
	}

	for i, tt := range tests {
		mAPI := &httpMembersAPI{client: tt}
		ms, err := mAPI.List(context.Background())
		if err == nil {
			t.Errorf("#%d: got nil err", i)
		}
		if ms != nil {
			t.Errorf("#%d: got non-nil Member slice", i)
		}
	}
}
func performOneRPC(ct ClientTransport) {
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Small",
	}
	s, err := ct.NewStream(context.Background(), callHdr)
	if err != nil {
		return
	}
	opts := Options{
		Last:  true,
		Delay: false,
	}
	if err := ct.Write(s, expectedRequest, &opts); err == nil {
		time.Sleep(5 * time.Millisecond)
		// The following s.Recv()'s could error out because the
		// underlying transport is gone.
		//
		// Read response
		p := make([]byte, len(expectedResponse))
		io.ReadFull(s, p)
		// Read io.EOF
		io.ReadFull(s, p)
	}
}
func TestHTTPMembersAPIAddSuccess(t *testing.T) {
	wantAction := &membersAPIActionAdd{
		peerURLs: types.URLs([]url.URL{
			{Scheme: "http", Host: "127.0.0.1:7002"},
		}),
	}

	mAPI := &httpMembersAPI{
		client: &actionAssertingHTTPClient{
			t:   t,
			act: wantAction,
			resp: http.Response{
				StatusCode: http.StatusCreated,
			},
			body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"]}`),
		},
	}

	wantResponseMember := &Member{
		ID:       "94088180e21eb87b",
		PeerURLs: []string{"http://127.0.0.1:7002"},
	}

	m, err := mAPI.Add(context.Background(), "http://127.0.0.1:7002")
	if err != nil {
		t.Errorf("got non-nil err: %#v", err)
	}
	if !reflect.DeepEqual(wantResponseMember, m) {
		t.Errorf("incorrect Member: want=%#v got=%#v", wantResponseMember, m)
	}
}
func (c *cluster) addMember(t *testing.T, usePeerTLS bool) {
	m := mustNewMember(t, c.name(rand.Int()), usePeerTLS)
	scheme := "http"
	if usePeerTLS {
		scheme = "https"
	}

	// send add request to the cluster
	cc := mustNewHTTPClient(t, []string{c.URL(0)})
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	peerURL := scheme + "://" + m.PeerListeners[0].Addr().String()
	if _, err := ma.Add(ctx, peerURL); err != nil {
		t.Fatalf("add member on %s error: %v", c.URL(0), err)
	}
	cancel()

	// wait for the add-node entry to be applied in the cluster
	members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
	c.waitMembersMatch(t, members)

	m.InitialPeerURLsMap = types.URLsMap{}
	for _, mm := range c.Members {
		m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
	}
	m.InitialPeerURLsMap[m.Name] = m.PeerURLs
	m.NewCluster = false
	if err := m.Launch(); err != nil {
		t.Fatal(err)
	}
	c.Members = append(c.Members, m)
	// wait for the cluster to be stable before receiving future client requests
	c.waitMembersMatch(t, c.HTTPMembers())
}
// Ensure etcd will not panic when removing a just-started member.
func TestIssue2904(t *testing.T) {
	defer afterTest(t)
	// start a 1-member cluster to ensure member 0 is the leader of the cluster.
	c := NewCluster(t, 1)
	c.Launch(t)
	defer c.Terminate(t)

	c.AddMember(t)
	c.Members[1].Stop(t)

	// send remove member-1 request to the cluster.
	cc := mustNewHTTPClient(t, c.URLs())
	ma := client.NewMembersAPI(cc)
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	// the proposal is not committed because member 1 is stopped, but the
	// proposal is appended to the leader's raft log.
	ma.Remove(ctx, c.Members[1].s.ID().String())
	cancel()

	// restart member, and expect it to send an updateAttr request.
	// the log in the leader is like this:
	// [..., remove 1, ..., update attr 1, ...]
	c.Members[1].Restart(t)
	// when the member comes back, it acks the proposal to remove itself,
	// and applies it.
	<-c.Members[1].s.StopNotify()

	// terminate removed member
	c.Members[1].Terminate(t)
	c.Members = c.Members[:1]
	// wait for the member to be removed.
	c.waitMembersMatch(t, c.HTTPMembers())
}
func TestLargeMessage(t *testing.T) {
	server, ct := setUp(t, 0, math.MaxUint32, normal)
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Large",
	}
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			s, err := ct.NewStream(context.Background(), callHdr)
			if err != nil {
				t.Fatalf("failed to open stream: %v", err)
			}
			if err := ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil {
				t.Fatalf("failed to send data: %v", err)
			}
			p := make([]byte, len(expectedResponseLarge))
			_, recvErr := io.ReadFull(s, p)
			if recvErr != nil || !bytes.Equal(p, expectedResponseLarge) {
				t.Fatalf("Error: %v, want <nil>; Result len: %d, want len %d", recvErr, len(p), len(expectedResponseLarge))
			}
			_, recvErr = io.ReadFull(s, p)
			if recvErr != io.EOF {
				t.Fatalf("Error: %v; want <EOF>", recvErr)
			}
			wg.Done()
		}()
	}
	wg.Wait()
	ct.Close()
	server.stop()
}
func TestHTTPClusterClientSyncFail(t *testing.T) {
	cf := newStaticHTTPClientFactory([]staticHTTPResponse{
		{err: errors.New("fail!")},
	})

	hc := &httpClusterClient{
		clientFactory: cf,
		rand:          rand.New(rand.NewSource(0)),
	}
	err := hc.reset([]string{"http://127.0.0.1:2379"})
	if err != nil {
		t.Fatalf("unexpected error during setup: %#v", err)
	}

	want := []string{"http://127.0.0.1:2379"}
	got := hc.Endpoints()
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got)
	}

	err = hc.Sync(context.Background())
	if err == nil {
		t.Fatalf("got nil error during Sync")
	}

	got = hc.Endpoints()
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("incorrect endpoints after failed Sync: want=%#v got=%#v", want, got)
	}
}
func TestSimpleHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	donechan := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		c.Do(ctx, &fakeAction{})
		close(donechan)
	}()

	// This should call CancelRequest and begin the cancellation process
	cancel()

	select {
	case <-donechan:
		t.Fatalf("simpleHTTPClient.Do should not have exited yet")
	default:
	}

	tr.finishCancel <- struct{}{}

	select {
	case <-donechan:
		// expected behavior
		return
	case <-time.After(time.Second):
		t.Fatalf("simpleHTTPClient.Do did not exit within 1s")
	}
}
func (s *store) ensureAuthDirectories() error {
	if s.ensuredOnce {
		return nil
	}
	for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} {
		ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
		defer cancel()
		pe := false
		rr := etcdserverpb.Request{
			Method:    "PUT",
			Path:      res,
			Dir:       true,
			PrevExist: &pe,
		}
		_, err := s.server.Do(ctx, rr)
		if err != nil {
			if e, ok := err.(*etcderr.Error); ok {
				if e.ErrorCode == etcderr.EcodeNodeExist {
					continue
				}
			}
			plog.Errorf("failed to create auth directories in the store (%v)", err)
			return err
		}
	}
	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
	defer cancel()
	pe := false
	rr := etcdserverpb.Request{
		Method:    "PUT",
		Path:      StorePermsPrefix + "/enabled",
		Val:       "false",
		PrevExist: &pe,
	}
	_, err := s.server.Do(ctx, rr)
	if err != nil {
		if e, ok := err.(*etcderr.Error); ok {
			if e.ErrorCode == etcderr.EcodeNodeExist {
				s.ensuredOnce = true
				return nil
			}
		}
		return err
	}
	s.ensuredOnce = true
	return nil
}
func TestExceedMaxStreamsLimit(t *testing.T) {
	server, ct := setUp(t, 0, 1, normal)
	defer func() {
		ct.Close()
		server.stop()
	}()
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Small",
	}
	// Creates the 1st stream and keeps it alive.
	_, err1 := ct.NewStream(context.Background(), callHdr)
	if err1 != nil {
		t.Fatalf("failed to open stream: %v", err1)
	}
	// Creates the 2nd stream. It has a chance to succeed when the settings
	// frame from the server has not yet been received at the client.
	s, err2 := ct.NewStream(context.Background(), callHdr)
	if err2 != nil {
		se, ok := err2.(StreamError)
		if !ok {
			t.Fatalf("Received unexpected error %v", err2)
		}
		if se.Code != codes.Unavailable {
			t.Fatalf("Got error code: %d, want: %d", se.Code, codes.Unavailable)
		}
		return
	}
	// If the 2nd stream is created successfully, send the request.
	if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
		t.Fatalf("failed to send data: %v", err)
	}
	// The 2nd stream was rejected by the server via a reset.
	p := make([]byte, len(expectedResponse))
	_, recvErr := io.ReadFull(s, p)
	if recvErr != io.EOF || s.StatusCode() != codes.Unavailable {
		t.Fatalf("Error: %v, StatusCode: %d; want <EOF>, %d", recvErr, s.StatusCode(), codes.Unavailable)
	}
	// The server's settings have been received. From now on, new streams will be rejected instantly.
	_, err3 := ct.NewStream(context.Background(), callHdr)
	if err3 == nil {
		t.Fatalf("Received unexpected <nil>, want an error with code %d", codes.Unavailable)
	}
	if se, ok := err3.(StreamError); !ok || se.Code != codes.Unavailable {
		t.Fatalf("Got: %v, want a StreamError with error code %d", err3, codes.Unavailable)
	}
}
func TestStreamContext(t *testing.T) {
	expectedStream := Stream{}
	ctx := newContextWithStream(context.Background(), &expectedStream)
	s, ok := StreamFromContext(ctx)
	if !ok || !reflect.DeepEqual(expectedStream, *s) {
		t.Fatalf("GetStreamFromContext(%v) = %v, %t, want: %v, true", ctx, *s, ok, expectedStream)
	}
}
func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
		return
	}
	w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())

	ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
	defer cancel()
	clock := clockwork.NewRealClock()
	startTime := clock.Now()
	rr, err := parseKeyRequest(r, clock)
	if err != nil {
		writeKeyError(w, err)
		return
	}
	// The path must be valid at this point (we've parsed the request successfully).
	if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive) {
		writeKeyNoAuth(w)
		return
	}
	if !rr.Wait {
		reportRequestReceived(rr)
	}
	resp, err := h.server.Do(ctx, rr)
	if err != nil {
		err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
		writeKeyError(w, err)
		reportRequestFailed(rr, err)
		return
	}
	switch {
	case resp.Event != nil:
		if err := writeKeyEvent(w, resp.Event, h.timer); err != nil {
			// Should never be reached
			plog.Errorf("error writing event (%v)", err)
		}
		reportRequestCompleted(rr, resp, startTime)
	case resp.Watcher != nil:
		ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
		defer cancel()
		handleKeyWatch(ctx, w, resp.Watcher, rr.Stream, h.timer)
	default:
		writeKeyError(w, errors.New("received response with no Event/Watcher!"))
	}
}
// printFeature gets the feature for the given point.
func printFeature(client pb.RouteGuideClient, point *pb.Point) {
	grpclog.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude)
	feature, err := client.GetFeature(context.Background(), point)
	if err != nil {
		grpclog.Fatalf("%v.GetFeature(_) = _, %v: ", client, err)
	}
	grpclog.Println(feature)
}
func testFailedEmptyUnary(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	ctx := metadata.NewContext(context.Background(), testMetadata)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != grpc.Errorf(codes.DataLoss, "got extra metadata") {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, grpc.Errorf(codes.DataLoss, "got extra metadata"))
	}
}
func testEmptyUnary(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{})
	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
	}
}
func doEmptyUnaryCall(tc testpb.TestServiceClient) {
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{})
	if err != nil {
		grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err)
	}
	if !proto.Equal(&testpb.Empty{}, reply) {
		grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{})
	}
	grpclog.Println("EmptyUnaryCall done")
}