func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
		return
	}
	w.Header().Set("X-Etcd-Cluster-ID", h.clusterInfo.ID().String())

	ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
	defer cancel()

	rr, err := parseKeyRequest(r, clockwork.NewRealClock())
	if err != nil {
		writeError(w, err)
		return
	}

	resp, err := h.server.Do(ctx, rr)
	if err != nil {
		err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
		writeError(w, err)
		return
	}

	switch {
	case resp.Event != nil:
		if err := writeKeyEvent(w, resp.Event, h.timer); err != nil {
			// Should never be reached
			log.Printf("error writing event: %v", err)
		}
	case resp.Watcher != nil:
		ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
		defer cancel()
		handleKeyWatch(ctx, w, resp.Watcher, rr.Stream, h.timer)
	default:
		writeError(w, errors.New("received response with no Event/Watcher!"))
	}
}

func TestClientWithMisbehavedServer(t *testing.T) {
	server, ct := setUp(t, 0, math.MaxUint32, misbehaved)
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo",
	}
	conn, ok := ct.(*http2Client)
	if !ok {
		t.Fatalf("Failed to convert %v to *http2Client", ct)
	}
	// Test the logic for the violation of stream flow control window size restriction.
	s, err := ct.NewStream(context.Background(), callHdr)
	if err != nil {
		t.Fatalf("Failed to open stream: %v", err)
	}
	if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
		t.Fatalf("Failed to write: %v", err)
	}
	// Read without window update.
	for {
		p := make([]byte, http2MaxFrameLen)
		if _, err = s.dec.Read(p); err != nil {
			break
		}
	}
	if s.fc.pendingData != initialWindowSize || s.fc.pendingUpdate != 0 || conn.fc.pendingData != initialWindowSize || conn.fc.pendingUpdate != 0 {
		t.Fatalf("Client mistakenly updates inbound flow control params: got %d, %d, %d, %d; want %d, %d, %d, %d",
			s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate,
			initialWindowSize, 0, initialWindowSize, 0)
	}
	if err != io.EOF || s.statusCode != codes.Internal {
		t.Fatalf("Got err %v and the status code %d, want <EOF> and the code %d", err, s.statusCode, codes.Internal)
	}
	conn.CloseStream(s, err)
	if s.fc.pendingData != 0 || s.fc.pendingUpdate != 0 || conn.fc.pendingData != 0 || conn.fc.pendingUpdate != initialWindowSize {
		t.Fatalf("Client mistakenly resets inbound flow control params: got %d, %d, %d, %d; want 0, 0, 0, %d",
			s.fc.pendingData, s.fc.pendingUpdate, conn.fc.pendingData, conn.fc.pendingUpdate, initialWindowSize)
	}
	// Test the logic for the violation of the connection flow control window size restriction.
	//
	// Generate enough streams to drain the connection window.
	callHdr = &CallHdr{
		Host:   "localhost",
		Method: "foo.MaxFrame",
	}
	for i := 0; i < int(initialConnWindowSize/initialWindowSize+10); i++ {
		s, err := ct.NewStream(context.Background(), callHdr)
		if err != nil {
			break
		}
		if err := ct.Write(s, expectedRequest, &Options{Last: true, Delay: false}); err != nil {
			break
		}
	}
	// http2Client.errChan is closed due to connection flow control window size violation.
	<-conn.Error()
	ct.Close()
	server.stop()
}

func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
	configKey := path.Join("/", d.cluster, "_config")
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	// find cluster size
	resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
	cancel()
	if err != nil {
		if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound {
			return nil, 0, 0, ErrSizeNotFound
		}
		if err == client.ErrInvalidJSON {
			return nil, 0, 0, ErrBadDiscoveryEndpoint
		}
		if ce, ok := err.(*client.ClusterError); ok {
			plog.Error(ce.Detail())
			return d.checkClusterRetry()
		}
		return nil, 0, 0, err
	}
	size, err := strconv.Atoi(resp.Node.Value)
	if err != nil {
		return nil, 0, 0, ErrBadSizeKey
	}

	ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err = d.c.Get(ctx, d.cluster, nil)
	cancel()
	if err != nil {
		if ce, ok := err.(*client.ClusterError); ok {
			plog.Error(ce.Detail())
			return d.checkClusterRetry()
		}
		return nil, 0, 0, err
	}
	nodes := make([]*client.Node, 0)
	// append non-config keys to nodes
	for _, n := range resp.Node.Nodes {
		if path.Base(n.Key) != path.Base(configKey) {
			nodes = append(nodes, n)
		}
	}

	snodes := sortableNodes{nodes}
	sort.Sort(snodes)

	// find self position
	for i := range nodes {
		if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
			break
		}
		if i >= size-1 {
			return nodes[:size], size, resp.Index, ErrFullCluster
		}
	}
	return nodes, size, resp.Index, nil
}

func runStream(b *testing.B, maxConcurrentCalls int) {
	s := stats.AddStats(b, 38)
	b.StopTimer()
	target, stopper := StartServer("localhost:0")
	defer stopper()
	conn := NewClientConn(target)
	tc := testpb.NewTestServiceClient(conn)

	// Warm up connection.
	stream, err := tc.StreamingCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
	}
	for i := 0; i < 10; i++ {
		streamCaller(tc, stream)
	}

	ch := make(chan int, maxConcurrentCalls*4)
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(maxConcurrentCalls)

	// Distribute the b.N calls over maxConcurrentCalls workers.
	for i := 0; i < maxConcurrentCalls; i++ {
		go func() {
			stream, err := tc.StreamingCall(context.Background())
			if err != nil {
				grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
			}
			for range ch {
				start := time.Now()
				streamCaller(tc, stream)
				elapse := time.Since(start)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}()
	}
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		ch <- i
	}
	b.StopTimer()
	close(ch)
	wg.Wait()
	conn.Close()
}

func TestHTTPKeysAPIGetResponse(t *testing.T) {
	client := &staticHTTPClient{
		resp: http.Response{
			StatusCode: http.StatusOK,
			Header:     http.Header{"X-Etcd-Index": []string{"42"}},
		},
		body: []byte(`{"action":"get","node":{"key":"/pants/foo/bar","modifiedIndex":25,"createdIndex":19,"nodes":[{"key":"/pants/foo/bar/baz","value":"snarf","createdIndex":21,"modifiedIndex":25}]}}`),
	}

	wantResponse := &Response{
		Action: "get",
		Node: &Node{
			Key: "/pants/foo/bar",
			Nodes: []*Node{
				&Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: 21, ModifiedIndex: 25},
			},
			CreatedIndex:  uint64(19),
			ModifiedIndex: uint64(25),
		},
		Index: uint64(42),
	}

	kAPI := &httpKeysAPI{client: client, prefix: "/pants"}
	resp, err := kAPI.Get(context.Background(), "/foo/bar", &GetOptions{Recursive: true})
	if err != nil {
		t.Errorf("non-nil error: %#v", err)
	}
	if !reflect.DeepEqual(wantResponse, resp) {
		t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp)
	}
}

func TestNodeAdvance(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	storage := NewMemoryStorage()
	c := &Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   noLimit,
		MaxInflightMsgs: 256,
	}
	n := StartNode(c, []Peer{{ID: 1}})
	n.Campaign(ctx)
	<-n.Ready()
	n.Propose(ctx, []byte("foo"))
	var rd Ready
	select {
	case rd = <-n.Ready():
		t.Fatalf("unexpected Ready before Advance: %+v", rd)
	case <-time.After(time.Millisecond):
	}
	storage.Append(rd.Entries)
	n.Advance()
	select {
	case <-n.Ready():
	case <-time.After(time.Millisecond):
		t.Errorf("expect Ready after Advance, but there is no Ready available")
	}
}

func TestLargeMessage(t *testing.T) {
	server, ct := setUp(t, 0, math.MaxUint32, normal)
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Large",
	}
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s, err := ct.NewStream(context.Background(), callHdr)
			if err != nil {
				t.Errorf("failed to open stream: %v", err)
			}
			if err := ct.Write(s, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil {
				t.Errorf("failed to send data: %v", err)
			}
			p := make([]byte, len(expectedResponseLarge))
			_, recvErr := io.ReadFull(s, p)
			if recvErr != nil || !bytes.Equal(p, expectedResponseLarge) {
				t.Errorf("Error: %v, want <nil>; Result len: %d, want len %d", recvErr, len(p), len(expectedResponseLarge))
			}
			_, recvErr = io.ReadFull(s, p)
			if recvErr != io.EOF {
				t.Errorf("Error: %v; want <EOF>", recvErr)
			}
		}()
	}
	wg.Wait()
	ct.Close()
	server.stop()
}

func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*client.Node, error) {
	if len(nodes) > size {
		nodes = nodes[:size]
	}
	// watch from the next index
	w := d.c.Watcher(d.cluster, &client.WatcherOptions{AfterIndex: index, Recursive: true})
	all := make([]*client.Node, len(nodes))
	copy(all, nodes)
	for _, n := range all {
		if path.Base(n.Key) == path.Base(d.selfKey()) {
			plog.Noticef("found self %s in the cluster", path.Base(d.selfKey()))
		} else {
			plog.Noticef("found peer %s in the cluster", path.Base(n.Key))
		}
	}

	// wait for others
	for len(all) < size {
		plog.Noticef("found %d peer(s), waiting for %d more", len(all), size-len(all))
		resp, err := w.Next(context.Background())
		if err != nil {
			if ce, ok := err.(*client.ClusterError); ok {
				plog.Error(ce.Detail())
				return d.waitNodesRetry()
			}
			return nil, err
		}
		plog.Noticef("found peer %s in the cluster", path.Base(resp.Node.Key))
		all = append(all, resp.Node)
	}
	plog.Noticef("found %d needed peer(s)", len(all))
	return all, nil
}

func testExceedMaxStreamsLimit(t *testing.T, e env) {
	// Only allows 1 live stream per server transport.
	s, cc := setUp(nil, 1, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	done := make(chan struct{})
	ch := make(chan int)
	go func() {
		for {
			select {
			case <-time.After(5 * time.Millisecond):
				ch <- 0
			case <-time.After(5 * time.Second):
				close(done)
				return
			}
		}
	}()
	// Loop until a stream creation hangs due to the new max stream setting.
	for {
		select {
		case <-ch:
			ctx, _ := context.WithTimeout(context.Background(), time.Second)
			if _, err := tc.StreamingInputCall(ctx); err != nil {
				if grpc.Code(err) == codes.DeadlineExceeded {
					return
				}
				t.Fatalf("%v.StreamingInputCall(_) = %v, want <nil>", tc, err)
			}
		case <-done:
			t.Fatalf("Client has not received the max stream setting in 5 seconds.")
		}
	}
}

func testClientStreaming(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("%v.StreamingInputCall(_) = _, %v, want <nil>", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s))
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			t.Fatalf("%v.Send(%v) = %v, want <nil>", stream, req, err)
		}
		sum += s
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
}

func testEmptyUnaryWithUserAgent(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, testAppUA, e)
	// Wait until cc is connected.
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(10*time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	tc := testpb.NewTestServiceClient(cc)
	var header metadata.MD
	reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header))
	if err != nil || !proto.Equal(&testpb.Empty{}, reply) {
		t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, <nil>", reply, err, &testpb.Empty{})
	}
	if v, ok := header["ua"]; !ok || v[0] != testAppUA {
		t.Fatalf("header[\"ua\"] = %q, %t, want %q, true", v, ok, testAppUA)
	}
	tearDown(s, cc)
	if ok := cc.WaitForStateChange(5*time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	if cc.State() != grpc.Shutdown {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Shutdown)
	}
}

func testTimeoutOnDeadServer(t *testing.T, e env) {
	s, cc := setUp(nil, math.MaxUint32, "", e)
	tc := testpb.NewTestServiceClient(cc)
	if ok := cc.WaitForStateChange(time.Second, grpc.Idle); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Idle, ok)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Connecting); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Connecting, ok)
	}
	if cc.State() != grpc.Ready {
		t.Fatalf("cc.State() = %s, want %s", cc.State(), grpc.Ready)
	}
	if ok := cc.WaitForStateChange(time.Millisecond, grpc.Ready); ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want false", grpc.Ready, ok)
	}
	s.Stop()
	// Set -1 as the timeout so that, even if transportMonitor gets the error
	// notification in time, the failure path of the first invocation of
	// ClientConn.wait still hits the deadline-exceeded error.
	ctx, _ := context.WithTimeout(context.Background(), -1)
	if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); grpc.Code(err) != codes.DeadlineExceeded {
		t.Fatalf("TestService/EmptyCall(%v, _) = _, error %v, want _, error code: %d", ctx, err, codes.DeadlineExceeded)
	}
	if ok := cc.WaitForStateChange(time.Second, grpc.Ready); !ok {
		t.Fatalf("cc.WaitForStateChange(_, %s) = %t, want true", grpc.Ready, ok)
	}
	state := cc.State()
	if state != grpc.Connecting && state != grpc.TransientFailure {
		t.Fatalf("cc.State() = %s, want %s or %s", state, grpc.Connecting, grpc.TransientFailure)
	}
	cc.Close()
}

func TestHTTPMembersAPIListError(t *testing.T) {
	tests := []httpClient{
		// generic httpClient failure
		&staticHTTPClient{err: errors.New("fail!")},

		// unrecognized HTTP status code
		&staticHTTPClient{
			resp: http.Response{StatusCode: http.StatusTeapot},
		},

		// fail to unmarshal body on StatusOK
		&staticHTTPClient{
			resp: http.Response{
				StatusCode: http.StatusOK,
			},
			body: []byte(`[{"id":"XX`),
		},
	}

	for i, tt := range tests {
		mAPI := &httpMembersAPI{client: tt}
		ms, err := mAPI.List(context.Background())
		if err == nil {
			t.Errorf("#%d: got nil err", i)
		}
		if ms != nil {
			t.Errorf("#%d: got non-nil Member slice", i)
		}
	}
}

func TestHTTPMembersAPIListSuccess(t *testing.T) {
	wantAction := &membersAPIActionList{}
	mAPI := &httpMembersAPI{
		client: &actionAssertingHTTPClient{
			t:   t,
			act: wantAction,
			resp: http.Response{
				StatusCode: http.StatusOK,
			},
			body: []byte(`{"members":[{"id":"94088180e21eb87b","name":"node2","peerURLs":["http://127.0.0.1:7002"],"clientURLs":["http://127.0.0.1:4002"]}]}`),
		},
	}

	wantResponseMembers := []Member{
		Member{
			ID:         "94088180e21eb87b",
			Name:       "node2",
			PeerURLs:   []string{"http://127.0.0.1:7002"},
			ClientURLs: []string{"http://127.0.0.1:4002"},
		},
	}

	m, err := mAPI.List(context.Background())
	if err != nil {
		t.Errorf("got non-nil err: %#v", err)
	}
	if !reflect.DeepEqual(wantResponseMembers, m) {
		t.Errorf("incorrect Members: want=%#v got=%#v", wantResponseMembers, m)
	}
}

func BenchmarkOneNode(b *testing.B) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n := newNode()
	s := NewMemoryStorage()
	r := newTestRaft(1, []uint64{1}, 10, 1, s)
	go n.run(r)
	defer n.Stop()

	n.Campaign(ctx)
	go func() {
		for i := 0; i < b.N; i++ {
			n.Propose(ctx, []byte("foo"))
		}
	}()
	for {
		rd := <-n.Ready()
		s.Append(rd.Entries)
		// a reasonable disk sync latency
		time.Sleep(1 * time.Millisecond)
		n.Advance()
		if rd.HardState.Commit == uint64(b.N+1) {
			return
		}
	}
}

func TestHTTPKeysAPIDeleteResponse(t *testing.T) {
	client := &staticHTTPClient{
		resp: http.Response{
			StatusCode: http.StatusOK,
			Header:     http.Header{"X-Etcd-Index": []string{"22"}},
		},
		body: []byte(`{"action":"delete","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":22,"createdIndex":19},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`),
	}

	wantResponse := &Response{
		Action:   "delete",
		Node:     &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(19), ModifiedIndex: uint64(22)},
		PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)},
		Index:    uint64(22),
	}

	kAPI := &httpKeysAPI{client: client, prefix: "/pants"}
	resp, err := kAPI.Delete(context.Background(), "/foo/bar/baz", nil)
	if err != nil {
		t.Errorf("non-nil error: %#v", err)
	}
	if !reflect.DeepEqual(wantResponse, resp) {
		t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp)
	}
}

func (d *discovery) createSelf(contents string) error {
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err := d.c.Create(ctx, d.selfKey(), contents, -1)
	cancel()
	if err != nil {
		if err == client.ErrKeyExists {
			return ErrDuplicateID
		}
		return err
	}

	// ensure self appears on the server we connected to
	w := d.c.Watch(d.selfKey(), resp.Node.CreatedIndex)
	_, err = w.Next(context.Background())
	return err
}

func TestHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) {
	tr := newFakeTransport()
	c := &httpClient{transport: tr}

	donechan := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		c.Do(ctx, &fakeAction{})
		close(donechan)
	}()

	// This should call CancelRequest and begin the cancellation process
	cancel()

	select {
	case <-donechan:
		t.Fatalf("httpClient.do should not have exited yet")
	default:
	}

	tr.finishCancel <- struct{}{}

	select {
	case <-donechan:
		// expected behavior
		return
	case <-time.After(time.Second):
		t.Fatalf("httpClient.do did not exit within 1s")
	}
}

func (d *discovery) waitNodes(nodes client.Nodes, size int, index uint64) (client.Nodes, error) {
	if len(nodes) > size {
		nodes = nodes[:size]
	}
	// watch from the next index
	w := d.c.RecursiveWatch(d.cluster, index+1)
	all := make(client.Nodes, len(nodes))
	copy(all, nodes)
	for _, n := range all {
		if path.Base(n.Key) == path.Base(d.selfKey()) {
			log.Printf("discovery: found self %s in the cluster", path.Base(d.selfKey()))
		} else {
			log.Printf("discovery: found peer %s in the cluster", path.Base(n.Key))
		}
	}

	// wait for others
	for len(all) < size {
		log.Printf("discovery: found %d peer(s), waiting for %d more", len(all), size-len(all))
		resp, err := w.Next(context.Background())
		if err != nil {
			if err == client.ErrTimeout {
				return d.waitNodesRetry()
			}
			return nil, err
		}
		log.Printf("discovery: found peer %s in the cluster", path.Base(resp.Node.Key))
		all = append(all, resp.Node)
	}
	log.Printf("discovery: found %d needed peer(s)", len(all))
	return all, nil
}

// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(retryInterval time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		plog.Panicf("json marshal error: %v", err)
		return
	}
	req := pb.Request{
		Method: "PUT",
		Path:   MemberAttributesStorePath(s.id),
		Val:    string(b),
	}

	for {
		ctx, cancel := context.WithTimeout(context.Background(), retryInterval)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
			return
		case ErrStopped:
			plog.Infof("aborting publish because server is stopped")
			return
		default:
			plog.Errorf("publish error: %v", err)
		}
	}
}

func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	// create an already-cancelled context
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	body := &checkableReadCloser{ReadCloser: ioutil.NopCloser(strings.NewReader("foo"))}
	go func() {
		// wait until simpleHTTPClient notices the context is already cancelled
		// and calls CancelRequest
		testutil.WaitSchedule()

		// response is returned before cancel takes effect
		tr.respchan <- &http.Response{Body: body}
	}()

	_, _, err := c.Do(ctx, &fakeAction{})
	if err == nil {
		t.Fatalf("expected non-nil error, got nil")
	}

	if !body.closed {
		t.Fatalf("expected closed body")
	}
}

func (d *discovery) createSelf(contents string) error {
	ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
	resp, err := d.c.Create(ctx, d.selfKey(), contents)
	cancel()
	if err != nil {
		if eerr, ok := err.(client.Error); ok && eerr.Code == client.ErrorCodeNodeExist {
			return ErrDuplicateID
		}
		return err
	}

	// ensure self appears on the server we connected to
	w := d.c.Watcher(d.selfKey(), &client.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1})
	_, err = w.Next(context.Background())
	return err
}

func TestHTTPClusterClientSyncFail(t *testing.T) {
	cf := newStaticHTTPClientFactory([]staticHTTPResponse{
		staticHTTPResponse{err: errors.New("fail!")},
	})

	hc := &httpClusterClient{clientFactory: cf}
	err := hc.reset([]string{"http://127.0.0.1:2379"})
	if err != nil {
		t.Fatalf("unexpected error during setup: %#v", err)
	}

	want := []string{"http://127.0.0.1:2379"}
	got := hc.Endpoints()
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got)
	}

	err = hc.Sync(context.Background())
	if err == nil {
		t.Fatalf("got nil error during Sync")
	}

	got = hc.Endpoints()
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("incorrect endpoints after failed Sync: want=%#v got=%#v", want, got)
	}
}

func performOneRPC(ct ClientTransport) {
	callHdr := &CallHdr{
		Host:   "localhost",
		Method: "foo.Small",
	}
	s, err := ct.NewStream(context.Background(), callHdr)
	if err != nil {
		return
	}
	opts := Options{
		Last:  true,
		Delay: false,
	}
	if err := ct.Write(s, expectedRequest, &opts); err == nil {
		time.Sleep(5 * time.Millisecond)
		// The following reads could error out because the
		// underlying transport is gone.
		//
		// Read response
		p := make([]byte, len(expectedResponse))
		io.ReadFull(s, p)
		// Read io.EOF
		io.ReadFull(s, p)
	}
}

func TestHTTPKeysAPIDeleteError(t *testing.T) {
	tests := []httpClient{
		// generic HTTP client failure
		&staticHTTPClient{
			err: errors.New("fail!"),
		},

		// unusable status code
		&staticHTTPClient{
			resp: http.Response{
				StatusCode: http.StatusTeapot,
			},
		},

		// etcd Error response
		&staticHTTPClient{
			resp: http.Response{
				StatusCode: http.StatusInternalServerError,
			},
			body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`),
		},
	}

	for i, tt := range tests {
		kAPI := httpKeysAPI{client: tt}
		resp, err := kAPI.Delete(context.Background(), "/foo", nil)
		if err == nil {
			t.Errorf("#%d: received nil error", i)
		}
		if resp != nil {
			t.Errorf("#%d: received non-nil Response: %#v", i, resp)
		}
	}
}

func doCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v completed with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}

func TestHTTPMembersAPIAddSuccess(t *testing.T) {
	wantAction := &membersAPIActionAdd{
		peerURLs: types.URLs([]url.URL{
			url.URL{Scheme: "http", Host: "127.0.0.1:7002"},
		}),
	}

	mAPI := &httpMembersAPI{
		client: &actionAssertingHTTPClient{
			t:   t,
			act: wantAction,
			resp: http.Response{
				StatusCode: http.StatusCreated,
			},
			body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"]}`),
		},
	}

	wantResponseMember := &Member{
		ID:       "94088180e21eb87b",
		PeerURLs: []string{"http://127.0.0.1:7002"},
	}

	m, err := mAPI.Add(context.Background(), "http://127.0.0.1:7002")
	if err != nil {
		t.Errorf("got non-nil err: %#v", err)
	}
	if !reflect.DeepEqual(wantResponseMember, m) {
		t.Errorf("incorrect Member: want=%#v got=%#v", wantResponseMember, m)
	}
}

func doClientStreaming(tc testpb.TestServiceClient) {
	stream, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	var sum int
	for _, s := range reqSizes {
		pl := newPayload(testpb.PayloadType_COMPRESSABLE, s)
		req := &testpb.StreamingInputCallRequest{
			Payload: pl,
		}
		if err := stream.Send(req); err != nil {
			grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
		}
		sum += s
		grpclog.Printf("Sent a request of size %d, aggregated size %d", s, sum)
	}
	reply, err := stream.CloseAndRecv()
	if err != nil {
		grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil)
	}
	if reply.GetAggregatedPayloadSize() != int32(sum) {
		grpclog.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum)
	}
	grpclog.Println("ClientStreaming done")
}

func doPerRPCCreds(tc testpb.TestServiceClient) {
	jsonKey := getServiceAccountJSONKey()
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize)
	req := &testpb.SimpleRequest{
		ResponseType:   testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize:   proto.Int32(int32(largeRespSize)),
		Payload:        pl,
		FillUsername:   proto.Bool(true),
		FillOauthScope: proto.Bool(true),
	}
	token := getToken()
	kv := map[string]string{"authorization": token.TokenType + " " + token.AccessToken}
	ctx := metadata.NewContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}})
	reply, err := tc.UnaryCall(ctx, req)
	if err != nil {
		grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err)
	}
	user := reply.GetUsername()
	scope := reply.GetOauthScope()
	if !strings.Contains(string(jsonKey), user) {
		grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey)
	}
	if !strings.Contains(*oauthScope, scope) {
		grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, *oauthScope)
	}
	grpclog.Println("PerRPCCreds done")
}

func (s *store) ensureAuthDirectories() error {
	if s.ensuredOnce {
		return nil
	}
	for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} {
		ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
		defer cancel()
		pe := false
		rr := etcdserverpb.Request{
			Method:    "PUT",
			Path:      res,
			Dir:       true,
			PrevExist: &pe,
		}
		_, err := s.server.Do(ctx, rr)
		if err != nil {
			if e, ok := err.(*etcderr.Error); ok {
				if e.ErrorCode == etcderr.EcodeNodeExist {
					continue
				}
			}
			plog.Errorf("failed to create auth directories in the store (%v)", err)
			return err
		}
	}
	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
	defer cancel()
	pe := false
	rr := etcdserverpb.Request{
		Method:    "PUT",
		Path:      StorePermsPrefix + "/enabled",
		Val:       "false",
		PrevExist: &pe,
	}
	_, err := s.server.Do(ctx, rr)
	if err != nil {
		if e, ok := err.(*etcderr.Error); ok {
			if e.ErrorCode == etcderr.EcodeNodeExist {
				s.ensuredOnce = true
				return nil
			}
		}
		return err
	}
	s.ensuredOnce = true
	return nil
}