// TestSimpleHTTPClientDoCancelContextResponseBodyClosed ensures that Do closes
// the response body even when the context is cancelled before the response
// arrives.
func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	// create an already-cancelled context
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	body := &checkableReadCloser{ReadCloser: ioutil.NopCloser(strings.NewReader("foo"))}
	go func() {
		// wait until simpleHTTPClient notices the context is already
		// cancelled and calls CancelRequest
		testutil.WaitSchedule()

		// response is returned before the cancellation takes effect
		tr.respchan <- &http.Response{Body: body}
	}()

	_, _, err := c.Do(ctx, &fakeAction{})
	if err == nil {
		t.Fatalf("expected non-nil error, got nil")
	}

	if !body.closed {
		t.Fatalf("expected closed body")
	}
}
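// checkableReadCloser is referenced by the tests here but not defined in this
// excerpt. A minimal sketch consistent with its use (the closed field is what
// the tests inspect; the guard against double-close is an assumption). Uses
// the standard io package.
type checkableReadCloser struct {
	io.ReadCloser
	closed bool
}

// Close records that the body was closed before delegating to the wrapped
// ReadCloser.
func (rc *checkableReadCloser) Close() error {
	if rc.closed {
		return nil
	}
	rc.closed = true
	return rc.ReadCloser.Close()
}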
// doCancelAfterFirstResponse cancels a full-duplex call after the first
// response has been received and verifies the stream fails with
// codes.Canceled.
func doCancelAfterFirstResponse(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err)
	}
	respParam := []*testpb.ResponseParameters{
		{
			Size: proto.Int32(31415),
		},
	}
	pl := newPayload(testpb.PayloadType_COMPRESSABLE, 27182)
	req := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseParameters: respParam,
		Payload:            pl,
	}
	if err := stream.Send(req); err != nil {
		grpclog.Fatalf("%v.Send(%v) = %v", stream, req, err)
	}
	if _, err := stream.Recv(); err != nil {
		grpclog.Fatalf("%v.Recv() = %v", stream, err)
	}
	cancel()
	if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v completed with error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterFirstResponse done")
}
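// newPayload is used by the interop routines here but not defined in this
// excerpt. A minimal sketch, assuming testpb.Payload carries a type and a
// byte body of the requested size (the real helper may validate further):
func newPayload(t testpb.PayloadType, size int32) *testpb.Payload {
	return &testpb.Payload{
		Type: t.Enum(),
		Body: make([]byte, size),
	}
}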
// TestSimpleHTTPClientDoCancelContextWaitForRoundTrip ensures that Do does not
// return after cancellation until the in-flight round trip has fully exited.
func TestSimpleHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	donechan := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		c.Do(ctx, &fakeAction{})
		close(donechan)
	}()

	// This should call CancelRequest and begin the cancellation process
	cancel()

	select {
	case <-donechan:
		t.Fatalf("simpleHTTPClient.Do should not have exited yet")
	default:
	}

	tr.finishCancel <- struct{}{}

	select {
	case <-donechan:
		// expected behavior
		return
	case <-time.After(time.Second):
		t.Fatalf("simpleHTTPClient.Do did not exit within 1s")
	}
}
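// newFakeTransport and the respchan and finishCancel fields are not defined
// in this excerpt. A sketch of the behavior the tests rely on: RoundTrip
// blocks until a canned response, an error, or a cancellation arrives, and a
// cancelled request does not return until finishCancel is signalled. The
// channel buffering and error text are assumptions. Uses the standard errors
// package.
type fakeTransport struct {
	respchan     chan *http.Response
	errchan      chan error
	startCancel  chan struct{}
	finishCancel chan struct{}
}

func newFakeTransport() *fakeTransport {
	return &fakeTransport{
		respchan:     make(chan *http.Response, 1),
		errchan:      make(chan error, 1),
		startCancel:  make(chan struct{}, 1),
		finishCancel: make(chan struct{}, 1),
	}
}

// CancelRequest signals the pending RoundTrip to enter its cancellation path.
func (t *fakeTransport) CancelRequest(*http.Request) {
	t.startCancel <- struct{}{}
}

func (t *fakeTransport) RoundTrip(*http.Request) (*http.Response, error) {
	select {
	case resp := <-t.respchan:
		return resp, nil
	case err := <-t.errchan:
		return nil, err
	case <-t.startCancel:
		select {
		// the response may still win the race against the cancellation
		case resp := <-t.respchan:
			return resp, nil
		// block until the test allows the cancellation to finish
		case <-t.finishCancel:
			return nil, errors.New("cancelled")
		}
	}
}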
func BenchmarkOneNode(b *testing.B) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n := newNode()
	s := NewMemoryStorage()
	r := newTestRaft(1, []uint64{1}, 10, 1, s)
	go n.run(r)
	defer n.Stop()

	n.Campaign(ctx)
	go func() {
		for i := 0; i < b.N; i++ {
			n.Propose(ctx, []byte("foo"))
		}
	}()

	for {
		rd := <-n.Ready()
		s.Append(rd.Entries)
		// a reasonable disk sync latency
		time.Sleep(1 * time.Millisecond)
		n.Advance()
		if rd.HardState.Commit == uint64(b.N+1) {
			return
		}
	}
}
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
	fc := &inFlow{
		limit: initialWindowSize,
		conn:  t.fc,
	}
	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
	s := &Stream{
		id:            t.nextID,
		method:        callHdr.Method,
		buf:           newRecvBuffer(),
		fc:            fc,
		sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
		headerChan:    make(chan struct{}),
	}
	t.nextID += 2
	s.windowHandler = func(n int) {
		t.updateWindow(s, uint32(n))
	}
	// Give the stream its own cancel function so it can abort its pending
	// operations by itself.
	s.ctx, s.cancel = context.WithCancel(ctx)
	s.dec = &recvBufferReader{
		ctx:  s.ctx,
		recv: s.buf,
	}
	return s
}
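// recvBufferReader ties pending reads to the stream's context. A
// self-contained sketch of the same pattern (this is not grpc-go's actual
// recvBufferReader; the channel-based buffer is a stand-in). Uses the
// standard io package.
type cancellableReader struct {
	ctx  context.Context
	recv <-chan []byte // stand-in for the stream's receive buffer
	last []byte        // remainder of the previously received message
}

func (r *cancellableReader) Read(p []byte) (int, error) {
	if len(r.last) == 0 {
		select {
		case <-r.ctx.Done():
			// s.cancel() (or cancellation of the parent ctx) unblocks
			// any goroutine parked in this Read.
			return 0, r.ctx.Err()
		case m, ok := <-r.recv:
			if !ok {
				return 0, io.EOF
			}
			r.last = m
		}
	}
	n := copy(p, r.last)
	r.last = r.last[n:]
	return n, nil
}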
// TestMultiNodeStart ensures that a node can be started correctly. The node should
// start with correct configuration change entries, and can accept and commit
// proposals.
func TestMultiNodeStart(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 1}
	ccdata, err := cc.Marshal()
	if err != nil {
		t.Fatalf("unexpected marshal error: %v", err)
	}
	wants := []Ready{
		{
			SoftState: &SoftState{Lead: 1, RaftState: StateLeader},
			HardState: raftpb.HardState{Term: 2, Commit: 2, Vote: 1},
			Entries: []raftpb.Entry{
				{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
				{Term: 2, Index: 2},
			},
			CommittedEntries: []raftpb.Entry{
				{Type: raftpb.EntryConfChange, Term: 1, Index: 1, Data: ccdata},
				{Term: 2, Index: 2},
			},
		},
		{
			HardState:        raftpb.HardState{Term: 2, Commit: 3, Vote: 1},
			Entries:          []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
			CommittedEntries: []raftpb.Entry{{Term: 2, Index: 3, Data: []byte("foo")}},
		},
	}

	mn := StartMultiNode(1)
	storage := NewMemoryStorage()
	mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage), []Peer{{ID: 1}})
	mn.Campaign(ctx, 1)
	gs := <-mn.Ready()
	g := gs[1]
	if !reflect.DeepEqual(g, wants[0]) {
		t.Fatalf("#%d: g = %+v,\n w %+v", 1, g, wants[0])
	} else {
		storage.Append(g.Entries)
		mn.Advance(gs)
	}

	mn.Propose(ctx, 1, []byte("foo"))
	if gs2 := <-mn.Ready(); !reflect.DeepEqual(gs2[1], wants[1]) {
		t.Errorf("#%d: g = %+v,\n w %+v", 2, gs2[1], wants[1])
	} else {
		storage.Append(gs2[1].Entries)
		mn.Advance(gs2)
	}

	select {
	case rd := <-mn.Ready():
		t.Errorf("unexpected Ready: %+v", rd)
	case <-time.After(time.Millisecond):
	}
}
func doCancelAfterBegin(tc testpb.TestServiceClient) {
	ctx, cancel := context.WithCancel(metadata.NewContext(context.Background(), testMetadata))
	stream, err := tc.StreamingInputCall(ctx)
	if err != nil {
		grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err)
	}
	cancel()
	_, err = stream.CloseAndRecv()
	if grpc.Code(err) != codes.Canceled {
		grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled)
	}
	grpclog.Println("CancelAfterBegin done")
}
func testCancel(t *testing.T, e env) {
	s, cc := setUp(math.MaxUint32, e)
	tc := testpb.NewTestServiceClient(cc)
	defer tearDown(s, cc)

	argSize := 2718
	respSize := 314
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE.Enum(),
		ResponseSize: proto.Int32(int32(respSize)),
		Payload:      newPayload(testpb.PayloadType_COMPRESSABLE, int32(argSize)),
	}
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Millisecond, cancel)
	reply, err := tc.UnaryCall(ctx, req)
	if grpc.Code(err) != codes.Canceled {
		t.Fatalf(`TestService/UnaryCall(_, _) = %v, %v; want <nil>, error code: %d`, reply, err, codes.Canceled)
	}
}
// Cancel and Stop should unblock Step()
func TestMultiNodeStepUnblock(t *testing.T) {
	// a node without buffer to block step
	mn := &multiNode{
		propc: make(chan multiMessage),
		done:  make(chan struct{}),
	}

	ctx, cancel := context.WithCancel(context.Background())
	stopFunc := func() { close(mn.done) }

	tests := []struct {
		unblock func()
		werr    error
	}{
		{stopFunc, ErrStopped},
		{cancel, context.Canceled},
	}

	for i, tt := range tests {
		errc := make(chan error, 1)
		go func() {
			err := mn.Step(ctx, 1, raftpb.Message{Type: raftpb.MsgProp})
			errc <- err
		}()
		tt.unblock()
		select {
		case err := <-errc:
			if err != tt.werr {
				t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
			}
			// clean up side-effect
			if ctx.Err() != nil {
				ctx = context.TODO()
			}
			select {
			case <-mn.done:
				mn.done = make(chan struct{})
			default:
			}
		case <-time.After(time.Millisecond * 100):
			t.Errorf("#%d: failed to unblock step", i)
		}
	}
}
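// The table above assumes Step blocks on propc and is unblocked by either the
// context or the done channel. A hypothetical sketch of that select (the real
// multiNode.Step also routes non-proposal message types differently):
func stepSketch(mn *multiNode, ctx context.Context, m multiMessage) error {
	select {
	case mn.propc <- m:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-mn.done:
		return ErrStopped
	}
}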
func (s *stresser) Stress() error {
	cfg := client.Config{
		Endpoints: []string{s.Endpoint},
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
			MaxIdleConnsPerHost: s.N,
		},
	}
	c, err := client.New(cfg)
	if err != nil {
		return err
	}

	kv := client.NewKeysAPI(c)
	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel

	for i := 0; i < s.N; i++ {
		go func() {
			for {
				setctx, setcancel := context.WithTimeout(ctx, time.Second)
				key := fmt.Sprintf("foo%d", rand.Intn(s.KeySuffixRange))
				_, err := kv.Set(setctx, key, randStr(s.KeySize), nil)
				setcancel()
				if err == context.Canceled {
					return
				}
				s.mu.Lock()
				if err != nil {
					s.failure++
				} else {
					s.success++
				}
				s.mu.Unlock()
			}
		}()
	}

	<-ctx.Done()
	return nil
}
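// Stress saves the cancel function on the stresser, so stopping the load is a
// matter of invoking it; the cancellation propagates to every per-goroutine
// WithTimeout context derived from ctx above. A minimal sketch (the method
// name Cancel is an assumption):
func (s *stresser) Cancel() {
	s.cancel()
}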
// TestProposeAfterRemoveLeader ensures that we gracefully handle
// proposals that are attempted after a leader has been removed from
// the active configuration, but before that leader has called
// MultiNode.RemoveGroup.
func TestProposeAfterRemoveLeader(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mn := newMultiNode(1)
	go mn.run()
	defer mn.Stop()

	storage := NewMemoryStorage()
	if err := mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage), []Peer{{ID: 1}}); err != nil {
		t.Fatal(err)
	}
	if err := mn.Campaign(ctx, 1); err != nil {
		t.Fatal(err)
	}

	if err := mn.ProposeConfChange(ctx, 1, raftpb.ConfChange{
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: 1,
	}); err != nil {
		t.Fatal(err)
	}
	gs := <-mn.Ready()
	g := gs[1]
	if err := storage.Append(g.Entries); err != nil {
		t.Fatal(err)
	}
	for _, e := range g.CommittedEntries {
		if e.Type == raftpb.EntryConfChange {
			var cc raftpb.ConfChange
			if err := cc.Unmarshal(e.Data); err != nil {
				t.Fatal(err)
			}
			mn.ApplyConfChange(1, cc)
		}
	}
	mn.Advance(gs)

	if err := mn.Propose(ctx, 1, []byte("somedata")); err != nil {
		t.Errorf("err = %v, want nil", err)
	}
}
// TestSimpleHTTPClientDoCancelContextResponseBodyClosedWithBlockingBody ensures
// that Do closes the response body even when reading the body blocks until
// cancellation.
func TestSimpleHTTPClientDoCancelContextResponseBodyClosedWithBlockingBody(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	ctx, cancel := context.WithCancel(context.Background())
	body := &checkableReadCloser{ReadCloser: &blockingBody{c: make(chan struct{})}}
	go func() {
		tr.respchan <- &http.Response{Body: body}
		time.Sleep(2 * time.Millisecond)
		// cancel after the body is received
		cancel()
	}()

	_, _, err := c.Do(ctx, &fakeAction{})
	if err != context.Canceled {
		t.Fatalf("expected %+v, got %+v", context.Canceled, err)
	}

	if !body.closed {
		t.Fatalf("expected closed body")
	}
}
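// blockingBody is used above but not defined in this excerpt. Its Read must
// block until the body is closed, so that only cancellation can unblock Do's
// body-reading goroutine. A minimal sketch consistent with the test (the
// error text is an assumption; uses the standard errors package):
type blockingBody struct {
	c chan struct{}
}

func (bb *blockingBody) Read(p []byte) (int, error) {
	<-bb.c
	return 0, errors.New("closed body")
}

func (bb *blockingBody) Close() error {
	close(bb.c)
	return nil
}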
// TestMultiNodeAdvance ensures that a new Ready is not delivered until the
// previous one has been acknowledged with Advance.
func TestMultiNodeAdvance(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	storage := NewMemoryStorage()
	mn := StartMultiNode(1)
	mn.CreateGroup(1, newTestConfig(1, nil, 10, 1, storage), []Peer{{ID: 1}})
	mn.Campaign(ctx, 1)
	rd1 := <-mn.Ready()

	mn.Propose(ctx, 1, []byte("foo"))
	select {
	case rd2 := <-mn.Ready():
		t.Fatalf("unexpected Ready before Advance: %+v", rd2)
	case <-time.After(time.Millisecond):
	}

	storage.Append(rd1[1].Entries)
	mn.Advance(rd1)
	select {
	case <-mn.Ready():
	case <-time.After(time.Millisecond):
		t.Errorf("expect Ready after Advance, but there is no Ready available")
	}
}
func TestHTTPClusterClientAutoSyncCancelContext(t *testing.T) {
	cf := newStaticHTTPClientFactory([]staticHTTPResponse{
		{
			resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
			body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
		},
	})

	hc := &httpClusterClient{
		clientFactory: cf,
		rand:          rand.New(rand.NewSource(0)),
	}
	err := hc.reset([]string{"http://127.0.0.1:2379"})
	if err != nil {
		t.Fatalf("unexpected error during setup: %#v", err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	err = hc.AutoSync(ctx, time.Hour)
	if err != context.Canceled {
		t.Fatalf("incorrect error value: want=%v got=%v", context.Canceled, err)
	}
}
// operateHeader takes action on the decoded headers. It returns the current
// stream if there are remaining headers on the wire (in the following
// Continuation frame).
func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream), wg *sync.WaitGroup) (pendingStream *Stream) {
	defer func() {
		if pendingStream == nil {
			hDec.state = decodeState{}
		}
	}()
	endHeaders, err := hDec.decodeServerHTTP2Headers(frame)
	if s == nil {
		// s has been closed.
		return nil
	}
	if err != nil {
		grpclog.Printf("transport: http2Server.operateHeader found %v", err)
		if se, ok := err.(StreamError); ok {
			t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
		}
		return nil
	}
	if endStream {
		// s is just created by the caller. No lock needed.
		s.state = streamReadDone
	}
	if !endHeaders {
		return s
	}
	t.mu.Lock()
	if t.state != reachable {
		t.mu.Unlock()
		return nil
	}
	if uint32(len(t.activeStreams)) >= t.maxStreams {
		t.mu.Unlock()
		t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
		return nil
	}
	s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
	t.activeStreams[s.id] = s
	t.mu.Unlock()
	s.windowHandler = func(n int) {
		t.updateWindow(s, uint32(n))
	}
	if hDec.state.timeoutSet {
		s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout)
	} else {
		s.ctx, s.cancel = context.WithCancel(context.TODO())
	}
	// Cache the current stream to the context so that the server application
	// can find out. Required when the server wants to send some metadata
	// back to the client (unary call only).
	s.ctx = newContextWithStream(s.ctx, s)
	// Attach the received metadata to the context.
	if len(hDec.state.mdata) > 0 {
		s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata)
	}
	s.dec = &recvBufferReader{
		ctx:  s.ctx,
		recv: s.buf,
	}
	s.method = hDec.state.method
	wg.Add(1)
	go func() {
		handle(s)
		wg.Done()
	}()
	return nil
}
func startPeer(streamRt, pipelineRt http.RoundTripper, urls types.URLs, local, to, cid types.ID, snapst *snapshotStore, r Raft, fs *stats.FollowerStats, errorc chan error, term uint64, v3demo bool) *peer {
	picker := newURLPicker(urls)
	status := newPeerStatus(to)
	p := &peer{
		id:           to,
		r:            r,
		v3demo:       v3demo,
		status:       status,
		msgAppWriter: startStreamWriter(to, status, fs, r),
		writer:       startStreamWriter(to, status, fs, r),
		pipeline:     newPipeline(pipelineRt, picker, local, to, cid, status, fs, r, errorc),
		snapSender:   newSnapshotSender(pipelineRt, picker, local, to, cid, status, snapst, r, errorc),
		sendc:        make(chan raftpb.Message),
		recvc:        make(chan raftpb.Message, recvBufSize),
		propc:        make(chan raftpb.Message, maxPendingProposals),
		newURLsC:     make(chan types.URLs),
		termc:        make(chan uint64),
		pausec:       make(chan struct{}),
		resumec:      make(chan struct{}),
		stopc:        make(chan struct{}),
		done:         make(chan struct{}),
	}

	// Process MsgProp in a dedicated goroutine because Process blocks
	// when there is no leader.
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		for {
			select {
			case mm := <-p.propc:
				if err := r.Process(ctx, mm); err != nil {
					plog.Warningf("failed to process raft message (%v)", err)
				}
			case <-p.stopc:
				return
			}
		}
	}()

	p.msgAppReader = startStreamReader(streamRt, picker, streamTypeMsgAppV2, local, to, cid, status, p.recvc, p.propc, errorc, term)
	reader := startStreamReader(streamRt, picker, streamTypeMessage, local, to, cid, status, p.recvc, p.propc, errorc, term)
	go func() {
		var paused bool
		for {
			select {
			case m := <-p.sendc:
				if paused {
					continue
				}
				if p.v3demo && isMsgSnap(m) {
					go p.snapSender.send(m)
					continue
				}
				writec, name := p.pick(m)
				select {
				case writec <- m:
				default:
					p.r.ReportUnreachable(m.To)
					if isMsgSnap(m) {
						p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
					}
					if status.isActive() {
						plog.Warningf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name)
					} else {
						plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name)
					}
				}
			case mm := <-p.recvc:
				if err := r.Process(context.TODO(), mm); err != nil {
					plog.Warningf("failed to process raft message (%v)", err)
				}
			case urls := <-p.newURLsC:
				picker.update(urls)
			case <-p.pausec:
				paused = true
			case <-p.resumec:
				paused = false
			case <-p.stopc:
				cancel()
				p.msgAppWriter.stop()
				p.writer.stop()
				p.pipeline.stop()
				p.snapSender.stop()
				p.msgAppReader.stop()
				reader.stop()
				close(p.done)
				return
			}
		}
	}()

	return p
}
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)
	if err := printcURL(req); err != nil {
		return nil, nil, err
	}

	hctx, hcancel := context.WithCancel(ctx)
	if c.headerTimeout > 0 {
		hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
	}
	defer hcancel()

	reqcancel := requestCanceler(c.transport, req)

	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-hctx.Done():
		// cancel and wait for request to actually exit before continuing
		reqcancel()
		rtresp := <-rtchan
		resp = rtresp.resp
		switch {
		case ctx.Err() != nil:
			err = ctx.Err()
		case hctx.Err() != nil:
			err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
		default:
			panic("failed to get error from context")
		}
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	var body []byte
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(resp.Body)
		done <- struct{}{}
	}()

	select {
	case <-ctx.Done():
		resp.Body.Close()
		<-done
		return nil, nil, ctx.Err()
	case <-done:
	}

	return resp, body, err
}
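// A short usage sketch of the contract Do implements: callers derive a
// cancellable context, and cancelling it (or its deadline expiring) both
// aborts the round trip and interrupts reading of the response body. The
// wrapper function below and its 5-second deadline are hypothetical.
func doWithDeadline(c *simpleHTTPClient, act httpAction) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	_, body, err := c.Do(ctx, act)
	return body, err
}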