func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	// create an already-cancelled context
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	body := &checkableReadCloser{ReadCloser: ioutil.NopCloser(strings.NewReader("foo"))}
	go func() {
		// wait until simpleHTTPClient sees that the context is already
		// cancelled and calls CancelRequest
		testutil.WaitSchedule()

		// response is returned before the cancel takes effect
		tr.respchan <- &http.Response{Body: body}
	}()

	_, _, err := c.Do(ctx, &fakeAction{})
	if err == nil {
		t.Fatalf("expected non-nil error, got nil")
	}

	if !body.closed {
		t.Fatalf("expected closed body")
	}
}
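// checkableReadCloser is a test helper assumed by the test above: a
// ReadCloser that records whether Close has been called. A minimal sketch
// covering only the behavior the test relies on (the package's real
// definition may differ):
type checkableReadCloser struct {
	io.ReadCloser
	closed bool
}

func (c *checkableReadCloser) Close() error {
	if !c.closed {
		c.closed = true
		return c.ReadCloser.Close()
	}
	return nil
}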
func TestBackendBatchIntervalCommit(t *testing.T) {
	// start backend with a very short batch interval
	b := newBackend(tmpPath, time.Nanosecond, 10000)
	defer cleanup(b, tmpPath)

	tx := b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("test"))
	tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
	tx.Unlock()

	// give time for the batch interval commit to happen
	time.Sleep(time.Nanosecond)
	testutil.WaitSchedule()
	// give time for the commit to finish, including possible disk IO
	time.Sleep(50 * time.Millisecond)

	// check whether the put was committed, via a db view
	b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte("test"))
		if bucket == nil {
			t.Errorf("bucket test does not exist")
			return nil
		}
		v := bucket.Get([]byte("foo"))
		if v == nil {
			t.Errorf("failed to write foo key to backend")
		}
		return nil
	})
}
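// NOTE: testutil.WaitSchedule is used throughout these tests to give
// background goroutines a chance to run. It is assumed to be a thin wrapper
// around a short sleep, roughly (the exact duration is an assumption; see
// pkg/testutil for the real definition):
//
//	func WaitSchedule() {
//		time.Sleep(10 * time.Millisecond)
//	}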
func TestTransportErrorc(t *testing.T) {
	errorc := make(chan error, 1)
	tr := &Transport{
		LeaderStats: stats.NewLeaderStats(""),
		ErrorC:      errorc,
		streamRt:    newRespRoundTripper(http.StatusForbidden, nil),
		pipelineRt:  newRespRoundTripper(http.StatusForbidden, nil),
		peers:       make(map[types.ID]Peer),
		prober:      probing.NewProber(nil),
	}
	tr.AddPeer(1, []string{"http://localhost:2380"})
	defer tr.Stop()

	select {
	case <-errorc:
		t.Fatalf("received unexpected error from errorc")
	case <-time.After(10 * time.Millisecond):
	}

	tr.peers[1].Send(raftpb.Message{})
	testutil.WaitSchedule()
	select {
	case <-errorc:
	default:
		t.Fatalf("cannot receive error from errorc")
	}
}
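// newRespRoundTripper is assumed to build an http.RoundTripper that always
// answers with a fixed status code and error; that is how the test above
// forces every stream and pipeline request to fail with 403, and how
// TestPipelineSendFailed below injects a post error. A minimal sketch under
// that assumption:
type respRoundTripper struct {
	code int
	err  error
}

func newRespRoundTripper(code int, err error) *respRoundTripper {
	return &respRoundTripper{code: code, err: err}
}

func (t *respRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	// the canned response body must be non-nil so callers can close it
	return &http.Response{StatusCode: t.code, Body: ioutil.NopCloser(strings.NewReader(""))}, t.err
}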
// TestStreamWriterAttachOutgoingConn tests that an outgoingConn can be
// attached to streamWriter. After that, streamWriter can use it to send
// messages continuously, and closes it when stopped.
func TestStreamWriterAttachOutgoingConn(t *testing.T) {
	sw := startStreamWriter(types.ID(1), newPeerStatus(types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
	// the expected initial state of streamWriter is not working
	if _, ok := sw.writec(); ok {
		t.Errorf("initial working status = %v, want false", ok)
	}

	// repeat the attach to ensure streamWriter always uses the latest connection
	var wfc *fakeWriteFlushCloser
	for i := 0; i < 3; i++ {
		prevwfc := wfc
		wfc = &fakeWriteFlushCloser{}
		sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc})
		testutil.WaitSchedule()
		// the previously attached connection should be closed
		if prevwfc != nil && !prevwfc.closed {
			t.Errorf("#%d: close of previous connection = %v, want true", i, prevwfc.closed)
		}
		// starts working
		if _, ok := sw.writec(); !ok {
			t.Errorf("#%d: working status = %v, want true", i, ok)
		}

		sw.msgc <- raftpb.Message{}
		testutil.WaitSchedule()
		// still working
		if _, ok := sw.writec(); !ok {
			t.Errorf("#%d: working status = %v, want true", i, ok)
		}
		if wfc.written == 0 {
			t.Errorf("#%d: failed to write to the underlying connection", i)
		}
	}

	sw.stop()
	// no longer in working status now
	if _, ok := sw.writec(); ok {
		t.Errorf("working status after stop = %v, want false", ok)
	}
	if !wfc.closed {
		t.Errorf("failed to close the underlying connection")
	}
}
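// fakeWriteFlushCloser is the fake connection the streamWriter tests attach:
// it satisfies io.Writer, http.Flusher, and io.Closer, counting bytes written
// and recording closes. A minimal sketch of what such a helper could look
// like; the err field is an assumption that lets
// TestStreamWriterAttachBadOutgoingConn below simulate a broken connection:
type fakeWriteFlushCloser struct {
	err     error
	written int
	closed  bool
}

func (wfc *fakeWriteFlushCloser) Write(p []byte) (n int, err error) {
	wfc.written += len(p)
	return len(p), wfc.err
}

func (wfc *fakeWriteFlushCloser) Flush() {}

func (wfc *fakeWriteFlushCloser) Close() error {
	wfc.closed = true
	return wfc.err
}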
func TestPipelineExceedMaximumServing(t *testing.T) {
	tr := newRoundTripperBlocker()
	picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
	fs := &stats.FollowerStats{}
	p := newPipeline(tr, picker, types.ID(2), types.ID(1), types.ID(1), newPeerStatus(types.ID(1)), fs, &fakeRaft{}, nil)

	// keep the senders busy and make the buffer full;
	// nothing can go out because we block the senders
	testutil.WaitSchedule()
	for i := 0; i < connPerPipeline+pipelineBufSize; i++ {
		select {
		case p.msgc <- raftpb.Message{}:
		default:
			t.Errorf("failed to send out message")
		}
		// force the sender to grab data
		testutil.WaitSchedule()
	}

	// try to send a message when we are sure the buffer is full
	select {
	case p.msgc <- raftpb.Message{}:
		t.Errorf("unexpected message sendout")
	default:
	}

	// unblock the senders and force them to send out the data
	tr.unblock()
	testutil.WaitSchedule()

	// it should be able to send new data after the previous ones succeed
	select {
	case p.msgc <- raftpb.Message{}:
	default:
		t.Errorf("failed to send out message")
	}
	p.stop()
}
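// newRoundTripperBlocker is assumed to create an http.RoundTripper whose
// RoundTrip blocks until unblock is called; that is how the test above keeps
// every pipeline sender busy while it fills the message buffer. A minimal
// sketch under that assumption:
type roundTripperBlocker struct {
	unblockc chan struct{}
}

func newRoundTripperBlocker() *roundTripperBlocker {
	return &roundTripperBlocker{unblockc: make(chan struct{})}
}

func (t *roundTripperBlocker) RoundTrip(req *http.Request) (*http.Response, error) {
	// block until unblock closes the channel, then report success
	<-t.unblockc
	return &http.Response{StatusCode: http.StatusNoContent, Body: ioutil.NopCloser(strings.NewReader(""))}, nil
}

func (t *roundTripperBlocker) unblock() {
	close(t.unblockc)
}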
// TestPipelineSendFailed tests that when the send func hits a post error,
// it increases the fail count in stats.
func TestPipelineSendFailed(t *testing.T) {
	picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
	fs := &stats.FollowerStats{}
	p := newPipeline(newRespRoundTripper(0, errors.New("blah")), picker, types.ID(2), types.ID(1), types.ID(1), newPeerStatus(types.ID(1)), fs, &fakeRaft{}, nil)

	p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
	testutil.WaitSchedule()
	p.stop()

	fs.Lock()
	defer fs.Unlock()
	if fs.Counts.Fail != 1 {
		t.Errorf("fail = %d, want 1", fs.Counts.Fail)
	}
}
// TestStreamWriterAttachBadOutgoingConn tests that streamWriter with a bad
// outgoingConn will close the outgoingConn and fall back to non-working status.
func TestStreamWriterAttachBadOutgoingConn(t *testing.T) {
	sw := startStreamWriter(types.ID(1), newPeerStatus(types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
	defer sw.stop()
	wfc := &fakeWriteFlushCloser{err: errors.New("blah")}
	sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc})

	sw.msgc <- raftpb.Message{}
	testutil.WaitSchedule()
	// no longer working
	if _, ok := sw.writec(); ok {
		t.Errorf("working = %v, want false", ok)
	}
	if !wfc.closed {
		t.Errorf("failed to close the underlying connection")
	}
}
func TestKVRestore(t *testing.T) {
	tests := []func(kv KV){
		func(kv KV) {
			kv.Put([]byte("foo"), []byte("bar0"))
			kv.Put([]byte("foo"), []byte("bar1"))
			kv.Put([]byte("foo"), []byte("bar2"))
		},
		func(kv KV) {
			kv.Put([]byte("foo"), []byte("bar0"))
			kv.DeleteRange([]byte("foo"), nil)
			kv.Put([]byte("foo"), []byte("bar1"))
		},
		func(kv KV) {
			kv.Put([]byte("foo"), []byte("bar0"))
			kv.Put([]byte("foo"), []byte("bar1"))
			kv.Compact(1)
		},
	}
	for i, tt := range tests {
		s := New(tmpPath)
		tt(s)
		var kvss [][]storagepb.KeyValue
		for k := int64(0); k < 10; k++ {
			kvs, _, _ := s.Range([]byte("a"), []byte("z"), 0, k)
			kvss = append(kvss, kvs)
		}
		s.Close()

		ns := New(tmpPath)
		ns.Restore()
		// wait for any possible compaction to finish
		testutil.WaitSchedule()
		var nkvss [][]storagepb.KeyValue
		for k := int64(0); k < 10; k++ {
			nkvs, _, _ := ns.Range([]byte("a"), []byte("z"), 0, k)
			nkvss = append(nkvss, nkvs)
		}
		cleanup(ns, tmpPath)

		if !reflect.DeepEqual(nkvss, kvss) {
			t.Errorf("#%d: kvs history = %+v, want %+v", i, nkvss, kvss)
		}
	}
}
// TestPipelineSend tests that pipeline can send data through the roundtripper
// and increases the success count in stats.
func TestPipelineSend(t *testing.T) {
	tr := &roundTripperRecorder{}
	picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
	fs := &stats.FollowerStats{}
	p := newPipeline(tr, picker, types.ID(2), types.ID(1), types.ID(1), newPeerStatus(types.ID(1)), fs, &fakeRaft{}, nil)

	p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
	testutil.WaitSchedule()
	p.stop()

	if tr.Request() == nil {
		t.Errorf("sender failed to post the data")
	}
	fs.Lock()
	defer fs.Unlock()
	if fs.Counts.Success != 1 {
		t.Errorf("success = %d, want 1", fs.Counts.Success)
	}
}
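// roundTripperRecorder is assumed to be an http.RoundTripper that remembers
// the last request it served, so TestPipelineSend can assert that the
// pipeline actually posted data. A minimal sketch under that assumption;
// the mutex guards against the pipeline goroutines racing with the test:
type roundTripperRecorder struct {
	mu  sync.Mutex
	req *http.Request
}

func (t *roundTripperRecorder) RoundTrip(req *http.Request) (*http.Response, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.req = req
	return &http.Response{StatusCode: http.StatusNoContent, Body: ioutil.NopCloser(strings.NewReader(""))}, nil
}

func (t *roundTripperRecorder) Request() *http.Request {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.req
}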