Example #1
// TestBlockProposal ensures that a node blocks proposals when it does not
// know who the current leader is, and accepts proposals once it knows
// who the current leader is.
func TestBlockProposal(t *testing.T) {
	n := newNode()
	r := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage())
	go n.run(r)
	defer n.Stop()

	errc := make(chan error, 1)
	go func() {
		errc <- n.Propose(context.TODO(), []byte("somedata"))
	}()

	testutil.WaitSchedule()
	select {
	case err := <-errc:
		t.Errorf("err = %v, want blocking", err)
	default:
	}

	n.Campaign(context.TODO())
	testutil.WaitSchedule()
	select {
	case err := <-errc:
		if err != nil {
			t.Errorf("err = %v, want %v", err, nil)
		}
	default:
		t.Errorf("blocking proposal, want unblocking")
	}
}
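
Every example on this page uses testutil.WaitSchedule to give background goroutines a chance to run before the test asserts on their side effects. etcd's pkg/testutil implements it as a brief sleep; a minimal sketch, assuming a 10ms delay (the exact duration is an assumption and has varied across etcd versions):

package testutil

import "time"

// WaitSchedule briefly sleeps so that other goroutines get a chance to run
// before the caller continues. The 10ms value here is illustrative.
func WaitSchedule() {
	time.Sleep(10 * time.Millisecond)
}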
Example #2
File: stream_test.go  Project: lrita/etcd
// TestStreamWriterAttachOutgoingConn tests that outgoingConn can be attached
// to streamWriter. After that, streamWriter can use it to send messages
// continuously, and closes it when stopped.
func TestStreamWriterAttachOutgoingConn(t *testing.T) {
	sw := startStreamWriter(types.ID(1), newPeerStatus(types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
	// the expected initial state of streamWriter is not working
	if _, ok := sw.writec(); ok != false {
		t.Errorf("initial working status = %v, want false", ok)
	}

	// repeat the test to ensure streamWriter can use the last attached connection
	var wfc *fakeWriteFlushCloser
	for i := 0; i < 3; i++ {
		prevwfc := wfc
		wfc = &fakeWriteFlushCloser{}
		sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc})

		// sw.attach happens asynchronously. Waits for its result in a for loop to make the
		// test more robust on slow CI.
		for j := 0; j < 3; j++ {
			testutil.WaitSchedule()
			// previous attached connection should be closed
			if prevwfc != nil && prevwfc.Closed() != true {
				continue
			}
			// write chan is available
			// write chan is available
			if _, ok := sw.writec(); ok != true {
				continue
			}
			// both conditions hold; stop retrying early
			break
		}

		// previous attached connection should be closed
		if prevwfc != nil && prevwfc.Closed() != true {
			t.Errorf("#%d: close of previous connection = %v, want true", i, prevwfc.Closed())
		}
		// write chan is available
		if _, ok := sw.writec(); ok != true {
			t.Errorf("#%d: working status = %v, want true", i, ok)
		}

		sw.msgc <- raftpb.Message{}
		testutil.WaitSchedule()
		// write chan is available
		if _, ok := sw.writec(); ok != true {
			t.Errorf("#%d: working status = %v, want true", i, ok)
		}
		if wfc.Written() == 0 {
			t.Errorf("#%d: failed to write to the underlying connection", i)
		}
	}

	sw.stop()
	// write chan is unavailable since the writer is stopped.
	if _, ok := sw.writec(); ok != false {
		t.Errorf("working status after stop = %v, want false", ok)
	}
	if wfc.Closed() != true {
		t.Errorf("failed to close the underlying connection")
	}
}
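
Example #2 above does not rely on a single WaitSchedule call; it polls the asynchronous result in a bounded loop, which makes the test more tolerant of slow CI machines. A hypothetical helper capturing the same idea (the name waitFor and the retry count are illustrative, not part of etcd):

// waitFor yields to the scheduler between attempts and reports whether cond
// ever became true. Illustrative only; etcd's tests inline this loop instead.
func waitFor(attempts int, cond func() bool) bool {
	for i := 0; i < attempts; i++ {
		testutil.WaitSchedule()
		if cond() {
			return true
		}
	}
	return false
}

The inner loop in the example plays the same role; it simply inlines the retries.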
Example #3
File: client_test.go  Project: Zex/etcd
func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
	tr := newFakeTransport()
	c := &simpleHTTPClient{transport: tr}

	// create an already-cancelled context
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	body := &checkableReadCloser{ReadCloser: ioutil.NopCloser(strings.NewReader("foo"))}
	go func() {
		// wait until simpleHTTPClient notices the context is already canceled
		// and calls CancelRequest
		testutil.WaitSchedule()

		// the response is returned before the cancellation takes effect
		tr.respchan <- &http.Response{Body: body}
	}()

	_, _, err := c.Do(ctx, &fakeAction{})
	if err == nil {
		t.Fatalf("expected non-nil error, got nil")
	}

	if !body.closed {
		t.Fatalf("expected closed body")
	}
}
Example #4
func TestBackendBatchIntervalCommit(t *testing.T) {
	// start backend with super short batch interval
	b := newBackend(tmpPath, time.Nanosecond, 10000)
	defer cleanup(b, tmpPath)

	tx := b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("test"))
	tx.UnsafePut([]byte("test"), []byte("foo"), []byte("bar"))
	tx.Unlock()

	// give time for batch interval commit to happen
	time.Sleep(time.Nanosecond)
	testutil.WaitSchedule()

	// check whether put happens via db view
	b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte("test"))
		if bucket == nil {
			t.Errorf("bucket test does not exit")
			return nil
		}
		v := bucket.Get([]byte("foo"))
		if v == nil {
			t.Errorf("foo key failed to written in backend")
		}
		return nil
	})
}
Example #5
func TestTransportErrorc(t *testing.T) {
	errorc := make(chan error, 1)
	tr := &Transport{
		Raft:        &fakeRaft{},
		LeaderStats: stats.NewLeaderStats(""),
		ErrorC:      errorc,
		streamRt:    newRespRoundTripper(http.StatusForbidden, nil),
		pipelineRt:  newRespRoundTripper(http.StatusForbidden, nil),
		peers:       make(map[types.ID]Peer),
		prober:      probing.NewProber(nil),
	}
	tr.AddPeer(1, []string{"http://localhost:2380"})
	defer tr.Stop()

	select {
	case <-errorc:
		t.Fatalf("received unexpected from errorc")
	case <-time.After(10 * time.Millisecond):
	}
	tr.peers[1].send(raftpb.Message{})

	testutil.WaitSchedule()
	select {
	case <-errorc:
	default:
		t.Fatalf("cannot receive error from errorc")
	}
}
Example #6
func TestSnapshotStoreCreateSnap(t *testing.T) {
	snap := raftpb.Snapshot{
		Metadata: raftpb.SnapshotMetadata{Index: 1},
	}
	ss := newSnapshotStore("", &nopKV{})
	fakeClock := clockwork.NewFakeClock()
	ss.clock = fakeClock
	go func() {
		<-ss.reqsnapc
		ss.raftsnapc <- snap
	}()

	// create snapshot
	ss.createSnap()
	if !reflect.DeepEqual(ss.snap.raft(), snap) {
		t.Errorf("raftsnap = %+v, want %+v", ss.snap.raft(), snap)
	}

	// unused snapshot is cleared after clearUnusedSnapshotInterval
	fakeClock.BlockUntil(1)
	fakeClock.Advance(clearUnusedSnapshotInterval)
	testutil.WaitSchedule()
	ss.mu.Lock()
	if ss.snap != nil {
		t.Errorf("snap = %+v, want %+v", ss.snap, nil)
	}
	ss.mu.Unlock()
}
Example #7
func TestSnapshotStoreCloseSnapBefore(t *testing.T) {
	snapIndex := uint64(5)

	tests := []struct {
		index uint64
		wok   bool
	}{
		{snapIndex - 2, false},
		{snapIndex - 1, false},
		{snapIndex, true},
	}
	for i, tt := range tests {
		rs := raftpb.Snapshot{
			Metadata: raftpb.SnapshotMetadata{Index: 5},
		}
		s := &fakeSnapshot{}
		ss := &snapshotStore{
			snap: newSnapshot(rs, s),
		}

		ok := ss.closeSnapBefore(tt.index)
		if ok != tt.wok {
			t.Errorf("#%d: closeSnapBefore = %v, want %v", i, ok, tt.wok)
		}
		if ok {
			// wait for underlying KV snapshot closed
			testutil.WaitSchedule()
			s.mu.Lock()
			if !s.closed {
				t.Errorf("#%d: snapshot closed = %v, want true", i, s.closed)
			}
			s.mu.Unlock()
		}
	}
}
Example #8
func TestSnapshotStoreClearUsedSnap(t *testing.T) {
	s := &fakeSnapshot{}
	var once sync.Once
	once.Do(func() {})
	ss := &snapshotStore{
		snap:       newSnapshot(raftpb.Snapshot{}, s),
		inUse:      true,
		createOnce: once,
	}

	ss.clearUsedSnap()
	// wait for underlying KV snapshot closed
	testutil.WaitSchedule()
	s.mu.Lock()
	if !s.closed {
		t.Errorf("snapshot closed = %v, want true", s.closed)
	}
	s.mu.Unlock()
	if ss.snap != nil {
		t.Errorf("snapshot = %v, want nil", ss.snap)
	}
	if ss.inUse {
		t.Errorf("isUse = %v, want false", ss.inUse)
	}
	// test createOnce is reset
	if ss.createOnce == once {
		t.Errorf("createOnce fails to reset")
	}
}
Example #9
// TestNodeStop ensures that node.Stop() blocks until the node has stopped
// processing, and that it is idempotent
func TestNodeStop(t *testing.T) {
	n := newNode()
	s := NewMemoryStorage()
	r := newTestRaft(1, []uint64{1}, 10, 1, s)
	donec := make(chan struct{})

	go func() {
		n.run(r)
		close(donec)
	}()

	elapsed := r.electionElapsed
	n.Tick()
	testutil.WaitSchedule()
	n.Stop()

	select {
	case <-donec:
	case <-time.After(time.Second):
		t.Fatalf("timed out waiting for node to stop!")
	}

	if r.electionElapsed != elapsed+1 {
		t.Errorf("elapsed = %d, want %d", r.electionElapsed, elapsed+1)
	}
	// Further ticks should have no effect, the node is stopped.
	n.Tick()
	if r.electionElapsed != elapsed+1 {
		t.Errorf("elapsed = %d, want %d", r.electionElapsed, elapsed+1)
	}
	// Subsequent Stops should have no effect.
	n.Stop()
}
Example #10
// TestStreamWriterAttachOutgoingConn tests that outgoingConn can be attached
// to streamWriter. After that, streamWriter can use it to send messages
// continuously, and closes it when stopped.
func TestStreamWriterAttachOutgoingConn(t *testing.T) {
	sw := startStreamWriter(types.ID(1), newPeerStatus(types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
	// the expected initial state of streamWriter is not working
	if _, ok := sw.writec(); ok != false {
		t.Errorf("initial working status = %v, want false", ok)
	}

	// repeat tests to ensure streamWriter can use the latest attached connection
	var wfc *fakeWriteFlushCloser
	for i := 0; i < 3; i++ {
		prevwfc := wfc
		wfc = &fakeWriteFlushCloser{}
		sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc})
		testutil.WaitSchedule()
		// previous attached connection should be closed
		if prevwfc != nil && prevwfc.Closed() != true {
			t.Errorf("#%d: close of previous connection = %v, want true", i, prevwfc.Closed())
		}
		// starts working
		if _, ok := sw.writec(); ok != true {
			t.Errorf("#%d: working status = %v, want true", i, ok)
		}

		sw.msgc <- raftpb.Message{}
		testutil.WaitSchedule()
		// still working
		if _, ok := sw.writec(); ok != true {
			t.Errorf("#%d: working status = %v, want true", i, ok)
		}
		if wfc.Written() == 0 {
			t.Errorf("#%d: failed to write to the underlying connection", i)
		}
	}

	sw.stop()
	// no longer in working status now
	if _, ok := sw.writec(); ok != false {
		t.Errorf("working status after stop = %v, want false", ok)
	}
	if wfc.Closed() != true {
		t.Errorf("failed to close the underlying connection")
	}
}
Example #11
// TestNodeTick ensures that node.Tick() increases the elapsed tick count
// of the underlying raft state machine.
func TestNodeTick(t *testing.T) {
	n := newNode()
	s := NewMemoryStorage()
	r := newTestRaft(1, []uint64{1}, 10, 1, s)
	go n.run(r)
	elapsed := r.electionElapsed
	n.Tick()
	testutil.WaitSchedule()
	n.Stop()
	if r.electionElapsed != elapsed+1 {
		t.Errorf("elapsed = %d, want %d", r.electionElapsed, elapsed+1)
	}
}
Example #12
func TestPipelineExceedMaximumServing(t *testing.T) {
	tr := newRoundTripperBlocker()
	picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
	fs := &stats.FollowerStats{}
	tp := &Transport{pipelineRt: tr}
	p := newPipeline(tp, picker, types.ID(2), types.ID(1), types.ID(1), newPeerStatus(types.ID(1)), fs, &fakeRaft{}, nil)

	// keep the sender busy and make the buffer full
	// nothing can go out as we block the sender
	testutil.WaitSchedule()
	for i := 0; i < connPerPipeline+pipelineBufSize; i++ {
		select {
		case p.msgc <- raftpb.Message{}:
		default:
			t.Errorf("failed to send out message")
		}
		// force the sender to grab data
		testutil.WaitSchedule()
	}

	// try to send data when we are sure the buffer is full
	select {
	case p.msgc <- raftpb.Message{}:
		t.Errorf("unexpected message sendout")
	default:
	}

	// unblock the senders and force them to send out the data
	tr.unblock()
	testutil.WaitSchedule()

	// it should be able to send new data after the previous ones succeed
	select {
	case p.msgc <- raftpb.Message{}:
	default:
		t.Errorf("failed to send out message")
	}
	p.stop()
}
Example #13
// TestPipelineSendFailed tests that when the send function hits a post error,
// it increases the fail count in stats.
func TestPipelineSendFailed(t *testing.T) {
	picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
	tp := &Transport{pipelineRt: newRespRoundTripper(0, errors.New("blah"))}
	p := startTestPipeline(tp, picker)

	p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
	testutil.WaitSchedule()
	p.stop()

	if p.followerStats.Counts.Fail != 1 {
		t.Errorf("fail = %d, want 1", p.followerStats.Counts.Fail)
	}
}
Example #14
// TestPipelineSendFailed tests that when the send function hits a post error,
// it increases the fail count in stats.
func TestPipelineSendFailed(t *testing.T) {
	picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
	fs := &stats.FollowerStats{}
	p := newPipeline(newRespRoundTripper(0, errors.New("blah")), picker, types.ID(2), types.ID(1), types.ID(1), newPeerStatus(types.ID(1)), fs, &fakeRaft{}, nil)

	p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
	testutil.WaitSchedule()
	p.stop()

	fs.Lock()
	defer fs.Unlock()
	if fs.Counts.Fail != 1 {
		t.Errorf("fail = %d, want 1", fs.Counts.Fail)
	}
}
Example #15
// TestStreamWriterAttachBadOutgoingConn tests that streamWriter with bad
// outgoingConn will close the outgoingConn and fall back to non-working status.
func TestStreamWriterAttachBadOutgoingConn(t *testing.T) {
	sw := startStreamWriter(types.ID(1), newPeerStatus(types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
	defer sw.stop()
	wfc := &fakeWriteFlushCloser{err: errors.New("blah")}
	sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc})

	sw.msgc <- raftpb.Message{}
	testutil.WaitSchedule()
	// no longer working
	if _, ok := sw.writec(); ok != false {
		t.Errorf("working = %v, want false", ok)
	}
	if wfc.Closed() != true {
		t.Errorf("failed to close the underlying connection")
	}
}
Example #16
// TestPipelineSend tests that the pipeline can send data using the roundtripper
// and increases the success count in stats.
func TestPipelineSend(t *testing.T) {
	tr := &roundTripperRecorder{}
	picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
	tp := &Transport{pipelineRt: tr}
	p := startTestPipeline(tp, picker)

	p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
	testutil.WaitSchedule()
	p.stop()

	if tr.Request() == nil {
		t.Errorf("sender fails to post the data")
	}
	if p.followerStats.Counts.Success != 1 {
		t.Errorf("success = %d, want 1", p.followerStats.Counts.Success)
	}
}
Example #17
func TestKVRestore(t *testing.T) {
	tests := []func(kv KV){
		func(kv KV) {
			kv.Put([]byte("foo"), []byte("bar0"), 1)
			kv.Put([]byte("foo"), []byte("bar1"), 2)
			kv.Put([]byte("foo"), []byte("bar2"), 3)
		},
		func(kv KV) {
			kv.Put([]byte("foo"), []byte("bar0"), 1)
			kv.DeleteRange([]byte("foo"), nil)
			kv.Put([]byte("foo"), []byte("bar1"), 2)
		},
		func(kv KV) {
			kv.Put([]byte("foo"), []byte("bar0"), 1)
			kv.Put([]byte("foo"), []byte("bar1"), 2)
			kv.Compact(1)
		},
	}
	for i, tt := range tests {
		b, tmpPath := backend.NewDefaultTmpBackend()
		s := NewStore(b, &lease.FakeLessor{}, nil)
		tt(s)
		var kvss [][]storagepb.KeyValue
		for k := int64(0); k < 10; k++ {
			kvs, _, _ := s.Range([]byte("a"), []byte("z"), 0, k)
			kvss = append(kvss, kvs)
		}
		s.Close()

		// ns should recover the previous state from the backend.
		ns := NewStore(b, &lease.FakeLessor{}, nil)
		// wait for possible compaction to finish
		testutil.WaitSchedule()
		var nkvss [][]storagepb.KeyValue
		for k := int64(0); k < 10; k++ {
			nkvs, _, _ := ns.Range([]byte("a"), []byte("z"), 0, k)
			nkvss = append(nkvss, nkvs)
		}
		cleanup(ns, b, tmpPath)

		if !reflect.DeepEqual(nkvss, kvss) {
			t.Errorf("#%d: kvs history = %+v, want %+v", i, nkvss, kvss)
		}
	}
}
Example #18
File: kv_test.go  Project: nikitinsm/etcd
func TestKVRestore(t *testing.T) {
	tests := []func(kv KV){
		func(kv KV) {
			kv.Put([]byte("foo"), []byte("bar0"))
			kv.Put([]byte("foo"), []byte("bar1"))
			kv.Put([]byte("foo"), []byte("bar2"))
		},
		func(kv KV) {
			kv.Put([]byte("foo"), []byte("bar0"))
			kv.DeleteRange([]byte("foo"), nil)
			kv.Put([]byte("foo"), []byte("bar1"))
		},
		func(kv KV) {
			kv.Put([]byte("foo"), []byte("bar0"))
			kv.Put([]byte("foo"), []byte("bar1"))
			kv.Compact(1)
		},
	}
	for i, tt := range tests {
		s := New(tmpPath)
		tt(s)
		var kvss [][]storagepb.KeyValue
		for k := int64(0); k < 10; k++ {
			kvs, _, _ := s.Range([]byte("a"), []byte("z"), 0, k)
			kvss = append(kvss, kvs)
		}
		s.Close()

		ns := New(tmpPath)
		ns.Restore()
		// wait for possible compaction to finish
		testutil.WaitSchedule()
		var nkvss [][]storagepb.KeyValue
		for k := int64(0); k < 10; k++ {
			nkvs, _, _ := ns.Range([]byte("a"), []byte("z"), 0, k)
			nkvss = append(nkvss, nkvs)
		}
		cleanup(ns, tmpPath)

		if !reflect.DeepEqual(nkvss, kvss) {
			t.Errorf("#%d: kvs history = %+v, want %+v", i, nkvss, kvss)
		}
	}
}
Example #19
// TestPipelineSend tests that the pipeline can send data using the roundtripper
// and increases the success count in stats.
func TestPipelineSend(t *testing.T) {
	tr := &roundTripperRecorder{}
	picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
	fs := &stats.FollowerStats{}
	p := newPipeline(tr, picker, types.ID(2), types.ID(1), types.ID(1), newPeerStatus(types.ID(1)), fs, &fakeRaft{}, nil)

	p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
	testutil.WaitSchedule()
	p.stop()

	if tr.Request() == nil {
		t.Errorf("sender fails to post the data")
	}
	fs.Lock()
	defer fs.Unlock()
	if fs.Counts.Success != 1 {
		t.Errorf("success = %d, want 1", fs.Counts.Success)
	}
}
Example #20
// TestPipelineKeepSendingWhenPostError tests that the pipeline keeps
// sending messages even if previous messages hit post errors.
func TestPipelineKeepSendingWhenPostError(t *testing.T) {
	tr := &respRoundTripper{err: fmt.Errorf("roundtrip error")}
	picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
	fs := &stats.FollowerStats{}
	p := newPipeline(tr, picker, types.ID(2), types.ID(1), types.ID(1), newPeerStatus(types.ID(1)), fs, &fakeRaft{}, nil)

	for i := 0; i < 50; i++ {
		p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
	}
	testutil.WaitSchedule()
	p.stop()

	// check that it sent out 50 requests
	tr.mu.Lock()
	defer tr.mu.Unlock()
	if tr.reqCount != 50 {
		t.Errorf("request count = %d, want 50", tr.reqCount)
	}
}
Example #21
func TestSnapshotStoreGetSnap(t *testing.T) {
	snap := raftpb.Snapshot{
		Metadata: raftpb.SnapshotMetadata{Index: 1},
	}
	ss := newSnapshotStore("", &nopKV{})
	fakeClock := clockwork.NewFakeClock()
	ss.clock = fakeClock
	ss.tr = &nopTransporter{}
	go func() {
		<-ss.reqsnapc
		ss.raftsnapc <- snap
	}()

	// get snap when no snapshot stored
	_, err := ss.getSnap()
	if err != raft.ErrSnapshotTemporarilyUnavailable {
		t.Fatalf("getSnap error = %v, want %v", err, raft.ErrSnapshotTemporarilyUnavailable)
	}

	// wait for asynchronous snapshot creation to finish
	testutil.WaitSchedule()
	// get the created snapshot
	s, err := ss.getSnap()
	if err != nil {
		t.Fatalf("getSnap error = %v, want nil", err)
	}
	if !reflect.DeepEqual(s.raft(), snap) {
		t.Errorf("raftsnap = %+v, want %+v", s.raft(), snap)
	}
	if !ss.inUse {
		t.Errorf("inUse = %v, want true", ss.inUse)
	}

	// get snap while the stored snapshot is still in use
	_, err = ss.getSnap()
	if err != raft.ErrSnapshotTemporarilyUnavailable {
		t.Fatalf("getSnap error = %v, want %v", err, raft.ErrSnapshotTemporarilyUnavailable)
	}

	// clean up
	fakeClock.Advance(clearUnusedSnapshotInterval)
}