// TestApplyRepeat tests that the server handles repeated raft messages gracefully.
func TestApplyRepeat(t *testing.T) {
    n := newNodeConfChangeCommitterStream()
    n.readyc <- raft.Ready{
        SoftState: &raft.SoftState{RaftState: raft.StateLeader},
    }
    cl := newTestCluster(nil)
    st := store.New()
    cl.SetStore(store.New())
    cl.AddMember(&membership.Member{ID: 1234})
    s := &EtcdServer{
        r: raftNode{
            Node:        n,
            raftStorage: raft.NewMemoryStorage(),
            storage:     mockstorage.NewStorageRecorder(""),
            transport:   rafthttp.NewNopTransporter(),
        },
        cfg:      &ServerConfig{},
        store:    st,
        cluster:  cl,
        reqIDGen: idutil.NewGenerator(0, time.Time{}),
    }
    s.applyV2 = &applierV2store{s}
    s.start()
    req := &pb.Request{Method: "QGET", ID: uint64(1)}
    ents := []raftpb.Entry{{Index: 1, Data: pbutil.MustMarshal(req)}}
    n.readyc <- raft.Ready{CommittedEntries: ents}
    // dup msg
    n.readyc <- raft.Ready{CommittedEntries: ents}

    // use a conf change to block until dup msgs are all processed
    cc := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2}
    ents = []raftpb.Entry{{
        Index: 2,
        Type:  raftpb.EntryConfChange,
        Data:  pbutil.MustMarshal(cc),
    }}
    n.readyc <- raft.Ready{CommittedEntries: ents}
    // wait for conf change message
    act, err := n.Wait(1)
    // wait for stop message (async to avoid deadlock)
    stopc := make(chan error)
    go func() {
        _, werr := n.Wait(1)
        stopc <- werr
    }()
    s.Stop()

    // only want to confirm etcdserver won't panic; no data to check
    if err != nil {
        t.Fatal(err)
    }
    if len(act) == 0 {
        t.Fatalf("expected len(act) > 0, got %d", len(act))
    }
    if err = <-stopc; err != nil {
        t.Fatalf("error on stop (%v)", err)
    }
}
// TestApplyMultiConfChangeShouldStop ensures that apply will return shouldStop
// if the local member is removed along with other conf updates.
func TestApplyMultiConfChangeShouldStop(t *testing.T) {
    cl := membership.NewCluster("")
    cl.SetStore(store.New())
    for i := 1; i <= 5; i++ {
        cl.AddMember(&membership.Member{ID: types.ID(i)})
    }
    srv := &EtcdServer{
        id: 2,
        r: raftNode{
            Node:      newNodeNop(),
            transport: rafthttp.NewNopTransporter(),
        },
        cluster: cl,
        w:       wait.New(),
    }
    ents := []raftpb.Entry{}
    for i := 1; i <= 4; i++ {
        ent := raftpb.Entry{
            Term:  1,
            Index: uint64(i),
            Type:  raftpb.EntryConfChange,
            Data: pbutil.MustMarshal(
                &raftpb.ConfChange{
                    Type:   raftpb.ConfChangeRemoveNode,
                    NodeID: uint64(i)}),
        }
        ents = append(ents, ent)
    }

    _, shouldStop := srv.apply(ents, &raftpb.ConfState{})
    if !shouldStop {
        t.Errorf("shouldStop = %t, want %t", shouldStop, true)
    }
}
// Applied > SnapCount should trigger a SaveSnap event.
func TestTriggerSnap(t *testing.T) {
    be, tmpPath := backend.NewDefaultTmpBackend()
    defer func() { os.RemoveAll(tmpPath) }()

    snapc := 10
    st := mockstore.NewRecorder()
    p := mockstorage.NewStorageRecorderStream("")
    srv := &EtcdServer{
        cfg:       &ServerConfig{TickMs: 1},
        snapCount: uint64(snapc),
        r: raftNode{
            Node:        newNodeCommitter(),
            raftStorage: raft.NewMemoryStorage(),
            storage:     p,
            transport:   rafthttp.NewNopTransporter(),
        },
        store:    st,
        reqIDGen: idutil.NewGenerator(0, time.Time{}),
    }
    srv.applyV2 = &applierV2store{srv}

    srv.kv = mvcc.New(be, &lease.FakeLessor{}, &srv.consistIndex)
    srv.be = be

    srv.start()

    donec := make(chan struct{})
    go func() {
        wcnt := 2 + snapc
        gaction, _ := p.Wait(wcnt)

        // each operation is recorded as a Save
        // (SnapCount+1) * Puts + SaveSnap = (SnapCount+1) * Save + SaveSnap
        if len(gaction) != wcnt {
            t.Fatalf("len(action) = %d, want %d", len(gaction), wcnt)
        }
        if !reflect.DeepEqual(gaction[wcnt-1], testutil.Action{Name: "SaveSnap"}) {
            t.Errorf("action = %s, want SaveSnap", gaction[wcnt-1])
        }
        close(donec)
    }()

    for i := 0; i < snapc+1; i++ {
        srv.Do(context.Background(), pb.Request{Method: "PUT"})
    }

    srv.Stop()
    <-donec
}
// TestSyncTrigger tests that the server proposes a SYNC request when its sync timer ticks.
func TestSyncTrigger(t *testing.T) {
    n := newReadyNode()
    st := make(chan time.Time, 1)
    srv := &EtcdServer{
        cfg: &ServerConfig{TickMs: 1},
        r: raftNode{
            Node:        n,
            raftStorage: raft.NewMemoryStorage(),
            transport:   rafthttp.NewNopTransporter(),
            storage:     mockstorage.NewStorageRecorder(""),
        },
        store:      mockstore.NewNop(),
        SyncTicker: st,
        reqIDGen:   idutil.NewGenerator(0, time.Time{}),
    }

    // trigger the server to become a leader and accept sync requests
    go func() {
        srv.start()
        n.readyc <- raft.Ready{
            SoftState: &raft.SoftState{
                RaftState: raft.StateLeader,
            },
        }
        // trigger a sync request
        st <- time.Time{}
    }()

    action, _ := n.Wait(1)
    go srv.Stop()

    if len(action) != 1 {
        t.Fatalf("len(action) = %d, want 1", len(action))
    }
    if action[0].Name != "Propose" {
        t.Fatalf("action = %s, want Propose", action[0].Name)
    }
    data := action[0].Params[0].([]byte)
    var req pb.Request
    if err := req.Unmarshal(data); err != nil {
        t.Fatalf("error unmarshalling data: %v", err)
    }
    if req.Method != "SYNC" {
        t.Fatalf("unexpected proposed request: %#v", req.Method)
    }

    // wait on stop message
    <-n.Chan()
}
// TestPublishStopped tests that publish will be stopped if the server is stopped.
func TestPublishStopped(t *testing.T) {
    srv := &EtcdServer{
        cfg: &ServerConfig{TickMs: 1},
        r: raftNode{
            Node:      newNodeNop(),
            transport: rafthttp.NewNopTransporter(),
        },
        cluster:  &membership.RaftCluster{},
        w:        mockwait.NewNop(),
        done:     make(chan struct{}),
        stop:     make(chan struct{}),
        reqIDGen: idutil.NewGenerator(0, time.Time{}),
    }
    close(srv.done)
    srv.publish(time.Hour)
}
// TestApplySnapshotAndCommittedEntries tests that the server applies the snapshot
// first and then the committed entries.
func TestApplySnapshotAndCommittedEntries(t *testing.T) {
    n := newNopReadyNode()
    st := store.NewRecorder()
    cl := newCluster("abc")
    cl.SetStore(store.New())
    storage := raft.NewMemoryStorage()
    s := &EtcdServer{
        cfg: &ServerConfig{},
        r: raftNode{
            Node:        n,
            storage:     &storageRecorder{},
            raftStorage: storage,
            transport:   rafthttp.NewNopTransporter(),
        },
        store:   st,
        cluster: cl,
    }

    s.start()
    req := &pb.Request{Method: "QGET"}
    n.readyc <- raft.Ready{
        Snapshot: raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1}},
        CommittedEntries: []raftpb.Entry{
            {Index: 2, Data: pbutil.MustMarshal(req)},
        },
    }
    // make goroutines move forward to receive snapshot
    actions, _ := st.Wait(2)
    s.Stop()

    if len(actions) != 2 {
        t.Fatalf("len(action) = %d, want 2", len(actions))
    }
    if actions[0].Name != "Recovery" {
        t.Errorf("actions[0] = %s, want %s", actions[0].Name, "Recovery")
    }
    if actions[1].Name != "Get" {
        t.Errorf("actions[1] = %s, want %s", actions[1].Name, "Get")
    }
}
// TestDoProposal tests that a proposed request is committed, applied to the
// store, and answered with the expected response.
func TestDoProposal(t *testing.T) {
    tests := []pb.Request{
        {Method: "POST", ID: 1},
        {Method: "PUT", ID: 1},
        {Method: "DELETE", ID: 1},
        {Method: "GET", ID: 1, Quorum: true},
    }
    for i, tt := range tests {
        st := mockstore.NewRecorder()
        srv := &EtcdServer{
            cfg: &ServerConfig{TickMs: 1},
            r: raftNode{
                Node:        newNodeCommitter(),
                storage:     mockstorage.NewStorageRecorder(""),
                raftStorage: raft.NewMemoryStorage(),
                transport:   rafthttp.NewNopTransporter(),
            },
            store:    st,
            reqIDGen: idutil.NewGenerator(0, time.Time{}),
        }
        srv.applyV2 = &applierV2store{srv}
        srv.start()
        resp, err := srv.Do(context.Background(), tt)
        srv.Stop()

        action := st.Action()
        if len(action) != 1 {
            t.Errorf("#%d: len(action) = %d, want 1", i, len(action))
        }
        if err != nil {
            t.Fatalf("#%d: err = %v, want nil", i, err)
        }
        wresp := Response{Event: &store.Event{}}
        if !reflect.DeepEqual(resp, wresp) {
            t.Errorf("#%d: resp = %v, want %v", i, resp, wresp)
        }
    }
}
// TestStopRaftWhenWaitingForApplyDone ensures that the raft loop can be
// stopped while it is still waiting for the apply result to be consumed.
func TestStopRaftWhenWaitingForApplyDone(t *testing.T) {
    n := newNopReadyNode()
    srv := &EtcdServer{r: raftNode{
        Node:        n,
        storage:     mockstorage.NewStorageRecorder(""),
        raftStorage: raft.NewMemoryStorage(),
        transport:   rafthttp.NewNopTransporter(),
    }}
    srv.r.start(&raftReadyHandler{sendMessage: func(msgs []raftpb.Message) { srv.send(msgs) }})
    n.readyc <- raft.Ready{}
    select {
    case <-srv.r.applyc:
    case <-time.After(time.Second):
        t.Fatalf("failed to receive apply struct")
    }

    srv.r.stopped <- struct{}{}
    select {
    case <-srv.r.done:
    case <-time.After(time.Second):
        t.Fatalf("failed to stop raft loop")
    }
}
// TestApplyConfChangeShouldStop ensures that applyConfChange returns shouldStop
// only when the removed member is the local one.
func TestApplyConfChangeShouldStop(t *testing.T) {
    cl := membership.NewCluster("")
    cl.SetStore(store.New())
    for i := 1; i <= 3; i++ {
        cl.AddMember(&membership.Member{ID: types.ID(i)})
    }
    srv := &EtcdServer{
        id: 1,
        r: raftNode{
            Node:      newNodeNop(),
            transport: rafthttp.NewNopTransporter(),
        },
        cluster: cl,
    }
    cc := raftpb.ConfChange{
        Type:   raftpb.ConfChangeRemoveNode,
        NodeID: 2,
    }
    // remove non-local member
    shouldStop, err := srv.applyConfChange(cc, &raftpb.ConfState{})
    if err != nil {
        t.Fatalf("unexpected error %v", err)
    }
    if shouldStop {
        t.Errorf("shouldStop = %t, want %t", shouldStop, false)
    }

    // remove local member
    cc.NodeID = 1
    shouldStop, err = srv.applyConfChange(cc, &raftpb.ConfState{})
    if err != nil {
        t.Fatalf("unexpected error %v", err)
    }
    if !shouldStop {
        t.Errorf("shouldStop = %t, want %t", shouldStop, true)
    }
}
// TestUpdateMember tests that UpdateMember can propose and perform node update.
func TestUpdateMember(t *testing.T) {
    n := newNodeConfChangeCommitterRecorder()
    n.readyc <- raft.Ready{
        SoftState: &raft.SoftState{RaftState: raft.StateLeader},
    }
    cl := newTestCluster(nil)
    st := store.New()
    cl.SetStore(st)
    cl.AddMember(&membership.Member{ID: 1234})
    s := &EtcdServer{
        r: raftNode{
            Node:        n,
            raftStorage: raft.NewMemoryStorage(),
            storage:     mockstorage.NewStorageRecorder(""),
            transport:   rafthttp.NewNopTransporter(),
        },
        store:    st,
        cluster:  cl,
        reqIDGen: idutil.NewGenerator(0, time.Time{}),
    }
    s.start()
    wm := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://127.0.0.1:1"}}}
    err := s.UpdateMember(context.TODO(), wm)
    gaction := n.Action()
    s.Stop()

    if err != nil {
        t.Fatalf("UpdateMember error: %v", err)
    }
    wactions := []testutil.Action{{Name: "ProposeConfChange:ConfChangeUpdateNode"}, {Name: "ApplyConfChange:ConfChangeUpdateNode"}}
    if !reflect.DeepEqual(gaction, wactions) {
        t.Errorf("action = %v, want %v", gaction, wactions)
    }
    if !reflect.DeepEqual(cl.Member(1234), &wm) {
        t.Errorf("member = %v, want %v", cl.Member(1234), &wm)
    }
}
// TestRemoveMember tests that RemoveMember can propose and perform node removal.
func TestRemoveMember(t *testing.T) {
    n := newNodeConfChangeCommitterRecorder()
    n.readyc <- raft.Ready{
        SoftState: &raft.SoftState{RaftState: raft.StateLeader},
    }
    cl := newTestCluster(nil)
    st := store.New()
    cl.SetStore(store.New())
    cl.AddMember(&membership.Member{ID: 1234})
    s := &EtcdServer{
        r: raftNode{
            Node:        n,
            raftStorage: raft.NewMemoryStorage(),
            storage:     mockstorage.NewStorageRecorder(""),
            transport:   rafthttp.NewNopTransporter(),
        },
        cfg:      &ServerConfig{},
        store:    st,
        cluster:  cl,
        reqIDGen: idutil.NewGenerator(0, time.Time{}),
    }
    s.start()
    err := s.RemoveMember(context.TODO(), 1234)
    gaction := n.Action()
    s.Stop()

    if err != nil {
        t.Fatalf("RemoveMember error: %v", err)
    }
    wactions := []testutil.Action{{Name: "ProposeConfChange:ConfChangeRemoveNode"}, {Name: "ApplyConfChange:ConfChangeRemoveNode"}}
    if !reflect.DeepEqual(gaction, wactions) {
        t.Errorf("action = %v, want %v", gaction, wactions)
    }
    if cl.Member(1234) != nil {
        t.Errorf("member with id 1234 is not removed")
    }
}
// TestRecvSnapshot tests that when the server receives a snapshot from the raft
// leader, it triggers storage.SaveSnap and also store.Recovery.
func TestRecvSnapshot(t *testing.T) {
    n := newNopReadyNode()
    st := store.NewRecorder()
    p := &storageRecorder{}
    cl := newCluster("abc")
    cl.SetStore(store.New())
    s := &EtcdServer{
        cfg: &ServerConfig{},
        r: raftNode{
            Node:        n,
            transport:   rafthttp.NewNopTransporter(),
            storage:     p,
            raftStorage: raft.NewMemoryStorage(),
        },
        store:   st,
        cluster: cl,
    }

    s.start()
    n.readyc <- raft.Ready{Snapshot: raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1}}}
    // wait for the actions to happen on the storage
    for len(p.Action()) == 0 {
        time.Sleep(10 * time.Millisecond)
    }
    s.Stop()

    wactions := []testutil.Action{{Name: "Recovery"}}
    if g := st.Action(); !reflect.DeepEqual(g, wactions) {
        t.Errorf("store action = %v, want %v", g, wactions)
    }
    wactions = []testutil.Action{{Name: "SaveSnap"}, {Name: "Save"}}
    if g := p.Action(); !reflect.DeepEqual(g, wactions) {
        t.Errorf("storage action = %v, want %v", g, wactions)
    }
}