// TestApplyRepeat tests that the server handles repeated raft messages gracefully
func TestApplyRepeat(t *testing.T) {
	n := newNodeConfChangeCommitterStream()
	n.readyc <- raft.Ready{
		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
	}
	cl := newTestCluster(nil)
	st := store.New()
	cl.SetStore(store.New())
	cl.AddMember(&membership.Member{ID: 1234})
	s := &EtcdServer{
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			storage:     mockstorage.NewStorageRecorder(""),
			transport:   rafthttp.NewNopTransporter(),
		},
		cfg:      &ServerConfig{},
		store:    st,
		cluster:  cl,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	s.applyV2 = &applierV2store{s}
	s.start()
	req := &pb.Request{Method: "QGET", ID: uint64(1)}
	ents := []raftpb.Entry{{Index: 1, Data: pbutil.MustMarshal(req)}}
	n.readyc <- raft.Ready{CommittedEntries: ents}
	// dup msg
	n.readyc <- raft.Ready{CommittedEntries: ents}

	// use a conf change to block until dup msgs are all processed
	cc := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2}
	ents = []raftpb.Entry{{
		Index: 2,
		Type:  raftpb.EntryConfChange,
		Data:  pbutil.MustMarshal(cc),
	}}
	n.readyc <- raft.Ready{CommittedEntries: ents}
	// wait for conf change message
	act, err := n.Wait(1)
	// wait for stop message (async to avoid deadlock)
	stopc := make(chan error)
	go func() {
		_, werr := n.Wait(1)
		stopc <- werr
	}()
	s.Stop()

	// only want to confirm etcdserver won't panic; no data to check
	if err != nil {
		t.Fatal(err)
	}
	if len(act) == 0 {
		t.Fatalf("len(act) = %d, want >= 1", len(act))
	}
	if err = <-stopc; err != nil {
		t.Fatalf("error on stop (%v)", err)
	}
}

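// Hypothetical sketch (fakeNode is not one of the real helpers): the tests in
// this file drive the server by pushing raft.Ready values into a fake node's
// readyc channel, which the server loop consumes through Node.Ready(). The
// real recorder helpers (newNodeConfChangeCommitterStream, newNopReadyNode,
// newNodeCommitter, ...) are defined elsewhere in this package and also
// record calls for the n.Wait/n.Action assertions used throughout.
type fakeNode struct {
	raft.Node // embedded interface fills in the rest of raft.Node; unimplemented methods panic if called
	readyc    chan raft.Ready
}

func (n *fakeNode) Ready() <-chan raft.Ready { return n.readyc }
func (n *fakeNode) Advance()                 {}
func (n *fakeNode) Stop()                    {}
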
// TestSnapshot should snapshot the store and cut the persisted log
func TestSnapshot(t *testing.T) {
	s := raft.NewMemoryStorage()
	s.Append([]raftpb.Entry{{Index: 1}})
	st := mockstore.NewRecorder()
	p := mockstorage.NewStorageRecorder("")
	srv := &EtcdServer{
		cfg: &ServerConfig{},
		r: raftNode{
			Node:        newNodeNop(),
			raftStorage: s,
			storage:     p,
		},
		store: st,
	}
	srv.snapshot(1, raftpb.ConfState{Nodes: []uint64{1}})
	gaction, _ := st.Wait(2)
	if len(gaction) != 2 {
		t.Fatalf("len(gaction) = %d, want 2", len(gaction))
	}
	if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "Clone"}) {
		t.Errorf("action = %s, want Clone", gaction[0])
	}
	if !reflect.DeepEqual(gaction[1], testutil.Action{Name: "SaveNoCopy"}) {
		t.Errorf("action = %s, want SaveNoCopy", gaction[1])
	}
	gaction = p.Action()
	if len(gaction) != 1 {
		t.Fatalf("len(gaction) = %d, want 1", len(gaction))
	}
	if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "SaveSnap"}) {
		t.Errorf("action = %s, want SaveSnap", gaction[0])
	}
}

// TestSyncTrigger tests that the server proposes a SYNC request when its sync timer ticks
func TestSyncTrigger(t *testing.T) {
	n := newReadyNode()
	st := make(chan time.Time, 1)
	srv := &EtcdServer{
		cfg: &ServerConfig{TickMs: 1},
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			transport:   rafthttp.NewNopTransporter(),
			storage:     mockstorage.NewStorageRecorder(""),
		},
		store:      mockstore.NewNop(),
		SyncTicker: st,
		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
	}

	// trigger the server to become a leader and accept sync requests
	go func() {
		srv.start()
		n.readyc <- raft.Ready{
			SoftState: &raft.SoftState{
				RaftState: raft.StateLeader,
			},
		}
		// trigger a sync request
		st <- time.Time{}
	}()

	action, _ := n.Wait(1)
	go srv.Stop()

	if len(action) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(action))
	}
	if action[0].Name != "Propose" {
		t.Fatalf("action = %s, want Propose", action[0].Name)
	}
	data := action[0].Params[0].([]byte)
	var req pb.Request
	if err := req.Unmarshal(data); err != nil {
		t.Fatalf("error unmarshalling data: %v", err)
	}
	if req.Method != "SYNC" {
		t.Fatalf("unexpected proposed request: %#v", req.Method)
	}

	// wait on stop message
	<-n.Chan()
}

// TestSnapshot should snapshot the store and cut the persisted log
func TestSnapshot(t *testing.T) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	defer func() {
		os.RemoveAll(tmpPath)
	}()

	s := raft.NewMemoryStorage()
	s.Append([]raftpb.Entry{{Index: 1}})
	st := mockstore.NewRecorder()
	p := mockstorage.NewStorageRecorder("")
	srv := &EtcdServer{
		cfg: &ServerConfig{},
		r: raftNode{
			Node:        newNodeNop(),
			raftStorage: s,
			storage:     p,
		},
		store: st,
	}
	srv.kv = dstorage.New(be, &lease.FakeLessor{}, &srv.consistIndex)
	srv.be = be

	srv.snapshot(1, raftpb.ConfState{Nodes: []uint64{1}})
	gaction, _ := st.Wait(2)
	if len(gaction) != 2 {
		t.Fatalf("len(gaction) = %d, want 2", len(gaction))
	}
	if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "Clone"}) {
		t.Errorf("action = %s, want Clone", gaction[0])
	}
	if !reflect.DeepEqual(gaction[1], testutil.Action{Name: "SaveNoCopy"}) {
		t.Errorf("action = %s, want SaveNoCopy", gaction[1])
	}
	gaction = p.Action()
	if len(gaction) != 1 {
		t.Fatalf("len(gaction) = %d, want 1", len(gaction))
	}
	if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "SaveSnap"}) {
		t.Errorf("action = %s, want SaveSnap", gaction[0])
	}
}

// TestApplySnapshotAndCommittedEntries tests that the server applies the
// snapshot first and then the committed entries.
func TestApplySnapshotAndCommittedEntries(t *testing.T) {
	n := newNopReadyNode()
	st := mockstore.NewRecorderStream()
	cl := newCluster("abc")
	cl.SetStore(store.New())
	storage := raft.NewMemoryStorage()
	s := &EtcdServer{
		cfg: &ServerConfig{},
		r: raftNode{
			Node:        n,
			storage:     mockstorage.NewStorageRecorder(""),
			raftStorage: storage,
			transport:   rafthttp.NewNopTransporter(),
		},
		store:   st,
		cluster: cl,
	}
	s.start()
	req := &pb.Request{Method: "QGET"}
	n.readyc <- raft.Ready{
		Snapshot: raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1}},
		CommittedEntries: []raftpb.Entry{
			{Index: 2, Data: pbutil.MustMarshal(req)},
		},
	}
	// block until the server goroutine has recovered the snapshot and applied the entry
	actions, _ := st.Wait(2)
	s.Stop()

	if len(actions) != 2 {
		t.Fatalf("len(actions) = %d, want 2", len(actions))
	}
	if actions[0].Name != "Recovery" {
		t.Errorf("actions[0] = %s, want %s", actions[0].Name, "Recovery")
	}
	if actions[1].Name != "Get" {
		t.Errorf("actions[1] = %s, want %s", actions[1].Name, "Get")
	}
}

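// TestDoProposal tests that a request proposed through Do (POST, PUT, DELETE,
// and quorum GET) is committed, applied with exactly one store action, and
// answered with an empty event response.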
func TestDoProposal(t *testing.T) {
	tests := []pb.Request{
		{Method: "POST", ID: 1},
		{Method: "PUT", ID: 1},
		{Method: "DELETE", ID: 1},
		{Method: "GET", ID: 1, Quorum: true},
	}
	for i, tt := range tests {
		st := mockstore.NewRecorder()
		srv := &EtcdServer{
			cfg: &ServerConfig{TickMs: 1},
			r: raftNode{
				Node:        newNodeCommitter(),
				storage:     mockstorage.NewStorageRecorder(""),
				raftStorage: raft.NewMemoryStorage(),
				transport:   rafthttp.NewNopTransporter(),
			},
			store:    st,
			reqIDGen: idutil.NewGenerator(0, time.Time{}),
		}
		srv.applyV2 = &applierV2store{srv}
		srv.start()
		resp, err := srv.Do(context.Background(), tt)
		srv.Stop()

		action := st.Action()
		if len(action) != 1 {
			t.Errorf("#%d: len(action) = %d, want 1", i, len(action))
		}
		if err != nil {
			t.Fatalf("#%d: err = %v, want nil", i, err)
		}
		wresp := Response{Event: &store.Event{}}
		if !reflect.DeepEqual(resp, wresp) {
			t.Errorf("#%d: resp = %v, want %v", i, resp, wresp)
		}
	}
}

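// TestStopRaftWhenWaitingForApplyDone tests that the raft loop can be stopped
// while a Ready is still waiting to be applied.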
func TestStopRaftWhenWaitingForApplyDone(t *testing.T) {
	n := newNopReadyNode()
	r := raftNode{
		Node:        n,
		storage:     mockstorage.NewStorageRecorder(""),
		raftStorage: raft.NewMemoryStorage(),
		transport:   rafthttp.NewNopTransporter(),
	}
	r.start(&EtcdServer{r: r})
	n.readyc <- raft.Ready{}
	select {
	case <-r.applyc:
	case <-time.After(time.Second):
		t.Fatalf("failed to receive apply struct")
	}
	r.stopped <- struct{}{}
	select {
	case <-r.done:
	case <-time.After(time.Second):
		t.Fatalf("failed to stop raft loop")
	}
}

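// TestStopRaftWhenWaitingForApplyDone, raftReadyHandler variant: the same
// scenario, but the raft loop is started through a raftReadyHandler whose
// sendMessage hook forwards outgoing messages via srv.send.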
func TestStopRaftWhenWaitingForApplyDone(t *testing.T) {
	n := newNopReadyNode()
	srv := &EtcdServer{r: raftNode{
		Node:        n,
		storage:     mockstorage.NewStorageRecorder(""),
		raftStorage: raft.NewMemoryStorage(),
		transport:   rafthttp.NewNopTransporter(),
	}}
	srv.r.start(&raftReadyHandler{
		sendMessage: func(msgs []raftpb.Message) { srv.send(msgs) },
	})
	n.readyc <- raft.Ready{}
	select {
	case <-srv.r.applyc:
	case <-time.After(time.Second):
		t.Fatalf("failed to receive apply struct")
	}
	srv.r.stopped <- struct{}{}
	select {
	case <-srv.r.done:
	case <-time.After(time.Second):
		t.Fatalf("failed to stop raft loop")
	}
}

// TestUpdateMember tests that UpdateMember can propose and perform node update.
func TestUpdateMember(t *testing.T) {
	n := newNodeConfChangeCommitterRecorder()
	n.readyc <- raft.Ready{
		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
	}
	cl := newTestCluster(nil)
	st := store.New()
	cl.SetStore(st)
	cl.AddMember(&membership.Member{ID: 1234})
	s := &EtcdServer{
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			storage:     mockstorage.NewStorageRecorder(""),
			transport:   rafthttp.NewNopTransporter(),
		},
		store:    st,
		cluster:  cl,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	s.start()
	wm := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://127.0.0.1:1"}}}
	err := s.UpdateMember(context.TODO(), wm)
	gaction := n.Action()
	s.Stop()

	if err != nil {
		t.Fatalf("UpdateMember error: %v", err)
	}
	wactions := []testutil.Action{
		{Name: "ProposeConfChange:ConfChangeUpdateNode"},
		{Name: "ApplyConfChange:ConfChangeUpdateNode"},
	}
	if !reflect.DeepEqual(gaction, wactions) {
		t.Errorf("action = %v, want %v", gaction, wactions)
	}
	if !reflect.DeepEqual(cl.Member(1234), &wm) {
		t.Errorf("member = %v, want %v", cl.Member(1234), &wm)
	}
}

// TestRemoveMember tests that RemoveMember can propose and perform node removal.
func TestRemoveMember(t *testing.T) {
	n := newNodeConfChangeCommitterRecorder()
	n.readyc <- raft.Ready{
		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
	}
	cl := newTestCluster(nil)
	st := store.New()
	cl.SetStore(store.New())
	cl.AddMember(&membership.Member{ID: 1234})
	s := &EtcdServer{
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			storage:     mockstorage.NewStorageRecorder(""),
			transport:   rafthttp.NewNopTransporter(),
		},
		cfg:      &ServerConfig{},
		store:    st,
		cluster:  cl,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	s.start()
	err := s.RemoveMember(context.TODO(), 1234)
	gaction := n.Action()
	s.Stop()

	if err != nil {
		t.Fatalf("RemoveMember error: %v", err)
	}
	wactions := []testutil.Action{
		{Name: "ProposeConfChange:ConfChangeRemoveNode"},
		{Name: "ApplyConfChange:ConfChangeRemoveNode"},
	}
	if !reflect.DeepEqual(gaction, wactions) {
		t.Errorf("action = %v, want %v", gaction, wactions)
	}
	if cl.Member(1234) != nil {
		t.Errorf("member with id 1234 is not removed")
	}
}

// TestRecvSnapshot tests that when the server receives a snapshot from the
// raft leader, it triggers storage.SaveSnap and store.Recover.
func TestRecvSnapshot(t *testing.T) {
	n := newNopReadyNode()
	st := mockstore.NewRecorder()
	p := mockstorage.NewStorageRecorder("")
	cl := newCluster("abc")
	cl.SetStore(store.New())
	s := &EtcdServer{
		cfg: &ServerConfig{},
		r: raftNode{
			Node:        n,
			transport:   rafthttp.NewNopTransporter(),
			storage:     p,
			raftStorage: raft.NewMemoryStorage(),
		},
		store:   st,
		cluster: cl,
	}
	s.start()
	n.readyc <- raft.Ready{Snapshot: raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1}}}
	// wait for the actions to happen on the storage
	for len(p.Action()) == 0 {
		time.Sleep(10 * time.Millisecond)
	}
	s.Stop()

	wactions := []testutil.Action{{Name: "Recovery"}}
	if g := st.Action(); !reflect.DeepEqual(g, wactions) {
		t.Errorf("store action = %v, want %v", g, wactions)
	}
	wactions = []testutil.Action{{Name: "SaveSnap"}, {Name: "Save"}}
	if g := p.Action(); !reflect.DeepEqual(g, wactions) {
		t.Errorf("storage action = %v, want %v", g, wactions)
	}
}

// TestConcurrentApplyAndSnapshotV3 will send out snapshots concurrently with
// proposals.
func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
	const (
		// snapshots that may queue up at once without dropping
		maxInFlightMsgSnap = 16
	)
	n := newNopReadyNode()
	st := store.New()
	cl := membership.NewCluster("abc")
	cl.SetStore(st)

	testdir, err := ioutil.TempDir(os.TempDir(), "testsnapdir")
	if err != nil {
		t.Fatalf("Couldn't open tempdir (%v)", err)
	}
	defer os.RemoveAll(testdir)
	if err := os.MkdirAll(testdir+"/member/snap", 0755); err != nil {
		t.Fatalf("Couldn't make snap dir (%v)", err)
	}

	rs := raft.NewMemoryStorage()
	tr, snapDoneC := rafthttp.NewSnapTransporter(testdir)
	s := &EtcdServer{
		cfg: &ServerConfig{
			DataDir: testdir,
		},
		r: raftNode{
			Node:        n,
			transport:   tr,
			storage:     mockstorage.NewStorageRecorder(testdir),
			raftStorage: rs,
		},
		store:    st,
		cluster:  cl,
		msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
	}

	be, tmpPath := backend.NewDefaultTmpBackend()
	defer func() {
		os.RemoveAll(tmpPath)
	}()
	s.kv = dstorage.New(be, &lease.FakeLessor{}, &s.consistIndex)
	s.be = be

	s.start()
	defer s.Stop()

	// submit applied entries and snap entries
	idx := uint64(0)
	outdated := 0
	accepted := 0
	for k := 1; k <= 101; k++ {
		idx++
		ch := s.w.Register(idx)
		req := &pb.Request{Method: "QGET", ID: idx}
		ent := raftpb.Entry{Index: idx, Data: pbutil.MustMarshal(req)}
		ready := raft.Ready{Entries: []raftpb.Entry{ent}}
		n.readyc <- ready

		ready = raft.Ready{CommittedEntries: []raftpb.Entry{ent}}
		n.readyc <- ready

		// "idx" applied
		<-ch

		// one snapshot for every two messages
		if k%2 != 0 {
			continue
		}

		n.readyc <- raft.Ready{Messages: []raftpb.Message{{Type: raftpb.MsgSnap}}}
		// get the snapshot sent by the transport
		snapMsg := <-snapDoneC
		// If the snapshot trails applied records, recovery will panic
		// since there's no allocated snapshot at the place of the
		// snapshot record. This only happens when the applier and the
		// snapshot sender get out of sync.
		if snapMsg.Snapshot.Metadata.Index == idx {
			idx++
			snapMsg.Snapshot.Metadata.Index = idx
			ready = raft.Ready{Snapshot: snapMsg.Snapshot}
			n.readyc <- ready
			accepted++
		} else {
			outdated++
		}
		// don't wait for the snapshot to complete; move on to the next message
	}
	if accepted != 50 {
		t.Errorf("accepted = %v, want 50", accepted)
	}
	if outdated != 0 {
		t.Errorf("outdated = %v, want 0", outdated)
	}
}