Example #1
// TestApplyMultiConfChangeShouldStop ensures that apply will return shouldStop
// if the local member is removed along with other conf updates.
func TestApplyMultiConfChangeShouldStop(t *testing.T) {
	cl := membership.NewCluster("")
	cl.SetStore(store.New())
	for i := 1; i <= 5; i++ {
		cl.AddMember(&membership.Member{ID: types.ID(i)})
	}
	srv := &EtcdServer{
		id: 2,
		r: raftNode{
			Node:      newNodeNop(),
			transport: rafthttp.NewNopTransporter(),
		},
		cluster: cl,
		w:       wait.New(),
	}
	ents := []raftpb.Entry{}
	for i := 1; i <= 4; i++ {
		ent := raftpb.Entry{
			Term:  1,
			Index: uint64(i),
			Type:  raftpb.EntryConfChange,
			Data: pbutil.MustMarshal(
				&raftpb.ConfChange{
					Type:   raftpb.ConfChangeRemoveNode,
					NodeID: uint64(i)}),
		}
		ents = append(ents, ent)
	}

	_, shouldStop := srv.apply(ents, &raftpb.ConfState{})
	if !shouldStop {
		t.Errorf("shouldStop = %t, want %t", shouldStop, true)
	}
}
Example #2
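// newTestCluster returns a RaftCluster populated with the given members.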
func newTestCluster(membs []*membership.Member) *membership.RaftCluster {
	c := membership.NewCluster("")
	for _, m := range membs {
		c.AddMember(m)
	}
	return c
}
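As a rough illustration, a test in the same package could use this helper as sketched below. The import paths, the test name, and the Members() accessor are assumptions about the etcd tree these examples come from, not part of the original example.

package etcdserver

import (
	"testing"

	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/types"
)

// TestNewTestClusterSketch (hypothetical) builds a three-member cluster with
// the helper above and checks the member count.
func TestNewTestClusterSketch(t *testing.T) {
	membs := []*membership.Member{
		{ID: types.ID(1)},
		{ID: types.ID(2)},
		{ID: types.ID(3)},
	}
	cl := newTestCluster(membs)
	if len(cl.Members()) != 3 {
		t.Fatalf("got %d members, want 3", len(cl.Members()))
	}
}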
Example #3
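// restartNode recovers the member's raft state from the WAL and the given
// snapshot, then restarts the raft node at its last committed index.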
func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, cl, n, s, w
}
Example #4
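// restartAsStandaloneNode restarts the member as a single-member cluster:
// uncommitted WAL entries are discarded and configuration-change entries
// removing every other member are force-appended and committed.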
func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatus = n.Status
	return id, cl, n, s, w
}
Example #5
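// TestApplyConfChangeShouldStop ensures that applyConfChange reports
// shouldStop only when the removed member is the local one.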
func TestApplyConfChangeShouldStop(t *testing.T) {
	cl := membership.NewCluster("")
	cl.SetStore(store.New())
	for i := 1; i <= 3; i++ {
		cl.AddMember(&membership.Member{ID: types.ID(i)})
	}
	srv := &EtcdServer{
		id: 1,
		r: raftNode{
			Node:      newNodeNop(),
			transport: rafthttp.NewNopTransporter(),
		},
		cluster: cl,
	}
	cc := raftpb.ConfChange{
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: 2,
	}
	// remove non-local member
	shouldStop, err := srv.applyConfChange(cc, &raftpb.ConfState{})
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if shouldStop {
		t.Errorf("shouldStop = %t, want %t", shouldStop, false)
	}

	// remove local member
	cc.NodeID = 1
	shouldStop, err = srv.applyConfChange(cc, &raftpb.ConfState{})
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	if !shouldStop {
		t.Errorf("shouldStop = %t, want %t", shouldStop, true)
	}
}
Example #6
// TestConcurrentApplyAndSnapshotV3 will send out snapshots concurrently with
// proposals.
func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
	const (
		// snapshots that may queue up at once without dropping
		maxInFlightMsgSnap = 16
	)
	n := newNopReadyNode()
	st := store.New()
	cl := membership.NewCluster("abc")
	cl.SetStore(st)

	testdir, err := ioutil.TempDir(os.TempDir(), "testsnapdir")
	if err != nil {
		t.Fatalf("Couldn't open tempdir (%v)", err)
	}
	defer os.RemoveAll(testdir)
	if err := os.MkdirAll(testdir+"/member/snap", 0755); err != nil {
		t.Fatalf("Couldn't make snap dir (%v)", err)
	}

	rs := raft.NewMemoryStorage()
	tr, snapDoneC := rafthttp.NewSnapTransporter(testdir)
	s := &EtcdServer{
		cfg: &ServerConfig{
			DataDir: testdir,
		},
		r: raftNode{
			Node:        n,
			transport:   tr,
			storage:     mockstorage.NewStorageRecorder(testdir),
			raftStorage: rs,
		},
		store:    st,
		cluster:  cl,
		msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
	}

	be, tmpPath := backend.NewDefaultTmpBackend()
	defer func() {
		os.RemoveAll(tmpPath)
	}()
	s.kv = dstorage.New(be, &lease.FakeLessor{}, &s.consistIndex)
	s.be = be

	s.start()
	defer s.Stop()

	// submit applied entries and snap entries
	idx := uint64(0)
	outdated := 0
	accepted := 0
	for k := 1; k <= 101; k++ {
		idx++
		ch := s.w.Register(uint64(idx))
		req := &pb.Request{Method: "QGET", ID: uint64(idx)}
		ent := raftpb.Entry{Index: uint64(idx), Data: pbutil.MustMarshal(req)}
		ready := raft.Ready{Entries: []raftpb.Entry{ent}}
		n.readyc <- ready

		ready = raft.Ready{CommittedEntries: []raftpb.Entry{ent}}
		n.readyc <- ready

		// "idx" applied
		<-ch

		// one snapshot for every two messages
		if k%2 != 0 {
			continue
		}

		n.readyc <- raft.Ready{Messages: []raftpb.Message{{Type: raftpb.MsgSnap}}}
		// get the snapshot sent by the transport
		snapMsg := <-snapDoneC
		// If the snapshot trails applied records, recovery will panic
		// since there's no allocated snapshot at the place of the
		// snapshot record. This only happens when the applier and the
		// snapshot sender get out of sync.
		if snapMsg.Snapshot.Metadata.Index == idx {
			idx++
			snapMsg.Snapshot.Metadata.Index = idx
			ready = raft.Ready{Snapshot: snapMsg.Snapshot}
			n.readyc <- ready
			accepted++
		} else {
			outdated++
		}
		// don't wait for the snapshot to complete, move to next message
	}
	if accepted != 50 {
		t.Errorf("accepted=%v, want 50", accepted)
	}
	if outdated != 0 {
		t.Errorf("outdated=%v, want 0", outdated)
	}
}
Example #7
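// TestApplyConfChangeError ensures that an invalid conf change returns the
// expected membership error and is passed to the raft node with NodeID set
// to raft.None.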
func TestApplyConfChangeError(t *testing.T) {
	cl := membership.NewCluster("")
	cl.SetStore(store.New())
	for i := 1; i <= 4; i++ {
		cl.AddMember(&membership.Member{ID: types.ID(i)})
	}
	cl.RemoveMember(4)

	tests := []struct {
		cc   raftpb.ConfChange
		werr error
	}{
		{
			raftpb.ConfChange{
				Type:   raftpb.ConfChangeAddNode,
				NodeID: 4,
			},
			membership.ErrIDRemoved,
		},
		{
			raftpb.ConfChange{
				Type:   raftpb.ConfChangeUpdateNode,
				NodeID: 4,
			},
			membership.ErrIDRemoved,
		},
		{
			raftpb.ConfChange{
				Type:   raftpb.ConfChangeAddNode,
				NodeID: 1,
			},
			membership.ErrIDExists,
		},
		{
			raftpb.ConfChange{
				Type:   raftpb.ConfChangeRemoveNode,
				NodeID: 5,
			},
			membership.ErrIDNotFound,
		},
	}
	for i, tt := range tests {
		n := newNodeRecorder()
		srv := &EtcdServer{
			r:       raftNode{Node: n},
			cluster: cl,
			cfg:     &ServerConfig{},
		}
		_, err := srv.applyConfChange(tt.cc, nil)
		if err != tt.werr {
			t.Errorf("#%d: applyConfChange error = %v, want %v", i, err, tt.werr)
		}
		cc := raftpb.ConfChange{Type: tt.cc.Type, NodeID: raft.None}
		w := []testutil.Action{
			{
				Name:   "ApplyConfChange",
				Params: []interface{}{cc},
			},
		}
		if g, _ := n.Wait(1); !reflect.DeepEqual(g, w) {
			t.Errorf("#%d: action = %+v, want %+v", i, g, w)
		}
	}
}
Example #8
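// rebuild replays the snapshot (if any) and the WAL under datadir to
// reconstruct the v2 store, returning the WAL metadata, the last hard state,
// and the rebuilt store.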
func rebuild(datadir string) ([]byte, *raftpb.HardState, store.Store, error) {
	waldir := path.Join(datadir, "member", "wal")
	snapdir := path.Join(datadir, "member", "snap")

	ss := snap.New(snapdir)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		return nil, nil, nil, err
	}

	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}

	w, err := wal.OpenForRead(waldir, walsnap)
	if err != nil {
		return nil, nil, nil, err
	}
	defer w.Close()

	meta, hardstate, ents, err := w.ReadAll()
	if err != nil {
		return nil, nil, nil, err
	}

	st := store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
	if snapshot != nil {
		err := st.Recovery(snapshot.Data)
		if err != nil {
			return nil, nil, nil, err
		}
	}

	cluster := membership.NewCluster("")
	cluster.SetStore(st)
	cluster.Recover(func(*semver.Version) {})

	applier := etcdserver.NewApplierV2(st, cluster)
	for _, ent := range ents {
		if ent.Type == raftpb.EntryConfChange {
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, ent.Data)
			switch cc.Type {
			case raftpb.ConfChangeAddNode:
				m := new(membership.Member)
				if err := json.Unmarshal(cc.Context, m); err != nil {
					return nil, nil, nil, err
				}
				cluster.AddMember(m)
			case raftpb.ConfChangeRemoveNode:
				id := types.ID(cc.NodeID)
				cluster.RemoveMember(id)
			case raftpb.ConfChangeUpdateNode:
				m := new(membership.Member)
				if err := json.Unmarshal(cc.Context, m); err != nil {
					return nil, nil, nil, err
				}
				cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
			}
			continue
		}

		var raftReq pb.InternalRaftRequest
		if !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { // backward compatible
			var r pb.Request
			pbutil.MustUnmarshal(&r, ent.Data)
			applyRequest(&r, applier)
		} else {
			if raftReq.V2 != nil {
				req := raftReq.V2
				applyRequest(req, applier)
			}
		}
	}

	return meta, &hardstate, st, nil
}
Example #9
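// rebuildStoreV2 rebuilds the v2 store from the snapshot and WAL under the
// migrate data directory and returns it along with the last applied index.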
func rebuildStoreV2() (store.Store, uint64) {
	var index uint64
	cl := membership.NewCluster("")

	waldir := migrateWALdir
	if len(waldir) == 0 {
		waldir = path.Join(migrateDatadir, "member", "wal")
	}
	snapdir := path.Join(migrateDatadir, "member", "snap")

	ss := snap.New(snapdir)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		ExitWithError(ExitError, err)
	}

	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		index = snapshot.Metadata.Index
	}

	w, err := wal.OpenForRead(waldir, walsnap)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	defer w.Close()

	_, _, ents, err := w.ReadAll()
	if err != nil {
		ExitWithError(ExitError, err)
	}

	st := store.New()
	if snapshot != nil {
		err := st.Recovery(snapshot.Data)
		if err != nil {
			ExitWithError(ExitError, err)
		}
	}

	cl.SetStore(st)
	cl.Recover(api.UpdateCapability)

	applier := etcdserver.NewApplierV2(st, cl)
	for _, ent := range ents {
		if ent.Type == raftpb.EntryConfChange {
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, ent.Data)
			applyConf(cc, cl)
			continue
		}

		var raftReq pb.InternalRaftRequest
		if !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { // backward compatible
			var r pb.Request
			pbutil.MustUnmarshal(&r, ent.Data)
			applyRequest(&r, applier)
		} else {
			if raftReq.V2 != nil {
				req := raftReq.V2
				applyRequest(req, applier)
			}
		}
		if ent.Index > index {
			index = ent.Index
		}
	}

	return st, index
}