Example #1
// newRaftNode initiates a raft instance and returns a committed log entry
// channel and error channel. Proposals for log updates are sent over the
// provided proposal channel. All log entries are replayed over the
// commit channel, followed by a nil message (to indicate the channel is
// current), then new log entries. To shut down, close proposeC and read errorC.
func newRaftNode(id int, peers []string, join bool, getSnapshot func() ([]byte, error), proposeC <-chan string,
	confChangeC <-chan raftpb.ConfChange) (<-chan *string, <-chan error, <-chan *snap.Snapshotter) {

	commitC := make(chan *string)
	errorC := make(chan error)

	rc := &raftNode{
		proposeC:    proposeC,
		confChangeC: confChangeC,
		commitC:     commitC,
		errorC:      errorC,
		id:          id,
		peers:       peers,
		join:        join,
		waldir:      fmt.Sprintf("raftexample-%d", id),
		snapdir:     fmt.Sprintf("raftexample-%d-snap", id),
		getSnapshot: getSnapshot,
		raftStorage: raft.NewMemoryStorage(),
		snapCount:   defaultSnapCount,
		stopc:       make(chan struct{}),
		httpstopc:   make(chan struct{}),
		httpdonec:   make(chan struct{}),

		snapshotterReady: make(chan *snap.Snapshotter, 1),
		// rest of structure populated after WAL replay
	}
	go rc.startRaft()
	return commitC, errorC, rc.snapshotterReady
}
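
The two returned channels are the whole consumer-facing API of this example. Below is a minimal sketch, not part of the original, of how a caller might drain them; applyEntry is a hypothetical callback standing in for the application's state machine.

// drainCommits applies committed entries until the raft node shuts down.
// Per the newRaftNode comment above, a nil message marks the point where
// WAL replay has caught up and subsequent messages are new entries.
func drainCommits(commitC <-chan *string, errorC <-chan error, applyEntry func(string)) error {
	for {
		select {
		case data, ok := <-commitC:
			if !ok {
				return nil // commit channel closed: clean shutdown
			}
			if data == nil {
				continue // replay finished; channel is now current
			}
			applyEntry(*data)
		case err := <-errorC:
			return err // raft loop failed, or proposeC was closed
		}
	}
}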
Example #2
// Applied > SnapCount should trigger a SaveSnap event
func TestTriggerSnap(t *testing.T) {
	snapc := 10
	st := store.NewRecorder()
	p := &storageRecorder{}
	srv := &EtcdServer{
		cfg:       &ServerConfig{TickMs: 1},
		snapCount: uint64(snapc),
		r: raftNode{
			Node:        newNodeCommitter(),
			raftStorage: raft.NewMemoryStorage(),
			storage:     p,
			transport:   rafthttp.NewNopTransporter(),
		},
		store:    st,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	srv.start()
	for i := 0; i < snapc+1; i++ {
		srv.Do(context.Background(), pb.Request{Method: "PUT"})
	}

	wcnt := 2 + snapc
	gaction, _ := p.Wait(wcnt)

	srv.Stop()

	// each operation is recorded as a Save
	// (SnapCount+1) * Puts + SaveSnap = (SnapCount+1) * Save + SaveSnap
	if len(gaction) != wcnt {
		t.Fatalf("len(action) = %d, want %d", len(gaction), wcnt)
	}
	if !reflect.DeepEqual(gaction[wcnt-1], testutil.Action{Name: "SaveSnap"}) {
		t.Errorf("action = %s, want SaveSnap", gaction[wcnt-1])
	}
}
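
This test, like several below, relies on etcd's recorder mocks (store.NewRecorder, storageRecorder): every stubbed method call is logged as a testutil.Action, and Wait(n) blocks until n actions have been observed. A hedged sketch of that idea, using a hypothetical type modeled on testutil.Recorder:

// actionRecorder records calls as actions; Wait blocks for n of them.
type actionRecorder struct {
	ch chan testutil.Action
}

func newActionRecorder() *actionRecorder {
	return &actionRecorder{ch: make(chan testutil.Action, 1024)}
}

// Record is invoked by each mocked method, e.g.
// r.Record(testutil.Action{Name: "SaveSnap"}).
func (r *actionRecorder) Record(a testutil.Action) { r.ch <- a }

// Wait returns once n actions have been recorded, or after a timeout.
func (r *actionRecorder) Wait(n int) ([]testutil.Action, error) {
	acts := make([]testutil.Action, 0, n)
	for len(acts) < n {
		select {
		case a := <-r.ch:
			acts = append(acts, a)
		case <-time.After(time.Second):
			return acts, fmt.Errorf("got %d of %d expected actions", len(acts), n)
		}
	}
	return acts, nil
}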
Example #3
func StartNode(id uint64, peers []raft.Peer, addrs []string, t Transport) *Node {
	st := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              id,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         st,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}
	rn := raft.StartNode(c, peers)
	n := &Node{
		Node:    rn,
		storage: st,
	}
	n.t = t
	path := fmt.Sprint(DirPath, id)
	f, e := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.ModePerm)
	if e != nil {
		fmt.Fprintln(os.Stderr, "open the kv file error:", e)
	}
	n.f = f
	go n.t.listen()
	go n.start()
	return n
}
Example #4
func TestStopRaftWhenWaitingForApplyDone(t *testing.T) {
	n := newReadyNode()
	r := raftNode{
		Node:        n,
		applyc:      make(chan apply),
		storage:     &storageRecorder{},
		raftStorage: raft.NewMemoryStorage(),
		transport:   &nopTransporter{},
	}
	r.s = &EtcdServer{r: r}
	go r.run()
	n.readyc <- raft.Ready{}
	select {
	case <-r.applyc:
	case <-time.After(time.Second):
		t.Fatalf("failed to receive apply struct")
	}

	r.stopped <- struct{}{}
	select {
	case <-r.done:
	case <-time.After(time.Second):
		t.Fatalf("failed to stop raft loop")
	}
}
Example #5
func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, cl, n, s, w
}
Example #6
func startNode(cfg *ServerConfig, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cfg.Cluster.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cfg.Cluster.ID()),
		},
	)
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		log.Fatalf("etcdserver create snapshot directory error: %v", err)
	}
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatalf("etcdserver: create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cfg.Cluster).Member(id))
		if err != nil {
			log.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	log.Printf("etcdserver: start member %s in cluster %s", id, cfg.Cluster.ID())
	s = raft.NewMemoryStorage()
	n = raft.StartNode(uint64(id), peers, 10, 1, s)
	return
}
Example #7
// snapshot should snapshot the store and cut the persisted WAL
func TestSnapshot(t *testing.T) {
	s := raft.NewMemoryStorage()
	s.Append([]raftpb.Entry{{Index: 1}})
	st := store.NewRecorder()
	p := &storageRecorder{}
	srv := &EtcdServer{
		cfg: &ServerConfig{},
		r: raftNode{
			Node:        newNodeNop(),
			raftStorage: s,
			storage:     p,
		},
		store: st,
	}
	srv.snapshot(1, raftpb.ConfState{Nodes: []uint64{1}})
	gaction, _ := st.Wait(2)
	if len(gaction) != 2 {
		t.Fatalf("len(action) = %d, want 1", len(gaction))
	}
	if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "Clone"}) {
		t.Errorf("action = %s, want Clone", gaction[0])
	}
	if !reflect.DeepEqual(gaction[1], testutil.Action{Name: "SaveNoCopy"}) {
		t.Errorf("action = %s, want SaveNoCopy", gaction[1])
	}
	gaction = p.Action()
	if len(gaction) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(gaction))
	}
	if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "SaveSnap"}) {
		t.Errorf("action = %s, want SaveSnap", gaction[0])
	}
}
Example #8
// TestApplyRepeat tests that server handles repeat raft messages gracefully
func TestApplyRepeat(t *testing.T) {
	n := newNodeConfChangeCommitterStream()
	n.readyc <- raft.Ready{
		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
	}
	cl := newTestCluster(nil)
	st := store.New()
	cl.SetStore(store.New())
	cl.AddMember(&membership.Member{ID: 1234})
	s := &EtcdServer{
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			storage:     mockstorage.NewStorageRecorder(""),
			transport:   rafthttp.NewNopTransporter(),
		},
		cfg:      &ServerConfig{},
		store:    st,
		cluster:  cl,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	s.applyV2 = &applierV2store{s}
	s.start()
	req := &pb.Request{Method: "QGET", ID: uint64(1)}
	ents := []raftpb.Entry{{Index: 1, Data: pbutil.MustMarshal(req)}}
	n.readyc <- raft.Ready{CommittedEntries: ents}
	// dup msg
	n.readyc <- raft.Ready{CommittedEntries: ents}

	// use a conf change to block until dup msgs are all processed
	cc := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2}
	ents = []raftpb.Entry{{
		Index: 2,
		Type:  raftpb.EntryConfChange,
		Data:  pbutil.MustMarshal(cc),
	}}
	n.readyc <- raft.Ready{CommittedEntries: ents}
	// wait for conf change message
	act, err := n.Wait(1)
	// wait for stop message (async to avoid deadlock)
	stopc := make(chan error)
	go func() {
		_, werr := n.Wait(1)
		stopc <- werr
	}()
	s.Stop()

	// only want to confirm etcdserver won't panic; no data to check

	if err != nil {
		t.Fatal(err)
	}
	if len(act) == 0 {
		t.Fatalf("expected len(act)=0, got %d", len(act))
	}

	if err = <-stopc; err != nil {
		t.Fatalf("error on stop (%v)", err)
	}
}
Example #9
// GroupStorage implements the Storage interface.
func (m *MemoryStorage) GroupStorage(groupID uint64) WriteableGroupStorage {
	m.mu.Lock()
	defer m.mu.Unlock()
	g, ok := m.groups[groupID]
	if !ok {
		g = raft.NewMemoryStorage()
		m.groups[groupID] = g
	}
	return g
}
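
Group storages are created lazily on first access, and the mutex makes that allocation idempotent under concurrent callers. A usage sketch under assumptions: the struct literal below guesses at the wrapper's fields from the method body, and it assumes WriteableGroupStorage exposes raft.MemoryStorage's Append.

m := &MemoryStorage{groups: make(map[uint64]*raft.MemoryStorage)}
gs := m.GroupStorage(7) // first call allocates this group's raft.MemoryStorage
gs.Append([]raftpb.Entry{{Index: 1, Term: 1}})
if m.GroupStorage(7) != gs {
	panic("GroupStorage should return the same instance for a given group")
}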
Example #10
// GroupStorage implements the Storage interface.
func (m *MemoryStorage) GroupStorage(groupID roachpb.RangeID, replicaID roachpb.ReplicaID) (WriteableGroupStorage, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	g, ok := m.groups[groupID]
	if !ok {
		g = raft.NewMemoryStorage()
		m.groups[groupID] = g
	}
	return g, nil
}
Example #11
// snapshot should snapshot the store and cut the persisted WAL
func TestSnapshot(t *testing.T) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	defer func() {
		os.RemoveAll(tmpPath)
	}()

	s := raft.NewMemoryStorage()
	s.Append([]raftpb.Entry{{Index: 1}})
	st := mockstore.NewRecorderStream()
	p := mockstorage.NewStorageRecorderStream("")
	srv := &EtcdServer{
		Cfg: &ServerConfig{},
		r: raftNode{
			Node:        newNodeNop(),
			raftStorage: s,
			storage:     p,
		},
		store: st,
	}
	srv.kv = mvcc.New(be, &lease.FakeLessor{}, &srv.consistIndex)
	srv.be = be

	ch := make(chan struct{}, 2)

	go func() {
		gaction, _ := p.Wait(1)
		defer func() { ch <- struct{}{} }()

		if len(gaction) != 1 {
			// use Errorf: Fatalf must not be called outside the test goroutine
			t.Errorf("len(action) = %d, want 1", len(gaction))
			return
		}
		if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "SaveSnap"}) {
			t.Errorf("action = %s, want SaveSnap", gaction[0])
		}
	}()

	go func() {
		gaction, _ := st.Wait(2)
		defer func() { ch <- struct{}{} }()

		if len(gaction) != 2 {
			// use Errorf: Fatalf must not be called outside the test goroutine
			t.Errorf("len(action) = %d, want 2", len(gaction))
			return
		}
		if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "Clone"}) {
			t.Errorf("action = %s, want Clone", gaction[0])
		}
		if !reflect.DeepEqual(gaction[1], testutil.Action{Name: "SaveNoCopy"}) {
			t.Errorf("action = %s, want SaveNoCopy", gaction[1])
		}
	}()

	srv.snapshot(1, raftpb.ConfState{Nodes: []uint64{1}})
	<-ch
	<-ch
}
Example #12
func newCtrl(
	self net.Addr,
	others []net.Addr, // to join existing cluster, pass nil or empty others
	minPeerCount int,
	incomingc <-chan raftpb.Message,
	outgoingc chan<- raftpb.Message,
	unreachablec <-chan uint64,
	confchangec <-chan raftpb.ConfChange,
	snapshotc chan<- raftpb.Snapshot,
	entryc chan<- raftpb.Entry,
	proposalc <-chan []byte,
	removedc chan<- struct{},
	logger mesh.Logger,
) *ctrl {
	storage := raft.NewMemoryStorage()
	raftLogger := &raft.DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
	raftLogger.EnableDebug()
	nodeConfig := &raft.Config{
		ID:              makeRaftPeer(self).ID,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		Applied:         0,    // starting fresh
		MaxSizePerMsg:   4096, // TODO(pb): looks like bytes; confirm that
		MaxInflightMsgs: 256,  // TODO(pb): copied from docs; confirm that
		CheckQuorum:     true, // leader steps down if quorum is not active for an electionTimeout
		Logger:          raftLogger,
	}

	startPeers := makeRaftPeers(others)
	if len(startPeers) == 0 {
		startPeers = nil // special case: join existing
	}
	node := raft.StartNode(nodeConfig, startPeers)

	c := &ctrl{
		self:         makeRaftPeer(self),
		minPeerCount: minPeerCount,
		incomingc:    incomingc,
		outgoingc:    outgoingc,
		unreachablec: unreachablec,
		confchangec:  confchangec,
		snapshotc:    snapshotc,
		entryc:       entryc,
		proposalc:    proposalc,
		stopc:        make(chan struct{}),
		removedc:     removedc,
		terminatedc:  make(chan struct{}),
		storage:      storage,
		node:         node,
		logger:       logger,
	}
	go c.driveRaft() // analogous to raftexample serveChannels
	return c
}
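
The startPeers special case follows the raft.StartNode contract of the raft version these examples vendor: a non-empty peer list bootstraps a fresh cluster, while nil means the node is joining an existing one and will learn membership from replicated conf changes (raftexample's join flag works the same way; note that newer raft releases instead reject an empty peer list and expect RestartNode). A sketch, with bootstrapping as a hypothetical flag:

var startPeers []raft.Peer
if bootstrapping {
	// fresh cluster: seed the initial membership explicitly
	startPeers = []raft.Peer{{ID: 1}, {ID: 2}, {ID: 3}}
}
node := raft.StartNode(nodeConfig, startPeers) // nil peers => join existing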
Example #13
// NewNode generates a new Raft node
func NewNode(opts NodeOptions) *Node {
	cfg := opts.Config
	if cfg == nil {
		cfg = DefaultNodeConfig()
	}
	if opts.TickInterval == 0 {
		opts.TickInterval = time.Second
	}
	if opts.SendTimeout == 0 {
		opts.SendTimeout = 2 * time.Second
	}

	raftStore := raft.NewMemoryStorage()

	n := &Node{
		cluster:   membership.NewCluster(2 * cfg.ElectionTick),
		raftStore: raftStore,
		opts:      opts,
		Config: &raft.Config{
			ElectionTick:    cfg.ElectionTick,
			HeartbeatTick:   cfg.HeartbeatTick,
			Storage:         raftStore,
			MaxSizePerMsg:   cfg.MaxSizePerMsg,
			MaxInflightMsgs: cfg.MaxInflightMsgs,
			Logger:          cfg.Logger,
		},
		doneCh:              make(chan struct{}),
		removeRaftCh:        make(chan struct{}),
		stopped:             make(chan struct{}),
		leadershipBroadcast: watch.NewQueue(),
		lastSendToMember:    make(map[uint64]chan struct{}),
		keyRotator:          opts.KeyRotator,
	}
	n.memoryStore = store.NewMemoryStore(n)

	if opts.ClockSource == nil {
		n.ticker = clock.NewClock().NewTicker(opts.TickInterval)
	} else {
		n.ticker = opts.ClockSource.NewTicker(opts.TickInterval)
	}

	n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
	n.wait = newWait()

	n.removeRaftFunc = func(n *Node) func() {
		var removeRaftOnce sync.Once
		return func() {
			removeRaftOnce.Do(func() {
				close(n.removeRaftCh)
			})
		}
	}(n)

	return n
}
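
The removeRaftFunc closure is a once-guarded shutdown idiom: the channel may be closed from several code paths, and sync.Once makes repeated calls safe. The same pattern in isolation (a self-contained sketch, not from the original):

// closeOnce returns a function that closes ch exactly once, no matter
// how many times or from how many goroutines it is invoked.
func closeOnce(ch chan struct{}) func() {
	var once sync.Once
	return func() {
		once.Do(func() { close(ch) })
	}
}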
Example #14
// NewNode generates a new Raft node
func NewNode(ctx context.Context, opts NewNodeOptions) *Node {
	cfg := opts.Config
	if cfg == nil {
		cfg = DefaultNodeConfig()
	}
	if opts.TickInterval == 0 {
		opts.TickInterval = time.Second
	}

	raftStore := raft.NewMemoryStorage()

	ctx, cancel := context.WithCancel(ctx)

	n := &Node{
		Ctx:            ctx,
		cancel:         cancel,
		cluster:        membership.NewCluster(),
		tlsCredentials: opts.TLSCredentials,
		raftStore:      raftStore,
		Address:        opts.Addr,
		opts:           opts,
		Config: &raft.Config{
			ElectionTick:    cfg.ElectionTick,
			HeartbeatTick:   cfg.HeartbeatTick,
			Storage:         raftStore,
			MaxSizePerMsg:   cfg.MaxSizePerMsg,
			MaxInflightMsgs: cfg.MaxInflightMsgs,
			Logger:          cfg.Logger,
		},
		forceNewCluster:     opts.ForceNewCluster,
		stopCh:              make(chan struct{}),
		doneCh:              make(chan struct{}),
		removeRaftCh:        make(chan struct{}),
		StateDir:            opts.StateDir,
		joinAddr:            opts.JoinAddr,
		sendTimeout:         2 * time.Second,
		leadershipBroadcast: events.NewBroadcaster(),
	}
	n.memoryStore = store.NewMemoryStore(n)

	if opts.ClockSource == nil {
		n.ticker = clock.NewClock().NewTicker(opts.TickInterval)
	} else {
		n.ticker = opts.ClockSource.NewTicker(opts.TickInterval)
	}
	if opts.SendTimeout != 0 {
		n.sendTimeout = opts.SendTimeout
	}

	n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
	n.wait = newWait()

	return n
}
Example #15
func startNode(id uint64, peers []raft.Peer, iface iface) *node {
	st := raft.NewMemoryStorage()
	rn := raft.StartNode(id, peers, 10, 1, st)
	n := &node{
		Node:    rn,
		id:      id,
		storage: st,
		iface:   iface,
	}
	n.start()
	return n
}
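
This example targets an older etcd/raft API in which StartNode took the election and heartbeat tick counts positionally (examples #6, #17, #20, #23, and #26 date from the same era). Against the Config-based API used by the other examples, the equivalent call would look roughly like the following, with the size limits copied from example #27 rather than prescribed:

c := &raft.Config{
	ID:              id,
	ElectionTick:    10,
	HeartbeatTick:   1,
	Storage:         st,
	MaxSizePerMsg:   1024 * 1024,
	MaxInflightMsgs: 256,
}
rn := raft.StartNode(c, peers)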
Example #16
// Applied > SnapCount should trigger a SaveSnap event
func TestTriggerSnap(t *testing.T) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	defer func() {
		os.RemoveAll(tmpPath)
	}()

	snapc := 10
	st := mockstore.NewRecorder()
	p := mockstorage.NewStorageRecorderStream("")
	srv := &EtcdServer{
		cfg:       &ServerConfig{TickMs: 1},
		snapCount: uint64(snapc),
		r: raftNode{
			Node:        newNodeCommitter(),
			raftStorage: raft.NewMemoryStorage(),
			storage:     p,
			transport:   rafthttp.NewNopTransporter(),
		},
		store:    st,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	srv.applyV2 = &applierV2store{srv}

	srv.kv = mvcc.New(be, &lease.FakeLessor{}, &srv.consistIndex)
	srv.be = be

	srv.start()

	donec := make(chan struct{})
	go func() {
		defer close(donec)
		wcnt := 2 + snapc
		gaction, _ := p.Wait(wcnt)

		// each operation is recorded as a Save
		// (SnapCount+1) * Puts + SaveSnap = (SnapCount+1) * Save + SaveSnap
		if len(gaction) != wcnt {
			// use Errorf: Fatalf must not be called outside the test goroutine
			t.Errorf("len(action) = %d, want %d", len(gaction), wcnt)
			return
		}
		if !reflect.DeepEqual(gaction[wcnt-1], testutil.Action{Name: "SaveSnap"}) {
			t.Errorf("action = %s, want SaveSnap", gaction[wcnt-1])
		}
	}()

	for i := 0; i < snapc+1; i++ {
		srv.Do(context.Background(), pb.Request{Method: "PUT"})
	}

	srv.Stop()
	<-donec
}
Example #17
func restartNode(cfg *ServerConfig, index uint64, snapshot *raftpb.Snapshot) (types.ID, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	w, id, cid, st, ents := readWAL(cfg.WALDir(), index)
	cfg.Cluster.SetID(cid)

	log.Printf("etcdserver: restart member %s in cluster %s at commit index %d", id, cfg.Cluster.ID(), st.Commit)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	n := raft.RestartNode(uint64(id), 10, 1, s)
	return id, n, s, w
}
Example #18
func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatus = n.Status
	return id, cl, n, s, w
}
Example #19
// TestSyncTrigger tests that the server proposes a SYNC request when its sync timer ticks
func TestSyncTrigger(t *testing.T) {
	n := newReadyNode()
	st := make(chan time.Time, 1)
	srv := &EtcdServer{
		cfg: &ServerConfig{TickMs: 1},
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			transport:   rafthttp.NewNopTransporter(),
			storage:     mockstorage.NewStorageRecorder(""),
		},
		store:      mockstore.NewNop(),
		SyncTicker: st,
		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
	}

	// trigger the server to become a leader and accept sync requests
	go func() {
		srv.start()
		n.readyc <- raft.Ready{
			SoftState: &raft.SoftState{
				RaftState: raft.StateLeader,
			},
		}
		// trigger a sync request
		st <- time.Time{}
	}()

	action, _ := n.Wait(1)
	go srv.Stop()

	if len(action) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(action))
	}
	if action[0].Name != "Propose" {
		t.Fatalf("action = %s, want Propose", action[0].Name)
	}
	data := action[0].Params[0].([]byte)
	var req pb.Request
	if err := req.Unmarshal(data); err != nil {
		t.Fatalf("error unmarshalling data: %v", err)
	}
	if req.Method != "SYNC" {
		t.Fatalf("unexpected proposed request: %#v", req.Method)
	}

	// wait on stop message
	<-n.Chan()
}
Example #20
func startMultiNode(id uint64, nt *multiraftNetwork) *multinode {
	st := raft.NewMemoryStorage()

	n := &multinode{
		MultiNode:       raft.StartMultiNode(0),
		nodeid:          id,
		storage:         st,
		pausec:          make(chan bool),
		createGroupChan: make(chan createGroupOp),
		removeGroupChan: make(chan removeGroupOp),
		network:         nt,
	}
	n.start()
	return n
}
Example #21
func startNode(cfg *ServerConfig, cl *cluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)

	// create the snapshot directory and the WAL
	if err = os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		plog.Fatalf("create snapshot directory error: %v", err)
	}
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	// collect peer information for the initial cluster
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s, // storage
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return
}
Example #22
// newRaftNode initiates a raft instance and returns a committed log entry
// channel and error channel. Proposals for log updates are sent over the
// provided proposal channel. All log entries are replayed over the
// commit channel, followed by a nil message (to indicate the channel is
// current), then new log entries. To shut down, close proposeC and read errorC.
func newRaftNode(id int, peers []string, proposeC <-chan string) (<-chan *string, <-chan error) {
	rc := &raftNode{
		proposeC:    proposeC,
		commitC:     make(chan *string),
		errorC:      make(chan error),
		id:          id,
		peers:       peers,
		waldir:      fmt.Sprintf("raftexample-%d", id),
		raftStorage: raft.NewMemoryStorage(),
		stopc:       make(chan struct{}),
		httpstopc:   make(chan struct{}),
		httpdonec:   make(chan struct{}),
		// rest of structure populated after WAL replay
	}
	go rc.startRaft()
	return rc.commitC, rc.errorC
}
Example #23
func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
	cfg.Cluster.SetID(cid)

	log.Printf("etcdserver: restart member %s in cluster %s at commit index %d", id, cfg.Cluster.ID(), st.Commit)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	n := raft.RestartNode(uint64(id), cfg.ElectionTicks, 1, s, 0)
	return id, n, s, w
}
Example #24
func newNode(id uint64, peers []raft.Peer) *node {
	store := raft.NewMemoryStorage()
	n := &node{
		id:    id,
		store: store,
		cfg: &raft.Config{
			ID:              uint64(id),
			ElectionTick:    3,
			HeartbeatTick:   1,
			Storage:         store,
			MaxSizePerMsg:   4096,
			MaxInflightMsgs: 256,
		},
		data: make(map[string]string),
		ctx:  context.TODO(),
	}
	n.raft = raft.StartNode(n.cfg, peers)
	return n
}
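
A node constructed this way still has to be driven: the etcd/raft contract is to Tick the node periodically and, for each Ready batch, persist entries and hard state, send outbound messages, apply committed entries, and only then call Advance. A minimal sketch against the struct above; n.send and n.process are hypothetical helpers, and snapshot handling is omitted:

func (n *node) run() {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			n.raft.Tick()
		case rd := <-n.raft.Ready():
			// persist before communicating: entries and hard state first
			n.store.Append(rd.Entries)
			if !raft.IsEmptyHardState(rd.HardState) {
				n.store.SetHardState(rd.HardState)
			}
			n.send(rd.Messages) // hypothetical: deliver messages to peers
			for _, entry := range rd.CommittedEntries {
				n.process(entry) // hypothetical: apply to the key-value map
			}
			n.raft.Advance()
		case <-n.ctx.Done():
			return
		}
	}
}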
Example #25
// NewNode generates a new Raft node based on a unique ID and
// an address, and optionally an apply callback that is invoked
// when an entry is committed to the log.
func NewNode(id uint64, addr string, cfg *raft.Config, apply ApplyCommand) (*Node, error) {
	if cfg == nil {
		cfg = DefaultNodeConfig()
	}

	store := raft.NewMemoryStorage()
	peers := []raft.Peer{{ID: id}}

	n := &Node{
		ID:      id,
		Ctx:     context.TODO(),
		Cluster: NewCluster(),
		Store:   store,
		Address: addr,
		Cfg: &raft.Config{
			ID:              id,
			ElectionTick:    cfg.ElectionTick,
			HeartbeatTick:   cfg.HeartbeatTick,
			Storage:         store,
			MaxSizePerMsg:   cfg.MaxSizePerMsg,
			MaxInflightMsgs: cfg.MaxInflightMsgs,
			Logger:          cfg.Logger,
		},
		PStore:    make(map[string]string),
		ticker:    time.NewTicker(time.Second),
		stopChan:  make(chan struct{}),
		pauseChan: make(chan bool),
		apply:     apply,
	}

	n.Cluster.AddPeer(
		&Peer{
			NodeInfo: &NodeInfo{
				ID:   id,
				Addr: addr,
			},
		},
	)

	n.Node = raft.StartNode(n.Cfg, peers)
	return n, nil
}
Example #26
func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
	cfg.Cluster.SetID(cid)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			log.Printf("etcdserver: discarding %d uncommitted WAL entries ", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	for _, e := range toAppEnts {
		err := w.SaveEntry(&e)
		if err != nil {
			log.Fatalf("etcdserver: %v", err)
		}
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	log.Printf("etcdserver: forcing restart of member %s in cluster %s at commit index %d", id, cfg.Cluster.ID(), st.Commit)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	n := raft.RestartNode(uint64(id), 10, 1, s)
	return id, n, s, w
}
Example #27
func startNode(id uint64, peers []raft.Peer, iface iface) *node {
	st := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              id,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         st,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}
	rn := raft.StartNode(c, peers)
	n := &node{
		Node:    rn,
		id:      id,
		storage: st,
		iface:   iface,
		pausec:  make(chan bool),
	}
	n.start()
	return n
}
Example #28
// TestApplySnapshotAndCommittedEntries tests that server applies snapshot
// first and then committed entries.
func TestApplySnapshotAndCommittedEntries(t *testing.T) {
	n := newNopReadyNode()
	st := store.NewRecorder()
	cl := newCluster("abc")
	cl.SetStore(store.New())
	storage := raft.NewMemoryStorage()
	s := &EtcdServer{
		cfg: &ServerConfig{},
		r: raftNode{
			Node:        n,
			storage:     &storageRecorder{},
			raftStorage: storage,
			transport:   rafthttp.NewNopTransporter(),
		},
		store:   st,
		cluster: cl,
	}

	s.start()
	req := &pb.Request{Method: "QGET"}
	n.readyc <- raft.Ready{
		Snapshot: raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1}},
		CommittedEntries: []raftpb.Entry{
			{Index: 2, Data: pbutil.MustMarshal(req)},
		},
	}
	// make goroutines move forward to receive snapshot
	actions, _ := st.Wait(2)
	s.Stop()

	if len(actions) != 2 {
		t.Fatalf("len(action) = %d, want 2", len(actions))
	}
	if actions[0].Name != "Recovery" {
		t.Errorf("actions[0] = %s, want %s", actions[0].Name, "Recovery")
	}
	if actions[1].Name != "Get" {
		t.Errorf("actions[1] = %s, want %s", actions[1].Name, "Get")
	}
}
Example #29
func newNode(gid uint32, id uint64, myAddr string) *node {
	fmt.Printf("NEW NODE GID, ID: [%v, %v]\n", gid, id)

	peers := peerPool{
		peers: make(map[uint64]string),
	}
	props := proposals{
		ids: make(map[uint32]chan error),
	}

	store := raft.NewMemoryStorage()
	rc := &task.RaftContext{
		Addr:  myAddr,
		Group: gid,
		Id:    id,
	}

	n := &node{
		ctx:   context.Background(),
		id:    id,
		gid:   gid,
		store: store,
		cfg: &raft.Config{
			ID:              id,
			ElectionTick:    10,
			HeartbeatTick:   1,
			Storage:         store,
			MaxSizePerMsg:   4096,
			MaxInflightMsgs: 256,
		},
		commitCh:    make(chan raftpb.Entry, numPendingMutations),
		peers:       peers,
		props:       props,
		raftContext: rc,
		messages:    make(chan sendmsg, 1000),
	}
	return n
}
Пример #30
0
func TestDoProposal(t *testing.T) {
	tests := []pb.Request{
		{Method: "POST", ID: 1},
		{Method: "PUT", ID: 1},
		{Method: "DELETE", ID: 1},
		{Method: "GET", ID: 1, Quorum: true},
	}
	for i, tt := range tests {
		st := mockstore.NewRecorder()
		srv := &EtcdServer{
			cfg: &ServerConfig{TickMs: 1},
			r: raftNode{
				Node:        newNodeCommitter(),
				storage:     mockstorage.NewStorageRecorder(""),
				raftStorage: raft.NewMemoryStorage(),
				transport:   rafthttp.NewNopTransporter(),
			},
			store:    st,
			reqIDGen: idutil.NewGenerator(0, time.Time{}),
		}
		srv.applyV2 = &applierV2store{srv}
		srv.start()
		resp, err := srv.Do(context.Background(), tt)
		srv.Stop()

		action := st.Action()
		if len(action) != 1 {
			t.Errorf("#%d: len(action) = %d, want 1", i, len(action))
		}
		if err != nil {
			t.Fatalf("#%d: err = %v, want nil", i, err)
		}
		wresp := Response{Event: &store.Event{}}
		if !reflect.DeepEqual(resp, wresp) {
			t.Errorf("#%d: resp = %v, want %v", i, resp, wresp)
		}
	}
}