Example #1
File: draft.go Project: dgraph-io/dgraph
// InitAndStartNode gets called after having at least one membership sync with the cluster.
func (n *node) InitAndStartNode(wal *raftwal.Wal) {
	restart, err := n.initFromWal(wal)
	x.Check(err)

	if restart {
		fmt.Printf("RESTARTING\n")
		n.raft = raft.RestartNode(n.cfg)
	} else {
		if groups().HasPeer(n.gid) {
			n.joinPeers()
			n.raft = raft.StartNode(n.cfg, nil)
		} else {
			peers := []raft.Peer{{ID: n.id}}
			n.raft = raft.StartNode(n.cfg, peers)
			// Trigger election, so this node can become the leader of this single-node cluster.
			n.canCampaign = true
		}
	}
	go n.processCommitCh()
	go n.Run()
	// TODO: Find a better way to snapshot, so we don't lose the membership
	// state information, which isn't persisted.
	// go n.snapshotPeriodically()
	go n.batchAndSendMessages()
}
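This example covers the three standard start paths: RestartNode when the WAL already holds state, StartNode with nil peers when joining an existing group, and StartNode with the node's own ID as the sole peer when bootstrapping a single-node cluster, where canCampaign then triggers the election. Note that newer etcd/raft releases panic when StartNode gets an empty peer list; on those versions the join path uses RestartNode instead.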
Example #2
File: raftlog.go Project: yahoo/coname
// Start implements replication.LogReplicator
func (l *raftLog) Start(lo uint64) error {
	inited, err := l.config.Storage.(*raftStorage).IsInitialized()
	if err != nil {
		return err
	}
	if inited {
		l.config.Applied = lo
		l.node = raft.RestartNode(&l.config)
	} else {
		if lo != 0 {
			log.Panicf("storage uninitialized but state machine not fresh: lo = %d", lo)
		}
		// Add a dummy first entry
		hardState, confState, err := l.config.Storage.InitialState()
		if err != nil {
			return err
		}
		confNodes := make([]raft.Peer, 0, len(confState.Nodes))
		for _, id := range confState.Nodes {
			confNodes = append(confNodes, raft.Peer{ID: id})
		}
		l.config.Storage.(*raftStorage).save(hardState, make([]raftpb.Entry, 1))
		l.node = raft.StartNode(&l.config, confNodes)
	}

	l.leaderHintSet = make(chan bool, COMMITTED_BUFFER)
	l.waitCommitted = make(chan replication.LogEntry, COMMITTED_BUFFER)
	l.stop = make(chan struct{})
	l.stopped = make(chan struct{})
	l.grpcDropClient = make(chan uint64)
	l.grpcClientCache = make(map[uint64]proto.RaftClient)

	go l.run()
	return nil
}
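Config.Applied tells raft the highest log index the local state machine has already consumed, so previously applied entries are not redelivered through Ready; per the raft documentation it should only be set when restarting. The fresh-start branch instead saves a dummy first entry and rebuilds the peer list from the ConfState returned by InitialState.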
Example #3
func NewRaftNode(name string, c *raft.Config, storage *raft.MemoryStorage, lookup NodeLookup, tr Transporter) *raftNode {
	var peers []raft.Peer
	node := raft.StartNode(c, peers)
	var mu sync.Mutex
	removed := make(map[uint64]bool)
	h := Jesteress([]byte(name))

	return &raftNode{
		index: 0,
		term:  0,
		lead:  0,
		hash:  h,

		lt:   time.Now(),
		Node: node,
		cfg:  *c,
		mu:   mu,

		lookup:  lookup,
		removed: removed,

		idgen:       idutil.NewGenerator(uint8(h), time.Now()),
		w:           wait.New(),
		ticker:      time.Tick(500 * time.Millisecond),
		raftStorage: storage,
		storage:     &NoopStorage{},
		transport:   tr,
		applyc:      make(chan apply),
		stopped:     make(chan struct{}),
		done:        make(chan struct{}),
	}
}
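peers is left nil here, so this constructor assumes membership is established some other way; as noted under Example #1, current etcd/raft panics when StartNode gets an empty peer list. The 500ms time.Tick stored in ticker is presumably drained elsewhere to call Node.Tick, the logical clock behind ElectionTick and HeartbeatTick.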
Example #4
File: node.go Project: syhao/sunbase
func StartNode(id uint64, peers []raft.Peer, addrs []string, t Transport) *Node {
	st := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              id,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         st,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}
	rn := raft.StartNode(c, peers)
	n := &Node{
		Node:    rn,
		storage: st,
	}
	n.t = t
	path := fmt.Sprint(DirPath, id)
	f, e := os.OpenFile(path, os.O_CREATE|os.O_RDWR, os.ModePerm)
	if e != nil {
		fmt.Fprintln(os.Stdout, "open the kv file error:", e)
	}
	n.f = f
	go n.t.listen()
	go n.start()
	return n
}
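A failure to open the backing file is only logged here, leaving n.f nil for later use; a stricter version would return the error. The transport listener and the node's main loop then run as goroutines.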
Example #5
File: server.go Project: robszumski/etcd
func startNode(cfg *ServerConfig, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cfg.Cluster.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cfg.Cluster.ID()),
		},
	)
	if err := os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		log.Fatalf("etcdserver create snapshot directory error: %v", err)
	}
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatalf("etcdserver: create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cfg.Cluster).Member(id))
		if err != nil {
			log.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	log.Printf("etcdserver: start member %s in cluster %s", id, cfg.Cluster.ID())
	s = raft.NewMemoryStorage()
	n = raft.StartNode(uint64(id), peers, 10, 1, s)
	return
}
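This snippet predates the raft.Config struct: the old StartNode signature took the ID, peers, election ticks, heartbeat ticks, and storage positionally. Against a current etcd/raft the same call would look roughly like this sketch (the two Max* limits are assumptions, since the old API had no such knobs):

	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   1024 * 1024, // assumed; copied from the other examples here
		MaxInflightMsgs: 256,         // assumed; copied from the other examples here
	}
	n = raft.StartNode(c, peers)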
Example #6
File: raft.go Project: mqliang/etcd
func (rc *raftNode) startRaft() {
	if !fileutil.Exist(rc.snapdir) {
		if err := os.Mkdir(rc.snapdir, 0750); err != nil {
			log.Fatalf("raftexample: cannot create dir for snapshot (%v)", err)
		}
	}
	rc.snapshotter = snap.New(rc.snapdir)
	rc.snapshotterReady <- rc.snapshotter

	oldwal := wal.Exist(rc.waldir)
	rc.wal = rc.replayWAL()

	rpeers := make([]raft.Peer, len(rc.peers))
	for i := range rpeers {
		rpeers[i] = raft.Peer{ID: uint64(i + 1)}
	}
	c := &raft.Config{
		ID:              uint64(rc.id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         rc.raftStorage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}

	if oldwal {
		rc.node = raft.RestartNode(c)
	} else {
		startPeers := rpeers
		if rc.join {
			startPeers = nil
		}
		rc.node = raft.StartNode(c, startPeers)
	}

	ss := &stats.ServerStats{}
	ss.Initialize()

	rc.transport = &rafthttp.Transport{
		ID:          types.ID(rc.id),
		ClusterID:   0x1000,
		Raft:        rc,
		ServerStats: ss,
		LeaderStats: stats.NewLeaderStats(strconv.Itoa(rc.id)),
		ErrorC:      make(chan error),
	}

	rc.transport.Start()
	for i := range rc.peers {
		if i+1 != rc.id {
			rc.transport.AddPeer(types.ID(i+1), []string{rc.peers[i]})
		}
	}

	go rc.serveRaft()
	go rc.serveChannels()
}
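This is the raftexample flow: replay the WAL into raftStorage, then RestartNode when a WAL already existed, otherwise StartNode with the full static peer list, or nil when joining (subject to the empty-peers caveat under Example #1; current raftexample handles joins with RestartNode instead). The snapshotter is published on snapshotterReady before the node starts so the consuming side can pick it up.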
Example #7
File: ctrl.go Project: weaveworks/mesh
func newCtrl(
	self net.Addr,
	others []net.Addr, // to join an existing cluster, pass nil or empty others
	minPeerCount int,
	incomingc <-chan raftpb.Message,
	outgoingc chan<- raftpb.Message,
	unreachablec <-chan uint64,
	confchangec <-chan raftpb.ConfChange,
	snapshotc chan<- raftpb.Snapshot,
	entryc chan<- raftpb.Entry,
	proposalc <-chan []byte,
	removedc chan<- struct{},
	logger mesh.Logger,
) *ctrl {
	storage := raft.NewMemoryStorage()
	raftLogger := &raft.DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
	raftLogger.EnableDebug()
	nodeConfig := &raft.Config{
		ID:              makeRaftPeer(self).ID,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		Applied:         0,    // starting fresh
		MaxSizePerMsg:   4096, // TODO(pb): looks like bytes; confirm that
		MaxInflightMsgs: 256,  // TODO(pb): copied from docs; confirm that
		CheckQuorum:     true, // leader steps down if quorum is not active for an electionTimeout
		Logger:          raftLogger,
	}

	startPeers := makeRaftPeers(others)
	if len(startPeers) == 0 {
		startPeers = nil // special case: join existing
	}
	node := raft.StartNode(nodeConfig, startPeers)

	c := &ctrl{
		self:         makeRaftPeer(self),
		minPeerCount: minPeerCount,
		incomingc:    incomingc,
		outgoingc:    outgoingc,
		unreachablec: unreachablec,
		confchangec:  confchangec,
		snapshotc:    snapshotc,
		entryc:       entryc,
		proposalc:    proposalc,
		stopc:        make(chan struct{}),
		removedc:     removedc,
		terminatedc:  make(chan struct{}),
		storage:      storage,
		node:         node,
		logger:       logger,
	}
	go c.driveRaft() // analogous to raftexample serveChannels
	return c
}
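CheckQuorum makes the leader step down when it has not heard from a quorum within an election timeout, a reasonable guard on mesh links that can partition silently. Both TODO(pb) guesses are right: MaxSizePerMsg limits the byte size of each append message, and 256 is the MaxInflightMsgs value the etcd examples use.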
Example #8
File: node.go Project: CedarLogic/arangodb
func startNode(id uint64, peers []raft.Peer, iface iface) *node {
	st := raft.NewMemoryStorage()
	rn := raft.StartNode(id, peers, 10, 1, st)
	n := &node{
		Node:    rn,
		id:      id,
		storage: st,
		iface:   iface,
	}
	n.start()
	return n
}
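Another use of the pre-Config positional StartNode signature; see the note under Example #5 for the modern equivalent.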
Example #9
File: raft.go Project: oywc410/MYPG
func startNode(cfg *ServerConfig, cl *cluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)

	// create the snapshot directory and the WAL
	if err = os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil {
		plog.Fatalf("create snapshot directory error: %v", err)
	}
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	// gather the member information for the peer list
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s, // storage
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
	}

	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return
}
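advanceTicksForElection pre-advances the node's logical clock so a freshly started member does not wait out a full ElectionTick before it can first campaign, which shortens the initial leader election. Assigning n.Status to the package-level raftStatus presumably lets metrics code take raft status snapshots.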
Example #10
File: raft.go Project: ikatson/etcd
func (rc *raftNode) startRaft() {
	oldwal := wal.Exist(rc.waldir)
	rc.wal = rc.replayWAL()

	rpeers := make([]raft.Peer, len(rc.peers))
	for i := range rpeers {
		rpeers[i] = raft.Peer{ID: uint64(i + 1)}
	}
	c := &raft.Config{
		ID:              uint64(rc.id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         rc.raftStorage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}

	if oldwal {
		rc.node = raft.RestartNode(c)
	} else {
		rc.node = raft.StartNode(c, rpeers)
	}

	ss := &stats.ServerStats{}
	ss.Initialize()

	rc.transport = &rafthttp.Transport{
		ID:          types.ID(rc.id),
		ClusterID:   0x1000,
		Raft:        rc,
		ServerStats: ss,
		LeaderStats: stats.NewLeaderStats(strconv.Itoa(rc.id)),
		ErrorC:      make(chan error),
	}

	rc.transport.Start()
	for i := range rc.peers {
		if i+1 != rc.id {
			rc.transport.AddPeer(types.ID(i+1), []string{rc.peers[i]})
		}
	}

	go rc.serveRaft()
	go rc.serveChannels()
}
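Identical to Example #6 apart from the snapshot directory and snapshotter setup and the join special case; every node here boots with the full static peer list.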
Example #11
func newNode(id uint64, peers []raft.Peer) *node {
	store := raft.NewMemoryStorage()
	n := &node{
		id:    id,
		store: store,
		cfg: &raft.Config{
			ID:              uint64(id),
			ElectionTick:    3,
			HeartbeatTick:   1,
			Storage:         store,
			MaxSizePerMsg:   4096,
			MaxInflightMsgs: 256,
		},
		data: make(map[string]string),
		ctx:  context.TODO(),
	}
	n.raft = raft.StartNode(n.cfg, peers)
	return n
}
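StartNode only creates the node; the caller still has to drive it. A minimal run loop in the shape the raft package documents, with hypothetical saveToStorage, send, and apply helpers standing in for this node's storage, transport, and data map, might look like:

	func (n *node) run() {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				n.raft.Tick() // advances the logical clock behind ElectionTick/HeartbeatTick
			case rd := <-n.raft.Ready():
				// Persist hard state, entries, and snapshot before sending messages.
				n.saveToStorage(rd.HardState, rd.Entries, rd.Snapshot) // hypothetical helper
				n.send(rd.Messages)                                    // hypothetical helper
				for _, entry := range rd.CommittedEntries {
					if entry.Type == raftpb.EntryConfChange {
						var cc raftpb.ConfChange
						if err := cc.Unmarshal(entry.Data); err == nil {
							n.raft.ApplyConfChange(cc)
						}
						continue
					}
					n.apply(entry) // hypothetical helper: update n.data
				}
				n.raft.Advance()
			}
		}
	}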
Example #12
File: node.go Project: abronan/proton
// NewNode generates a new Raft node based on an unique
// ID, an address and optionally: a handler and receive
// only channel to send event when an entry is committed
// to the logs
func NewNode(id uint64, addr string, cfg *raft.Config, apply ApplyCommand) (*Node, error) {
	if cfg == nil {
		cfg = DefaultNodeConfig()
	}

	store := raft.NewMemoryStorage()
	peers := []raft.Peer{{ID: id}}

	n := &Node{
		ID:      id,
		Ctx:     context.TODO(),
		Cluster: NewCluster(),
		Store:   store,
		Address: addr,
		Cfg: &raft.Config{
			ID:              id,
			ElectionTick:    cfg.ElectionTick,
			HeartbeatTick:   cfg.HeartbeatTick,
			Storage:         store,
			MaxSizePerMsg:   cfg.MaxSizePerMsg,
			MaxInflightMsgs: cfg.MaxInflightMsgs,
			Logger:          cfg.Logger,
		},
		PStore:    make(map[string]string),
		ticker:    time.NewTicker(time.Second),
		stopChan:  make(chan struct{}),
		pauseChan: make(chan bool),
		apply:     apply,
	}

	n.Cluster.AddPeer(
		&Peer{
			NodeInfo: &NodeInfo{
				ID:   id,
				Addr: addr,
			},
		},
	)

	n.Node = raft.StartNode(n.Cfg, peers)
	return n, nil
}
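The node bootstraps with itself as the only raft peer and mirrors that membership in its own Cluster bookkeeping; when the caller passes nil, DefaultNodeConfig supplies the tick and size settings, which are then copied field by field around the node's own store.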
Example #13
File: node.go Project: CliffYuan/etcd
func startNode(id uint64, peers []raft.Peer, iface iface) *node {
	st := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              id,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         st,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}
	rn := raft.StartNode(c, peers)
	n := &node{
		Node:    rn,
		id:      id,
		storage: st,
		iface:   iface,
		pausec:  make(chan bool),
	}
	n.start()
	return n
}
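The same test harness as Example #8, ported to the Config-based API and extended with a pausec channel, presumably so tests can pause a node's message handling.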
Example #14
func startNode(cfg *ServerConfig, ids []uint64) (id uint64, n raft.Node, w *wal.WAL) {
	var err error
	// TODO: remove the discoveryURL when it becomes part of the source for
	// generating nodeID.
	member := cfg.Cluster.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(&pb.Metadata{NodeID: member.ID, ClusterID: cfg.Cluster.ID()})
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		log.Fatal(err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cfg.Cluster).Member(id))
		if err != nil {
			log.Fatal(err)
		}
		peers[i] = raft.Peer{ID: id, Context: ctx}
	}
	id = member.ID
	log.Printf("etcdserver: start node %x in cluster %x", id, cfg.Cluster.ID())
	n = raft.StartNode(id, peers, 10, 1)
	return
}
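This variant uses the old positional signature without even a storage argument, placing it earlier still in etcd's history; the WAL is the only persistence visible here.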
Example #15
func newNode(id uint64, addr string, peers []raft.Peer) *node {
	store := raft.NewMemoryStorage()
	n := &node{
		id:    id,
		addr:  addr,
		ctx:   context.TODO(),
		store: store,
		cfg: &raft.Config{
			ID:              id,
			ElectionTick:    5 * hb,
			HeartbeatTick:   hb,
			Storage:         store,
			MaxSizePerMsg:   math.MaxUint16,
			MaxInflightMsgs: 256,
		},
		pstore: make(map[string]string),
		ticker: time.Tick(time.Second),
		done:   make(chan struct{}),
	}

	n.raft = raft.StartNode(n.cfg, peers)
	return n
}
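With hb as the heartbeat tick, ElectionTick = 5*hb satisfies raft's rule that ElectionTick must be strictly greater than HeartbeatTick (the package documentation suggests a 10x ratio to avoid needless leader churn). MaxSizePerMsg of math.MaxUint16 caps each append message at roughly 64KB.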
Example #16
File: raft.go Project: Mic92/docker
// JoinAndStart joins and starts the raft server
func (n *Node) JoinAndStart(ctx context.Context) (err error) {
	ctx, cancel := n.WithContext(ctx)
	defer func() {
		cancel()
		if err != nil {
			n.done()
		}
	}()

	loadAndStartErr := n.loadAndStart(ctx, n.opts.ForceNewCluster)
	if loadAndStartErr != nil && loadAndStartErr != errNoWAL {
		return loadAndStartErr
	}

	snapshot, err := n.raftStore.Snapshot()
	// Snapshot never returns an error
	if err != nil {
		panic("could not get snapshot of raft store")
	}

	n.confState = snapshot.Metadata.ConfState
	n.appliedIndex = snapshot.Metadata.Index
	n.snapshotIndex = snapshot.Metadata.Index

	if loadAndStartErr == errNoWAL {
		if n.opts.JoinAddr != "" {
			c, err := n.ConnectToMember(n.opts.JoinAddr, 10*time.Second)
			if err != nil {
				return err
			}
			client := api.NewRaftMembershipClient(c.Conn)
			defer func() {
				_ = c.Conn.Close()
			}()

			joinCtx, joinCancel := context.WithTimeout(ctx, 10*time.Second)
			defer joinCancel()
			resp, err := client.Join(joinCtx, &api.JoinRequest{
				Addr: n.opts.Addr,
			})
			if err != nil {
				return err
			}

			n.Config.ID = resp.RaftID

			if _, err := n.createWAL(n.opts.ID); err != nil {
				return err
			}

			n.raftNode = raft.StartNode(n.Config, []raft.Peer{})

			if err := n.registerNodes(resp.Members); err != nil {
			if walErr := n.wal.Close(); walErr != nil {
					log.G(ctx).WithError(walErr).Error("raft: error closing WAL")
				}
				return err
			}
		} else {
			// First member in the cluster, self-assign ID
			n.Config.ID = uint64(rand.Int63()) + 1
			peer, err := n.createWAL(n.opts.ID)
			if err != nil {
				return err
			}
			n.raftNode = raft.StartNode(n.Config, []raft.Peer{peer})
			n.campaignWhenAble = true
		}
		atomic.StoreUint32(&n.isMember, 1)
		return nil
	}

	if n.opts.JoinAddr != "" {
		log.G(ctx).Warning("ignoring request to join cluster, because raft state already exists")
	}
	n.campaignWhenAble = true
	n.raftNode = raft.RestartNode(n.Config)
	atomic.StoreUint32(&n.isMember, 1)
	return nil
}
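On the join path the node starts with an empty peer slice because membership arrives in resp.Members and is registered explicitly; on the bootstrap path createWAL returns the self peer and campaignWhenAble defers the election until the node is running. The self-assigned ID adds 1 so it can never be zero, which raft reserves to mean "no node".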
Example #17
File: raft.go Project: ygf11/docker
// JoinAndStart joins and starts the raft server
func (n *Node) JoinAndStart() error {
	loadAndStartErr := n.loadAndStart(n.Ctx, n.opts.ForceNewCluster)
	if loadAndStartErr != nil && loadAndStartErr != errNoWAL {
		n.ticker.Stop()
		return loadAndStartErr
	}

	snapshot, err := n.raftStore.Snapshot()
	// Snapshot never returns an error
	if err != nil {
		panic("could not get snapshot of raft store")
	}

	n.confState = snapshot.Metadata.ConfState
	n.appliedIndex = snapshot.Metadata.Index
	n.snapshotIndex = snapshot.Metadata.Index

	if loadAndStartErr == errNoWAL {
		if n.joinAddr != "" {
			c, err := n.ConnectToMember(n.joinAddr, 10*time.Second)
			if err != nil {
				return err
			}
			client := api.NewRaftMembershipClient(c.Conn)
			defer func() {
				_ = c.Conn.Close()
			}()

			ctx, cancel := context.WithTimeout(n.Ctx, 10*time.Second)
			defer cancel()
			resp, err := client.Join(ctx, &api.JoinRequest{
				Addr: n.Address,
			})
			if err != nil {
				return err
			}

			n.Config.ID = resp.RaftID

			if _, err := n.createWAL(n.opts.ID); err != nil {
				return err
			}

			n.Node = raft.StartNode(n.Config, []raft.Peer{})

			if err := n.registerNodes(resp.Members); err != nil {
				return err
			}
		} else {
			// First member in the cluster, self-assign ID
			n.Config.ID = uint64(rand.Int63()) + 1
			peer, err := n.createWAL(n.opts.ID)
			if err != nil {
				return err
			}
			n.Node = raft.StartNode(n.Config, []raft.Peer{peer})
			if err := n.Campaign(n.Ctx); err != nil {
				return err
			}
		}
		atomic.StoreUint32(&n.isMember, 1)
		return nil
	}

	if n.joinAddr != "" {
		n.Config.Logger.Warning("ignoring request to join cluster, because raft state already exists")
	}
	n.Node = raft.RestartNode(n.Config)
	atomic.StoreUint32(&n.isMember, 1)
	return nil
}
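An earlier revision of Example #16: it campaigns immediately via n.Campaign instead of deferring with campaignWhenAble, and its restart path never campaigns.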
Example #18
File: raft.go Project: amitshukla/docker
// NewNode generates a new Raft node
func NewNode(ctx context.Context, opts NewNodeOptions) (*Node, error) {
	cfg := opts.Config
	if cfg == nil {
		cfg = DefaultNodeConfig()
	}
	if opts.TickInterval == 0 {
		opts.TickInterval = time.Second
	}

	raftID, err := identity.ParseNodeID(opts.ID)
	if err != nil {
		return nil, err
	}

	raftStore := raft.NewMemoryStorage()

	ctx, cancel := context.WithCancel(ctx)

	n := &Node{
		Ctx:            ctx,
		cancel:         cancel,
		cluster:        membership.NewCluster(),
		tlsCredentials: opts.TLSCredentials,
		raftStore:      raftStore,
		Address:        opts.Addr,
		Config: &raft.Config{
			ElectionTick:    cfg.ElectionTick,
			HeartbeatTick:   cfg.HeartbeatTick,
			Storage:         raftStore,
			MaxSizePerMsg:   cfg.MaxSizePerMsg,
			MaxInflightMsgs: cfg.MaxInflightMsgs,
			Logger:          cfg.Logger,
			ID:              raftID,
		},
		forceNewCluster:     opts.ForceNewCluster,
		stopCh:              make(chan struct{}),
		doneCh:              make(chan struct{}),
		StateDir:            opts.StateDir,
		joinAddr:            opts.JoinAddr,
		sendTimeout:         2 * time.Second,
		leadershipBroadcast: events.NewBroadcaster(),
	}
	n.memoryStore = store.NewMemoryStore(n)

	if opts.ClockSource == nil {
		n.ticker = clock.NewClock().NewTicker(opts.TickInterval)
	} else {
		n.ticker = opts.ClockSource.NewTicker(opts.TickInterval)
	}
	if opts.SendTimeout != 0 {
		n.sendTimeout = opts.SendTimeout
	}

	if err := n.loadAndStart(ctx, opts.ForceNewCluster); err != nil {
		n.ticker.Stop()
		return nil, err
	}

	snapshot, err := raftStore.Snapshot()
	// Snapshot never returns an error
	if err != nil {
		panic("could not get snapshot of raft store")
	}

	n.confState = snapshot.Metadata.ConfState
	n.appliedIndex = snapshot.Metadata.Index
	n.snapshotIndex = snapshot.Metadata.Index
	n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
	n.wait = newWait()

	if n.startNodePeers != nil {
		if n.joinAddr != "" {
			c, err := n.ConnectToMember(n.joinAddr, 10*time.Second)
			if err != nil {
				return nil, err
			}
			defer func() {
				_ = c.Conn.Close()
			}()

			ctx, cancel := context.WithTimeout(n.Ctx, 10*time.Second)
			defer cancel()
			resp, err := c.Join(ctx, &api.JoinRequest{
				Addr: n.Address,
			})
			if err != nil {
				return nil, err
			}

			n.Node = raft.StartNode(n.Config, []raft.Peer{})

			if err := n.registerNodes(resp.Members); err != nil {
				return nil, err
			}
		} else {
			n.Node = raft.StartNode(n.Config, n.startNodePeers)
			if err := n.Campaign(n.Ctx); err != nil {
				return nil, err
			}
		}
		return n, nil
	}

	if n.joinAddr != "" {
		n.Config.Logger.Warning("ignoring request to join cluster, because raft state already exists")
	}
	n.Node = raft.RestartNode(n.Config)
	return n, nil
}
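The same join/bootstrap/restart decision as Examples #16 and #17, made here inside the constructor and keyed on startNodePeers, which loadAndStart presumably leaves nil once raft state already exists on disk.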
Example #19
func startNode(id int, chans []chan raftpb.Message) {
	l := log.WithField("id", id)
	storage := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   4096,
		MaxInflightMsgs: 256,
	}
	var peers []raft.Peer
	for i := 1; i <= 3; i++ {
		if id == i {
			continue
		}
		peer := raft.Peer{ID: uint64(i)}
		peers = append(peers, peer)
	}
	l.WithField("peers", peers).Debug("Peers")

	n := raft.StartNode(c, peers)
	tick := time.Tick(3 * time.Second)
	for count := 0; ; count++ {
		l.Debug("Waiting for something to happen")
		select {
		case <-tick:
			l.Debug("Got a tick")
			n.Tick()
			{
				r := 1 + rand.Intn(3)
				l.WithField("r", r).Debugf("Got rand value")
				if id == r {
					// I should send some data.
					data := fmt.Sprintf("This is me: %v at count %v", id, count)
					l.WithField("data", data).Debug("Proposing some data")
					ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
					go n.Propose(ctx, []byte(data))
				}
			}

		case rd := <-n.Ready():
			l.Debug("Got ready")
			// Save initial state.
			if err := save(rd, storage); err != nil {
				log.WithField("error", err).Error("While saving")
				return
			}
			// Send all messages to other nodes.
			for i := 1; i <= 3; i++ {
				if id == i {
					continue
				}
				for _, msg := range rd.Messages {
					chans[i-1] <- msg
				}
			}
			// The snapshot was already handled in save. This demo has no separate
			// state machine, so committed entries are simply appended to storage.
			// (No config change in test.)
			storage.Append(rd.CommittedEntries)
			n.Advance()

		case msg := <-chans[id-1]:
			l.WithField("msg", msg.String()).Debug("GOT MESSAGE")
			ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
			n.Step(ctx, msg)
		}
	}
}
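The save helper is not shown here; a plausible version against raft.MemoryStorage, restricted to calls the package actually provides (an assumption, not the original code), would be:

	// Sketch of the unshown save helper: persist the durable parts of a Ready.
	func save(rd raft.Ready, storage *raft.MemoryStorage) error {
		if !raft.IsEmptyHardState(rd.HardState) {
			if err := storage.SetHardState(rd.HardState); err != nil {
				return err
			}
		}
		if !raft.IsEmptySnap(rd.Snapshot) {
			if err := storage.ApplySnapshot(rd.Snapshot); err != nil {
				return err
			}
		}
		return storage.Append(rd.Entries)
	}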
Example #20
File: server.go Project: digideskio/etcd
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) *EtcdServer {
	m := cfg.Cluster.FindName(cfg.Name)
	if m == nil {
		// Should never happen
		log.Fatalf("could not find name %v in cluster!", cfg.Name)
	}
	snapdir := path.Join(cfg.DataDir, "snap")
	if err := os.MkdirAll(snapdir, privateDirMode); err != nil {
		log.Fatalf("etcdserver: cannot create snapshot directory: %v", err)
	}
	ss := snap.New(snapdir)
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	var err error
	waldir := path.Join(cfg.DataDir, "wal")
	if !wal.Exist(waldir) {
		if w, err = wal.Create(waldir); err != nil {
			log.Fatal(err)
		}
		n = raft.StartNode(m.ID, cfg.Cluster.IDs(), 10, 1)
	} else {
		var index int64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			log.Fatal(err)
		}
		if snapshot != nil {
			log.Printf("etcdserver: restart from snapshot at index %d", snapshot.Index)
			st.Recovery(snapshot.Data)
			index = snapshot.Index
		}

		// restart a node from previous wal
		if w, err = wal.OpenAtIndex(waldir, index); err != nil {
			log.Fatal(err)
		}
		wid, st, ents, err := w.ReadAll()
		if err != nil {
			log.Fatal(err)
		}
		// TODO(xiangli): save/recovery nodeID?
		if wid != 0 {
			log.Fatalf("unexpected nodeid %d: nodeid should always be zero until we save nodeid into wal", wid)
		}
		n = raft.RestartNode(m.ID, cfg.Cluster.IDs(), 10, 1, snapshot, st, ents)
	}

	cls := NewClusterStore(st, *cfg.Cluster)

	s := &EtcdServer{
		store: st,
		node:  n,
		name:  cfg.Name,
		storage: struct {
			*wal.WAL
			*snap.Snapshotter
		}{w, ss},
		send:         Sender(cfg.Transport, cls),
		clientURLs:   cfg.ClientURLs,
		ticker:       time.Tick(100 * time.Millisecond),
		syncTicker:   time.Tick(500 * time.Millisecond),
		snapCount:    cfg.SnapCount,
		ClusterStore: cls,
	}
	return s
}
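The oldest snippet in the collection: wal.Create takes no metadata yet, StartNode and RestartNode use positional arguments (including the snapshot, hard state, and entries for a restart), and MemoryStorage does not exist. It reads best as a contrast with Examples #6 and #10.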