// makeRaft returns a Raft and its FSM, with snapshots based in the given dir.
func makeRaft(t *testing.T, dir string) (*raft.Raft, *MockFSM) {
	snaps, err := raft.NewFileSnapshotStore(dir, 5, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	fsm := &MockFSM{}
	store := raft.NewInmemStore()
	addr, trans := raft.NewInmemTransport("")

	config := raft.DefaultConfig()
	config.LocalID = raft.ServerID(fmt.Sprintf("server-%s", addr))

	var members raft.Configuration
	members.Servers = append(members.Servers, raft.Server{
		Suffrage: raft.Voter,
		ID:       config.LocalID,
		Address:  addr,
	})

	err = raft.BootstrapCluster(config, store, store, snaps, trans, members)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	raft, err := raft.NewRaft(config, fsm, store, store, snaps, trans)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	timeout := time.After(10 * time.Second)
	for {
		if raft.Leader() != "" {
			break
		}

		select {
		case <-raft.LeaderCh():
		case <-time.After(1 * time.Second):
			// Need to poll because we might have missed the first
			// go with the leader channel.
		case <-timeout:
			t.Fatalf("timed out waiting for leader")
		}
	}

	return raft, fsm
}
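// The MockFSM used by makeRaft above isn't part of this listing. As a rough
// guide, a minimal implementation only needs to satisfy hashicorp/raft's FSM
// interface (Apply, Snapshot, Restore). The sketch below is hypothetical and
// only illustrative: it records applied log data so a test can assert on it,
// and snapshots/restores that slice as JSON. It assumes "encoding/json",
// "io", "sync", and "github.com/hashicorp/raft" are imported.
type MockFSM struct {
	sync.Mutex
	logs [][]byte
}

type MockSnapshot struct {
	logs [][]byte
}

// Apply records the committed log entry and returns a value the caller can
// read back from the ApplyFuture.
func (m *MockFSM) Apply(log *raft.Log) interface{} {
	m.Lock()
	defer m.Unlock()
	m.logs = append(m.logs, log.Data)
	return len(m.logs)
}

// Snapshot captures the current set of applied entries.
func (m *MockFSM) Snapshot() (raft.FSMSnapshot, error) {
	m.Lock()
	defer m.Unlock()
	return &MockSnapshot{logs: m.logs}, nil
}

// Restore replaces the FSM state from a snapshot stream.
func (m *MockFSM) Restore(in io.ReadCloser) error {
	m.Lock()
	defer m.Unlock()
	defer in.Close()
	return json.NewDecoder(in).Decode(&m.logs)
}

// Persist writes the snapshot to the sink as JSON.
func (m *MockSnapshot) Persist(sink raft.SnapshotSink) error {
	if err := json.NewEncoder(sink).Encode(m.logs); err != nil {
		sink.Cancel()
		return err
	}
	return sink.Close()
}

func (m *MockSnapshot) Release() {}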
// setupRaft is used to setup and initialize Raft
func (s *Server) setupRaft() error {
	// If we have an unclean exit then attempt to close the Raft store.
	defer func() {
		if s.raft == nil && s.raftStore != nil {
			if err := s.raftStore.Close(); err != nil {
				s.logger.Printf("[ERR] consul: failed to close Raft store: %v", err)
			}
		}
	}()

	// Create the FSM.
	var err error
	s.fsm, err = NewFSM(s.tombstoneGC, s.config.LogOutput)
	if err != nil {
		return err
	}

	// Create a transport layer.
	trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
	s.raftTransport = trans

	// Make sure we set the LogOutput.
	s.config.RaftConfig.LogOutput = s.config.LogOutput

	// Our version of Raft protocol requires the LocalID to match the network
	// address of the transport.
	s.config.RaftConfig.LocalID = raft.ServerID(trans.LocalAddr())

	// Build an all in-memory setup for dev mode, otherwise prepare a full
	// disk-based setup.
	var log raft.LogStore
	var stable raft.StableStore
	var snap raft.SnapshotStore
	if s.config.DevMode {
		store := raft.NewInmemStore()
		s.raftInmem = store
		stable = store
		log = store
		snap = raft.NewInmemSnapshotStore()
	} else {
		// Create the base raft path.
		path := filepath.Join(s.config.DataDir, raftState)
		if err := ensurePath(path, true); err != nil {
			return err
		}

		// Create the backend raft store for logs and stable storage.
		store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
		if err != nil {
			return err
		}
		s.raftStore = store
		stable = store

		// Wrap the store in a LogCache to improve performance.
		cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
		if err != nil {
			return err
		}
		log = cacheStore

		// Create the snapshot store.
		snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
		if err != nil {
			return err
		}
		snap = snapshots

		// For an existing cluster being upgraded to the new version of
		// Raft, we almost never want to run recovery based on the old
		// peers.json file. We create a peers.info file with a helpful
		// note about where peers.json went, and use that as a sentinel
		// to avoid ingesting the old one that first time (if we have to
		// create the peers.info file because it's not there, we also
		// blow away any existing peers.json file).
		peersFile := filepath.Join(path, "peers.json")
		peersInfoFile := filepath.Join(path, "peers.info")
		if _, err := os.Stat(peersInfoFile); os.IsNotExist(err) {
			if err := ioutil.WriteFile(peersInfoFile, []byte(peersInfoContent), 0755); err != nil {
				return fmt.Errorf("failed to write peers.info file: %v", err)
			}

			// Blow away the peers.json file if present, since the
			// peers.info sentinel wasn't there.
			if _, err := os.Stat(peersFile); err == nil {
				if err := os.Remove(peersFile); err != nil {
					return fmt.Errorf("failed to delete peers.json, please delete manually (see peers.info for details): %v", err)
				}
				s.logger.Printf("[INFO] consul: deleted peers.json file (see peers.info for details)")
			}
		} else if _, err := os.Stat(peersFile); err == nil {
			s.logger.Printf("[INFO] consul: found peers.json file, recovering Raft configuration...")
			configuration, err := raft.ReadPeersJSON(peersFile)
			if err != nil {
				return fmt.Errorf("recovery failed to parse peers.json: %v", err)
			}
			tmpFsm, err := NewFSM(s.tombstoneGC, s.config.LogOutput)
			if err != nil {
				return fmt.Errorf("recovery failed to make temp FSM: %v", err)
			}
			if err := raft.RecoverCluster(s.config.RaftConfig, tmpFsm,
				log, stable, snap, trans, configuration); err != nil {
				return fmt.Errorf("recovery failed: %v", err)
			}
			if err := os.Remove(peersFile); err != nil {
				return fmt.Errorf("recovery failed to delete peers.json, please delete manually (see peers.info for details): %v", err)
			}
			s.logger.Printf("[INFO] consul: deleted peers.json file after successful recovery")
		}
	}

	// If we are in bootstrap or dev mode and the state is clean then we can
	// bootstrap now.
	if s.config.Bootstrap || s.config.DevMode {
		hasState, err := raft.HasExistingState(log, stable, snap)
		if err != nil {
			return err
		}
		if !hasState {
			// TODO (slackpad) - This will need to be updated when
			// we add support for node IDs.
			configuration := raft.Configuration{
				Servers: []raft.Server{
					raft.Server{
						ID:      raft.ServerID(trans.LocalAddr()),
						Address: trans.LocalAddr(),
					},
				},
			}
			if err := raft.BootstrapCluster(s.config.RaftConfig,
				log, stable, snap, trans, configuration); err != nil {
				return err
			}
		}
	}

	// Setup the Raft store.
	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable, snap, trans)
	if err != nil {
		return err
	}

	// Start monitoring leadership.
	go s.monitorLeadership()
	return nil
}
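// monitorLeadership is started above but not shown in this listing, and
// Consul's real version does considerably more bookkeeping. The sketch below
// is only a hedged illustration of its basic shape: raft.LeaderCh() emits
// true when this node gains leadership and false when it loses it, and the
// loop reacts to those transitions. The s.shutdownCh field and the
// leader-only work hooks are assumptions for the sketch, not Consul's actual
// implementation.
func (s *Server) monitorLeadership() {
	for {
		select {
		case isLeader := <-s.raft.LeaderCh():
			if isLeader {
				s.logger.Printf("[INFO] consul: cluster leadership acquired")
				// Start leader-only background work here.
			} else {
				s.logger.Printf("[INFO] consul: cluster leadership lost")
				// Stop leader-only background work here.
			}
		case <-s.shutdownCh:
			return
		}
	}
}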