Example #1
// Open opens the store. If enableSingle is set, and there are no existing peers,
// then this node becomes the first node, and therefore leader, of the cluster.
func (s *Store) Open(enableSingle bool) error {
	if err := os.MkdirAll(s.raftDir, 0755); err != nil {
		return err
	}

	db, err := s.open()
	if err != nil {
		return err
	}
	s.db = db

	// Get the Raft configuration for this store.
	config := s.raftConfig()

	// Check for any existing peers.
	peers, err := readPeersJSON(filepath.Join(s.raftDir, "peers.json"))
	if err != nil {
		return err
	}
	s.joinRequired = len(peers) <= 1

	// Allow the node to enter single-node mode, potentially electing itself, if
	// explicitly enabled and there is at most one node in the cluster already.
	if enableSingle && len(peers) <= 1 {
		s.logger.Println("enabling single-node mode")
		config.EnableSingleNode = true
		config.DisableBootstrapAfterElect = false
	}

	// Setup Raft communication.
	transport := raft.NewNetworkTransport(s.raftTransport, 3, 10*time.Second, os.Stderr)

	// Create peer storage if necessary.
	if s.peerStore == nil {
		s.peerStore = raft.NewJSONPeers(s.raftDir, transport)
	}

	// Create the snapshot store. This allows Raft to truncate the log.
	snapshots, err := raft.NewFileSnapshotStore(s.raftDir, retainSnapshotCount, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create the log store and stable store.
	logStore, err := raftboltdb.NewBoltStore(filepath.Join(s.raftDir, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}

	// Instantiate the Raft system.
	ra, err := raft.NewRaft(config, s, logStore, logStore, snapshots, s.peerStore, transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	s.raft = ra

	return nil
}
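
The readPeersJSON helper called above is not part of the listing. A minimal sketch of what it plausibly looks like, assuming peers.json holds a plain JSON array of peer addresses and that a missing file simply means no peers:

import (
	"encoding/json"
	"io/ioutil"
	"os"
)

// readPeersJSON returns the peer addresses recorded in the given file.
// A missing or empty file yields an empty peer list rather than an error.
func readPeersJSON(path string) ([]string, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	if len(b) == 0 {
		return nil, nil
	}
	var peers []string
	if err := json.Unmarshal(b, &peers); err != nil {
		return nil, err
	}
	return peers, nil
}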
Example #2
func (r *localRaft) openRaft() error {
	s := r.store
	// Setup raft configuration.
	config := raft.DefaultConfig()
	config.Logger = s.Logger
	config.HeartbeatTimeout = s.HeartbeatTimeout
	config.ElectionTimeout = s.ElectionTimeout
	config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
	config.CommitTimeout = s.CommitTimeout

	// If no peers are set in the config then start as a single server.
	config.EnableSingleNode = (len(s.peers) == 0)

	// Ensure our addr is in the peer list
	if config.EnableSingleNode {
		s.peers = append(s.peers, s.Addr.String())
	}

	// Build raft layer to multiplex listener.
	r.raftLayer = newRaftLayer(s.RaftListener, s.Addr)

	// Create a transport layer
	r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, os.Stderr)

	// Create peer storage.
	r.peerStore = raft.NewJSONPeers(s.path, r.transport)

	// Create the log store and stable store.
	store, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}
	r.raftStore = store

	// Create the snapshot store.
	snapshots, err := raft.NewFileSnapshotStore(s.path, raftSnapshotsRetained, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create raft log.
	ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	r.raft = ra

	return nil
}
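
Several of these examples build the transport from a newRaftLayer helper rather than a bare listener. raft.NewNetworkTransport accepts any raft.StreamLayer, which in the pre-1.0 hashicorp/raft API is just a net.Listener that can also dial peers. A minimal sketch of such a layer (field names are assumptions; a real multiplexed setup would also write a protocol header byte in Dial):

import (
	"net"
	"time"
)

// raftLayer adapts a plain listener into a raft.StreamLayer.
type raftLayer struct {
	net.Listener           // Accept and Close come from the wrapped listener
	advertise net.Addr     // address other nodes should dial
}

func newRaftLayer(ln net.Listener, advertise net.Addr) *raftLayer {
	return &raftLayer{Listener: ln, advertise: advertise}
}

// Addr reports the advertised address rather than the bound one.
func (l *raftLayer) Addr() net.Addr { return l.advertise }

// Dial opens an outbound connection to another Raft node.
func (l *raftLayer) Dial(address string, timeout time.Duration) (net.Conn, error) {
	return net.DialTimeout("tcp", address, timeout)
}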
Example #3
// setupRaft is used to setup and initialize Raft
func (s *Server) setupRaft() error {
	// If we are in bootstrap mode, enable a single node cluster
	if s.config.Bootstrap || (s.config.DevMode && !s.config.DevDisableBootstrap) {
		s.config.RaftConfig.EnableSingleNode = true
	}

	// Create the FSM
	var err error
	s.fsm, err = NewFSM(s.evalBroker, s.periodicDispatcher, s.config.LogOutput)
	if err != nil {
		return err
	}

	// Create a transport layer
	trans := raft.NewNetworkTransport(s.raftLayer, 3, s.config.RaftTimeout,
		s.config.LogOutput)
	s.raftTransport = trans

	// Create the backend raft store for logs and stable storage
	var log raft.LogStore
	var stable raft.StableStore
	var snap raft.SnapshotStore
	var peers raft.PeerStore
	if s.config.DevMode {
		store := raft.NewInmemStore()
		s.raftInmem = store
		stable = store
		log = store
		snap = raft.NewDiscardSnapshotStore()
		peers = &raft.StaticPeers{}
		s.raftPeers = peers

	} else {
		// Create the base raft path
		path := filepath.Join(s.config.DataDir, raftState)
		if err := ensurePath(path, true); err != nil {
			return err
		}

		// Create the BoltDB backend
		store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
		if err != nil {
			return err
		}
		s.raftStore = store
		stable = store

		// Wrap the store in a LogCache to improve performance
		cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
		if err != nil {
			store.Close()
			return err
		}
		log = cacheStore

		// Create the snapshot store
		snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
		if err != nil {
			if s.raftStore != nil {
				s.raftStore.Close()
			}
			return err
		}
		snap = snapshots

		// Setup the peer store
		s.raftPeers = raft.NewJSONPeers(path, trans)
		peers = s.raftPeers
	}

	// Ensure local host is always included if we are in bootstrap mode
	if s.config.RaftConfig.EnableSingleNode {
		p, err := peers.Peers()
		if err != nil {
			if s.raftStore != nil {
				s.raftStore.Close()
			}
			return err
		}
		if !raft.PeerContained(p, trans.LocalAddr()) {
			peers.SetPeers(raft.AddUniquePeer(p, trans.LocalAddr()))
		}
	}

	// Make sure we set the LogOutput
	s.config.RaftConfig.LogOutput = s.config.LogOutput

	// Setup the leader channel
	leaderCh := make(chan bool, 1)
	s.config.RaftConfig.NotifyCh = leaderCh
	s.leaderCh = leaderCh

	// Setup the Raft store
	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable,
		snap, peers, trans)
	if err != nil {
		if s.raftStore != nil {
			s.raftStore.Close()
		}
		trans.Close()
		return err
	}
	return nil
}
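
ensurePath is a helper from the surrounding codebase, not the raft library. A plausible sketch, assuming the boolean flags whether path itself is a directory (otherwise only its parent is created):

import (
	"os"
	"path/filepath"
)

// ensurePath makes sure the directory for path exists. If dir is false,
// path is treated as a file name and only its parent directory is created.
func ensurePath(path string, dir bool) error {
	if !dir {
		path = filepath.Dir(path)
	}
	return os.MkdirAll(path, 0755)
}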
Example #4
// setupRaft is used to setup and initialize Raft
func (s *Server) setupRaft() error {
	// If we are in bootstrap mode, enable a single node cluster
	if s.config.Bootstrap {
		s.config.RaftConfig.EnableSingleNode = true
	}

	// Create the base path
	path := filepath.Join(s.config.DataDir, raftState)
	if err := ensurePath(path, true); err != nil {
		return err
	}

	// Create the FSM
	var err error
	s.fsm, err = NewFSM(s.config.LogOutput)
	if err != nil {
		return err
	}

	// Create the MDB store for logs and stable storage
	store, err := raftmdb.NewMDBStoreWithSize(path, raftDBSize)
	if err != nil {
		return err
	}
	s.raftStore = store

	// Create the snapshot store
	snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
	if err != nil {
		store.Close()
		return err
	}

	// Create a transport layer
	trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
	s.raftTransport = trans

	// Setup the peer store
	s.raftPeers = raft.NewJSONPeers(path, trans)

	// Ensure local host is always included if we are in bootstrap mode
	if s.config.Bootstrap {
		peers, err := s.raftPeers.Peers()
		if err != nil {
			store.Close()
			return err
		}
		if !raft.PeerContained(peers, trans.LocalAddr()) {
			s.raftPeers.SetPeers(raft.AddUniquePeer(peers, trans.LocalAddr()))
		}
	}

	// Make sure we set the LogOutput
	s.config.RaftConfig.LogOutput = s.config.LogOutput

	// Setup the Raft store
	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, store, store,
		snapshots, s.raftPeers, trans)
	if err != nil {
		store.Close()
		trans.Close()
		return err
	}

	// Start monitoring leadership
	go s.monitorLeadership()
	return nil
}
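
The monitorLeadership goroutine started at the end is not shown. The usual pattern watches raft.Raft.LeaderCh() and starts or stops leader-only work accordingly; a minimal sketch, where the logger and shutdown channel on the server are assumptions:

// monitorLeadership reacts to leadership changes reported by Raft.
func (s *Server) monitorLeadership() {
	for {
		select {
		case isLeader := <-s.raft.LeaderCh():
			if isLeader {
				s.logger.Printf("[INFO] cluster leadership acquired")
				// start leader-only background work here
			} else {
				s.logger.Printf("[INFO] cluster leadership lost")
				// stop leader-only background work here
			}
		case <-s.shutdownCh:
			return
		}
	}
}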
Example #5
func (r *localRaft) open() error {
	s := r.store
	// Setup raft configuration.
	config := raft.DefaultConfig()
	config.LogOutput = ioutil.Discard

	if s.clusterTracingEnabled {
		config.Logger = s.Logger
	}
	config.HeartbeatTimeout = s.HeartbeatTimeout
	config.ElectionTimeout = s.ElectionTimeout
	config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
	config.CommitTimeout = s.CommitTimeout

	// If no peers are set in the config or there is one and we are it, then start as a single server.
	if len(s.peers) <= 1 {
		config.EnableSingleNode = true
		// Ensure we can always become the leader
		config.DisableBootstrapAfterElect = false
		// Don't shutdown raft automatically if we renamed our hostname back to a previous name
		config.ShutdownOnRemove = false
	}

	// Build raft layer to multiplex listener.
	r.raftLayer = newRaftLayer(s.RaftListener, s.RemoteAddr)

	// Create a transport layer
	r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, config.LogOutput)

	// Create peer storage.
	r.peerStore = raft.NewJSONPeers(s.path, r.transport)

	peers, err := r.peerStore.Peers()
	if err != nil {
		return err
	}

	// Make sure our address is in the raft peers or we won't be able to boot into the cluster
	if len(peers) > 0 && !raft.PeerContained(peers, s.RemoteAddr.String()) {
		s.Logger.Printf("%v is not in the list of raft peers. Please update %v/peers.json on all raft nodes to have the same contents.", s.RemoteAddr.String(), s.Path())
		return fmt.Errorf("peers out of sync: %v not in %v", s.RemoteAddr.String(), peers)
	}

	// Create the log store and stable store.
	store, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}
	r.raftStore = store

	// Create the snapshot store.
	snapshots, err := raft.NewFileSnapshotStore(s.path, raftSnapshotsRetained, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create raft log.
	ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	r.raft = ra

	return nil
}
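
The (*storeFSM)(s) conversion works because storeFSM is declared as a named type with the same underlying type as the store, so the one struct can satisfy raft.FSM without exposing Apply/Snapshot/Restore on the store's public API. A minimal sketch of that pattern (the method bodies are placeholders, not the project's real logic):

import (
	"io"

	"github.com/hashicorp/raft"
)

// storeFSM shares store's memory layout, making (*storeFSM)(s) a legal
// pointer conversion.
type storeFSM store

// Apply applies a committed Raft log entry to the store.
func (fsm *storeFSM) Apply(l *raft.Log) interface{} {
	// decode l.Data and mutate the underlying store
	return nil
}

// Snapshot captures a point-in-time view of the store.
func (fsm *storeFSM) Snapshot() (raft.FSMSnapshot, error) {
	// serialize current state under the store's lock
	return nil, nil
}

// Restore replaces the store's state from a snapshot stream.
func (fsm *storeFSM) Restore(rc io.ReadCloser) error {
	defer rc.Close()
	// decode the stream and overwrite current state
	return nil
}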
Example #6
// setupRaft is used to setup and initialize Raft
func (s *Server) setupRaft() error {
	// If we have an unclean exit then attempt to close the Raft store.
	defer func() {
		if s.raft == nil && s.raftStore != nil {
			if err := s.raftStore.Close(); err != nil {
				s.logger.Printf("[ERR] consul: failed to close Raft store: %v", err)
			}
		}
	}()

	// Create the FSM.
	var err error
	s.fsm, err = NewFSM(s.tombstoneGC, s.config.LogOutput)
	if err != nil {
		return err
	}

	// Create a transport layer.
	trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
	s.raftTransport = trans

	// Make sure we set the LogOutput.
	s.config.RaftConfig.LogOutput = s.config.LogOutput

	// Our version of Raft protocol requires the LocalID to match the network
	// address of the transport.
	s.config.RaftConfig.LocalID = raft.ServerID(trans.LocalAddr())

	// Build an all in-memory setup for dev mode, otherwise prepare a full
	// disk-based setup.
	var log raft.LogStore
	var stable raft.StableStore
	var snap raft.SnapshotStore
	if s.config.DevMode {
		store := raft.NewInmemStore()
		s.raftInmem = store
		stable = store
		log = store
		snap = raft.NewInmemSnapshotStore()
	} else {
		// Create the base raft path.
		path := filepath.Join(s.config.DataDir, raftState)
		if err := ensurePath(path, true); err != nil {
			return err
		}

		// Create the backend raft store for logs and stable storage.
		store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
		if err != nil {
			return err
		}
		s.raftStore = store
		stable = store

		// Wrap the store in a LogCache to improve performance.
		cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
		if err != nil {
			return err
		}
		log = cacheStore

		// Create the snapshot store.
		snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
		if err != nil {
			return err
		}
		snap = snapshots

		// For an existing cluster being upgraded to the new version of
		// Raft, we almost never want to run recovery based on the old
		// peers.json file. We create a peers.info file with a helpful
		// note about where peers.json went, and use that as a sentinel
		// to avoid ingesting the old one that first time (if we have to
		// create the peers.info file because it's not there, we also
		// blow away any existing peers.json file).
		peersFile := filepath.Join(path, "peers.json")
		peersInfoFile := filepath.Join(path, "peers.info")
		if _, err := os.Stat(peersInfoFile); os.IsNotExist(err) {
			if err := ioutil.WriteFile(peersInfoFile, []byte(peersInfoContent), 0755); err != nil {
				return fmt.Errorf("failed to write peers.info file: %v", err)
			}

			// Blow away the peers.json file if present, since the
			// peers.info sentinel wasn't there.
			if _, err := os.Stat(peersFile); err == nil {
				if err := os.Remove(peersFile); err != nil {
					return fmt.Errorf("failed to delete peers.json, please delete manually (see peers.info for details): %v", err)
				}
				s.logger.Printf("[INFO] consul: deleted peers.json file (see peers.info for details)")
			}
		} else if _, err := os.Stat(peersFile); err == nil {
			s.logger.Printf("[INFO] consul: found peers.json file, recovering Raft configuration...")
			configuration, err := raft.ReadPeersJSON(peersFile)
			if err != nil {
				return fmt.Errorf("recovery failed to parse peers.json: %v", err)
			}
			tmpFsm, err := NewFSM(s.tombstoneGC, s.config.LogOutput)
			if err != nil {
				return fmt.Errorf("recovery failed to make temp FSM: %v", err)
			}
			if err := raft.RecoverCluster(s.config.RaftConfig, tmpFsm,
				log, stable, snap, trans, configuration); err != nil {
				return fmt.Errorf("recovery failed: %v", err)
			}
			if err := os.Remove(peersFile); err != nil {
				return fmt.Errorf("recovery failed to delete peers.json, please delete manually (see peers.info for details): %v", err)
			}
			s.logger.Printf("[INFO] consul: deleted peers.json file after successful recovery")
		}
	}

	// If we are in bootstrap or dev mode and the state is clean then we can
	// bootstrap now.
	if s.config.Bootstrap || s.config.DevMode {
		hasState, err := raft.HasExistingState(log, stable, snap)
		if err != nil {
			return err
		}
		if !hasState {
			// TODO (slackpad) - This will need to be updated when
			// we add support for node IDs.
			configuration := raft.Configuration{
				Servers: []raft.Server{
					raft.Server{
						ID:      raft.ServerID(trans.LocalAddr()),
						Address: trans.LocalAddr(),
					},
				},
			}
			if err := raft.BootstrapCluster(s.config.RaftConfig,
				log, stable, snap, trans, configuration); err != nil {
				return err
			}
		}
	}

	// Setup the Raft store.
	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable, snap, trans)
	if err != nil {
		return err
	}

	// Start monitoring leadership.
	go s.monitorLeadership()
	return nil
}
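
For reference, the legacy peers.json consumed by raft.ReadPeersJSON above is just a JSON array of network addresses, for example:

["10.0.1.1:8300", "10.0.1.2:8300", "10.0.1.3:8300"]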
Example #7
func NewServer(cfg *config.Config) (*Server, error) {
	server := &Server{
		cfg:   cfg,
		conns: make(map[string]*rpc.Client),
	}

	// setup listener
	l, err := net.Listen("tcp", cfg.Server.Listen)
	if err != nil {
		return nil, err
	}

	// setup mux
	mux := proto.NewMux(l, nil)
	raftl := mux.Handle(proto.RaftProto)
	msgpackl := mux.Handle(proto.RpcProto)
	httpl := mux.HandleThird(cmux.HTTP1())
	redisl := mux.HandleThird(cmux.Any())

	// setup http server
	go http.Serve(httpl, nil)

	// setup rpc server
	kvs := NewKVS(server)
	rpcServer := rpc.NewServer()
	err = rpcServer.RegisterName("KV", kvs)
	if err != nil {
		return nil, err
	}
	// support msgpack rpc protocol
	go proto.ServeMsgpack(msgpackl, rpcServer)
	// support redis protocol
	go proto.ServeRedis(redisl, rpcServer)

	// setup raft transporter
	advertise, err := net.ResolveTCPAddr("tcp", cfg.Raft.Advertise)
	if err != nil {
		return nil, err
	}
	layer := NewRaftLayer(advertise, raftl)
	trans := raft.NewNetworkTransport(
		layer,
		5,
		time.Second,
		os.Stderr,
	)

	// setup raft fsm
	fsm, err := NewFSM(&cfg.DB)
	if err != nil {
		return nil, err
	}

	// setup raft
	raft, err := NewRaft(&cfg.Raft, fsm, trans)
	if err != nil {
		return nil, err
	}

	server.raftLayer = layer
	server.raftTrans = trans
	server.raft = raft
	server.rpcServer = rpcServer
	server.fsm = fsm
	server.kvs = kvs
	server.mux = mux
	return server, nil
}
Example #8
// setupRaft is used to setup and initialize Raft
func (s *Server) setupRaft() error {
	// If we are in bootstrap mode, enable a single node cluster
	if s.config.Bootstrap || s.config.DevMode {
		s.config.RaftConfig.EnableSingleNode = true
	}

	// Create the FSM
	var err error
	s.fsm, err = NewFSM(s.tombstoneGC, s.config.LogOutput)
	if err != nil {
		return err
	}

	// Create a transport layer
	trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
	s.raftTransport = trans

	var log raft.LogStore
	var stable raft.StableStore
	var snap raft.SnapshotStore

	if s.config.DevMode {
		store := raft.NewInmemStore()
		s.raftInmem = store
		stable = store
		log = store
		snap = raft.NewDiscardSnapshotStore()
		s.raftPeers = &raft.StaticPeers{}
	} else {
		// Create the base raft path
		path := filepath.Join(s.config.DataDir, raftState)
		if err := ensurePath(path, true); err != nil {
			return err
		}

		// Create the backend raft store for logs and stable storage
		store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
		if err != nil {
			return err
		}
		s.raftStore = store
		stable = store

		// Wrap the store in a LogCache to improve performance
		cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
		if err != nil {
			store.Close()
			return err
		}
		log = cacheStore

		// Create the snapshot store
		snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
		if err != nil {
			store.Close()
			return err
		}
		snap = snapshots

		// Setup the peer store
		s.raftPeers = raft.NewJSONPeers(path, trans)
	}

	// Ensure local host is always included if we are in bootstrap mode
	if s.config.Bootstrap {
		peerAddrs, err := s.raftPeers.Peers()
		if err != nil {
			if s.raftStore != nil {
				s.raftStore.Close()
			}
			return err
		}
		if !raft.PeerContained(peerAddrs, trans.LocalAddr()) {
			s.raftPeers.SetPeers(raft.AddUniquePeer(peerAddrs, trans.LocalAddr()))
		}
	}

	// Make sure we set the LogOutput
	s.config.RaftConfig.LogOutput = s.config.LogOutput

	// Setup the Raft store
	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable,
		snap, s.raftPeers, trans)
	if err != nil {
		if s.raftStore != nil {
			s.raftStore.Close()
		}
		trans.Close()
		return err
	}

	// Start monitoring leadership
	go s.monitorLeadership()
	return nil
}
Example #9
func (r *localRaft) open() error {
	r.closing = make(chan struct{})

	s := r.store
	// Setup raft configuration.
	config := raft.DefaultConfig()
	config.LogOutput = ioutil.Discard

	if s.clusterTracingEnabled {
		config.Logger = s.Logger
	}
	config.HeartbeatTimeout = s.HeartbeatTimeout
	config.ElectionTimeout = s.ElectionTimeout
	config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
	config.CommitTimeout = s.CommitTimeout
	// Since we never actually call `removePeer`, this is safe.
	// If we decide to call removePeer in the future, we will have to re-evaluate how to handle this.
	config.ShutdownOnRemove = false

	// If no peers are set in the config or there is one and we are it, then start as a single server.
	if len(s.peers) <= 1 {
		config.EnableSingleNode = true
		// Ensure we can always become the leader
		config.DisableBootstrapAfterElect = false
	}

	// Build raft layer to multiplex listener.
	r.raftLayer = newRaftLayer(s.RaftListener, s.RemoteAddr)

	// Create a transport layer
	r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, config.LogOutput)

	// Create peer storage.
	r.peerStore = raft.NewJSONPeers(s.path, r.transport)

	peers, err := r.peerStore.Peers()
	if err != nil {
		return err
	}

	// For single-node clusters, we can update the raft peers before we start the cluster if the hostname
	// has changed.
	if config.EnableSingleNode {
		if err := r.peerStore.SetPeers([]string{s.RemoteAddr.String()}); err != nil {
			return err
		}
		peers = []string{s.RemoteAddr.String()}
	}

	// If we have multiple nodes in the cluster, make sure our address is in the raft peers or
	// we won't be able to boot into the cluster because the other peers will reject our new hostname.  This
	// is difficult to resolve automatically because we need to have all the raft peers agree on the current members
	// of the cluster before we can change them.
	if len(peers) > 0 && !raft.PeerContained(peers, s.RemoteAddr.String()) {
		s.Logger.Printf("%s is not in the list of raft peers. Please update %v/peers.json on all raft nodes to have the same contents.", s.RemoteAddr.String(), s.Path())
		return fmt.Errorf("peers out of sync: %v not in %v", s.RemoteAddr.String(), peers)
	}

	// Create the log store and stable store.
	store, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}
	r.raftStore = store

	// Create the snapshot store.
	snapshots, err := raft.NewFileSnapshotStore(s.path, raftSnapshotsRetained, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create raft log.
	ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	r.raft = ra

	r.wg.Add(1)
	go r.logLeaderChanges()

	return nil
}
Example #10
// Open starts the raft consensus and opens the store.
func (s *Store) Open() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Set up logging.
	s.logger = log.New(s.LogOutput, "[discoverd] ", log.LstdFlags)

	// Require listener & advertise address.
	if s.Listener == nil {
		return ErrListenerRequired
	} else if s.Advertise == nil {
		return ErrAdvertiseRequired
	}

	// Create root directory.
	if err := os.MkdirAll(s.path, 0777); err != nil {
		return err
	}

	// Create raft configuration.
	config := raft.DefaultConfig()
	config.HeartbeatTimeout = s.HeartbeatTimeout
	config.ElectionTimeout = s.ElectionTimeout
	config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
	config.CommitTimeout = s.CommitTimeout
	config.LogOutput = s.LogOutput
	config.EnableSingleNode = s.EnableSingleNode
	config.ShutdownOnRemove = false

	// Create multiplexing transport layer.
	raftLayer := newRaftLayer(s.Listener, s.Advertise)

	// Begin listening to TCP port.
	s.transport = raft.NewNetworkTransport(raftLayer, 3, 10*time.Second, os.Stderr)

	// Setup storage layers.
	s.peerStore = raft.NewJSONPeers(s.path, s.transport)
	stableStore, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("stable store: %s", err)
	}
	s.stableStore = stableStore

	// Wrap the store in a LogCache to improve performance
	cacheStore, err := raft.NewLogCache(512, stableStore)
	if err != nil {
		stableStore.Close()
		return fmt.Errorf("log cache: %s", err)
	}

	// Create the snapshot store.
	ss, err := raft.NewFileSnapshotStore(s.path, 2, os.Stderr)
	if err != nil {
		return fmt.Errorf("snapshot store: %s", err)
	}

	// Create raft log.
	//
	// The mutex must be unlocked as initializing the raft store may
	// call back into methods which acquire the lock (e.g. Restore)
	s.mu.Unlock()
	r, err := raft.NewRaft(config, s, cacheStore, stableStore, ss, s.peerStore, s.transport)
	s.mu.Lock()
	if err != nil {
		return fmt.Errorf("raft: %s", err)
	}

	// make sure the store was not closed whilst the mutex was unlocked
	select {
	case <-s.closing:
		return ErrShutdown
	default:
	}

	s.raft = r

	// Start goroutine to monitor leadership changes.
	s.wg.Add(1)
	go s.monitorLeaderCh()

	// Start goroutine to check for instance expiry.
	s.wg.Add(1)
	go s.expirer()

	return nil
}
Example #11
func (r *raftState) open(s *store, ln net.Listener, initializePeers []string) error {
	r.ln = ln
	r.closing = make(chan struct{})

	// Setup raft configuration.
	config := raft.DefaultConfig()
	config.LogOutput = ioutil.Discard

	if r.config.ClusterTracing {
		config.Logger = r.logger
	}
	config.HeartbeatTimeout = time.Duration(r.config.HeartbeatTimeout)
	config.ElectionTimeout = time.Duration(r.config.ElectionTimeout)
	config.LeaderLeaseTimeout = time.Duration(r.config.LeaderLeaseTimeout)
	config.CommitTimeout = time.Duration(r.config.CommitTimeout)
	// Since we never actually call `removePeer`, this is safe.
	// If we decide to call removePeer in the future, we will have to re-evaluate how to handle this.
	config.ShutdownOnRemove = false

	// Build raft layer to multiplex listener.
	r.raftLayer = newRaftLayer(r.addr, r.ln)

	// Create a transport layer
	r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, config.LogOutput)

	// Create peer storage.
	r.peerStore = &peerStore{}

	// This server is joining the raft cluster for the first time if initializePeers is non-empty
	if len(initializePeers) > 0 {
		if err := r.peerStore.SetPeers(initializePeers); err != nil {
			return err
		}
	}

	peers, err := r.peerStore.Peers()
	if err != nil {
		return err
	}

	// If no peers are set in the config or there is one and we are it, then start as a single server.
	if len(initializePeers) <= 1 {
		config.EnableSingleNode = true

		// Ensure we can always become the leader
		config.DisableBootstrapAfterElect = false

		// Make sure our peer address is here.  This happens with either a single node cluster
		// or a node joining the cluster, as no one else has that information yet.
		if !raft.PeerContained(peers, r.addr) {
			if err := r.peerStore.SetPeers([]string{r.addr}); err != nil {
				return err
			}
		}

		peers = []string{r.addr}
	}

	// Create the log store and stable store.
	store, err := raftboltdb.NewBoltStore(filepath.Join(r.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}
	r.raftStore = store

	// Create the snapshot store.
	snapshots, err := raft.NewFileSnapshotStore(r.path, raftSnapshotsRetained, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create raft log.
	ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	r.raft = ra

	r.wg.Add(1)
	go r.logLeaderChanges()

	return nil
}
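
Unlike the JSON-backed peer stores in the earlier examples, this one keeps the peer list purely in memory and re-seeds it from initializePeers on every start. A minimal sketch satisfying the pre-1.0 raft.PeerStore interface (the mutex is an addition for safety, not necessarily in the original):

import "sync"

// peerStore holds the peer list in memory only; nothing is persisted.
type peerStore struct {
	mu    sync.RWMutex
	peers []string
}

func (p *peerStore) Peers() ([]string, error) {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.peers, nil
}

func (p *peerStore) SetPeers(peers []string) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.peers = peers
	return nil
}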
Example #12
func (c *Cluster) start() error {
	c.store = newStore()

	cfg := raft.DefaultConfig()
	cfg.ShutdownOnRemove = false

	// setup K/V store
	raftStream, err := newStreamLayer(c.l.Addr(), c.tlsConfig, raftMessage)
	if err != nil {
		return err
	}
	raftTransport := raft.NewNetworkTransport(raftStream, 3, defaultTimeout, os.Stdout)
	peerStore := newPeerStore(c.home, raftTransport)

	peers, err := peerStore.Peers()
	if err != nil {
		return err
	}
	nPeers := len(peers)
	if nPeers <= 1 && c.peerAddr == "" {
		cfg.EnableSingleNode = true
	}

	kvRaft, err := newRaft(filepath.Join(c.home, "raft"), c.addr, peerStore, (*storeFSM)(c.store), raftTransport, cfg)
	if err != nil {
		return err
	}
	c.store.r = kvRaft
	kvRaft.store = c.store

	nodeRPCStream, err := newStreamLayer(c.l.Addr(), c.tlsConfig, api.RPCMessage)
	if err != nil {
		return err
	}
	nodeRPC := &nodeRPC{nodeRPCStream, kvRaft}
	go nodeRPC.handleConns()

	clientRPCStream, err := newStreamLayer(c.l.Addr(), c.tlsConfig, api.ClientMessage)
	if err != nil {
		return err
	}
	clientRPC := &clientRPC{clientRPCStream, c.store}
	go clientRPC.handleConns()

	handlers := map[api.MessageType]rpcHandler{
		raftMessage:       raftStream,
		api.RPCMessage:    nodeRPCStream,
		api.ClientMessage: clientRPCStream,
	}

	c.server = newRPCServer(c.l, handlers)
	c.r = kvRaft

	go c.store.waitLeader()
	go c.waitLeader()

	if c.peerAddr != "" && nPeers <= 1 {
		res, err := rpc(c.peerAddr, &rpcRequest{
			Method: addNode,
			Args:   []string{c.addr},
		}, c.tlsConfig)
		if err != nil {
			return err
		}
		if res.Err != "" && !strings.Contains(res.Err, "peer already known") {
			return fmt.Errorf("%s", res.Err)
		}
	}

	return nil
}
Example #13
func (c *Cluster) start() error {
	c.store = newStore()

	cfg := raft.DefaultConfig()
	cfg.ShutdownOnRemove = false
	if c.logger != nil {
		cfg.LogOutput = c.logger
	}

	raftStream := rpc.NewStreamLayer(c.l.Addr(), byte(raftMessage), c.rpcDialer)
	raftTransport := raft.NewNetworkTransport(raftStream, 3, defaultTimeout, os.Stdout)
	peerStore := newPeerStore(c.home, raftTransport)
	c.peers = peerStore

	peers, err := peerStore.Peers()
	if err != nil {
		return err
	}
	nPeers := len(peers)
	if nPeers <= 1 && c.peerAddr == "" {
		cfg.EnableSingleNode = true
	}

	kvRaft, err := newRaft(filepath.Join(c.home, "raft"), c.addr, peerStore, (*storeFSM)(c.store), raftTransport, cfg)
	if err != nil {
		return err
	}
	c.store.r = kvRaft
	c.store.dialer = c.rpcDialer
	kvRaft.store = c.store
	kvRaft.stream = raftStream

	nodeRPCStream := rpc.NewStreamLayer(c.l.Addr(), byte(api.RPCMessage), c.rpcDialer)
	nodeRPC := &nodeRPC{nodeRPCStream, kvRaft}
	go nodeRPC.handleConns()

	clientRPCStream := rpc.NewStreamLayer(c.l.Addr(), byte(api.ClientMessage), c.rpcDialer)
	clientRPC := &clientRPC{clientRPCStream, c.store}
	go clientRPC.handleConns()

	handlers := map[byte]rpc.Handler{
		byte(raftMessage):       raftStream,
		byte(api.RPCMessage):    nodeRPCStream,
		byte(api.ClientMessage): clientRPCStream,
	}

	c.server = rpc.NewServer(c.l, handlers)
	c.r = kvRaft

	go c.store.waitLeader()
	go c.waitLeader()

	if c.peerAddr != "" && nPeers <= 1 {
		res, err := nodeRPCStream.RPC(c.peerAddr, &rpc.Request{
			Method: addNode,
			Args:   []string{c.addr},
		})
		if err != nil {
			return err
		}
		if res.Err != "" && !strings.Contains(res.Err, "peer already known") {
			return fmt.Errorf("%s", res.Err)
		}
	}

	return nil
}
Example #14
// Open opens the store. If enableSingle is set, and there are no existing peers,
// then this node becomes the first node, and therefore leader, of the cluster.
func (s *Store) Open(enableSingle bool) error {
	if err := os.MkdirAll(s.raftDir, 0755); err != nil {
		return err
	}

	// Create the database. Unless it's a memory-based database, any existing
	// file must be deleted, as it will be rebuilt from (possibly) a snapshot
	// and committed log entries.
	var db *sql.DB
	var err error
	if !s.dbConf.Memory {
		if err := os.Remove(s.dbPath); err != nil && !os.IsNotExist(err) {
			return err
		}
		db, err = sql.OpenWithDSN(s.dbPath, s.dbConf.DSN)
		if err != nil {
			return err
		}
		s.logger.Println("SQLite database opened at", s.dbPath)
	} else {
		db, err = sql.OpenInMemoryWithDSN(s.dbConf.DSN)
		if err != nil {
			return err
		}
		s.logger.Println("SQLite in-memory database opened")
	}
	s.db = db

	// Setup Raft configuration.
	config := raft.DefaultConfig()

	// Check for any existing peers.
	peers, err := readPeersJSON(filepath.Join(s.raftDir, "peers.json"))
	if err != nil {
		return err
	}
	s.joinRequired = len(peers) <= 1

	// Allow the node to enter single-node mode, potentially electing itself, if
	// explicitly enabled and there is at most one node in the cluster already.
	if enableSingle && len(peers) <= 1 {
		s.logger.Println("enabling single-node mode")
		config.EnableSingleNode = true
		config.DisableBootstrapAfterElect = false
	}

	// Setup Raft communication.
	transport := raft.NewNetworkTransport(s.raftTransport, 3, 10*time.Second, os.Stderr)

	// Create peer storage.
	peerStore := raft.NewJSONPeers(s.raftDir, transport)

	// Create the snapshot store. This allows Raft to truncate the log.
	snapshots, err := raft.NewFileSnapshotStore(s.raftDir, retainSnapshotCount, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create the log store and stable store.
	logStore, err := raftboltdb.NewBoltStore(filepath.Join(s.raftDir, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}

	// Instantiate the Raft system.
	ra, err := raft.NewRaft(config, s, logStore, logStore, snapshots, peerStore, transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	s.raft = ra

	return nil
}
Example #15
// setupRaft is used to setup and initialize Raft
func (s *Server) setupRaft() error {
	// If we are in bootstrap mode, enable a single node cluster
	if s.config.Bootstrap {
		s.config.RaftConfig.EnableSingleNode = true
	}

	// Create the base state path
	statePath := filepath.Join(s.config.DataDir, tmpStatePath)
	if err := os.RemoveAll(statePath); err != nil {
		return err
	}
	if err := ensurePath(statePath, true); err != nil {
		return err
	}

	// Create the FSM
	var err error
	s.fsm, err = NewFSM(s.tombstoneGC, statePath, s.config.LogOutput)
	if err != nil {
		return err
	}

	// Set the maximum raft size based on 32/64bit. Since we are
	// doing an mmap underneath, we need to limit our use of virtual
	// address space on 32bit, but don't have to care on 64bit.
	dbSize := raftDBSize32bit
	if runtime.GOARCH == "amd64" {
		dbSize = raftDBSize64bit
	}

	// Create the base raft path
	path := filepath.Join(s.config.DataDir, raftState)
	if err := ensurePath(path, true); err != nil {
		return err
	}

	// Create the MDB store for logs and stable storage
	store, err := raftmdb.NewMDBStoreWithSize(path, dbSize)
	if err != nil {
		return err
	}
	s.raftStore = store

	// Wrap the store in a LogCache to improve performance
	cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
	if err != nil {
		store.Close()
		return err
	}

	// Create the snapshot store
	snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
	if err != nil {
		store.Close()
		return err
	}

	// Create a transport layer
	trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
	s.raftTransport = trans

	// Setup the peer store
	s.raftPeers = raft.NewJSONPeers(path, trans)

	// Ensure local host is always included if we are in bootstrap mode
	if s.config.Bootstrap {
		peers, err := s.raftPeers.Peers()
		if err != nil {
			store.Close()
			return err
		}
		if !raft.PeerContained(peers, trans.LocalAddr()) {
			s.raftPeers.SetPeers(raft.AddUniquePeer(peers, trans.LocalAddr()))
		}
	}

	// Make sure we set the LogOutput
	s.config.RaftConfig.LogOutput = s.config.LogOutput

	// Setup the Raft store
	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, cacheStore, store,
		snapshots, s.raftPeers, trans)
	if err != nil {
		store.Close()
		trans.Close()
		return err
	}

	// Start monitoring leadership
	go s.monitorLeadership()
	return nil
}
Example #16
// setupRaft is used to setup and initialize Raft
func (c *cerebrum) setupRaft() error {

	// If we are in bootstrap mode, enable a single node cluster
	if c.config.Bootstrap {
		c.config.RaftConfig.EnableSingleNode = true
	}

	// Create the base state path
	statePath := filepath.Join(c.config.DataPath, tmpStatePath)
	if err := os.RemoveAll(statePath); err != nil {
		return err
	}
	if err := os.MkdirAll(filepath.Dir(statePath), 0755); err != nil {
		return err
	}

	// Create the base raft path
	path := filepath.Join(c.config.DataPath, RaftStateDir)
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}

	// Create the backend raft store for logs and stable storage
	store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
	if err != nil {
		return err
	}
	c.raftStore = store

	// Wrap the store in a LogCache to improve performance
	cacheStore, err := raft.NewLogCache(c.config.LogCacheSize, store)
	if err != nil {
		store.Close()
		return err
	}

	// Create the snapshot store
	snapshots, err := raft.NewFileSnapshotStore(path, c.config.SnapshotsRetained, c.config.LogOutput)
	if err != nil {
		store.Close()
		return err
	}

	// Try to bind
	addr, err := net.ResolveTCPAddr("tcp", c.config.RaftBindAddr)
	if err != nil {
		return err
	}

	// Start TCP listener
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}

	// Create connection layer and transport
	layer := NewRaftLayer(c.dialer, listener.Addr(), c.config.TLSConfig)
	c.raftTransport = raft.NewNetworkTransport(layer, 3, 10*time.Second, c.config.LogOutput)

	// Create TLS connection dispatcher
	dispatcher := yamuxer.NewDispatcher(log.NewLogger(c.config.LogOutput, "dispatcher"), nil)
	dispatcher.Register(connRaft, layer)
	dispatcher.Register(connForward, &ForwardingHandler{c.applier, log.NewLogger(c.config.LogOutput, "forwarder")})

	// Create TLS connection muxer
	c.muxer = yamuxer.New(c.context, &yamuxer.Config{
		Listener:   listener,
		TLSConfig:  c.config.TLSConfig,
		Deadline:   c.config.ConnectionDeadline,
		LogOutput:  c.config.LogOutput,
		Dispatcher: dispatcher,
	})

	// Setup the peer store
	c.raftPeers = raft.NewJSONPeers(path, c.raftTransport)

	// Ensure local host is always included if we are in bootstrap mode
	if c.config.Bootstrap {
		peers, err := c.raftPeers.Peers()
		if err != nil {
			store.Close()
			return err
		}
		if !raft.PeerContained(peers, c.raftTransport.LocalAddr()) {
			c.raftPeers.SetPeers(raft.AddUniquePeer(peers, c.raftTransport.LocalAddr()))
		}
	}

	// Make sure we set the LogOutput
	c.config.RaftConfig.LogOutput = c.config.LogOutput

	// Setup the Raft store
	c.raft, err = raft.NewRaft(c.config.RaftConfig, c.fsm, cacheStore, store,
		snapshots, c.raftPeers, c.raftTransport)
	if err != nil {
		store.Close()
		c.raftTransport.Close()
		return err
	}

	// Setup forwarding and applier
	c.forwarder = NewForwarder(c.raft, c.dialer, log.NewLogger(c.config.LogOutput, "forwarder"))
	c.applier = NewApplier(c.raft, c.forwarder, log.NewLogger(c.config.LogOutput, "applier"), c.config.EnqueueTimeout)

	// // Start monitoring leadership
	// c.t.Go(func() error {
	// 	c.monitorLeadership()
	// 	return nil
	// })
	return nil
}