Example #1
// DefaultConfig is used to return a sane default configuration
func DefaultConfig() *Config {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	conf := &Config{
		Datacenter:        DefaultDC,
		NodeName:          hostname,
		RPCAddr:           DefaultRPCAddr,
		RaftConfig:        raft.DefaultConfig(),
		SerfLANConfig:     serf.DefaultConfig(),
		SerfWANConfig:     serf.DefaultConfig(),
		ReconcileInterval: 60 * time.Second,
		ProtocolVersion:   ProtocolVersionMax,
	}

	// Increase our reap interval to 3 days instead of 24h.
	conf.SerfLANConfig.ReconnectTimeout = 3 * 24 * time.Hour
	conf.SerfWANConfig.ReconnectTimeout = 3 * 24 * time.Hour

	// WAN Serf should use the WAN timing, since we are using it
	// to communicate between DCs
	conf.SerfWANConfig.MemberlistConfig = memberlist.DefaultWANConfig()

	// Ensure we don't have port conflicts
	conf.SerfLANConfig.MemberlistConfig.BindPort = DefaultLANSerfPort
	conf.SerfWANConfig.MemberlistConfig.BindPort = DefaultWANSerfPort

	// Disable shutdown on removal
	conf.RaftConfig.ShutdownOnRemove = false

	return conf
}
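
A caller would normally start from these defaults and override individual fields. A minimal sketch (field names are taken from the example above; the values are placeholders):

conf := DefaultConfig()
conf.Datacenter = "dc2"                   // hypothetical datacenter name
conf.NodeName = "node-a"                  // override the hostname-derived default
conf.ReconcileInterval = 30 * time.Second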
Example #2
// ScaleRaft sets the config to have Raft timing parameters scaled by the given
// performance multiplier. This is done in an idempotent way so it's not tricky
// to call this when composing configurations and potentially calling this
// multiple times on the same structure.
func (c *Config) ScaleRaft(raftMultRaw uint) {
	raftMult := time.Duration(raftMultRaw)

	def := raft.DefaultConfig()
	c.RaftConfig.HeartbeatTimeout = raftMult * def.HeartbeatTimeout
	c.RaftConfig.ElectionTimeout = raftMult * def.ElectionTimeout
	c.RaftConfig.LeaderLeaseTimeout = raftMult * def.LeaderLeaseTimeout
}
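
Because ScaleRaft multiplies the values from raft.DefaultConfig() rather than the current ones, repeated calls are idempotent. A small sketch (the multiplier is a hypothetical value):

c := DefaultConfig()
c.ScaleRaft(5)
c.ScaleRaft(5) // same result as a single call
// e.g. c.RaftConfig.HeartbeatTimeout == 5 * raft.DefaultConfig().HeartbeatTimeout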
Example #3
// DefaultConfig returns the default configuration
func DefaultConfig() *Config {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	c := &Config{
		Region:                 DefaultRegion,
		Datacenter:             DefaultDC,
		NodeName:               hostname,
		ProtocolVersion:        ProtocolVersionMax,
		RaftConfig:             raft.DefaultConfig(),
		RaftTimeout:            10 * time.Second,
		LogOutput:              os.Stderr,
		RPCAddr:                DefaultRPCAddr,
		SerfConfig:             serf.DefaultConfig(),
		NumSchedulers:          1,
		ReconcileInterval:      60 * time.Second,
		EvalGCInterval:         5 * time.Minute,
		EvalGCThreshold:        1 * time.Hour,
		JobGCInterval:          5 * time.Minute,
		JobGCThreshold:         4 * time.Hour,
		NodeGCInterval:         5 * time.Minute,
		NodeGCThreshold:        24 * time.Hour,
		EvalNackTimeout:        60 * time.Second,
		EvalDeliveryLimit:      3,
		MinHeartbeatTTL:        10 * time.Second,
		MaxHeartbeatsPerSecond: 50.0,
		HeartbeatGrace:         10 * time.Second,
		FailoverHeartbeatTTL:   300 * time.Second,
		ConsulConfig:           config.DefaultConsulConfig(),
		VaultConfig:            config.DefaultVaultConfig(),
		RPCHoldTimeout:         5 * time.Second,
		TLSConfig:              &config.TLSConfig{},
	}

	// Enable all known schedulers by default
	c.EnabledSchedulers = make([]string, 0, len(scheduler.BuiltinSchedulers))
	for name := range scheduler.BuiltinSchedulers {
		c.EnabledSchedulers = append(c.EnabledSchedulers, name)
	}
	c.EnabledSchedulers = append(c.EnabledSchedulers, structs.JobTypeCore)

	// Default the number of schedulers to match the cores
	c.NumSchedulers = runtime.NumCPU()

	// Increase our reap interval to 3 days instead of 24h.
	c.SerfConfig.ReconnectTimeout = 3 * 24 * time.Hour

	// Serf should use the WAN timing, since we are using it
	// to communicate between DCs
	c.SerfConfig.MemberlistConfig = memberlist.DefaultWANConfig()
	c.SerfConfig.MemberlistConfig.BindPort = DefaultSerfPort

	// Disable shutdown on removal
	c.RaftConfig.ShutdownOnRemove = false
	return c
}
Example #4
// DefaultConfig is used to return a sane default configuration
func DefaultConfig() *Config {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	conf := &Config{
		Datacenter:              DefaultDC,
		NodeName:                hostname,
		RPCAddr:                 DefaultRPCAddr,
		RaftConfig:              raft.DefaultConfig(),
		SerfLANConfig:           serf.DefaultConfig(),
		SerfWANConfig:           serf.DefaultConfig(),
		ReconcileInterval:       60 * time.Second,
		ProtocolVersion:         ProtocolVersion2Compatible,
		ACLTTL:                  30 * time.Second,
		ACLDefaultPolicy:        "allow",
		ACLDownPolicy:           "extend-cache",
		TombstoneTTL:            15 * time.Minute,
		TombstoneTTLGranularity: 30 * time.Second,
		SessionTTLMin:           10 * time.Second,
		DisableCoordinates:      false,

		// These are tuned to provide a total throughput of 128 updates
		// per second. If you update these, you should update the client-
		// side SyncCoordinateRateTarget parameter accordingly.
		CoordinateUpdatePeriod:     5 * time.Second,
		CoordinateUpdateBatchSize:  128,
		CoordinateUpdateMaxBatches: 5,
	}

	// Increase our reap interval to 3 days instead of 24h.
	conf.SerfLANConfig.ReconnectTimeout = 3 * 24 * time.Hour
	conf.SerfWANConfig.ReconnectTimeout = 3 * 24 * time.Hour

	// WAN Serf should use the WAN timing, since we are using it
	// to communicate between DCs
	conf.SerfWANConfig.MemberlistConfig = memberlist.DefaultWANConfig()

	// Let LAN Serf run globally by using WAN timings as well
	conf.SerfLANConfig.MemberlistConfig = memberlist.DefaultWANConfig()

	// Ensure we don't have port conflicts
	conf.SerfLANConfig.MemberlistConfig.BindPort = DefaultLANSerfPort
	conf.SerfWANConfig.MemberlistConfig.BindPort = DefaultWANSerfPort

	// Disable shutdown on removal
	conf.RaftConfig.ShutdownOnRemove = false

	// Make Raft more WAN friendly
	conf.RaftConfig.HeartbeatTimeout = 5000 * time.Millisecond
	conf.RaftConfig.ElectionTimeout = 5000 * time.Millisecond
	conf.RaftConfig.CommitTimeout = 100 * time.Millisecond
	conf.RaftConfig.LeaderLeaseTimeout = 2500 * time.Millisecond

	return conf
}
Example #5
// DefaultConfig is used to return a sane default configuration
func DefaultConfig() *Config {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	conf := &Config{
		Datacenter:               DefaultDC,
		NodeName:                 hostname,
		RPCAddr:                  DefaultRPCAddr,
		RaftConfig:               raft.DefaultConfig(),
		SerfLANConfig:            serf.DefaultConfig(),
		SerfWANConfig:            serf.DefaultConfig(),
		ReconcileInterval:        60 * time.Second,
		ProtocolVersion:          ProtocolVersion2Compatible,
		ACLTTL:                   30 * time.Second,
		ACLDefaultPolicy:         "allow",
		ACLDownPolicy:            "extend-cache",
		ACLReplicationInterval:   30 * time.Second,
		ACLReplicationApplyLimit: 100, // ops / sec
		TombstoneTTL:             15 * time.Minute,
		TombstoneTTLGranularity:  30 * time.Second,
		SessionTTLMin:            10 * time.Second,
		DisableCoordinates:       false,

		// These are tuned to provide a total throughput of 128 updates
		// per second. If you update these, you should update the client-
		// side SyncCoordinateRateTarget parameter accordingly.
		CoordinateUpdatePeriod:     5 * time.Second,
		CoordinateUpdateBatchSize:  128,
		CoordinateUpdateMaxBatches: 5,

		// Hold an RPC for up to 5 seconds by default
		RPCHoldTimeout: 5 * time.Second,
	}

	// Increase our reap interval to 3 days instead of 24h.
	conf.SerfLANConfig.ReconnectTimeout = 3 * 24 * time.Hour
	conf.SerfWANConfig.ReconnectTimeout = 3 * 24 * time.Hour

	// WAN Serf should use the WAN timing, since we are using it
	// to communicate between DCs
	conf.SerfWANConfig.MemberlistConfig = memberlist.DefaultWANConfig()

	// Ensure we don't have port conflicts
	conf.SerfLANConfig.MemberlistConfig.BindPort = DefaultLANSerfPort
	conf.SerfWANConfig.MemberlistConfig.BindPort = DefaultWANSerfPort

	// Enable interoperability with unversioned Raft library, and don't
	// start using new ID-based features yet.
	conf.RaftConfig.ProtocolVersion = 1

	// Disable shutdown on removal
	conf.RaftConfig.ShutdownOnRemove = false

	return conf
}
Example #6
// raftConfig returns a new Raft config for the store.
func (s *Store) raftConfig() *raft.Config {
	config := raft.DefaultConfig()
	if s.SnapshotThreshold != 0 {
		config.SnapshotThreshold = s.SnapshotThreshold
	}
	if s.HeartbeatTimeout != 0 {
		config.HeartbeatTimeout = s.HeartbeatTimeout
	}
	return config
}
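
Only the fields explicitly set on the Store override the library defaults; everything else keeps the values from raft.DefaultConfig(). A usage sketch (other Store fields omitted, values are placeholders):

s := &Store{
	SnapshotThreshold: 8192,
	HeartbeatTimeout:  500 * time.Millisecond,
}
config := s.raftConfig() // ElectionTimeout etc. remain at the library defaults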
Example #7
func main() {
	buf, err := ioutil.ReadFile("./config.json")
	if err != nil {
		log.Fatal(err)
	}

	var v Config
	if err = json.Unmarshal(buf, &v); err != nil {
		log.Fatal(err)
	}

	dataDir := v.DataDir
	if err := os.MkdirAll(dataDir, 0755); err != nil {
		log.Fatal(err)
	}

	cfg := raft.DefaultConfig()
	// cfg.EnableSingleNode = true
	fsm := new(Word)
	fsm.words = "hahaha"

	dbStore, err := raftboltdb.NewBoltStore(path.Join(dataDir, "raft_db"))
	if err != nil {
		log.Fatal(err)
	}
	fileStore, err := raft.NewFileSnapshotStore(dataDir, 1, os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	trans, err := raft.NewTCPTransport(v.Bind, nil, 3, 5*time.Second, os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	peers := make([]string, 0, 10)

	peers = raft.AddUniquePeer(peers, "192.168.78.151:12345")
	peers = raft.AddUniquePeer(peers, "192.168.78.151:12346")
	peers = raft.AddUniquePeer(peers, "192.168.78.151:12347")

	peerStore := raft.NewJSONPeers(dataDir, trans)
	peerStore.SetPeers(peers)

	r, err := raft.NewRaft(cfg, fsm, dbStore, dbStore, fileStore, peerStore, trans)
	if err != nil {
		log.Fatal(err)
	}

	t := time.NewTicker(time.Duration(1) * time.Second)

	for {
		select {
		case <-t.C:
			fmt.Println(r.Leader())
		}
	}

}
Example #8
// Open opens the store. If enableSingle is set, and there are no existing peers,
// then this node becomes the first node, and therefore leader, of the cluster.
func (s *Store) Open(enableSingle bool) error {
	// Setup Raft configuration.
	config := raft.DefaultConfig()

	// Check for any existing peers.
	peers, err := readPeersJSON(filepath.Join(s.RaftDir, "peers.json"))
	if err != nil {
		return err
	}

	// Allow the node to enter single-node mode, potentially electing itself, if
	// explicitly enabled and there is only 1 node in the cluster already.
	if enableSingle && len(peers) <= 1 {
		s.logger.Println("enabling single-node mode")
		config.EnableSingleNode = true
		config.DisableBootstrapAfterElect = false
	}

	// Setup Raft communication.
	addr, err := net.ResolveTCPAddr("tcp", s.RaftBind)
	if err != nil {
		return err
	}
	transport, err := raft.NewTCPTransport(s.RaftBind, addr, 3, 10*time.Second, os.Stderr)
	if err != nil {
		return err
	}

	// Create peer storage.
	peerStore := raft.NewJSONPeers(s.RaftDir, transport)

	// Create the snapshot store. This allows the Raft to truncate the log.
	snapshots, err := raft.NewFileSnapshotStore(s.RaftDir, retainSnapshotCount, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create the log store and stable store.
	logStore, err := raftboltdb.NewBoltStore(filepath.Join(s.RaftDir, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}

	// Instantiate the Raft systems.
	ra, err := raft.NewRaft(config, (*fsm)(s), logStore, logStore, snapshots, peerStore, transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	s.raft = ra
	return nil
}
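
The readPeersJSON helper called above is not shown. Since raft.NewJSONPeers persists the peer set as a JSON array of address strings, a plausible sketch (assuming the encoding/json and io/ioutil imports) is:

func readPeersJSON(path string) ([]string, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	if len(b) == 0 {
		return nil, nil // no peers file yet
	}
	var peers []string
	if err := json.Unmarshal(b, &peers); err != nil {
		return nil, err
	}
	return peers, nil
}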
Example #9
func TestAgent_CheckPerformanceSettings(t *testing.T) {
	// Try a default config.
	{
		c := nextConfig()
		c.ConsulConfig = nil
		dir, agent := makeAgent(t, c)
		defer os.RemoveAll(dir)
		defer agent.Shutdown()

		raftMult := time.Duration(consul.DefaultRaftMultiplier)
		r := agent.consulConfig().RaftConfig
		def := raft.DefaultConfig()
		if r.HeartbeatTimeout != raftMult*def.HeartbeatTimeout ||
			r.ElectionTimeout != raftMult*def.ElectionTimeout ||
			r.LeaderLeaseTimeout != raftMult*def.LeaderLeaseTimeout {
			t.Fatalf("bad: %#v", *r)
		}
	}

	// Try a multiplier.
	{
		c := nextConfig()
		c.Performance.RaftMultiplier = 99
		dir, agent := makeAgent(t, c)
		defer os.RemoveAll(dir)
		defer agent.Shutdown()

		const raftMult time.Duration = 99
		r := agent.consulConfig().RaftConfig
		def := raft.DefaultConfig()
		if r.HeartbeatTimeout != raftMult*def.HeartbeatTimeout ||
			r.ElectionTimeout != raftMult*def.ElectionTimeout ||
			r.LeaderLeaseTimeout != raftMult*def.LeaderLeaseTimeout {
			t.Fatalf("bad: %#v", *r)
		}
	}
}
Example #10
func (r *localRaft) openRaft() error {
	s := r.store
	// Setup raft configuration.
	config := raft.DefaultConfig()
	config.Logger = s.Logger
	config.HeartbeatTimeout = s.HeartbeatTimeout
	config.ElectionTimeout = s.ElectionTimeout
	config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
	config.CommitTimeout = s.CommitTimeout

	// If no peers are set in the config then start as a single server.
	config.EnableSingleNode = (len(s.peers) == 0)

	// Ensure our addr is in the peer list
	if config.EnableSingleNode {
		s.peers = append(s.peers, s.Addr.String())
	}

	// Build raft layer to multiplex listener.
	r.raftLayer = newRaftLayer(s.RaftListener, s.Addr)

	// Create a transport layer
	r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, os.Stderr)

	// Create peer storage.
	r.peerStore = raft.NewJSONPeers(s.path, r.transport)

	// Create the log store and stable store.
	store, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}
	r.raftStore = store

	// Create the snapshot store.
	snapshots, err := raft.NewFileSnapshotStore(s.path, raftSnapshotsRetained, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create raft log.
	ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	r.raft = ra

	return nil
}
Example #11
// makeRaft returns a Raft and its FSM, with snapshots based in the given dir.
func makeRaft(t *testing.T, dir string) (*raft.Raft, *MockFSM) {
	snaps, err := raft.NewFileSnapshotStore(dir, 5, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	fsm := &MockFSM{}
	store := raft.NewInmemStore()
	addr, trans := raft.NewInmemTransport("")

	config := raft.DefaultConfig()
	config.LocalID = raft.ServerID(fmt.Sprintf("server-%s", addr))

	var members raft.Configuration
	members.Servers = append(members.Servers, raft.Server{
		Suffrage: raft.Voter,
		ID:       config.LocalID,
		Address:  addr,
	})

	err = raft.BootstrapCluster(config, store, store, snaps, trans, members)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	raft, err := raft.NewRaft(config, fsm, store, store, snaps, trans)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	timeout := time.After(10 * time.Second)
	for {
		if raft.Leader() != "" {
			break
		}

		select {
		case <-raft.LeaderCh():
		case <-time.After(1 * time.Second):
			// Need to poll because we might have missed the first
			// go with the leader channel.
		case <-timeout:
			t.Fatalf("timed out waiting for leader")
		}
	}

	return raft, fsm
}
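
The MockFSM type is referenced but not defined here. A minimal sketch that satisfies the raft.FSM interface (Apply, Snapshot, Restore) could look like this; the real test helper may differ:

type MockFSM struct {
	sync.Mutex
	logs [][]byte
}

func (m *MockFSM) Apply(log *raft.Log) interface{} {
	m.Lock()
	defer m.Unlock()
	m.logs = append(m.logs, log.Data)
	return nil
}

func (m *MockFSM) Snapshot() (raft.FSMSnapshot, error) {
	return nil, fmt.Errorf("snapshot not implemented in this sketch")
}

func (m *MockFSM) Restore(io.ReadCloser) error {
	return fmt.Errorf("restore not implemented in this sketch")
}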
Example #12
func NewRaft(cfg *config.Raft, fsm raft.FSM, trans raft.Transport) (*raft.Raft, error) {
	raftLogDir := filepath.Join(cfg.DataDir, "log")
	raftMetaDir := filepath.Join(cfg.DataDir, "meta")

	logStore, err := raftleveldb.NewStore(raftLogDir)
	if err != nil {
		return nil, err
	}

	metaStore, err := raftleveldb.NewStore(raftMetaDir)
	if err != nil {
		return nil, err
	}

	snapshotStore, err := raft.NewFileSnapshotStore(cfg.DataDir, 3, os.Stderr)
	if err != nil {
		return nil, err
	}

	peerStore := raft.NewJSONPeers(cfg.DataDir, trans)

	raftConfig := raft.DefaultConfig()
	raftConfig.SnapshotInterval = time.Duration(cfg.SnapshotInterval)
	raftConfig.SnapshotThreshold = cfg.SnapshotThreshold
	raftConfig.EnableSingleNode = cfg.EnableSingleNode

	err = raft.ValidateConfig(raftConfig)
	if err != nil {
		return nil, err
	}
	return raft.NewRaft(
		raftConfig,
		fsm,
		logStore,
		metaStore,
		snapshotStore,
		peerStore,
		trans,
	)
}
Example #13
func (r *localRaft) open() error {
	s := r.store
	// Setup raft configuration.
	config := raft.DefaultConfig()
	config.LogOutput = ioutil.Discard

	if s.clusterTracingEnabled {
		config.Logger = s.Logger
	}
	config.HeartbeatTimeout = s.HeartbeatTimeout
	config.ElectionTimeout = s.ElectionTimeout
	config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
	config.CommitTimeout = s.CommitTimeout

	// If no peers are set in the config or there is one and we are it, then start as a single server.
	if len(s.peers) <= 1 {
		config.EnableSingleNode = true
		// Ensure we can always become the leader
		config.DisableBootstrapAfterElect = false
		// Don't shutdown raft automatically if we renamed our hostname back to a previous name
		config.ShutdownOnRemove = false
	}

	// Build raft layer to multiplex listener.
	r.raftLayer = newRaftLayer(s.RaftListener, s.RemoteAddr)

	// Create a transport layer
	r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, config.LogOutput)

	// Create peer storage.
	r.peerStore = raft.NewJSONPeers(s.path, r.transport)

	peers, err := r.peerStore.Peers()
	if err != nil {
		return err
	}

	// Make sure our address is in the raft peers or we won't be able to boot into the cluster
	if len(peers) > 0 && !raft.PeerContained(peers, s.RemoteAddr.String()) {
		s.Logger.Printf("%v is not in the list of raft peers. Please update %v/peers.json on all raft nodes to have the same contents.", s.RemoteAddr.String(), s.Path())
		return fmt.Errorf("peers out of sync: %v not in %v", s.RemoteAddr.String(), peers)
	}

	// Create the log store and stable store.
	store, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}
	r.raftStore = store

	// Create the snapshot store.
	snapshots, err := raft.NewFileSnapshotStore(s.path, raftSnapshotsRetained, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create raft log.
	ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	r.raft = ra

	return nil
}
Example #14
func main() {
	sstore, err := raftboltdb.NewBoltStore("/tmp/stablestore")
	if err != nil {
		fmt.Printf("%v", err)
		os.Exit(1)
	}

	logstore, err := raftboltdb.NewBoltStore("/tmp/logstore")
	if err != nil {
		fmt.Printf("Failed to create logstore")
		os.Exit(1)
	}

	snaps, err := raft.NewFileSnapshotStoreWithLogger("/tmp/snapshots", 3, nil)
	errorOnExit(err)
	transport, err := raft.NewTCPTransport("127.0.0.1:7000", nil, 10, 10*time.Second, nil)
	errorOnExit(err)
	peerstore := raft.NewJSONPeers("/tmp/peers", transport)
	conf := raft.DefaultConfig()
	conf.EnableSingleNode = true
	conf.SnapshotThreshold = 40
	conf.SnapshotInterval = 10 * time.Second
	fsm := NewMyFsm()
	raftmod, err := raft.NewRaft(conf, fsm, logstore, sstore,
		snaps, peerstore, transport)
	errorOnExit(err)
	time.Sleep(2 * time.Second)
	fmt.Printf("Leader is %v\n", raftmod.Leader())
	future := raftmod.Apply([]byte("hello:value"), 0)
	raftFutureErrorCheck(future)
	i := 0
	for ; i < 100; i++ {
		time.Sleep(2 * time.Millisecond)
		future := raftmod.Apply([]byte(fmt.Sprintf("key%d:value%d", i, i)), 0)
		raftFutureErrorCheck(future)
	}
	fmt.Printf("Do some fun\n")

	reader := bufio.NewReader(os.Stdin)
	for {
		fmt.Printf("Enter 1 to put, 2 to get, 3 to quit: ")
		text, _ := reader.ReadString('\n')
		text = strings.Trim(text, "\n")
		if text == "3" {
			os.Exit(0)
		} else if text == "1" {
			fmt.Printf("Key: ")
			key, _ := reader.ReadString('\n')
			key = strings.Trim(key, "\n\b \t\b")
			if key == "" {
				fmt.Printf("Empty key, continuing")
				continue
			}
			fmt.Printf("Value: ")
			value, _ := reader.ReadString('\n')
			value = strings.Trim(value, "\n")
			raftmod.Apply([]byte(fmt.Sprintf("%s:%s", key, value)), 0)
		} else if text == "2" {
			fmt.Printf("Key: ")
			key, _ := reader.ReadString('\n')
			key = strings.Trim(key, "\n\b \t\b")
			if key == "" {
				fmt.Printf("Empty key, continuing")
				continue
			}
			val, err := fsm.Get(key)
			if err != nil {
				fmt.Printf("Failed to get %s\n", key)
			} else {
				fmt.Printf("The value for key:%s is %s\n", key, val)
			}
		}
	}

}
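
The errorOnExit and raftFutureErrorCheck helpers used above are not shown; a sketch of their assumed behavior:

func errorOnExit(err error) {
	if err != nil {
		fmt.Printf("%v\n", err)
		os.Exit(1)
	}
}

func raftFutureErrorCheck(f raft.Future) {
	// Error() blocks until the entry has been applied or the apply fails.
	if err := f.Error(); err != nil {
		fmt.Printf("apply failed: %v\n", err)
	}
}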
Example #15
// DefaultConfig is used to return a sane default configuration
func DefaultConfig() *Config {
	hostname, err := os.Hostname()
	if err != nil {
		panic(err)
	}

	conf := &Config{
		Datacenter:               DefaultDC,
		NodeName:                 hostname,
		RPCAddr:                  DefaultRPCAddr,
		RaftConfig:               raft.DefaultConfig(),
		SerfLANConfig:            serf.DefaultConfig(),
		SerfWANConfig:            serf.DefaultConfig(),
		ReconcileInterval:        60 * time.Second,
		ProtocolVersion:          ProtocolVersion2Compatible,
		ACLTTL:                   30 * time.Second,
		ACLDefaultPolicy:         "allow",
		ACLDownPolicy:            "extend-cache",
		ACLReplicationInterval:   30 * time.Second,
		ACLReplicationApplyLimit: 100, // ops / sec
		TombstoneTTL:             15 * time.Minute,
		TombstoneTTLGranularity:  30 * time.Second,
		SessionTTLMin:            10 * time.Second,
		DisableCoordinates:       false,

		// These are tuned to provide a total throughput of 128 updates
		// per second. If you update these, you should update the client-
		// side SyncCoordinateRateTarget parameter accordingly.
		CoordinateUpdatePeriod:     5 * time.Second,
		CoordinateUpdateBatchSize:  128,
		CoordinateUpdateMaxBatches: 5,

		// This holds RPCs during leader elections. For the default Raft
		// config the election timeout is 5 seconds, so we set this a
		// bit longer to try to cover that period. This should be more
		// than enough when running in the high performance mode.
		RPCHoldTimeout: 7 * time.Second,
	}

	// Increase our reap interval to 3 days instead of 24h.
	conf.SerfLANConfig.ReconnectTimeout = 3 * 24 * time.Hour
	conf.SerfWANConfig.ReconnectTimeout = 3 * 24 * time.Hour

	// WAN Serf should use the WAN timing, since we are using it
	// to communicate between DCs
	conf.SerfWANConfig.MemberlistConfig = memberlist.DefaultWANConfig()

	// Ensure we don't have port conflicts
	conf.SerfLANConfig.MemberlistConfig.BindPort = DefaultLANSerfPort
	conf.SerfWANConfig.MemberlistConfig.BindPort = DefaultWANSerfPort

	// Enable interoperability with unversioned Raft library, and don't
	// start using new ID-based features yet.
	conf.RaftConfig.ProtocolVersion = 1
	conf.ScaleRaft(DefaultRaftMultiplier)

	// Disable shutdown on removal
	conf.RaftConfig.ShutdownOnRemove = false

	// Check every 5 seconds to see if there are enough new entries for a snapshot
	conf.RaftConfig.SnapshotInterval = 5 * time.Second

	return conf
}
Example #16
func (r *localRaft) open() error {
	r.closing = make(chan struct{})

	s := r.store
	// Setup raft configuration.
	config := raft.DefaultConfig()
	config.LogOutput = ioutil.Discard

	if s.clusterTracingEnabled {
		config.Logger = s.Logger
	}
	config.HeartbeatTimeout = s.HeartbeatTimeout
	config.ElectionTimeout = s.ElectionTimeout
	config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
	config.CommitTimeout = s.CommitTimeout
	// Since we actually never call `removePeer` this is safe.
	// If in the future we decide to call remove peer we have to re-evaluate how to handle this
	config.ShutdownOnRemove = false

	// If no peers are set in the config or there is one and we are it, then start as a single server.
	if len(s.peers) <= 1 {
		config.EnableSingleNode = true
		// Ensure we can always become the leader
		config.DisableBootstrapAfterElect = false
	}

	// Build raft layer to multiplex listener.
	r.raftLayer = newRaftLayer(s.RaftListener, s.RemoteAddr)

	// Create a transport layer
	r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, config.LogOutput)

	// Create peer storage.
	r.peerStore = raft.NewJSONPeers(s.path, r.transport)

	peers, err := r.peerStore.Peers()
	if err != nil {
		return err
	}

	// For single-node clusters, we can update the raft peers before we start the cluster if the hostname
	// has changed.
	if config.EnableSingleNode {
		if err := r.peerStore.SetPeers([]string{s.RemoteAddr.String()}); err != nil {
			return err
		}
		peers = []string{s.RemoteAddr.String()}
	}

	// If we have multiple nodes in the cluster, make sure our address is in the raft peers or
	// we won't be able to boot into the cluster because the other peers will reject our new hostname.  This
	// is difficult to resolve automatically because we need to have all the raft peers agree on the current members
	// of the cluster before we can change them.
	if len(peers) > 0 && !raft.PeerContained(peers, s.RemoteAddr.String()) {
		s.Logger.Printf("%s is not in the list of raft peers. Please update %v/peers.json on all raft nodes to have the same contents.", s.RemoteAddr.String(), s.Path())
		return fmt.Errorf("peers out of sync: %v not in %v", s.RemoteAddr.String(), peers)
	}

	// Create the log store and stable store.
	store, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}
	r.raftStore = store

	// Create the snapshot store.
	snapshots, err := raft.NewFileSnapshotStore(s.path, raftSnapshotsRetained, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create raft log.
	ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	r.raft = ra

	r.wg.Add(1)
	go r.logLeaderChanges()

	return nil
}
Example #17
func newRaft(a *App) (Cluster, error) {
	r := new(Raft)

	if len(a.config.Raft.Addr) == 0 {
		return nil, nil
	}

	peers := make([]string, 0, len(a.config.Raft.Cluster))

	r.raftAddr = a.config.Raft.Addr

	addr, err := net.ResolveTCPAddr("tcp", r.raftAddr)
	if err != nil {
		return nil, fmt.Errorf("invalid raft addr format %s, must host:port, err:%v", r.raftAddr, err)
	}

	peers = raft.AddUniquePeer(peers, addr.String())

	for _, cluster := range a.config.Raft.Cluster {
		addr, err = net.ResolveTCPAddr("tcp", cluster)
		if err != nil {
			return nil, fmt.Errorf("invalid cluster format %s, must host:port, err:%v", cluster, err)
		}

		peers = raft.AddUniquePeer(peers, addr.String())
	}

	os.MkdirAll(a.config.Raft.DataDir, 0755)

	cfg := raft.DefaultConfig()

	if len(a.config.Raft.LogDir) == 0 {
		r.log = os.Stdout
	} else {
		os.MkdirAll(a.config.Raft.LogDir, 0755)
		logFile := path.Join(a.config.Raft.LogDir, "raft.log")
		f, err := os.OpenFile(logFile, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0644)
		if err != nil {
			return nil, err
		}
		r.log = f

		cfg.LogOutput = r.log
	}

	raftDBPath := path.Join(a.config.Raft.DataDir, "raft_db")
	r.dbStore, err = raftboltdb.NewBoltStore(raftDBPath)
	if err != nil {
		return nil, err
	}

	fileStore, err := raft.NewFileSnapshotStore(a.config.Raft.DataDir, 1, r.log)
	if err != nil {
		return nil, err
	}

	r.trans, err = raft.NewTCPTransport(r.raftAddr, nil, 3, 5*time.Second, r.log)
	if err != nil {
		return nil, err
	}

	r.peerStore = raft.NewJSONPeers(a.config.Raft.DataDir, r.trans)

	if a.config.Raft.ClusterState == ClusterStateNew {
		log.Printf("[INFO] cluster state is new, use new cluster config")
		r.peerStore.SetPeers(peers)
	} else {
		log.Printf("[INFO] cluster state is existing, use previous + new cluster config")
		ps, err := r.peerStore.Peers()
		if err != nil {
			log.Printf("[INFO] get store peers error %v", err)
			return nil, err
		}

		for _, peer := range peers {
			ps = raft.AddUniquePeer(ps, peer)
		}

		r.peerStore.SetPeers(ps)
	}

	if peers, _ := r.peerStore.Peers(); len(peers) <= 1 {
		cfg.EnableSingleNode = true
		log.Println("[INFO] raft will run in single node mode, may only be used in test")
	}

	r.r, err = raft.NewRaft(cfg, a.fsm, r.dbStore, r.dbStore, fileStore, r.peerStore, r.trans)

	return r, err
}
Example #18
// NewRaft creates a new Raft instance. raft data is stored under the raft dir in prefix.
func NewRaft(c RaftConfig, prefix string, logDir string) (r *Raft, err error) {
	r = new(Raft)

	config := raft.DefaultConfig()
	config.EnableSingleNode = c.Single

	var logOutput *os.File
	if logDir != "" {
		logFile := path.Join(logDir, "raft.log")
		logOutput, err = os.OpenFile(logFile, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666)
		if err != nil {
			Fatal("Could not open raft log file: ", err)
		}

		config.LogOutput = logOutput
	}

	raftDir := path.Join(prefix, "raft")
	err = os.MkdirAll(raftDir, 0755)
	if err != nil {
		Fatal("Could not create raft storage dir: ", err)
	}

	fss, err := raft.NewFileSnapshotStore(raftDir, 1, nil)
	if err != nil {
		Error("Could not initialize raft snapshot store: ", err)
		return
	}

	// this should be our externally visible address. If not provided in the
	// config as 'advertise', we use the address of the listen config.
	if c.Advertise == nil {
		c.Advertise = &c.Listen
	}

	a, err := net.ResolveTCPAddr("tcp", *c.Advertise)
	if err != nil {
		Error("Could not lookup raft advertise address: ", err)
		return
	}

	r.transport, err = raft.NewTCPTransport(c.Listen, a, 3, 10*time.Second, nil)
	if err != nil {
		Error("Could not create raft transport: ", err)
		return
	}

	peerStore := raft.NewJSONPeers(raftDir, r.transport)

	if !c.Single {
		var peers []net.Addr
		peers, err = peerStore.Peers()
		if err != nil {
			return
		}

		for _, peerStr := range c.Peers {
			peer, err := net.ResolveTCPAddr("tcp", peerStr)
			if err != nil {
				Fatal("Bad peer:", err)
			}

			if !raft.PeerContained(peers, peer) {
				peerStore.SetPeers(raft.AddUniquePeer(peers, peer))
			}
		}
	} else {
		Warn("Running in single node permitted mode. Only use this for testing!")
	}

	r.mdb, err = raftmdb.NewMDBStore(raftDir)
	if err != nil {
		Error("Could not create raft store:", err)
		return
	}

	storage, err := NewStorage()
	if err != nil {
		Error("Could not create storage:", err)
		return
	}
	r.fsm = &FSM{storage}

	r.raft, err = raft.NewRaft(config, r.fsm, r.mdb, r.mdb, fss, peerStore, r.transport)
	if err != nil {
		Error("Could not initialize raft: ", err)
		return
	}

	return
}
Example #19
// Open starts the raft consensus and opens the store.
func (s *Store) Open() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Set up logging.
	s.logger = log.New(s.LogOutput, "[discoverd] ", log.LstdFlags)

	// Require listener & advertise address.
	if s.Listener == nil {
		return ErrListenerRequired
	} else if s.Advertise == nil {
		return ErrAdvertiseRequired
	}

	// Create root directory.
	if err := os.MkdirAll(s.path, 0777); err != nil {
		return err
	}

	// Create raft configuration.
	config := raft.DefaultConfig()
	config.HeartbeatTimeout = s.HeartbeatTimeout
	config.ElectionTimeout = s.ElectionTimeout
	config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
	config.CommitTimeout = s.CommitTimeout
	config.LogOutput = s.LogOutput
	config.EnableSingleNode = s.EnableSingleNode
	config.ShutdownOnRemove = false

	// Create multiplexing transport layer.
	raftLayer := newRaftLayer(s.Listener, s.Advertise)

	// Begin listening to TCP port.
	s.transport = raft.NewNetworkTransport(raftLayer, 3, 10*time.Second, os.Stderr)

	// Setup storage layers.
	s.peerStore = raft.NewJSONPeers(s.path, s.transport)
	stableStore, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("stable store: %s", err)
	}
	s.stableStore = stableStore

	// Wrap the store in a LogCache to improve performance
	cacheStore, err := raft.NewLogCache(512, stableStore)
	if err != nil {
		stableStore.Close()
		return fmt.Errorf("log cache: %s", err)
	}

	// Create the snapshot store.
	ss, err := raft.NewFileSnapshotStore(s.path, 2, os.Stderr)
	if err != nil {
		return fmt.Errorf("snapshot store: %s", err)
	}

	// Create raft log.
	//
	// The mutex must be unlocked as initializing the raft store may
	// call back into methods which acquire the lock (e.g. Restore)
	s.mu.Unlock()
	r, err := raft.NewRaft(config, s, cacheStore, stableStore, ss, s.peerStore, s.transport)
	s.mu.Lock()
	if err != nil {
		return fmt.Errorf("raft: %s", err)
	}

	// make sure the store was not closed whilst the mutex was unlocked
	select {
	case <-s.closing:
		return ErrShutdown
	default:
	}

	s.raft = r

	// Start goroutine to monitor leadership changes.
	s.wg.Add(1)
	go s.monitorLeaderCh()

	// Start goroutine to check for instance expiry.
	s.wg.Add(1)
	go s.expirer()

	return nil
}
Example #20
func main() {
	flag.Usage = func() {
		// It is unfortunate that we need to re-implement flag.PrintDefaults(),
		// but I cannot see any other way to achieve the grouping of flags.
		fmt.Fprintf(os.Stderr, "RobustIRC server (= node)\n")
		fmt.Fprintf(os.Stderr, "\n")
		fmt.Fprintf(os.Stderr, "The following flags are REQUIRED:\n")
		printDefault(flag.Lookup("network_name"))
		printDefault(flag.Lookup("network_password"))
		printDefault(flag.Lookup("peer_addr"))
		printDefault(flag.Lookup("tls_cert_path"))
		printDefault(flag.Lookup("tls_key_path"))
		fmt.Fprintf(os.Stderr, "\n")
		fmt.Fprintf(os.Stderr, "The following flags are only relevant when bootstrapping the network (once):\n")
		printDefault(flag.Lookup("join"))
		printDefault(flag.Lookup("singlenode"))
		fmt.Fprintf(os.Stderr, "\n")
		fmt.Fprintf(os.Stderr, "The following flags are optional:\n")
		printDefault(flag.Lookup("dump_canary_state"))
		printDefault(flag.Lookup("dump_heap_profile"))
		printDefault(flag.Lookup("canary_compaction_start"))
		printDefault(flag.Lookup("listen"))
		printDefault(flag.Lookup("raftdir"))
		printDefault(flag.Lookup("tls_ca_file"))
		printDefault(flag.Lookup("version"))
		fmt.Fprintf(os.Stderr, "\n")
		fmt.Fprintf(os.Stderr, "The following flags are optional and provided by glog:\n")
		printDefault(flag.Lookup("alsologtostderr"))
		printDefault(flag.Lookup("log_backtrace_at"))
		printDefault(flag.Lookup("log_dir"))
		printDefault(flag.Lookup("log_total_bytes"))
		printDefault(flag.Lookup("logtostderr"))
		printDefault(flag.Lookup("stderrthreshold"))
		printDefault(flag.Lookup("v"))
		printDefault(flag.Lookup("vmodule"))
	}
	flag.Parse()

	// Store logs in -raftdir, unless otherwise specified.
	if flag.Lookup("log_dir").Value.String() == "" {
		flag.Set("log_dir", *raftDir)
	}

	defer glog.Flush()
	glog.MaxSize = 64 * 1024 * 1024
	glog.CopyStandardLogTo("INFO")

	log.Printf("RobustIRC %s\n", Version)
	if *version {
		return
	}

	if _, err := os.Stat(filepath.Join(*raftDir, "deletestate")); err == nil {
		if err := os.RemoveAll(*raftDir); err != nil {
			log.Fatal(err)
		}
		if err := os.Mkdir(*raftDir, 0700); err != nil {
			log.Fatal(err)
		}
		log.Printf("Deleted %q because %q existed\n", *raftDir, filepath.Join(*raftDir, "deletestate"))
	}

	if err := outputstream.DeleteOldDatabases(*raftDir); err != nil {
		log.Fatalf("Could not delete old outputstream databases: %v\n", err)
	}

	if err := deleteOldCompactionDatabases(*raftDir); err != nil {
		glog.Errorf("Could not delete old compaction databases: %v (ignoring)\n", err)
	}

	log.Printf("Initializing RobustIRC…\n")

	if *networkPassword == "" {
		*networkPassword = os.Getenv("ROBUSTIRC_NETWORK_PASSWORD")
	}
	if *networkPassword == "" {
		log.Fatalf("-network_password not set. You MUST protect your network.\n")
	}
	digest := sha1.New()
	digest.Write([]byte(*networkPassword))
	passwordHash := "{SHA}" + base64.StdEncoding.EncodeToString(digest.Sum(nil))

	if *network == "" {
		log.Fatalf("-network_name not set, but required.\n")
	}

	if *peerAddr == "" {
		log.Printf("-peer_addr not set, initializing to %q. Make sure %q is a host:port string that other raft nodes can connect to!\n", *listen, *listen)
		*peerAddr = *listen
	}

	ircServer = ircserver.NewIRCServer(*raftDir, *network, time.Now())

	transport := rafthttp.NewHTTPTransport(
		*peerAddr,
		// No deadline, otherwise installing snapshots fails.
		robusthttp.Client(*networkPassword, false),
		nil,
		"")

	peerStore = raft.NewJSONPeers(*raftDir, transport)

	if *join == "" && !*singleNode {
		peers, err := peerStore.Peers()
		if err != nil {
			log.Fatal(err.Error())
		}
		if len(peers) == 0 {
			if !*timesafeguard.DisableTimesafeguard {
				log.Fatalf("No peers known and -join not specified. Joining the network is not safe because timesafeguard cannot be called.\n")
			}
		} else {
			if len(peers) == 1 && peers[0] == *peerAddr {
				// To prevent crashlooping too frequently in case the init system directly restarts our process.
				time.Sleep(10 * time.Second)
				log.Fatalf("Only known peer is myself (%q), implying this node was removed from the network. Please kill the process and remove the data.\n", *peerAddr)
			}
			if err := timesafeguard.SynchronizedWithNetwork(*peerAddr, peers, *networkPassword); err != nil {
				log.Fatal(err.Error())
			}
		}
	}

	var p []string

	config := raft.DefaultConfig()
	config.Logger = log.New(glog.LogBridgeFor("INFO"), "", log.Lshortfile)
	if *singleNode {
		config.EnableSingleNode = true
	}

	// Keep 5 snapshots in *raftDir/snapshots, log to stderr.
	fss, err := raft.NewFileSnapshotStore(*raftDir, 5, nil)
	if err != nil {
		log.Fatal(err)
	}

	// How often to check whether a snapshot should be taken. The check is
	// cheap, and the default value far too high for networks with a high
	// number of messages/s.
	// At the same time, it is important that we don’t check too early,
	// otherwise recovering from the most recent snapshot doesn’t work because
	// after recovering, a new snapshot (over the 0 committed messages) will be
	// taken immediately, effectively overwriting the result of the snapshot
	// recovery.
	config.SnapshotInterval = 300 * time.Second

	// Batch as many messages as possible into a single appendEntries RPC.
	// There is no downside to setting this too high.
	config.MaxAppendEntries = 1024

	// It could be that the heartbeat goroutine is not scheduled for a while,
	// so relax the default of 500ms.
	config.LeaderLeaseTimeout = timesafeguard.ElectionTimeout
	config.HeartbeatTimeout = timesafeguard.ElectionTimeout
	config.ElectionTimeout = timesafeguard.ElectionTimeout

	// We use prometheus, so hook up the metrics package (used by raft) to
	// prometheus as well.
	sink, err := metrics_prometheus.NewPrometheusSink()
	if err != nil {
		log.Fatal(err)
	}
	metrics.NewGlobal(metrics.DefaultConfig("raftmetrics"), sink)

	bootstrapping := *singleNode || *join != ""
	logStore, err := raft_store.NewLevelDBStore(filepath.Join(*raftDir, "raftlog"), bootstrapping)
	if err != nil {
		log.Fatal(err)
	}
	ircStore, err = raft_store.NewLevelDBStore(filepath.Join(*raftDir, "irclog"), bootstrapping)
	if err != nil {
		log.Fatal(err)
	}
	fsm := &FSM{
		store:             logStore,
		ircstore:          ircStore,
		lastSnapshotState: make(map[uint64][]byte),
	}
	logcache, err := raft.NewLogCache(config.MaxAppendEntries, logStore)
	if err != nil {
		log.Fatal(err)
	}

	node, err = raft.NewRaft(config, fsm, logcache, logStore, fss, peerStore, transport)
	if err != nil {
		log.Fatal(err)
	}

	if *dumpCanaryState != "" {
		canary(fsm, *dumpCanaryState)
		if *dumpHeapProfile != "" {
			debug.FreeOSMemory()
			f, err := os.Create(*dumpHeapProfile)
			if err != nil {
				log.Fatal(err)
			}
			defer f.Close()
			pprof.WriteHeapProfile(f)
		}
		return
	}

	go func() {
		for {
			secondsInState.WithLabelValues(node.State().String()).Inc()
			time.Sleep(1 * time.Second)
		}
	}()

	privaterouter := httprouter.New()
	privaterouter.Handler("GET", "/", exitOnRecoverHandleFunc(handleStatus))
	privaterouter.Handler("GET", "/irclog", exitOnRecoverHandleFunc(handleIrclog))
	privaterouter.Handler("POST", "/raft/*rest", exitOnRecoverHandler(transport))
	privaterouter.Handler("POST", "/join", exitOnRecoverHandleFunc(handleJoin))
	privaterouter.Handler("POST", "/part", exitOnRecoverHandleFunc(handlePart))
	privaterouter.Handler("GET", "/snapshot", exitOnRecoverHandleFunc(handleSnapshot))
	privaterouter.Handler("GET", "/leader", exitOnRecoverHandleFunc(handleLeader))
	privaterouter.Handler("POST", "/quit", exitOnRecoverHandleFunc(handleQuit))
	privaterouter.Handler("GET", "/config", exitOnRecoverHandleFunc(handleGetConfig))
	privaterouter.Handler("POST", "/config", exitOnRecoverHandleFunc(handlePostConfig))
	privaterouter.Handler("GET", "/metrics", exitOnRecoverHandler(prometheus.Handler()))

	publicrouter := httprouter.New()
	publicrouter.Handle("POST", "/robustirc/v1/:sessionid", exitOnRecoverHandle(handleCreateSession))
	publicrouter.Handle("POST", "/robustirc/v1/:sessionid/message", exitOnRecoverHandle(handlePostMessage))
	publicrouter.Handle("GET", "/robustirc/v1/:sessionid/messages", exitOnRecoverHandle(handleGetMessages))
	publicrouter.Handle("DELETE", "/robustirc/v1/:sessionid", exitOnRecoverHandle(handleDeleteSession))

	a := auth.NewBasicAuthenticator("robustirc", func(user, realm string) string {
		if user == "robustirc" {
			return passwordHash
		}
		return ""
	})

	http.Handle("/robustirc/", publicrouter)

	http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if username := a.CheckAuth(r); username == "" {
			a.RequireAuth(w, r)
		} else {
			privaterouter.ServeHTTP(w, r)
		}
	}))

	srv := http.Server{Addr: *listen}
	if err := http2.ConfigureServer(&srv, nil); err != nil {
		log.Fatal(err)
	}

	// Manually create the net.TCPListener so that joinMaster() does not run
	// into connection refused errors (the master will try to contact the
	// node before acknowledging the join).
	srv.TLSConfig.Certificates = make([]tls.Certificate, 1)
	srv.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(*tlsCertPath, *tlsKeyPath)
	if err != nil {
		log.Fatal(err)
	}

	ln, err := net.Listen("tcp", *listen)
	if err != nil {
		log.Fatal(err)
	}

	tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)
	go srv.Serve(tlsListener)

	log.Printf("RobustIRC listening on %q. For status, see %s\n",
		*peerAddr,
		fmt.Sprintf("https://robustirc:%[email protected]%s/", *networkPassword, *peerAddr))

	if *join != "" {
		if err := timesafeguard.SynchronizedWithMasterAndNetwork(*peerAddr, *join, *networkPassword); err != nil {
			log.Fatal(err.Error())
		}

		p = joinMaster(*join, peerStore)
		// TODO(secure): properly handle joins on the server-side where the joining node is already in the network.
	}

	if len(p) > 0 {
		node.SetPeers(p)
	}

	expireSessionsTimer := time.After(expireSessionsInterval)
	secondTicker := time.Tick(1 * time.Second)
	for {
		select {
		case <-secondTicker:
			if node.State() == raft.Shutdown {
				log.Fatal("Node removed from the network (in raft state shutdown), terminating.")
			}
		case <-expireSessionsTimer:
			expireSessionsTimer = time.After(expireSessionsInterval)

			// Race conditions (a node becoming a leader or ceasing to be the
			// leader shortly before/after this runs) are okay, since the timer
			// is triggered often enough on every node so that it will
			// eventually run on the leader.
			if node.State() != raft.Leader {
				continue
			}

			applyMu.Lock()
			for _, msg := range ircServer.ExpireSessions() {
				// Cannot fail, no user input.
				msgbytes, _ := json.Marshal(msg)
				f := node.Apply(msgbytes, 10*time.Second)
				if err := f.Error(); err != nil {
					log.Printf("Apply(): %v\n", err)
					break
				}
			}
			applyMu.Unlock()
		}
	}
}
Example #21
// Open opens the store. If enableSingle is set, and there are no existing peers,
// then this node becomes the first node, and therefore leader, of the cluster.
func (s *Store) Open(enableSingle bool) error {
	if err := os.MkdirAll(s.raftDir, 0755); err != nil {
		return err
	}

	// Create the database. Unless it's a memory-based database, it must be deleted
	var db *sql.DB
	var err error
	if !s.dbConf.Memory {
		// as it will be rebuilt from (possibly) a snapshot and committed log entries.
		if err := os.Remove(s.dbPath); err != nil && !os.IsNotExist(err) {
			return err
		}
		db, err = sql.OpenWithDSN(s.dbPath, s.dbConf.DSN)
		if err != nil {
			return err
		}
		s.logger.Println("SQLite database opened at", s.dbPath)
	} else {
		db, err = sql.OpenInMemoryWithDSN(s.dbConf.DSN)
		if err != nil {
			return err
		}
		s.logger.Println("SQLite in-memory database opened")
	}
	s.db = db

	// Setup Raft configuration.
	config := raft.DefaultConfig()

	// Check for any existing peers.
	peers, err := readPeersJSON(filepath.Join(s.raftDir, "peers.json"))
	if err != nil {
		return err
	}
	s.joinRequired = len(peers) <= 1

	// Allow the node to enter single-node mode, potentially electing itself, if
	// explicitly enabled and there is only 1 node in the cluster already.
	if enableSingle && len(peers) <= 1 {
		s.logger.Println("enabling single-node mode")
		config.EnableSingleNode = true
		config.DisableBootstrapAfterElect = false
	}

	// Setup Raft communication.
	transport := raft.NewNetworkTransport(s.raftTransport, 3, 10*time.Second, os.Stderr)

	// Create peer storage.
	peerStore := raft.NewJSONPeers(s.raftDir, transport)

	// Create the snapshot store. This allows Raft to truncate the log.
	snapshots, err := raft.NewFileSnapshotStore(s.raftDir, retainSnapshotCount, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create the log store and stable store.
	logStore, err := raftboltdb.NewBoltStore(filepath.Join(s.raftDir, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}

	// Instantiate the Raft system.
	ra, err := raft.NewRaft(config, s, logStore, logStore, snapshots, peerStore, transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	s.raft = ra

	return nil
}
Example #22
func newPeer(c *Config, fsm *fsm) (*peer, error) {
	r := &peer{}
	var err error

	r.addr = c.Raft.Addr

	os.MkdirAll(c.Raft.DataDir, 0755)

	cfg := raft.DefaultConfig()

	raftDBPath := path.Join(c.Raft.DataDir, "raft_db")
	r.dbStore, err = raftboltdb.NewBoltStore(raftDBPath)
	if err != nil {
		return nil, err
	}

	fileStore, err := raft.NewFileSnapshotStore(c.Raft.DataDir, 1, os.Stderr)
	if err != nil {
		return nil, err
	}

	r.trans, err = raft.NewTCPTransport(r.addr, nil, 3, 5*time.Second, os.Stderr)
	if err != nil {
		return nil, err
	}

	r.peerStore = raft.NewJSONPeers(c.Raft.DataDir, r.trans)

	if c.Raft.ClusterState == ClusterStateNew {
		log.Info("cluster state is new, use new cluster config")
		r.peerStore.SetPeers(c.Raft.Cluster)
	} else {
		log.Info("cluster state is existing, use previous + new cluster config")
		ps, err := r.peerStore.Peers()
		if err != nil {
			log.Error("get store peers error %v", err)
			return nil, err
		}

		for _, peer := range c.Raft.Cluster {
			ps = raft.AddUniquePeer(ps, peer)
		}

		r.peerStore.SetPeers(ps)
	}

	if peers, _ := r.peerStore.Peers(); len(peers) <= 1 {
		cfg.EnableSingleNode = true
		log.Notice("raft running in single node mode")
	}

	r.fsm = fsm

	r.r, err = raft.NewRaft(cfg, fsm, r.dbStore, r.dbStore, fileStore, r.peerStore, r.trans)
	if err != nil {
		return nil, err
	}

	// watch for leadership changes
	go func() {
		for isLeader := range r.r.LeaderCh() {
			if isLeader {
				log.Info("new leader http: %v", c.Addr)
				r.apply(&action{Cmd: CmdNewLeader, Leader: c.Addr}, applyRetries)
			}
		}
	}()

	return r, nil
}
Example #23
func (r *raftState) open(s *store, ln net.Listener, initializePeers []string) error {
	r.ln = ln
	r.closing = make(chan struct{})

	// Setup raft configuration.
	config := raft.DefaultConfig()
	config.LogOutput = ioutil.Discard

	if r.config.ClusterTracing {
		config.Logger = r.logger
	}
	config.HeartbeatTimeout = time.Duration(r.config.HeartbeatTimeout)
	config.ElectionTimeout = time.Duration(r.config.ElectionTimeout)
	config.LeaderLeaseTimeout = time.Duration(r.config.LeaderLeaseTimeout)
	config.CommitTimeout = time.Duration(r.config.CommitTimeout)
	// Since we actually never call `removePeer` this is safe.
	// If in the future we decide to call remove peer we have to re-evaluate how to handle this
	config.ShutdownOnRemove = false

	// Build raft layer to multiplex listener.
	r.raftLayer = newRaftLayer(r.addr, r.ln)

	// Create a transport layer
	r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, config.LogOutput)

	// Create peer storage.
	r.peerStore = &peerStore{}

	// This server is joining the raft cluster for the first time if initializePeers are passed in
	if len(initializePeers) > 0 {
		if err := r.peerStore.SetPeers(initializePeers); err != nil {
			return err
		}
	}

	peers, err := r.peerStore.Peers()
	if err != nil {
		return err
	}

	// If no peers are set in the config or there is one and we are it, then start as a single server.
	if len(initializePeers) <= 1 {
		config.EnableSingleNode = true

		// Ensure we can always become the leader
		config.DisableBootstrapAfterElect = false

		// Make sure our peer address is here.  This happens with either a single node cluster
		// or a node joining the cluster, as no one else has that information yet.
		if !raft.PeerContained(peers, r.addr) {
			if err := r.peerStore.SetPeers([]string{r.addr}); err != nil {
				return err
			}
		}

		peers = []string{r.addr}
	}

	// Create the log store and stable store.
	store, err := raftboltdb.NewBoltStore(filepath.Join(r.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}
	r.raftStore = store

	// Create the snapshot store.
	snapshots, err := raft.NewFileSnapshotStore(r.path, raftSnapshotsRetained, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Create raft log.
	ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	r.raft = ra

	r.wg.Add(1)
	go r.logLeaderChanges()

	return nil
}
Example #24
func (c *Cluster) start() error {
	c.store = newStore()

	cfg := raft.DefaultConfig()
	cfg.ShutdownOnRemove = false
	if c.logger != nil {
		cfg.LogOutput = c.logger
	}

	raftStream := rpc.NewStreamLayer(c.l.Addr(), byte(raftMessage), c.rpcDialer)
	raftTransport := raft.NewNetworkTransport(raftStream, 3, defaultTimeout, os.Stdout)
	peerStore := newPeerStore(c.home, raftTransport)
	c.peers = peerStore

	peers, err := peerStore.Peers()
	if err != nil {
		return err
	}
	nPeers := len(peers)
	if nPeers <= 1 && c.peerAddr == "" {
		cfg.EnableSingleNode = true
	}

	kvRaft, err := newRaft(filepath.Join(c.home, "raft"), c.addr, peerStore, (*storeFSM)(c.store), raftTransport, cfg)
	if err != nil {
		return err
	}
	c.store.r = kvRaft
	c.store.dialer = c.rpcDialer
	kvRaft.store = c.store
	kvRaft.stream = raftStream

	nodeRPCStream := rpc.NewStreamLayer(c.l.Addr(), byte(api.RPCMessage), c.rpcDialer)
	nodeRPC := &nodeRPC{nodeRPCStream, kvRaft}
	go nodeRPC.handleConns()

	clientRPCStream := rpc.NewStreamLayer(c.l.Addr(), byte(api.ClientMessage), c.rpcDialer)
	clientRPC := &clientRPC{clientRPCStream, c.store}
	go clientRPC.handleConns()

	handlers := map[byte]rpc.Handler{
		byte(raftMessage):       raftStream,
		byte(api.RPCMessage):    nodeRPCStream,
		byte(api.ClientMessage): clientRPCStream,
	}

	c.server = rpc.NewServer(c.l, handlers)
	c.r = kvRaft

	go c.store.waitLeader()
	go c.waitLeader()

	if c.peerAddr != "" && nPeers <= 1 {
		res, err := nodeRPCStream.RPC(c.peerAddr, &rpc.Request{
			Method: addNode,
			Args:   []string{c.addr},
		})
		if err != nil {
			return err
		}
		if res.Err != "" && !strings.Contains(res.Err, "peer already known") {
			return fmt.Errorf("%s", res.Err)
		}
	}

	return nil
}
Example #25
func (c *Cluster) start() error {
	c.store = newStore()

	cfg := raft.DefaultConfig()
	cfg.ShutdownOnRemove = false

	// setup K/V store
	raftStream, err := newStreamLayer(c.l.Addr(), c.tlsConfig, raftMessage)
	if err != nil {
		return err
	}
	raftTransport := raft.NewNetworkTransport(raftStream, 3, defaultTimeout, os.Stdout)
	peerStore := newPeerStore(c.home, raftTransport)

	peers, err := peerStore.Peers()
	if err != nil {
		return err
	}
	nPeers := len(peers)
	if nPeers <= 1 && c.peerAddr == "" {
		cfg.EnableSingleNode = true
	}

	kvRaft, err := newRaft(filepath.Join(c.home, "raft"), c.addr, peerStore, (*storeFSM)(c.store), raftTransport, cfg)
	if err != nil {
		return err
	}
	c.store.r = kvRaft
	kvRaft.store = c.store

	nodeRPCStream, err := newStreamLayer(c.l.Addr(), c.tlsConfig, api.RPCMessage)
	if err != nil {
		return err
	}
	nodeRPC := &nodeRPC{nodeRPCStream, kvRaft}
	go nodeRPC.handleConns()

	clientRPCStream, err := newStreamLayer(c.l.Addr(), c.tlsConfig, api.ClientMessage)
	if err != nil {
		return err
	}
	clientRPC := &clientRPC{clientRPCStream, c.store}
	go clientRPC.handleConns()

	handlers := map[api.MessageType]rpcHandler{
		raftMessage:       raftStream,
		api.RPCMessage:    nodeRPCStream,
		api.ClientMessage: clientRPCStream,
	}

	c.server = newRPCServer(c.l, handlers)
	c.r = kvRaft

	go c.store.waitLeader()
	go c.waitLeader()

	if c.peerAddr != "" && nPeers <= 1 {
		res, err := rpc(c.peerAddr, &rpcRequest{
			Method: addNode,
			Args:   []string{c.addr},
		}, c.tlsConfig)
		if err != nil {
			return err
		}
		if res.Err != "" && !strings.Contains(res.Err, "peer already known") {
			return fmt.Errorf("%s", res.Err)
		}
	}

	return nil
}