Example #1
// setupRaft is used to set up and initialize Raft
func (s *Server) setupRaft() error {
	// If we are in bootstrap mode, enable a single node cluster
	if s.config.Bootstrap || (s.config.DevMode && !s.config.DevDisableBootstrap) {
		s.config.RaftConfig.EnableSingleNode = true
	}

	// Create the FSM
	var err error
	s.fsm, err = NewFSM(s.evalBroker, s.periodicDispatcher, s.config.LogOutput)
	if err != nil {
		return err
	}

	// Create a transport layer
	trans := raft.NewNetworkTransport(s.raftLayer, 3, s.config.RaftTimeout,
		s.config.LogOutput)
	s.raftTransport = trans

	// Create the backend raft store for logs and stable storage
	var log raft.LogStore
	var stable raft.StableStore
	var snap raft.SnapshotStore
	var peers raft.PeerStore
	if s.config.DevMode {
		store := raft.NewInmemStore()
		s.raftInmem = store
		stable = store
		log = store
		snap = raft.NewDiscardSnapshotStore()
		peers = &raft.StaticPeers{}
		s.raftPeers = peers

	} else {
		// Create the base raft path
		path := filepath.Join(s.config.DataDir, raftState)
		if err := ensurePath(path, true); err != nil {
			return err
		}

		// Create the BoltDB backend
		store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
		if err != nil {
			return err
		}
		s.raftStore = store
		stable = store

		// Wrap the store in a LogCache to improve performance
		cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
		if err != nil {
			store.Close()
			return err
		}
		log = cacheStore

		// Create the snapshot store
		snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
		if err != nil {
			if s.raftStore != nil {
				s.raftStore.Close()
			}
			return err
		}
		snap = snapshots

		// Setup the peer store
		s.raftPeers = raft.NewJSONPeers(path, trans)
		peers = s.raftPeers
	}

	// Ensure local host is always included if we are in bootstrap mode
	if s.config.RaftConfig.EnableSingleNode {
		p, err := peers.Peers()
		if err != nil {
			if s.raftStore != nil {
				s.raftStore.Close()
			}
			return err
		}
		if !raft.PeerContained(p, trans.LocalAddr()) {
			peers.SetPeers(raft.AddUniquePeer(p, trans.LocalAddr()))
		}
	}

	// Make sure we set the LogOutput
	s.config.RaftConfig.LogOutput = s.config.LogOutput

	// Setup the leader channel
	leaderCh := make(chan bool, 1)
	s.config.RaftConfig.NotifyCh = leaderCh
	s.leaderCh = leaderCh

	// Setup the Raft store
	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable,
		snap, peers, trans)
	if err != nil {
		if s.raftStore != nil {
			s.raftStore.Close()
		}
		trans.Close()
		return err
	}
	return nil
}
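
The ensurePath helper is referenced by the example but not shown. A minimal sketch of what it plausibly looks like (an assumption, based on the helper of the same name in Consul and Nomad, which wraps os.MkdirAll):

package main

import (
	"os"
	"path/filepath"
)

// ensurePath creates the directory portion of path. When dir is false, path
// is treated as a file name and only its parent directory is created.
func ensurePath(path string, dir bool) error {
	if !dir {
		path = filepath.Dir(path)
	}
	return os.MkdirAll(path, 0755)
}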
Example #2
func canary(fsm raft.FSM, statePath string) {
	// Create a snapshot (only creates metadata) and persist it (does the
	// actual compaction). Afterwards we have access to |rs.parsed| (all
	// raft log entries, but parsed) and |rs.del| (all messages which were
	// just compacted).
	log.Printf("Compacting before dumping state\n")

	snapshot, err := fsm.Snapshot()
	if err != nil {
		log.Fatalf("fsm.Snapshot(): %v\n", err)
	}

	rs, ok := snapshot.(*robustSnapshot)
	if !ok {
		log.Fatalf("snapshot is not a robustSnapshot")
	}

	sink, err := raft.NewDiscardSnapshotStore().Create(rs.lastIndex, 1, []byte{})
	if err != nil {
		log.Fatalf("DiscardSnapshotStore.Create(): %v\n", err)
	}

	if err := snapshot.Persist(sink); err != nil {
		log.Fatalf("snapshot.Persist(): %v\n", err)
	}

	// Close the sink to finish the snapshot; raft.SnapshotSink expects
	// Close after a successful Persist (a no-op for the discard store).
	sink.Close()

	// Dump the in-memory state into a file, to be read by robustirc-canary.
	f, err := os.Create(statePath)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	log.Printf("Dumping state for robustirc-canary into %q\n", statePath)

	enc := json.NewEncoder(f)

	// Sort the keys to iterate through |rs.parsed| in deterministic order.
	keys := make([]uint64, 0, len(rs.parsed))
	for idx := range rs.parsed {
		keys = append(keys, idx)
	}
	sort.Sort(uint64Slice(keys))

	for _, idx := range keys {
		nmsg := rs.parsed[idx]
		// TODO: come up with pseudo-values for createsession/deletesession
		if nmsg.Type != types.RobustIRCFromClient {
			continue
		}
		ircmsg := irc.ParseMessage(nmsg.Data)
		if ircmsg.Command == irc.PING || ircmsg.Command == irc.PONG {
			continue
		}
		vmsgs, _ := ircServer.Get(nmsg.Id)
		cm := canaryMessageState{
			Id:        idx,
			Session:   nmsg.Session.Id,
			Input:     util.PrivacyFilterIrcmsg(ircmsg).String(),
			Output:    make([]canaryMessageOutput, len(vmsgs)),
			Compacted: rs.del[idx],
		}
		for idx, vmsg := range vmsgs {
			ifc := make(map[string]bool)
			for k, v := range vmsg.InterestingFor {
				ifc["0x"+strconv.FormatInt(k, 16)] = v
			}
			cm.Output[idx] = canaryMessageOutput{
				Text:           util.PrivacyFilterIrcmsg(irc.ParseMessage(vmsg.Data)).String(),
				InterestingFor: ifc,
			}
		}
		if err := enc.Encode(&cm); err != nil {
			log.Fatal(err)
		}
	}
}
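
The state file produced above is a stream of concatenated JSON objects, one per enc.Encode call. A minimal sketch of how a consumer such as robustirc-canary might read it back; the struct definitions are assumptions reconstructed from the fields set in the example, not the real robustirc types:

package main

import (
	"encoding/json"
	"io"
	"log"
	"os"
)

// Assumed shapes, mirroring the fields populated in the example.
type canaryMessageOutput struct {
	Text           string
	InterestingFor map[string]bool
}

type canaryMessageState struct {
	Id        uint64
	Session   int64
	Input     string
	Output    []canaryMessageOutput
	Compacted bool
}

func main() {
	f, err := os.Open("state.json") // hypothetical statePath
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// json.Decoder reads one JSON value per Decode call, so it consumes
	// the encoder's output stream object by object.
	dec := json.NewDecoder(f)
	for {
		var cm canaryMessageState
		if err := dec.Decode(&cm); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		log.Printf("id=%d session=%d input=%q outputs=%d compacted=%v",
			cm.Id, cm.Session, cm.Input, len(cm.Output), cm.Compacted)
	}
}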
Example #3
// setupRaft is used to set up and initialize Raft
func (s *Server) setupRaft() error {
	// If we have an unclean exit then attempt to close the Raft store.
	defer func() {
		if s.raft == nil && s.raftStore != nil {
			if err := s.raftStore.Close(); err != nil {
				s.logger.Printf("[ERR] consul: failed to close Raft store: %v", err)
			}
		}
	}()

	// Create the FSM.
	var err error
	s.fsm, err = NewFSM(s.tombstoneGC, s.config.LogOutput)
	if err != nil {
		return err
	}

	// Create a transport layer.
	trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
	s.raftTransport = trans

	// Make sure we set the LogOutput.
	s.config.RaftConfig.LogOutput = s.config.LogOutput

	// Our version of Raft protocol requires the LocalID to match the network
	// address of the transport.
	s.config.RaftConfig.LocalID = raft.ServerID(trans.LocalAddr())

	// Build an all in-memory setup for dev mode, otherwise prepare a full
	// disk-based setup.
	var log raft.LogStore
	var stable raft.StableStore
	var snap raft.SnapshotStore
	if s.config.DevMode {
		store := raft.NewInmemStore()
		s.raftInmem = store
		stable = store
		log = store
		snap = raft.NewDiscardSnapshotStore()
	} else {
		// Create the base raft path.
		path := filepath.Join(s.config.DataDir, raftState)
		if err := ensurePath(path, true); err != nil {
			return err
		}

		// Create the backend raft store for logs and stable storage.
		store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
		if err != nil {
			return err
		}
		s.raftStore = store
		stable = store

		// Wrap the store in a LogCache to improve performance.
		cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
		if err != nil {
			return err
		}
		log = cacheStore

		// Create the snapshot store.
		snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
		if err != nil {
			return err
		}
		snap = snapshots

		// For an existing cluster being upgraded to the new version of
		// Raft, we almost never want to run recovery based on the old
		// peers.json file. We create a peers.info file with a helpful
		// note about where peers.json went, and use that as a sentinel
		// to avoid ingesting the old one that first time (if we have to
		// create the peers.info file because it's not there, we also
		// blow away any existing peers.json file).
		peersFile := filepath.Join(path, "peers.json")
		peersInfoFile := filepath.Join(path, "peers.info")
		if _, err := os.Stat(peersInfoFile); os.IsNotExist(err) {
			if err := ioutil.WriteFile(peersInfoFile, []byte(peersInfoContent), 0755); err != nil {
				return fmt.Errorf("failed to write peers.info file: %v", err)
			}

			// Blow away the peers.json file if present, since the
			// peers.info sentinel wasn't there.
			if _, err := os.Stat(peersFile); err == nil {
				if err := os.Remove(peersFile); err != nil {
					return fmt.Errorf("failed to delete peers.json, please delete manually (see peers.info for details): %v", err)
				}
				s.logger.Printf("[INFO] consul: deleted peers.json file (see peers.info for details)")
			}
		} else if _, err := os.Stat(peersFile); err == nil {
			s.logger.Printf("[INFO] consul: found peers.json file, recovering Raft configuration...")
			configuration, err := raft.ReadPeersJSON(peersFile)
			if err != nil {
				return fmt.Errorf("recovery failed to parse peers.json: %v", err)
			}
			tmpFsm, err := NewFSM(s.tombstoneGC, s.config.LogOutput)
			if err != nil {
				return fmt.Errorf("recovery failed to make temp FSM: %v", err)
			}
			if err := raft.RecoverCluster(s.config.RaftConfig, tmpFsm,
				log, stable, snap, trans, configuration); err != nil {
				return fmt.Errorf("recovery failed: %v", err)
			}
			if err := os.Remove(peersFile); err != nil {
				return fmt.Errorf("recovery failed to delete peers.json, please delete manually (see peers.info for details): %v", err)
			}
			s.logger.Printf("[INFO] consul: deleted peers.json file after successful recovery")
		}
	}

	// If we are in bootstrap or dev mode and the state is clean then we can
	// bootstrap now.
	if s.config.Bootstrap || s.config.DevMode {
		hasState, err := raft.HasExistingState(log, stable, snap)
		if err != nil {
			return err
		}
		if !hasState {
			// TODO (slackpad) - This will need to be updated when
			// we add support for node IDs.
			configuration := raft.Configuration{
				Servers: []raft.Server{
					raft.Server{
						ID:      raft.ServerID(trans.LocalAddr()),
						Address: trans.LocalAddr(),
					},
				},
			}
			if err := raft.BootstrapCluster(s.config.RaftConfig,
				log, stable, snap, trans, configuration); err != nil {
				return err
			}
		}
	}

	// Setup the Raft store.
	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable, snap, trans)
	if err != nil {
		return err
	}

	// Start monitoring leadership.
	go s.monitorLeadership()
	return nil
}
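
Example #3 targets the post-1.0 hashicorp/raft API (no PeerStore; membership is bootstrapped explicitly), whereas Examples #1 and #4 use the older PeerStore-based API. Below is a minimal, self-contained sketch of the same dev-mode, single-node pattern against the modern API; noopFSM and the "node-1" ID are placeholders for illustration, not part of Consul:

package main

import (
	"errors"
	"io"

	"github.com/hashicorp/raft"
)

// noopFSM is a do-nothing state machine that satisfies raft.FSM; it stands
// in for the real FSM built by NewFSM in the example.
type noopFSM struct{}

func (noopFSM) Apply(*raft.Log) interface{} { return nil }
func (noopFSM) Snapshot() (raft.FSMSnapshot, error) {
	return nil, errors.New("snapshots not supported")
}
func (noopFSM) Restore(io.ReadCloser) error { return nil }

func main() {
	conf := raft.DefaultConfig()
	conf.LocalID = raft.ServerID("node-1")

	// In-memory equivalents of the DevMode stores in the example: one store
	// serves as both LogStore and StableStore, and snapshots are discarded.
	store := raft.NewInmemStore()
	snaps := raft.NewDiscardSnapshotStore()
	addr, trans := raft.NewInmemTransport("")

	// With no existing state, bootstrap a one-server configuration, which
	// is what the clean-state Bootstrap/DevMode branch of the example does.
	configuration := raft.Configuration{
		Servers: []raft.Server{{ID: conf.LocalID, Address: addr}},
	}
	if err := raft.BootstrapCluster(conf, store, store, snaps, trans, configuration); err != nil {
		panic(err)
	}

	r, err := raft.NewRaft(conf, noopFSM{}, store, store, snaps, trans)
	if err != nil {
		panic(err)
	}
	defer r.Shutdown()

	// A single-node cluster elects itself; wait for leadership.
	<-r.LeaderCh()
}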
Example #4
// setupRaft is used to set up and initialize Raft
func (s *Server) setupRaft() error {
	// If we are in bootstrap mode, enable a single node cluster
	if s.config.Bootstrap || s.config.DevMode {
		s.config.RaftConfig.EnableSingleNode = true
	}

	// Create the FSM
	var err error
	s.fsm, err = NewFSM(s.tombstoneGC, s.config.LogOutput)
	if err != nil {
		return err
	}

	// Create a transport layer
	trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
	s.raftTransport = trans

	var log raft.LogStore
	var stable raft.StableStore
	var snap raft.SnapshotStore

	if s.config.DevMode {
		store := raft.NewInmemStore()
		s.raftInmem = store
		stable = store
		log = store
		snap = raft.NewDiscardSnapshotStore()
		s.raftPeers = &raft.StaticPeers{}
	} else {
		// Create the base raft path
		path := filepath.Join(s.config.DataDir, raftState)
		if err := ensurePath(path, true); err != nil {
			return err
		}

		// Create the backend raft store for logs and stable storage
		store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
		if err != nil {
			return err
		}
		s.raftStore = store
		stable = store

		// Wrap the store in a LogCache to improve performance
		cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
		if err != nil {
			store.Close()
			return err
		}
		log = cacheStore

		// Create the snapshot store
		snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
		if err != nil {
			store.Close()
			return err
		}
		snap = snapshots

		// Setup the peer store
		s.raftPeers = raft.NewJSONPeers(path, trans)
	}

	// Ensure local host is always included if we are in bootstrap mode
	if s.config.Bootstrap {
		peerAddrs, err := s.raftPeers.Peers()
		if err != nil {
			if s.raftStore != nil {
				s.raftStore.Close()
			}
			return err
		}
		if !raft.PeerContained(peerAddrs, trans.LocalAddr()) {
			s.raftPeers.SetPeers(raft.AddUniquePeer(peerAddrs, trans.LocalAddr()))
		}
	}

	// Make sure we set the LogOutput
	s.config.RaftConfig.LogOutput = s.config.LogOutput

	// Setup the Raft store
	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable,
		snap, s.raftPeers, trans)
	if err != nil {
		if s.raftStore != nil {
			s.raftStore.Close()
		}
		trans.Close()
		return err
	}

	// Start monitoring leadership
	go s.monitorLeadership()
	return nil
}
Example #5
func canary(fsm raft.FSM, statePath string) {
	// Create a snapshot (only creates metadata) and persist it (does the
	// actual compaction). Afterwards we have access to |rs.parsed| (all
	// raft log entries, but parsed) and |rs.del| (all messages which were
	// just compacted).
	log.Printf("Compacting before dumping state\n")

	fsm.(*FSM).skipDeletionForCanary = true

	snapshot, err := fsm.Snapshot()
	if err != nil {
		log.Fatalf("fsm.Snapshot(): %v\n", err)
	}

	rs, ok := snapshot.(*robustSnapshot)
	if !ok {
		log.Fatalf("snapshot is not a robustSnapshot")
	}

	sink, err := raft.NewDiscardSnapshotStore().Create(rs.lastIndex, 1, []byte{})
	if err != nil {
		log.Fatalf("DiscardSnapshotStore.Create(): %v\n", err)
	}

	if err := snapshot.Persist(sink); err != nil {
		log.Fatalf("snapshot.Persist(): %v\n", err)
	}

	sink.Close()

	// Dump the in-memory state into a file, to be read by robustirc-canary.
	f, err := os.Create(statePath)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	log.Printf("Dumping state for robustirc-canary into %q\n", statePath)

	enc := json.NewEncoder(f)

	iterator := rs.store.GetBulkIterator(rs.firstIndex, rs.lastIndex+1)
	defer iterator.Release()
	available := iterator.First()
	for available {
		var nlog raft.Log
		if err := iterator.Error(); err != nil {
			glog.Errorf("Error while iterating through the log: %v", err)
			available = iterator.Next()
			continue
		}
		idx := binary.BigEndian.Uint64(iterator.Key())
		value := iterator.Value()
		if err := json.Unmarshal(value, &nlog); err != nil {
			glog.Errorf("Skipping log entry %d because of a JSON unmarshaling error: %v", idx, err)
			// Advance the iterator before skipping; otherwise this entry
			// would be retried forever.
			available = iterator.Next()
			continue
		}
		available = iterator.Next()

		// TODO: compact raft messages as well, so that peer changes are not kept forever
		if nlog.Type != raft.LogCommand {
			continue
		}

		nmsg := types.NewRobustMessageFromBytes(nlog.Data)
		if time.Unix(0, nmsg.Id.Id).Before(rs.compactionEnd) {
			continue
		}

		// TODO: come up with pseudo-values for createsession/deletesession
		if nmsg.Type != types.RobustIRCFromClient {
			continue
		}
		ircmsg := irc.ParseMessage(nmsg.Data)
		if ircmsg.Command == irc.PING || ircmsg.Command == irc.PONG {
			continue
		}
		vmsgs, _ := ircServer.Get(nmsg.Id)
		cm := canaryMessageState{
			Id:        idx,
			Session:   nmsg.Session.Id,
			Input:     util.PrivacyFilterIrcmsg(ircmsg).String(),
			Output:    make([]canaryMessageOutput, len(vmsgs)),
			Compacted: false,
		}
		for idx, vmsg := range vmsgs {
			ifc := make(map[string]bool)
			for k, v := range vmsg.InterestingFor {
				ifc["0x"+strconv.FormatInt(k, 16)] = v
			}
			cm.Output[idx] = canaryMessageOutput{
				Text:           util.PrivacyFilterIrcmsg(irc.ParseMessage(vmsg.Data)).String(),
				InterestingFor: ifc,
			}
		}
		if err := enc.Encode(&cm); err != nil {
			log.Fatal(err)
		}
	}
}