// updateACLReplicationStatus safely updates the ACL replication status.
func (s *Server) updateACLReplicationStatus(status structs.ACLReplicationStatus) {
	// Round the times to the nearest second to shed useless precision,
	// which eases formatting, and always report them in UTC.
	status.LastError = status.LastError.Round(time.Second).UTC()
	status.LastSuccess = status.LastSuccess.Round(time.Second).UTC()

	// Set the shared state.
	s.aclReplicationStatusLock.Lock()
	s.aclReplicationStatus = status
	s.aclReplicationStatusLock.Unlock()
}
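
// getACLReplicationStatus is a hypothetical read-side counterpart to the
// updater above, sketched here to illustrate the locking pattern; it assumes
// aclReplicationStatusLock is a sync.RWMutex, and the real consumer of this
// shared state (e.g. a status RPC endpoint) is not part of this section.
func (s *Server) getACLReplicationStatus() structs.ACLReplicationStatus {
	s.aclReplicationStatusLock.RLock()
	defer s.aclReplicationStatusLock.RUnlock()
	return s.aclReplicationStatus
}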

// runACLReplication is a long-running goroutine that will attempt to replicate
// ACLs while the server is the leader, until the shutdown channel closes.
func (s *Server) runACLReplication() {
	var status structs.ACLReplicationStatus
	status.Enabled = true
	status.SourceDatacenter = s.config.ACLDatacenter
	s.updateACLReplicationStatus(status)

	// Show that it's not running on the way out.
	defer func() {
		status.Running = false
		s.updateACLReplicationStatus(status)
	}()

	// Give each server's replicator a random initial phase for good
	// measure.
	select {
	case <-s.shutdownCh:
		return

	case <-time.After(lib.RandomStagger(s.config.ACLReplicationInterval)):
	}
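
	// lib.RandomStagger is assumed here to return a random duration in the
	// half-open interval [0, ACLReplicationInterval), roughly equivalent to:
	//
	//	time.Duration(uint64(rand.Int63()) % uint64(interval))
	//
	// so each server begins its replication loop at a different offset.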

	// We are fairly conservative with lastRemoteIndex: after a leadership
	// change or an error we reset it to zero so we re-sync everything. We
	// also don't want to block the first time after one of these events, so
	// that the status endpoint can show a successful sync promptly.
	var lastRemoteIndex uint64
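
	// The replicate closure below assumes that s.replicateACLs runs a
	// blocking query against the remote ACL datacenter starting from
	// lastRemoteIndex and returns the remote index it has replicated
	// through; with an index of zero it is expected to return promptly,
	// which gives the quick first pass described above.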
	replicate := func() {
		if !status.Running {
			lastRemoteIndex = 0 // Re-sync everything.
			status.Running = true
			s.updateACLReplicationStatus(status)
			s.logger.Printf("[INFO] consul: ACL replication started")
		}

		index, err := s.replicateACLs(lastRemoteIndex)
		if err != nil {
			lastRemoteIndex = 0 // Re-sync everything.
			status.LastError = time.Now()
			s.updateACLReplicationStatus(status)
			s.logger.Printf("[WARN] consul: ACL replication error (will retry if still leader): %v", err)
		} else {
			lastRemoteIndex = index
			status.ReplicatedIndex = index
			status.LastSuccess = time.Now()
			s.updateACLReplicationStatus(status)
			s.logger.Printf("[DEBUG] consul: ACL replication completed through remote index %d", index)
		}
	}
	pause := func() {
		if status.Running {
			lastRemoteIndex = 0 // Re-sync everything.
			status.Running = false
			s.updateACLReplicationStatus(status)
			s.logger.Printf("[INFO] consul: ACL replication stopped (no longer leader)")
		}
	}

	// This will slowly poll to see if replication should be active. Once it
	// is and we've caught up, the replicate() call will begin to block and
	// only wake up when the query timer expires or there are new ACLs to
	// replicate. We've chosen this design so that the ACLReplicationInterval
	// is the lower bound for how quickly we will replicate, no matter how
	// much ACL churn is happening on the remote side.
	//
	// The blocking query inside replicate() respects the shutdown channel,
	// so we won't get stuck in here as things are torn down.
	for {
		select {
		case <-s.shutdownCh:
			return

		case <-time.After(s.config.ACLReplicationInterval):
			if s.IsLeader() {
				replicate()
			} else {
				pause()
			}
		}
	}
}
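
// startACLReplication is a hypothetical sketch of how the replicator above
// would be launched during server setup; the actual call site and any config
// gating (e.g. only when ACL replication is configured) are assumptions and
// are not shown in this section.
func (s *Server) startACLReplication() {
	// Run in the background; runACLReplication exits once shutdownCh closes
	// and marks the status as not running on the way out.
	go s.runACLReplication()
}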