/*
WaitForShutdown blocks until the server is asked to shut down, then it orchestrates the shutdown.

The shutdown sequence is:

	- Close the RPC and HTTP listeners
	- Shut down the configuration goroutine
	- Ask each topic's RaftNode to shut down, which in turn will:
		- Shut down the election timer / leader loop
		- Ask each peer goroutine to quit
		- Once each peer's connection is closed, shut down the write aggregator
		- Once the write aggregator is shut down, notify the storage of shutdown.  Disk storage will:
			- Shut down the cleanup goroutine
			- Shut down the segment close goroutine
			- Close each open segment
	- Shut down each ServerPeer connection
	- Exit the main goroutine

Note that open RPC connections from other nodes are not closed, but they will behave safely if any new requests are received.
*/
func (srv *ServerNode) WaitForShutdown() {
	reason := <-srv.shutdownServer
	if srv.inShutdown {
		srv_log("Received additional request to shutdown (%v), ignoring as we are in shutdown procedure.\n", reason)
		return
	}
	srv.inShutdown = true
	srv_log("Received request to shutdown: %v", reason)
	//srv_log("Number of goroutines to shutdown: %v\n", runtime.NumGoroutine())
	// Close the RPC and http listener
	srv_log("Shutting down incoming listeners.\n")
	srv.rpcListener.Close()
	if srv.httpListener != nil {
		srv.httpListener.Close()
	}
	if srv.cborListener != nil {
		srv.cborListener.Close()
	}
	if srv.gobListener != nil {
		srv.gobListener.Close()
	}
	srv_log("Shutting down configuration goroutine.\n")
	close(srv.changeConfig)

	// Trigger each raftNode shutdown
	srv.lock.RLock()
	topicsList := make([]*Node, 0, len(srv.topics))
	for _, node := range srv.topics {
		topicsList = append(topicsList, node)
	}
	srv.lock.RUnlock()
	notifier := utils.NewShutdownNotifier(len(topicsList))
	for _, topic := range topicsList {
		topic.Shutdown(notifier)
	}

	done := notifier.WaitForDone(SHUTDOWN_WAIT_TIMER)

	srv_log("Shutdown of %v out of %v topics completed\n", done, len(topicsList))

	srv.lock.Lock()
	peerCount := len(srv.peers)
	peerConnectionsNotifier := utils.NewShutdownNotifier(peerCount)
	for _, srvPeer := range srv.peers {
		srvPeer.shutdown(peerConnectionsNotifier)
	}
	srv.peers = make(map[string]*ServerPeer)
	srv.lock.Unlock()

	done = peerConnectionsNotifier.WaitForDone(SHUTDOWN_WAIT_TIMER)
	srv_log("Shutdown %v out of %v peer connections\n", done, peerCount)

	// Give chance for other goroutines to complete
	time.Sleep(500 * time.Millisecond)

	srv_log("Shutdown procedure complete.")
	return
}
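/*
utils.ShutdownNotifier itself is not part of this listing. As a rough sketch of
the counting pattern its call sites imply - ShutdownDone reports one completion,
WaitForDone waits with a timeout and returns the number of completions seen,
WaitForAllDone blocks until every expected completion arrives - something like
the following would fit (illustrative only, assuming the standard library time
package; this is not the actual utils implementation):
*/
type ShutdownNotifier struct {
	done chan struct{} // one token per goroutine that finishes shutting down
}

func NewShutdownNotifier(expected int) *ShutdownNotifier {
	return &ShutdownNotifier{done: make(chan struct{}, expected)}
}

// ShutdownDone is called exactly once by each goroutine when its shutdown completes.
func (n *ShutdownNotifier) ShutdownDone() {
	n.done <- struct{}{}
}

// WaitForDone waits up to timeout and returns how many completions were seen.
func (n *ShutdownNotifier) WaitForDone(timeout time.Duration) int {
	deadline := time.After(timeout)
	count := 0
	for count < cap(n.done) {
		select {
		case <-n.done:
			count++
		case <-deadline:
			return count
		}
	}
	return count
}

// WaitForAllDone blocks until every expected completion has been reported.
func (n *ShutdownNotifier) WaitForAllDone() {
	for i := 0; i < cap(n.done); i++ {
		<-n.done
	}
}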
/*
Shutdown is used by the ServerNode to shut down this raft node.
*/
func (node *Node) Shutdown(notifier *utils.ShutdownNotifier) {
	go func() {
		// End the leadership loop if we are in one.
		node.setState(SHUTDOWN_NODE)

		// Shutdown the election timer goroutine.
		electionShutdown := utils.NewShutdownNotifier(1)
		node.electionTimer.Shutdown(electionShutdown)

		// Have to wait on the election timer to make sure leadership loop has quit
		// Wait for the election timer to confirm closing.
		if electionShutdown.WaitForDone(RAFT_NODE_SUBSYSTEM_SHUTDOWN_TIMEOUT) != 1 {
			node.node_log("Election timer did not shutdown - proceeding anyway.\n")
		} else {
			node.node_log("Election timer shutdown completed.\n")
		}

		// Start shutdown on the commit log - this releases clients in GET.
		node.log.Shutdown()

		// Get the list of peers - need the lock for this.
		node.lock.RLock()
		peersToClose := len(node.peers)
		peerList := make([]*Peer, 0, peersToClose)
		peerShutdown := utils.NewShutdownNotifier(peersToClose)
		for _, peer := range node.peers {
			peerList = append(peerList, peer)
		}
		node.lock.RUnlock()

		// Ask for shutdown without the lock to avoid possible contention blocking the channel used to send commands to the peer goroutine.
		for _, peer := range peerList {
			peer.shutdown(peerShutdown)
		}

		// Wait for the peers to confirm closing - no timeout.
		peerShutdown.WaitForAllDone()
		node.node_log("Peers all shutdown.\n")

		// Shutdown the write aggregator
		writeAggNotifier := utils.NewShutdownNotifier(1)
		node.writeAggregator.Shutdown(writeAggNotifier)

		// Wait for the write aggregator to shutdown
		writeAggNotifier.WaitForAllDone()
		node.node_log("Write aggregator shutdown complete.")

		// Ask our storage to shutdown
		storageNotifier := utils.NewShutdownNotifier(1)
		store := node.log.GetLogStorage()
		store.Shutdown(storageNotifier)

		storageNotifier.WaitForAllDone()
		node.node_log("Storage shutdown completed.")
		notifier.ShutdownDone()
	}()
}
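/*
peer.shutdown is not shown in this listing. Consistent with the comment above
about avoiding contention on the channel used to send commands to the peer
goroutine, a plausible sketch hands the notifier to that goroutine, which closes
its connection and then acknowledges. All field and method names below
(quitChannel, sendQueue, connection, send) are illustrative assumptions, not the
actual implementation:
*/
func (peer *Peer) shutdown(notifier *utils.ShutdownNotifier) {
	// The peer goroutine owns the connection, so shutdown is just a request.
	peer.quitChannel <- notifier
}

func (peer *Peer) run() {
	for {
		select {
		case msg := <-peer.sendQueue:
			peer.send(msg) // normal replication traffic (not shown here)
		case notifier := <-peer.quitChannel:
			peer.connection.Close() // release the outbound connection
			notifier.ShutdownDone() // lets Node.Shutdown proceed to the write aggregator
			return
		}
	}
}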
func (dlog *DiskLogStorage) Shutdown(notifier *utils.ShutdownNotifier) {
	routinesNotifier := utils.NewShutdownNotifier(2)
	dlog.closeSegmentsShutdownChannel <- routinesNotifier
	dlog.segmentCleanupShutdownChannel <- routinesNotifier
	routinesNotifier.WaitForAllDone()
	// Now close all segments.
	dlog.lock.Lock()
	for _, segment := range dlog.segments {
		segment.Close()
	}
	dlog.lock.Unlock()
	notifier.ShutdownDone()
}
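/*
The two background goroutines referenced above each receive the notifier on
their shutdown channel and acknowledge once they have stopped. A hypothetical
shape for the segment close loop (SEGMENT_CLOSE_INTERVAL and the periodic work
are illustrative; only the shutdown handshake is implied by the code above):
*/
func (dlog *DiskLogStorage) closeSegmentsLoop() {
	for {
		select {
		case notifier := <-dlog.closeSegmentsShutdownChannel:
			// Acknowledge and exit; Shutdown waits for both goroutines before
			// closing the open segments.
			notifier.ShutdownDone()
			return
		case <-time.After(SEGMENT_CLOSE_INTERVAL):
			// Periodic work: close idle segments (body not shown in this listing).
		}
	}
}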
/*
removeTopic shuts down the currently running topic node and removes the topic from the configuration.
It does not delete any data.
*/
func (srv *ServerNode) removeTopic(topic ConfigTopic) error {
	// Remove the topic from our list
	srv.lock.Lock()
	node, ok := srv.topics[topic.Name]
	if !ok {
		// Unknown topic - calling Shutdown on a nil node would panic.
		srv.lock.Unlock()
		return fmt.Errorf("topic %v is not hosted on this node", topic.Name)
	}
	delete(srv.topics, topic.Name)
	// Update the sorted list of topic names
	srv.updateTopicNameListHoldingLock()
	srv.lock.Unlock()
	// Ask the node to shutdown
	notifier := utils.NewShutdownNotifier(1)
	node.Shutdown(notifier)
	notifier.WaitForAllDone()
	srv_log("Topic %v removed - data can now be deleted.\n", topic.Name)
	return nil
}