Example #1
// ClientRequestSendMessages checks we are the leader and then queues the messages
// Results are only sent once the messages are on the queue
func (node *Node) ClientRequestSendMessages(args *rapi.SendMessagesArgs, results *rapi.SendMessagesResults) error {
	//log.Printf("Starting request send message\n")
	state := node.getState()
	if state == SHUTDOWN_NODE {
		results.Result = rapi.Get_RI_NODE_IN_SHUTDOWN(node.name)
		return nil
	} else if state != LEADER_NODE {
		results.Result = rapi.Get_RI_NODE_NOT_LEADER(node.name, node.getLastSeenLeader())
		return nil
	}
	//log.Printf("Sending to queue aggregator.\n")
	//IDs, err := node.log.Queue(node.getTerm(), args.SentMessages)
	IDs, err := node.writeAggregator.Queue(args.SentMessages)
	//log.Printf("Got response from write aggregator\n")
	results.IDs = IDs
	if err == nil {
		results.Result = rapi.Get_RI_SUCCESS()
		if !args.WaitForCommit {
			return nil
		}
		if len(IDs) > 0 {
			// See if we are waiting on commit
			waitForIndex := IDs[len(IDs)-1]
			for node.getState() == LEADER_NODE {
				if node.commitIndex.WaitOnCommitChange(waitForIndex) >= waitForIndex {
					return nil
				}
			}
			// If we get here then we are no longer the leader - return an error
			results.Result = rapi.Get_RI_NODE_NOT_LEADER(node.name, node.getLastSeenLeader())
		}
	}
	return err
}
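A client would drive this over Go's net/rpc. The sketch below is illustrative only: the service name "Node", the listen address, the rapi import path, and the [][]byte payload type of SentMessages are assumptions not shown in the source.

package main

import (
	"fmt"
	"log"
	"net/rpc"
	// the rapi package import path is project-specific and omitted here
)

func main() {
	// Assumed address and registration name; adjust to the actual deployment.
	client, err := rpc.Dial("tcp", "localhost:7777")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	args := &rapi.SendMessagesArgs{
		SentMessages:  [][]byte{[]byte("hello")}, // payload type is an assumption
		WaitForCommit: true,                      // block until the leader has committed
	}
	results := &rapi.SendMessagesResults{}
	if err := client.Call("Node.ClientRequestSendMessages", args, results); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("result=%v ids=%v\n", results.Result, results.IDs)
}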
Example #2
/*
IdentifyNode is used by other peers in the cluster to identify themselves to this node.
*/
func (handler *RPCHandler) IdentifyNode(args *IdentifyNodeArgs, results *IdentifyNodeResults) error {
	handler.peerName = args.Name
	handler.identified = true
	if args.ClusterID == handler.server.GetClusterID() {
		results.Result = rapi.Get_RI_SUCCESS()
	} else {
		results.Result = rapi.Get_RI_MISMATCHED_CLUSTER_ID(handler.server.address, args.ClusterID, handler.server.GetClusterID())
	}
	return nil
}
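From the dialing peer's side, the handshake could look like the following sketch; the net/rpc service name "RPCHandler" is an assumption, while the Name and ClusterID fields come from the handler above.

// identify announces this node to a freshly dialed peer (a sketch, assuming
// the handler is registered with net/rpc under "RPCHandler").
func identify(client *rpc.Client, myName, clusterID string) error {
	args := &IdentifyNodeArgs{Name: myName, ClusterID: clusterID}
	results := &IdentifyNodeResults{}
	if err := client.Call("RPCHandler.IdentifyNode", args, results); err != nil {
		return err
	}
	// A mismatched cluster ID is reported in results.Result rather than as an
	// RPC error, so callers should inspect it before using the connection.
	return nil
}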
Example #3
// ClientReceiveMessages returns messages at and beyond ID
// If WaitForMessages is true and no messages are currently present, this method blocks
// until at least one message can be returned
func (node *Node) ClientReceiveMessages(args *rapi.ReceiveMessagesArgs, results *rapi.ReceiveMessagesResults) error {
	state := node.getState()
	if state == SHUTDOWN_NODE {
		results.Result = rapi.Get_RI_NODE_IN_SHUTDOWN(node.name)
		return nil
	}

	msgs, nextID, err := node.log.Get(args.ID, args.Quantity, args.WaitForMessages)

	if err == nil {
		results.Result = rapi.Get_RI_SUCCESS()
		results.ReceivedMessages = msgs
		results.NextID = nextID
	}
	return err
}
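Because each call returns NextID, a consumer can tail a topic with a simple loop. A sketch, reusing the client and rapi imports from the Example #1 sketch; the int64 ID type and []byte payload type are assumptions.

// consumeFrom tails a topic from startID onwards, relying on WaitForMessages
// to block server-side whenever the log is drained.
func consumeFrom(client *rpc.Client, startID int64, handle func([]byte)) error {
	nextID := startID
	for {
		args := &rapi.ReceiveMessagesArgs{ID: nextID, Quantity: 100, WaitForMessages: true}
		results := &rapi.ReceiveMessagesResults{}
		if err := client.Call("Node.ClientReceiveMessages", args, results); err != nil {
			return err
		}
		for _, msg := range results.ReceivedMessages {
			handle(msg)
		}
		nextID = results.NextID // resume after the last message returned
	}
}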
Example #4
// GetClusterDetails returns the cluster ID along with the current peer and topic names.
func (srv *ServerNode) GetClusterDetails(args *rapi.GetClusterDetailsArgs, results *rapi.GetClusterDetailsResults) error {
	srv.lock.RLock()
	defer srv.lock.RUnlock()
	if srv.inShutdown {
		results.Result = rapi.Get_RI_NODE_IN_SHUTDOWN(srv.address)
		return nil
	}
	results.ClusterID = srv.config.Cluster_ID
	results.Peers = make([]string, 0, len(srv.config.Peers))
	for _, peerName := range srv.config.Peers {
		results.Peers = append(results.Peers, peerName)
	}
	results.Topics = make([]string, 0, len(srv.config.Topics))
	for topicName := range srv.config.Topics {
		results.Topics = append(results.Topics, topicName)
	}
	results.Result = rapi.Get_RI_SUCCESS()
	return nil
}
Example #5
// GetTopicDetails returns the first, last and commit indexes of this topic's log.
func (node *Node) GetTopicDetails(args *rapi.GetTopicDetailsArgs, results *rapi.GetTopicDetailsResults) error {
	state := node.getState()
	var err error
	if state == SHUTDOWN_NODE {
		results.Result = rapi.Get_RI_NODE_IN_SHUTDOWN(node.name)
		return nil
	}

	results.FirstIndex, err = node.log.FirstIndex()
	if err != nil {
		results.Result = rapi.Get_RI_INTERNAL_ERROR(node.name, err.Error())
		return nil
	}
	results.LastIndex, err = node.log.LastIndex()
	if err != nil {
		results.Result = rapi.Get_RI_INTERNAL_ERROR(node.name, err.Error())
		return nil
	}
	results.CommitIndex = node.log.GetCurrentCommit()
	results.Result = rapi.Get_RI_SUCCESS()
	return nil
}
Example #6
// manageConfigurationChanges runs as a long-lived goroutine, applying each
// configuration change request that arrives on srv.changeConfig and replying
// on the request's Response channel.
func (srv *ServerNode) manageConfigurationChanges() {
	srv_log("Waiting for configuration requests to handle\n")
	for request := range srv.changeConfig {
		srv_log("Processing new configuration change request\n")
		newConfig := request.Configuration
		if newConfig.Cluster_ID == srv.config.Cluster_ID {
			var err error
			srv_log("Recieved new configuration")
			if newConfig.Scope&CNF_Set_Peers != 0 {
				srv_log("Change in peers configuration")
				srv.config.Peers = newConfig.Peers
				srv.setPeers(newConfig.Peers)
				var peerConfigToUse ConfigPeers
				// Use the new configuration for topics only if we are part of the cluster.
				if newConfig.Peers.Contains(srv.address) {
					peerConfigToUse = newConfig.Peers
				} else {
					peerConfigToUse = make(ConfigPeers, 0)
				}
				srv_log("Passing new peer configuration to topics.\n")
				for _, topic := range srv.topics {
					topic.ChangePeerConfiguration(peerConfigToUse)
				}
			} else if newConfig.Scope&CNF_Set_Topic != 0 {
				srv_log("Topic configuration")
				for _, newTopic := range newConfig.Topics {

					if !srv.config.Topics.Contains(newTopic.Name) {
						topicConfig := GetDefaultTopicConfiguration()
						topicConfig.Name = newTopic.Name
						if newTopic.SegmentSize > 0 {
							topicConfig.SegmentSize = newTopic.SegmentSize
						}
						srv.config.Topics[newTopic.Name] = topicConfig
						// Propagate createTopic failures via the outer err instead of
						// shadowing it with :=, and only start the node on success.
						node, createErr := srv.createTopic(topicConfig)
						if createErr != nil {
							err = createErr
							break
						}
						node.StartNode()
					} else {
						// Modification of existing topic.
						configChangesToApply := make([]disklog.DiskLogConfigFunction, 0, 3)
						currentTopic := srv.config.Topics[newTopic.Name]
						if newTopic.SegmentSize > 0 && currentTopic.SegmentSize != newTopic.SegmentSize {
							// Change the current configuration
							currentTopic.SegmentSize = newTopic.SegmentSize
							configChangesToApply = append(configChangesToApply, disklog.SetTargetSegmentSize(newTopic.SegmentSize))
						}
						if newTopic.SegmentCleanupAge >= 0 && currentTopic.SegmentCleanupAge != newTopic.SegmentCleanupAge {
							// Change the current configuration
							currentTopic.SegmentCleanupAge = newTopic.SegmentCleanupAge
							configChangesToApply = append(configChangesToApply, disklog.SetSegmentCleanupAge(newTopic.SegmentCleanupAge))
						}
						if len(configChangesToApply) > 0 {
							// Store any changes we've made
							srv.config.Topics[newTopic.Name] = currentTopic
							// Update the current configuration.
							srv.lock.RLock()
							node := srv.topics[newTopic.Name]
							diskStorage := node.GetCommitLog().GetLogStorage().(*disklog.DiskLogStorage)
							diskStorage.ChangeConfiguration(configChangesToApply...)
							srv.lock.RUnlock()
						}

					}
				}
			} else if newConfig.Scope&CNF_Remove_Topic != 0 {
				srv_log("Topic removal")
				for _, removedTopic := range newConfig.Topics {
					if srv.config.Topics.Contains(removedTopic.Name) {
						currentTopic := srv.config.Topics[removedTopic.Name]
						delete(srv.config.Topics, removedTopic.Name)
						err = srv.removeTopic(currentTopic)
						if err != nil {
							break
						}
					} else {
						srv_log("Topic %v not present - no removal required.\n", removedTopic.Name)
					}
				}
			}
			if err == nil {
				srv_log("Saving configuration\n")
				err = srv.config.SaveConfiguration()
			}

			if err != nil {
				request.Response <- rapi.Get_RI_INTERNAL_ERROR(srv.address, err.Error())
			} else {
				request.Response <- rapi.Get_RI_SUCCESS()
			}
		} else {
			srv_log("Cluster ID give for config change (%v) doesn't match server cluster ID of (%v)\n", newConfig.Cluster_ID, srv.config.Cluster_ID)
			request.Response <- rapi.Get_RI_MISMATCHED_CLUSTER_ID(srv.address, newConfig.Cluster_ID, srv.config.Cluster_ID)
		}
	}
	srv_log("manageConfigurationChanges shutdown\n")
}
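Callers hand changes to this goroutine through srv.changeConfig and block on a per-request response channel. A caller-side sketch; the ConfigChangeRequest and Configuration type names and the rapi.ResultInfo response type are assumptions inferred from how the channel is read above.

// requestConfigChange submits a configuration change and waits for the outcome.
func requestConfigChange(srv *ServerNode, cfg Configuration) rapi.ResultInfo {
	respCh := make(chan rapi.ResultInfo, 1) // buffered so the manager never blocks on reply
	srv.changeConfig <- ConfigChangeRequest{Configuration: cfg, Response: respCh}
	return <-respCh
}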