Example #1
func NewLibstore(masterServerHostPort, myHostPort string) (Libstore, error) {
	/* Upon creation, an instance of the Libstore first contacts the master storage
	server using the GetServers RPC to retrieve the list of storage servers
	available for use during the session. */

	client, err := util.TryDial(masterServerHostPort)
	if err != nil {
		return nil, err
	}

	// launch the coordinator
	coord, err := coordinator.StartCoordinator(masterServerHostPort)
	if err != nil {
		return nil, err
	}

	connectionsMap := make(map[string]*rpc.Client)
	connectionsMap[masterServerHostPort] = client

	ls := &libstore{
		client:               client,
		connections:          connectionsMap,
		coord:                coord,
		storageServers:       nil,
		masterServerHostPort: masterServerHostPort,
	}

	args := &storagerpc.GetServersArgs{}
	var reply storagerpc.GetServersReply

	// attempt to get the list of servers in the ring from the MasterStorageServer
	for i := 0; i < util.MaxConnectAttempts; i++ {
		err := ls.connections[masterServerHostPort].Call("CohortStorageServer.GetServers", args, &reply)
		if err == nil && reply.Status == storagerpc.OK {
			ls.storageServers = reply.Servers
			break
		}
		// storage servers not ready yet (or the RPC failed); retry after a second
		time.Sleep(time.Second)
	}
	if ls.storageServers == nil {
		return nil, errors.New("libstore: storage servers not ready after max connect attempts")
	}

	// connect to each of the storageServers when we acquire the list
	for i := 0; i < len(ls.storageServers); i++ {
		hostport := ls.storageServers[i].HostPort
		if hostport != masterServerHostPort { // Don't dial the master twice
			cli, err := util.TryDial(hostport)
			if err != nil {
				return nil, err
			}

			ls.connections[hostport] = cli
		}
	}

	return ls, nil
}
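
A minimal caller-side sketch for the constructor above: the import path, hostports, and error handling are placeholders rather than part of the original project; only the NewLibstore signature is taken from Example #1.

package main

import (
	"log"

	"github.com/example/project/libstore" // placeholder import path
)

func main() {
	// "localhost:9009" stands in for the master storage server's hostport and
	// "localhost:9010" for this client's own hostport.
	ls, err := libstore.NewLibstore("localhost:9009", "localhost:9010")
	if err != nil {
		log.Fatalln("failed to create Libstore:", err)
	}
	_ = ls // the Libstore now holds cached RPC connections to every server in the ring
}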
Example #2
func StartCoordinator(masterServerHostPort string) (Coordinator, error) {
	cli, err := util.TryDial(masterServerHostPort)
	if err != nil {
		return nil, err
	}

	args := &storagerpc.GetServersArgs{}
	var reply storagerpc.GetServersReply

	// attempt to get the list of servers in the ring from the MasterStorageServer
	var servers []storagerpc.Node
	for t := util.MaxConnectAttempts; ; t-- {
		err := cli.Call("CohortStorageServer.GetServers", args, &reply)
		if err == nil && reply.Status == storagerpc.OK {
			servers = reply.Servers
			break
		}
		if t <= 0 {
			// the storage servers never became ready (or the RPC kept failing)
			log.Println("StorageServers not ready")
			if err != nil {
				return nil, err
			}
			return nil, errors.New("coordinator: storage servers not ready after max connect attempts")
		}
		// wait a second before retrying
		time.Sleep(time.Second)
	}

	// create conns to be cached and add masterServer to map
	conns := make(map[string]*rpc.Client)
	conns[masterServerHostPort] = cli

	// create the coordinator
	coord := &coordinator{
		masterStorageServer: cli,
		servers:             servers,
		connections:         conns,
		nextOperationId:     1,
	}

	return coord, nil
}
Example #3
func NewCohortStorageServer(masterHostPort, selfHostPort string, nodeId uint32, numNodes int) (CohortStorageServer, error) {
	ss := new(cohortStorageServer)

	ss.nodeId = nodeId
	ss.masterHostPort = masterHostPort
	ss.selfHostPort = selfHostPort

	ss.servers = make([]storagerpc.Node, 0, numNodes) // consistent hashing ring; empty if this instance is not the master
	ss.storage = make(map[string]string)
	ss.locks = make(map[string]*sync.RWMutex)
	ss.exists = make(map[uint32]bool)
	ss.rw = new(sync.RWMutex)

	ss.undoLog = make(map[int]LogEntry) // maps a TransactionId to a log entry holding the key, transaction id, and old value
	ss.redoLog = make(map[int]LogEntry) // maps a TransactionId to a log entry holding the key, transaction id, and new value

	ss.numNodes = numNodes
	ss.tickers = make(map[string]uint64)
	ss.setTickers()

	// server is the master and must init the ring and listen for 'RegisterServer' calls
	if masterHostPort == "" {
		ss.master = true
		masterNode := storagerpc.Node{HostPort: selfHostPort, NodeId: nodeId, Master: true}
		ss.exists[nodeId] = true
		ss.servers = append(ss.servers, masterNode)

		// register the RPC handlers, retrying a few times on transient failure
		for errCount := 0; ; errCount++ {
			err := rpc.RegisterName("CohortStorageServer", storagerpc.Wrap(ss))
			if err == nil {
				break
			}
			if errCount == 5 {
				return nil, err
			}
			time.Sleep(time.Second)
		}

		listener, err := net.Listen("tcp", selfHostPort)
		if err != nil {
			return nil, err
		}
		log.Println("Master listening on:", selfHostPort)

		rpc.HandleHTTP()
		go http.Serve(listener, nil)

		return ss, nil
	}

	// server is a slave in the ring
	cli, err := util.TryDial(masterHostPort)
	if err != nil {
		log.Println("error: ", err)
		return nil, err
	}
	// Try to register the slave into the ring with the masterNode
	slaveNode := storagerpc.Node{HostPort: selfHostPort, NodeId: nodeId, Master: false}
	args := &storagerpc.RegisterArgs{ServerInfo: slaveNode}
	var reply storagerpc.RegisterReply
	// keep registering until the master reports the full ring (status OK)
	for {
		if err := cli.Call("CohortStorageServer.RegisterServer", args, &reply); err != nil {
			log.Println("Failed to RegisterServer: ", err)
			return nil, err
		}
		if reply.Status != storagerpc.NotReady {
			break
		}
		// the master is still waiting for other slaves to register; retry after a second
		time.Sleep(time.Second)
	}

	ss.servers = reply.Servers

	// register the RPC handlers, retrying a few times on transient failure
	for errCount := 0; ; errCount++ {
		err := rpc.RegisterName("CohortStorageServer", storagerpc.Wrap(ss))
		if err == nil {
			break
		}
		if errCount == 5 {
			return nil, err
		}
		time.Sleep(time.Second)
	}

	listener, err := net.Listen("tcp", selfHostPort)
	if err != nil {
		return nil, err
	}

	rpc.HandleHTTP()
	go http.Serve(listener, nil)

	return ss, nil
}
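
A hypothetical entry point for launching one storage node with the constructor above. The flag names and import path are illustrative only; the one detail taken from Example #3 is that an empty masterHostPort makes the node the master, while any other value makes it register with that master as a slave.

package main

import (
	"flag"
	"log"

	"github.com/example/project/storageserver" // placeholder import path
)

func main() {
	var (
		masterHostPort = flag.String("master", "", "hostport of the master; empty means this node is the master")
		selfHostPort   = flag.String("self", "localhost:9009", "hostport this node listens on")
		nodeId         = flag.Uint("id", 1, "node id on the consistent-hashing ring")
		numNodes       = flag.Int("nodes", 3, "total number of nodes in the ring")
	)
	flag.Parse()

	// One process per node: the master is started first with -master="",
	// then each slave is started with -master pointing at the master's hostport.
	if _, err := storageserver.NewCohortStorageServer(*masterHostPort, *selfHostPort, uint32(*nodeId), *numNodes); err != nil {
		log.Fatalln("failed to start storage server:", err)
	}
	select {} // the RPC server runs on a background goroutine; block forever
}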
Example #4
// Propose receives a map of [hostport] --> [prepareArgs] for that node to execute
// and makes asynchronous RPC calls to the involved nodes to run the Prepare phase of 2PC
func (coord *coordinator) Propose(prepareMap PrepareMap) (datatypes.Status, error) {

	// Prepare for transaction
	stat := storagerpc.CommitStatus(storagerpc.Commit)
	resultStatus := datatypes.OK

	// count the Prepare calls up front so the reply channel can buffer every
	// async response (net/rpc silently drops completions if the done channel is full)
	totalCalls := 0
	for _, argsList := range prepareMap {
		totalCalls += len(argsList)
	}

	// channel to receive async replies from CohortServers
	doneCh := make(chan *rpc.Call, totalCalls)

	// send out Prepare call to all nodes
	responsesToExpect := 0
	for hostport, argsList := range prepareMap {
		if _, ok := coord.connections[hostport]; !ok {
			cli, err := util.TryDial(hostport)
			if err != nil {
				return datatypes.BadData, err
			}
			coord.connections[hostport] = cli
		}

		for i := 0; i < len(argsList); i++ {
			prepareArgs := argsList[i]
			//log.Println("prepare args", prepareArgs)
			var prepareReply storagerpc.PrepareReply
			coord.connections[hostport].Go("CohortStorageServer.Prepare", prepareArgs, &prepareReply, doneCh)
			responsesToExpect++
		}
	}

	// receive replies from Prepare; any RPC error or non-OK status forces a rollback
	for i := 0; i < responsesToExpect; i++ {
		rpcReply := <-doneCh
		if rpcReply.Error != nil {
			resultStatus = datatypes.BadData
			stat = storagerpc.Rollback
			continue
		}
		if replyStatus := rpcReply.Reply.(*storagerpc.PrepareReply).Status; replyStatus != datatypes.OK {
			resultStatus = replyStatus
			stat = storagerpc.Rollback
		}
	}

	// send the Commit call to all nodes with the updated status
	for hostport, args := range prepareMap {
		for i := 0; i < len(args); i++ {
			commitArgs := &storagerpc.CommitArgs{
				TransactionId: args[i].TransactionId,
				Status:        stat,
			}

			var commitReply storagerpc.CommitReply
			coord.connections[hostport].Go("CohortStorageServer.Commit", commitArgs, &commitReply, doneCh)
		}
	}

	// receive Ack from all nodes
	for i := 0; i < responsesToExpect; i++ {
		rpcReply := <-doneCh
		if rpcReply.Error != nil {
			return 0, rpcReply.Error
		}
	}

	return datatypes.Status(resultStatus), nil
}
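
For orientation, a sketch of one caller-driven 2PC round through Propose. The PrepareMap shape (hostport mapped to a slice of *storagerpc.PrepareArgs) and the presence of Propose on the Coordinator interface are inferred from the code above; the hostports, the transaction id, and the assumption that PrepareArgs carries more fields than the TransactionId shown here are all placeholders.

// proposeWrite drives a single Prepare/Commit round for transaction 1 across
// two cohort servers; everything except the Propose call itself is assumed.
func proposeWrite(coord coordinator.Coordinator) error {
	prepareMap := coordinator.PrepareMap{
		"localhost:9011": {&storagerpc.PrepareArgs{TransactionId: 1}},
		"localhost:9012": {&storagerpc.PrepareArgs{TransactionId: 1}},
	}

	status, err := coord.Propose(prepareMap)
	if err != nil {
		return err
	}
	if status != datatypes.OK {
		// at least one cohort voted to abort, so every cohort rolled back
		log.Println("transaction rolled back with status:", status)
	}
	return nil
}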