// RegisterLoadBalancer is the RPC handler through which a load balancer
// announces itself to the monitor. It records the balancer, waits for the
// global initialization (buddy/worker assignment) to finish, and replies
// with the balancer's buddy, the switch address, and the host:ports of the
// other load balancers. Requests beyond numberOfLoadBalancers are ignored.
func (m *Monitor) RegisterLoadBalancer(args *commproto.RegisterLoadBalancerArgs, reply *commproto.RegisterLoadBalancerReply) error {
	lsplog.Vlogf(3, "[Monitor] A register load balancer received from %s", args.HostPort)
	if m.loadBalancers.Len() < m.numberOfLoadBalancers {
		<-m.registering // acquire the registration "lock"
		lb := &commproto.LoadBalancerInformation{args.HostPort, m.lbID, "", list.New()}
		m.lbID = m.lbID + 1
		m.loadBalancers.PushBack(lb)
		m.init <- 1        // tell initializeDLB one more participant arrived
		m.registering <- 1 // release the registration "lock"
		<-m.initDone       // block until initializeDLB has assigned buddies/workers

		reply.Buddy = lb.BuddyHostPort
		reply.SwitchAddr = m.switchHostPort
		// Fill information for other load balancers
		reply.LoadBalancersHostPort = make([]string, m.loadBalancers.Len()-1)
		i := 0
		for e := m.loadBalancers.Front(); e != nil; e = e.Next() {
			lbi := e.Value.(*commproto.LoadBalancerInformation)
			if args.HostPort != lbi.Hostport {
				reply.LoadBalancersHostPort[i] = lbi.Hostport
				i = i + 1
			}
		}
		thisLB := m.getLoadBalancer(args.HostPort)
		workersReply := make([]string, thisLB.WorkersList.Len())
		i = 0
		for e := thisLB.WorkersList.Front(); e != nil; e = e.Next() {
			workersReply[i] = *(e.Value.(*string))
			// BUG FIX: i was never incremented here, so every worker
			// overwrote workersReply[0] and the rest stayed empty.
			i = i + 1
		}
		// NOTE(review): workersReply is built but never copied into reply —
		// confirm whether the reply struct should carry the worker list.

		m.informSwitch <- 1
		lsplog.Vlogf(3, "[Monitor] Finished registering load balancer %s", args.HostPort)
	}
	return nil
}
Exemple #2
0
// broadcast delivers msg to every replica known to this Libpaxos instance
// over RPC. Replicas that cannot be dialed, or whose call fails, are logged
// and skipped. A nil message is ignored.
func (lp *Libpaxos) broadcast(PacketMsg *Msg) {
	if PacketMsg == nil {
		return
	}

	pkt := Packet{}
	pkt.PacketFrom = lp.self
	pkt.PacketMsg = *PacketMsg

	var reply Reply
	for _, replica := range lp.replicas {
		client, err := rpc.DialHTTP("tcp", replica)
		if lsplog.CheckReport(6, err) {
			lsplog.Vlogf(6, "[Libpaxos] Broadcast to %s failed", replica)
			continue
		}

		if callErr := client.Call("Libpaxos.ReceiveMessage", pkt, &reply); lsplog.CheckReport(1, callErr) {
			lsplog.Vlogf(6, "[Libpaxos] Broadcast call to %s failed", replica)
		}
		client.Close()
	}
}
// BookingFlights runs a single distributed transaction covering every order
// in orderList. It resolves the airline server for each order, hands the
// whole batch to the transaction-layer event loop under a fresh transaction
// id, and blocks until the transaction commits (nil) or aborts (error).
func (tl *TranLayer) BookingFlights(orderList []tranlayerproto.Order) error {
	// Resolve the airline server owning each order.
	tranOrderList := make([]*tranOrder, len(orderList))
	for idx, ord := range orderList {
		conn, pptID, err := tl.lib_conn.GetServerWithAddress(ord.AirlineID)
		if lsplog.CheckReport(2, err) {
			return err
		}
		tranOrderList[idx] = &tranOrder{ord.FlightID, pptID, ord.Amount, conn}
	}

	// Derive a globally unique transaction id from our address and the clock.
	tranID := fmt.Sprintf("%s:%d", tl.myhostport, time.Now().UnixNano())
	lsplog.Vlogf(5, "Begin transaction:"+tranID)

	// Hand the transaction to the store handler and wait for its verdict.
	replyc := make(chan interface{})
	tl.reqChan <- &Request{&reqContent{INIT_TRANS, tranID, tranOrderList}, replyc}
	committed := (<-replyc).(bool)
	lsplog.Vlogf(5, "End of transaction:"+tranID)

	if !committed {
		return lsplog.MakeErr("Transaction Failed")
	}
	return nil
}
// main starts a worker process. It listens for RPC on the port given in
// os.Args[2], registers itself with the monitor at os.Args[1], connects to
// its assigned load balancer over LSP, and then loops forever reading and
// queueing load requests.
func main() {
	lsplog.SetVerbose(3)
	lsplog.Vlogf(3, "[Request] Args: %s", os.Args)
	var e error
	ww := new(Worker)
	ww.load = 0
	ww.endFailureRecovery = make(chan bool, 1)
	ww.startFailureRecovery = make(chan bool, 1)
	ww.reqLock = new(sync.Mutex)
	monitor := os.Args[1]
	port, _ := strconv.Atoi(os.Args[2])
	l, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		// BUG FIX: previously logged the unrelated variable e (always nil
		// here) instead of the actual listen error.
		log.Fatal("listen error:", err)
	}
	_, _, _ = net.SplitHostPort(l.Addr().String())
	//port, _ = strconv.Atoi(listenport)
	rpc.Register(ww)
	rpc.HandleHTTP()
	go http.Serve(l, nil)
	// Register to monitor and get load balancer hostport and its buddy
	ww.cliM, _ = rpc.DialHTTP("tcp", monitor)
	name, _ := os.Hostname()
	addrs, _ := net.LookupHost(name)
	// BUG FIX: port is an int, so the original "%s:%s" produced a malformed
	// address like "1.2.3.4:%!s(int=9000)"; use %d for the port.
	args := &commproto.RegisterWorkerArgs{fmt.Sprintf("%s:%d", addrs[0], port)}
	var reply commproto.RegisterWorkerReply
	ww.cliM.Call("MonitorRPC.RegisterWorker", args, &reply)
	lsplog.Vlogf(3, "[Worker] Received LoadBalancerHostPort: %s", reply.LoadBalancerHostPort)
	// connect to main LB (worker-facing port is the LB port shifted by 10)
	parts := strings.Split(reply.LoadBalancerHostPort, ":")
	fakePort, _ := strconv.Atoi(parts[1])
	fakeHostPort := fmt.Sprintf("%s:%d", parts[0], fakePort+10)
	ww.cli, e = lsp12.NewLspClient(fakeHostPort, &lsp12.LspParams{5, 2000})
	for e != nil || ww.cli == nil {
		ww.cli, e = lsp12.NewLspClient(reply.LoadBalancerHostPort, &lsp12.LspParams{5, 2000})
		lsplog.Vlogf(3, "[Worker] Connection to load balancer failed. \n")
	}
	// queue of requests
	ww.reqList = list.New()

	// lauch the request handler
	go requestHandler(ww)

	// listen to requests; on read failure run failure recovery and resume
	for {
		req, err := ww.cli.Read()
		if err != nil {
			ww.startFailureRecovery <- true
			<-ww.endFailureRecovery
			continue
		}
		lsplog.Vlogf(3, "[Worker] Recieved: %s", string(req))
		responseParts := strings.Split(string(req), " ")
		i, _ := strconv.Atoi(responseParts[1])
		ww.reqLock.Lock()
		ww.load += i
		ww.reqList.PushBack(i)
		ww.reqLock.Unlock()
	}
}
// RegisterWorker is the RPC handler through which a worker announces itself
// to the monitor. It records the worker, waits for the global initialization
// to complete, and replies with the host:port of the load balancer the
// worker was assigned to.
func (m *Monitor) RegisterWorker(args *commproto.RegisterWorkerArgs, reply *commproto.RegisterWorkerReply) error {
	lsplog.Vlogf(3, "[Monitor] A register worker received from %v", args.HostPort)
	if m.workers.Len() == m.numberOfWorkers {
		return nil
	}
	<-m.registering // acquire the registration "lock"
	hostPort := args.HostPort
	m.workers.PushBack(&hostPort)
	m.init <- 1
	m.registering <- 1 // release the registration "lock"
	<-m.initDone       // wait until workers have been assigned to balancers

	// Find which load balancer this worker was assigned to.
search:
	for e := m.loadBalancers.Front(); e != nil; e = e.Next() {
		lbInfo := e.Value.(*commproto.LoadBalancerInformation)
		for we := lbInfo.WorkersList.Front(); we != nil; we = we.Next() {
			if *(we.Value.(*string)) == args.HostPort {
				reply.LoadBalancerHostPort = lbInfo.Hostport
				//reply.Buddy = plb.BuddyHostPort
				break search
			}
		}
	}
	m.informSwitch <- 1
	lsplog.Vlogf(3, "[Monitor] Finished registering worker %v", args.HostPort)
	return nil
}
// Write sends b on the underlying UDP connection, randomly dropping packets
// (per writeDropPercent) to simulate an unreliable network. Dropped writes
// still report success so the caller behaves as if the packet was sent.
func (con *UDPConn) Write(b []byte) (int, error) {
	ncon := con.ncon
	if dropit(writeDropPercent) {
		lsplog.Vlogf(5, "UDP: DROPPING written packet of length %v\n", len(b))
		// Make it look like write was successful
		return len(b), nil
	}
	n, err := ncon.Write(b)
	// BUG FIX: the log format string ended in "\v" (vertical tab); it was
	// almost certainly meant to be "\n" like the sibling methods. The
	// unreachable trailing "return 0, nil" was also removed.
	lsplog.Vlogf(5, "UDP: Wrote packet of length %v\n", n)
	return n, err
}
// WriteToUDP sends b to addr on the underlying UDP connection, randomly
// dropping packets (per writeDropPercent) to simulate an unreliable network.
// Dropped writes still report success so the caller behaves as if the
// packet was sent.
func (con *UDPConn) WriteToUDP(b []byte, addr *UDPAddr) (int, error) {
	ncon := con.ncon
	naddr := &net.UDPAddr{addr.IP, addr.Port}
	if dropit(writeDropPercent) {
		lsplog.Vlogf(5, "UDP: DROPPING written packet of length %v\n", len(b))
		// Make it look like write was successful
		return len(b), nil
	}
	n, err := ncon.WriteToUDP(b, naddr)
	lsplog.Vlogf(5, "UDP: Wrote packet of length %v", n)
	// FIX: removed the unreachable trailing "return 0, nil" and the
	// redundant else-after-return.
	return n, err
}
// getServer
// parameters:
// - key: the key for entry
// return
// - pointer to the RPC connection for the server owning key
// - error
// function:
// - calculate the correct server id to connect (consistent hashing)
// - connect to the server and cache the connection
func (ls *Libstore) getServer(key string) (*rpc.Client, error) {
	// Use beginning of key to group related keys together
	precolon := strings.Split(key, ":")[0]
	keyid := Storehash(precolon)

	// Consistent hashing: pick the node with the smallest NodeID that is
	// still >= keyid; if no such node exists, wrap around to the node with
	// the smallest NodeID overall.
	largeHash := false
	currentMachine := 0
	for i := 0; i < len(ls.nodelist); i++ {
		if ls.nodelist[i].NodeID >= keyid {
			currentMachine = i
			largeHash = true
		}
	}

	if largeHash {
		// Minimize NodeID among nodes whose id is >= keyid.
		for i := 0; i < len(ls.nodelist); i++ {
			if ls.nodelist[i].NodeID >= keyid && ls.nodelist[i].NodeID < ls.nodelist[currentMachine].NodeID {
				currentMachine = i
			}
		}
	} else {
		// Wrap around: take the globally smallest NodeID.
		for i := 0; i < len(ls.nodelist); i++ {
			if ls.nodelist[i].NodeID < ls.nodelist[currentMachine].NodeID {
				currentMachine = i
			}
		}
	}

	// Connect to the storage server, reusing a cached connection if any.
	// FIX: use defer for the unlock (two manual unlock paths before) and
	// drop one of the two duplicated "new connection" log lines.
	ls.connLock.Lock()
	defer ls.connLock.Unlock()
	cli, ok := ls.connMap[currentMachine]
	if cli == nil || !ok {
		newcli, err := rpc.DialHTTP("tcp", ls.nodelist[currentMachine].HostPort)
		if err != nil {
			return nil, err
		}
		// cache the connection
		lsplog.Vlogf(7, "Cached new connection to %s", ls.nodelist[currentMachine].HostPort)
		ls.connMap[currentMachine] = newcli
		cli = newcli
	} else {
		lsplog.Vlogf(7, "Get connection to %s from connection cache", ls.nodelist[currentMachine].HostPort)
	}
	return cli, nil
}
// AppendToList
// function:
// - append args.Value to the list stored under args.Key by forwarding the
//   request to the cache-handler goroutine; replies OK on success and
//   EITEMEXISTS when the value is already present.
func (ss *Storageserver) AppendToList(args *storageproto.PutArgs, reply *storageproto.PutReply) error {
	done := make(chan interface{})
	ss.cacheReqC <- Request{&cacheReq{APPEND_LIST_VALUE, args.Key, args.Value}, done}
	if ok := (<-done).(bool); ok {
		lsplog.Vlogf(5, "Append key successfully %s %s", args.Key, args.Value)
		reply.Status = storageproto.OK
		return nil
	}
	lsplog.Vlogf(5, "Append key failed %s %s", args.Key, args.Value)
	reply.Status = storageproto.EITEMEXISTS
	return nil
}
// RemoveFromList
// function:
// - remove args.Value from the list stored under args.Key by forwarding the
//   request to the cache-handler goroutine; replies OK on success and
//   EITEMNOTFOUND when the value is absent.
func (ss *Storageserver) RemoveFromList(args *storageproto.PutArgs, reply *storageproto.PutReply) error {
	done := make(chan interface{})
	ss.cacheReqC <- Request{&cacheReq{REMOVE_LIST_VALUE, args.Key, args.Value}, done}
	if ok := (<-done).(bool); ok {
		lsplog.Vlogf(5, "Remove key successfully %s %s", args.Key, args.Value)
		reply.Status = storageproto.OK
		return nil
	}
	lsplog.Vlogf(5, "Remove key failed %s %s", args.Key, args.Value)
	reply.Status = storageproto.EITEMNOTFOUND
	return nil
}
// Shut down all network activity: flag the server's event loops to stop and
// close the UDP socket so blocked reads/writes return.
func (srv *LspServer) stopGlobalNetwork() {
	srv.stopGlobalNetworkFlag = true
	if err := srv.udpConn.Close(); lsplog.CheckReport(4, err) {
		lsplog.Vlogf(6, "Server Continuing\n")
	}
}
// Shutting down network communications: flag the client's connection loop to
// stop and close the UDP socket so blocked reads/writes return.
func (cli *LspClient) stopNetwork() {
	cli.lspConn.stopNetworkFlag = true
	if err := cli.udpConn.Close(); lsplog.CheckReport(4, err) {
		lsplog.Vlogf(6, "Client Continuing\n")
	}
}
Exemple #13
0
// requestHandler is the transaction layer's single event loop. It first
// drains transactions left over from a previous incarnation
// (oldTransactionMap), re-sending one pending order per pass, and only once
// that map is empty does it service new requests arriving on reqChan.
func (tl *TranLayer) requestHandler() {
	for {
		// send remain transaction orders
		if len(tl.oldTransactionMap) != 0 {
			// Map iteration order is unspecified, so an arbitrary old
			// transaction is picked each pass; the break below limits each
			// pass to a single order.
			for transID, oldTransaction := range tl.oldTransactionMap {
				order := oldTransaction.orderList.Remove().(*tranOrder)

				flightID := order.flightID
				amount := order.amount
				conn := order.conn
				pptList := oldTransaction.pptList

				args := &airlineproto.TranArgs{transaction.TRANS_INIT, transID, flightID, amount, pptList, 0, tl.myhostport}
				var reply airlineproto.TranReply
				conn.Call("AirlineRPC.Transaction", args, &reply)

				// Once the last order has been re-sent, retire the
				// transaction and start its expiry timer.
				if oldTransaction.orderList.Empty() {
					delete(tl.oldTransactionMap, transID)
					go tl.transTimer(transID)
				}

				break
			}
			continue
		}

		request := <-tl.reqChan
		lsplog.Vlogf(6, "Handling request")
		switch request.content.reqType {
		case INIT_TRANS, TRANS_RESPONSE, TRANS_EXPIRED, TRANS_OLD:
			tl.transactionHandler(request)
		}
	}
}
// expireTrans finalizes a transaction whose timer has fired: it pushes the
// recorded decision to every participant that voted COMMIT, then wakes the
// goroutine blocked in BookingFlights with the overall outcome (true for
// COMMIT, false otherwise). Unknown transaction ids are ignored.
func (tl *TranLayer) expireTrans(request *Request) {
	lsplog.Vlogf(5, "Expire transaction...")

	transID := request.content.key
	activeTrans, exist := tl.activeTransMap[transID]
	if !exist {
		return
	}
	// send decision to all participants that vote commit
	// TODO: store log to persistent storage
	for transKey, vote := range activeTrans.commitGrp {
		if vote == transaction.COMMIT {
			conn := activeTrans.pptMap[transKey]
			flightID := transKey
			args := &airlineproto.TranArgs{transaction.TRANS_RESPONSE,
				transID, flightID, 0, nil, activeTrans.decision, tl.myhostport}
			var reply airlineproto.TranReply
			conn.Call("AirlineRPC.Transaction", args, &reply)
		}
	}

	// BUG FIX: the missing-key case was previously ignored (replyc, _ :=),
	// and sending on the resulting nil channel would block this goroutine
	// forever. Bail out instead.
	replyc, ok := tl.deferTransMap[transID]
	if !ok {
		return
	}
	replyc <- (activeTrans.decision == transaction.COMMIT)
}
// Non-master servers call RegisterServer on the master to join the ring.
// Each call records the node (up to numnodes); once all expected nodes have
// registered, the reply carries the complete server list and Ready=true.
func (ss *Storageserver) RegisterServer(args *storageproto.RegisterArgs, reply *storageproto.RegisterReply) error {
	// BUG FIX: the original format string had no verb, so the NodeID
	// argument was printed as an %!(EXTRA ...) artifact instead of a value.
	lsplog.Vlogf(5, "Connected: %v", args.ServerInfo.NodeID)
	ss.serverListLock.Lock()
	defer ss.serverListLock.Unlock()
	if ss.cntnodes < ss.numnodes {
		ss.servers[ss.cntnodes] = args.ServerInfo
		ss.cntnodes += 1
	}

	if ss.cntnodes == ss.numnodes {
		lsplog.Vlogf(5, "Ready")
		reply.Ready = true
		reply.Servers = ss.servers
	}
	return nil
}
// ReportLoadBalancerFailure handles the RPC sent when a load balancer dies.
// It removes the failed balancer, redistributes its orphaned workers
// round-robin over the surviving balancers (notifying each worker of its new
// balancer and buddy), and replies with the failed node's buddy so the
// caller can adopt it.
func (m *Monitor) ReportLoadBalancerFailure(args *commproto.ReportFailureArgs, reply *commproto.ReportFailureReply) error {
	lsplog.Vlogf(3, "[Monitor] LoadBalancer: %s failed", args.FailedHostPort)
	failed := m.deleteLoadBalancer(args.FailedHostPort)
	// Maps each surviving balancer's host:port to the orphaned workers it
	// inherits, for redistributeWorkers below.
	lbToNewWorkers := make(map[string]*list.List)
	failedWorkersList := failed.WorkersList
	for e := failedWorkersList.Front(); e != nil; e = e.Next() {
		wo := e.Value.(*string)
		// Rotate: pop the front balancer, give it the worker, push it back,
		// so workers spread evenly across survivors.
		in := m.loadBalancers.Remove(m.loadBalancers.Front())
		lb := in.(*commproto.LoadBalancerInformation)
		lb.WorkersList.PushBack(wo)
		m.loadBalancers.PushBack(lb)
		m.updateWorker(*wo, lb.Hostport, lb.BuddyHostPort)
		if lbToNewWorkers[lb.Hostport] == nil {
			lbToNewWorkers[lb.Hostport] = list.New()
		}
		lbToNewWorkers[lb.Hostport].PushBack(wo)
	}
	m.redistributeWorkers(failed, lbToNewWorkers)

	//TODO
	// Distribute workers

	reply.NewBuddy = failed.BuddyHostPort
	return nil
}
Exemple #17
0
// RPC
// TransResponse receives either a participant's vote (TRANS_RESPONSE) or an
// inquiry about an old transaction (TRANS_OLD) and forwards it to the
// transaction-layer event loop. TRANS_RESPONSE is acknowledged with OK;
// TRANS_OLD replies with the decision the event loop looks up.
func (tl *TranLayer) TransResponse(args *airlineproto.TranArgs, reply *airlineproto.TranReply) error {
	// FIX: corrected the "Reeceived trans respose" typos in the log line.
	lsplog.Vlogf(5, args.TranID+": Received trans response %d", args.Vote)
	switch args.TranType {
	case transaction.TRANS_RESPONSE:
		// Fire-and-forget: the event loop processes the vote asynchronously.
		tl.reqChan <- &Request{&reqContent{TRANS_RESPONSE, "", args}, nil}
		reply.Status = airlineproto.OK
	case transaction.TRANS_OLD:
		// Synchronous: wait for the event loop to look up the old decision.
		replyc := make(chan interface{})
		tl.reqChan <- &Request{&reqContent{TRANS_OLD, "", args}, replyc}
		// BUG FIX: the original unconditionally set reply.Status = OK after
		// this switch, clobbering the decision fetched here.
		reply.Status = (<-replyc).(int)
	}
	// BUG FIX: an unknown TranType previously left req nil and crashed on
	// req.reqType; it now simply returns with the zero Status.
	return nil
}
// buddyHeartbeat pings this load balancer's buddy over RPC every two
// seconds. When a ping fails the buddy is presumed dead and failure
// recovery is started to obtain a new one.
func (lb *LoadBalancer) buddyHeartbeat() {
	for {
		time.Sleep(2 * time.Second)
		lsplog.Vlogf(2, "[LoadBalancer] Heartbeat")
		//HeartBeat
		buddyRPC := lb.LbsRPC[lb.buddy]
		if buddyRPC == nil {
			continue
		}
		if err := buddyRPC.Call("LoadBalancerRPC.HeartBeat", nil, nil); err != nil {
			lsplog.Vlogf(3, "dialing: %+v", err)
			lsplog.Vlogf(3, "Buddy failed")

			lb.informFailureAndGetNewBuddy()
		}
	}
}
// initializeDLB coordinates one-time startup. It waits until every expected
// load balancer and worker has registered (init channel), wires the
// balancers' buddy relationships into a ring, assigns workers to balancers
// round-robin, and finally releases every registrant blocked on initDone.
func (m *Monitor) initializeDLB() {
	// Wait for all the load balancers and workers to register
	for i := 0; i < m.numberOfWorkers+m.numberOfLoadBalancers; i++ {
		<-m.init
	}
	lsplog.Vlogf(3, "[Monitor] Initialization is starting")

	// Assign buddys: each balancer's buddy is its predecessor in list order;
	// the wrap-around below makes the first balancer's buddy the last one,
	// forming a ring.
	prevHostPort := ""
	for e := m.loadBalancers.Front(); e != nil; e = e.Next() {
		slb := e.Value.(*commproto.LoadBalancerInformation)
		slb.BuddyHostPort = prevHostPort
		lsplog.Vlogf(3, "[Monitor] HOst port: %s", slb.BuddyHostPort)
		prevHostPort = slb.Hostport
	}
	slb := m.loadBalancers.Front().Value.(*commproto.LoadBalancerInformation)
	slb.BuddyHostPort = prevHostPort

	// Print for debugging
	for e := m.loadBalancers.Front(); e != nil; e = e.Next() {
		prn := e.Value.(*commproto.LoadBalancerInformation)
		lsplog.Vlogf(3, "[Monitor] Host port: %s Buddy: %s", prn.Hostport, prn.BuddyHostPort)
	}

	// Assign workers to load balancers round-robin: pop the front balancer,
	// give it the next worker, and push it to the back of the list.
	for e := m.workers.Front(); e != nil; e = e.Next() {
		for el := m.loadBalancers.Front(); el != nil; el = el.Next() {
			prn := el.Value.(*commproto.LoadBalancerInformation)
			lsplog.Vlogf(3, "[Monitor] Rotation: %s", prn.Hostport)
		}
		lsplog.Vlogf(3, "[Monitor] -----------")
		sw := e.Value.(*string)
		// Rotate the load balancers to assign worker to.
		in := m.loadBalancers.Remove(m.loadBalancers.Front())
		lb := in.(*commproto.LoadBalancerInformation)
		lb.WorkersList.PushBack(sw)
		m.loadBalancers.PushBack(lb)
	}

	// Print for debugging
	// NOTE(review): the 2-second sleep per worker below delays startup
	// noticeably — looks like leftover debugging; confirm before removing.
	for e := m.loadBalancers.Front(); e != nil; e = e.Next() {
		sp := e.Value.(*commproto.LoadBalancerInformation)
		lsplog.Vlogf(1, "[Monitor] Load balancer: %s Buddy: %s", sp.Hostport, sp.BuddyHostPort)
		for el := sp.WorkersList.Front(); el != nil; el = el.Next() {
			lsplog.Vlogf(1, "[Monitor] Worker: %+v", el.Value)
			time.Sleep(2 * time.Second)
		}
	}

	// Finished registering: unblock every RegisterLoadBalancer/RegisterWorker
	// call waiting on initDone.
	for i := 0; i < m.numberOfWorkers+m.numberOfLoadBalancers; i++ {
		m.initDone <- 1
	}
	lsplog.Vlogf(3, "[Monitor] Initialization complete")
}
// RegisterLoadBalancers records the UDP addresses of all load balancers and
// signals the switch's main loop that registration is complete.
func (sw *Switch) RegisterLoadBalancers(args *commproto.RegisterArgs, reply *commproto.RegisterReply) error {
	lsplog.Vlogf(6, "[Switch] Received a registration request")
	sw.numLB = args.NumLB
	sw.activeLBs = make([]*lspnet.UDPAddr, sw.numLB)
	copy(sw.activeLBs, args.Addrs[:sw.numLB])
	sw.registered <- true
	return nil
}
// Put
// function:
// - store args.Value under args.Key by forwarding the request to the
//   cache-handler goroutine; always reports OK once the handler has
//   acknowledged the write.
func (ss *Storageserver) Put(args *storageproto.PutArgs, reply *storageproto.PutReply) error {
	done := make(chan interface{})
	ss.cacheReqC <- Request{&cacheReq{PUT_VALUE, args.Key, args.Value}, done}
	<-done // wait for the handler's acknowledgement
	lsplog.Vlogf(5, "Put key successfully %s %s", args.Key, args.Value)
	reply.Status = storageproto.OK
	return nil
}
// main is a simple load-generating client: it connects to the switch at
// os.Args[1] over LSP and sends os.Args[2] random requests, pausing 100ms
// between sends.
func main() {
	lsplog.SetVerbose(3)
	lsplog.Vlogf(3, "[Request] Args: %s", os.Args)
	hostport := os.Args[1]
	rep, _ := strconv.Atoi(os.Args[2])
	var e error
	lsplog.Vlogf(3, "[Client] Trying to connect to switch.")
	client, e = lsp12.NewLspClient(hostport, &lsp12.LspParams{5, 2000})
	if e != nil {
		// NOTE(review): execution continues with a nil client after a failed
		// connect — the Write below would then crash; confirm intended.
		lsplog.Vlogf(3, "[Client] Connection to switch failed.")
	}
	for i := 0; i < rep; i++ {
		r := random(1, 100)
		// NOTE(review): %f expects a float — confirm random's return type;
		// if it returns an int this produces a malformed request string.
		request := fmt.Sprintf("Request %f", r)
		client.Write([]byte(request))
		// NOTE(review): the message says "time to sleep %d" but the sleep
		// below is a fixed 100ms, not r — confirm which is intended.
		fmt.Printf("Sending request number %d: time to sleep %d \n", i, r)
		time.Sleep(time.Duration(100) * time.Millisecond)
	}
}
Exemple #23
0
// operate applies one incoming Paxos message to this acceptor's
// per-instance protocol state and returns the reply to send back, or nil
// when the message warrants no reply.
func (ac *Acceptor) operate(p *Packet) *Msg {
	msg := p.PacketMsg
	proto, f := ac.proto[msg.Instance]
	if !f {
		//Create protocol info for new instance
		ap := &AcceptorProtocol{}
		ap.init()

		ac.proto[msg.Instance] = ap
		proto = ap
	}

	switch msg.MsgType {
	//Phase 1: Prepare — promise not to honor lower-numbered proposals.
	case PREPARE:
		lsplog.Vlogf(6, "[Acceptor] PREPARE received for instance %d.", msg.Instance)
		if msg.Proposal > proto.proposal {
			proto.proposal = msg.Proposal
			// Reply with the previously accepted proposal/value (zero/empty
			// if none) so the proposer can adopt it.
			return &Msg{PREPARE_OK, proto.proposalAccepted, proto.value, msg.Instance}
		} else {
			lsplog.Vlogf(6, "[Acceptor] Reject proposal number %d.", msg.Proposal)
			return &Msg{PREPARE_REJECT, proto.proposal, "", msg.Instance}
		}

	//Phase 2: Accept — accept iff no higher proposal has been promised.
	case ACCEPT:
		lsplog.Vlogf(6, "[Acceptor] ACCEPT received for instance %d.", msg.Instance)
		lsplog.Vlogf(6, "[Acceptor] n:%d n_h:%d n_a:%d", msg.Proposal, proto.proposal, proto.proposalAccepted)
		if msg.Proposal >= proto.proposal && msg.Proposal != proto.proposalAccepted {
			proto.proposal = msg.Proposal
			proto.proposalAccepted = msg.Proposal
			proto.value = msg.Value

			return &Msg{ACCEPT_OK, msg.Proposal, msg.Value, msg.Instance}
		} else if msg.Proposal < proto.proposal {
			//Seen a higher proposal number
			lsplog.Vlogf(6, "[Acceptor] Reject proposal number %d.", msg.Proposal)
			return &Msg{ACCEPT_REJECT, 0, "", 0}
		}
		// NOTE(review): msg.Proposal == proto.proposalAccepted falls through
		// to the nil return, so a duplicate ACCEPT gets no reply — confirm
		// this is the intended deduplication behavior.
	}
	return nil
}
// iWrite queues payload for transmission and blocks until the client's main
// loop reports the outcome. A non-nil error result is pushed back onto
// writeReplyChan so that every subsequent write fails with the same error.
func (cli *LspClient) iWrite(payload []byte) error {
	// Will fill in ID & sequence number later
	cli.appWriteChan <- GenDataMessage(0, 0, payload)
	result := <-cli.writeReplyChan
	lsplog.Vlogf(5, "Completed write of %s", string(payload))
	if result != nil {
		// Recycle so that subsequent writes will get error
		cli.writeReplyChan <- result
	}
	return result
}
// ReadFromUDP reads one packet into b, randomly "dropping" received packets
// (per readDropPercent) to simulate an unreliable network; dropped packets
// are discarded and the read is retried. Returns the byte count, the
// sender's address (nil if unknown), and any read error.
func (con *UDPConn) ReadFromUDP(b []byte) (n int, addr *UDPAddr, err error) {
	var buffer [2000]byte
	ncon := con.ncon
	var naddr *net.UDPAddr
	done := false
	for !done {
		n, naddr, err = ncon.ReadFromUDP(buffer[0:])
		if err != nil {
			// BUG FIX: errors were previously subject to the random drop
			// too, which could retry forever on a dead connection; now they
			// terminate the loop immediately.
			done = true
		} else if dropit(readDropPercent) {
			lsplog.Vlogf(5, "UDP: DROPPING read packet of length %v\n", n)
		} else {
			lsplog.Vlogf(6, "UDP: Read packet of length %v\n", n)
			// BUG FIX: only copy the n bytes actually read instead of the
			// entire 2000-byte scratch buffer (which smeared stale bytes
			// into b beyond the packet).
			copy(b, buffer[0:n])
			done = true
		}
		if naddr == nil {
			addr = nil
		} else {
			addr = &UDPAddr{naddr.IP, naddr.Port}
		}
	}
	return n, addr, err
}
Exemple #26
0
// doGetFlightsFromIds resolves each flight id to its full FlightInfo via the
// airline library. Any single lookup failure aborts the whole batch.
func (ts *Tribserver) doGetFlightsFromIds(flightids []string) ([]airlineproto.FlightInfo, error) {
	infos := make([]airlineproto.FlightInfo, len(flightids))
	for idx, id := range flightids {
		info, err := ts.lib_airline.GetFlight(id)
		lsplog.Vlogf(6, "Flight id "+id)
		if lsplog.CheckReport(3, err) {
			return nil, lsplog.MakeErr("Error fetching flight from flight id")
		}
		infos[idx] = info
	}

	return infos, nil
}
// GetList
// function:
// - fetch the stored list value for args.Key (EWRONGSERVER if the key does
//   not belong to this server, EITEMNOTFOUND if absent)
// - optionally register a lease for the caller when args.WantLease is set
func (ss *Storageserver) GetList(args *storageproto.GetArgs, reply *storageproto.GetListReply) error {
	if !ss.isKeyInRange(args.Key) {
		reply.Status = storageproto.EWRONGSERVER
		return nil
	}

	// Ask the cache handler for the stored list.
	fetchc := make(chan interface{})
	ss.cacheReqC <- Request{&cacheReq{GET_LIST_VALUE, args.Key, ""}, fetchc}
	stored := <-fetchc
	if stored == nil {
		lsplog.Vlogf(5, "Value not found %s", args.Key)
		reply.Status = storageproto.EITEMNOTFOUND
		return nil
	}

	lsplog.Vlogf(5, "Return Value List %s", args.Key)
	reply.Status = storageproto.OK
	reply.Value = stored.([]string)

	// Lease handling: only attempt to grant one when the caller asked.
	reply.Lease.Granted = false
	if args.WantLease {
		leasec := make(chan interface{})
		ss.cacheReqC <- Request{&cacheReq{PUT_LEASE, args.Key, args.LeaseClient}, leasec}
		if granted := (<-leasec).(bool); granted {
			lsplog.Vlogf(5, "Get Lease successfully %s", args.Key)
			reply.Lease.Granted = true
			reply.Lease.ValidSeconds = storageproto.LEASE_SECONDS
		} else {
			lsplog.Vlogf(5, "Get Lease failed %s", args.Key)
		}
	}
	return nil
}
// Revoke lease from client and wait for response.
// As this blocks the call is made in a new go routine.
func (ss *Storageserver) callCacheRPC(key, client string) {
	// BUG FIX: the original format string had no verb, so the client
	// argument was printed as an %!(EXTRA ...) artifact instead of a value.
	lsplog.Vlogf(4, "Revoke from: %s", client)
	// Connect to the client, reusing a cached connection when possible.
	ss.connLock.Lock()
	cli, ok := ss.connMap[client]
	if cli == nil || !ok {
		newcli, err := rpc.DialHTTP("tcp", client)
		if err != nil {
			ss.connLock.Unlock()
			lsplog.Vlogf(5, "Could not connect to client %s, returning nil", client)
			return
		}

		// cache the connection
		ss.connMap[client] = newcli
		cli = newcli
	}
	ss.connLock.Unlock()

	// Ask the client to give up its lease on key.
	args := &storageproto.RevokeLeaseArgs{key}
	var reply storageproto.RevokeLeaseReply
	lsplog.Vlogf(5, "@@call cache rpc")
	err := cli.Call("CacheRPC.RevokeLease", args, &reply)
	lsplog.Vlogf(5, "@@cache rpc return")
	if err != nil {
		lsplog.Vlogf(5, "RPC failed: %s\n", err)
		return
	}
	if reply.Status == storageproto.OK {
		// Record locally that the lease has been revoked.
		request := &cacheReq{REVOKE_LEASE, key, client}
		replyc := make(chan interface{})
		ss.cacheReqC <- Request{request, replyc}
		<-replyc
	}
	// FIX: removed the redundant bare return at the end of the function.
}
// NewMonitor constructs a Monitor expecting numlb load balancers and
// numworkers workers to register, seeds the registration "lock" channel,
// and launches the initialization and switch goroutines before returning.
func NewMonitor(port int, numlb int, numworkers int, switchHostPort string) *Monitor {
	lsplog.Vlogf(3, "[Monitor] Starting monitor")
	total := numlb + numworkers
	mon := new(Monitor)
	mon.numberOfLoadBalancers = numlb
	mon.numberOfWorkers = numworkers
	mon.switchHostPort = switchHostPort
	mon.loadBalancers = list.New()
	mon.workers = list.New()
	mon.init = make(chan int, total)
	mon.initDone = make(chan int, total)
	mon.informSwitch = make(chan int, total)
	mon.registering = make(chan int, 1)
	mon.registering <- 1 // the registration "lock" starts out free
	go mon.initializeDLB()
	go mon.startSwitch()
	return mon
}
Exemple #30
0
// ViewFlights returns the FlightInfo for every flight matching the given
// origin, destination and departure date.
func (ts *Tribserver) ViewFlights(args *tribproto.ViewFlightsArgs, reply *tribproto.ViewFlightsReply) error {
	lsplog.Vlogf(5, "Get flights:"+args.From+" to:"+args.To+" date:"+args.DepartureDate)

	flightids, idErr := ts.lib_airline.GetAllFlights(args.From, args.To, args.DepartureDate)
	if lsplog.CheckReport(3, idErr) {
		return lsplog.MakeErr("Error fetching flight ids")
	}

	infos, infoErr := ts.doGetFlightsFromIds(flightids)
	if lsplog.CheckReport(3, infoErr) {
		return infoErr
	}

	reply.Flights = infos
	reply.Status = tribproto.OK
	return nil
}