// ReconnectToLB asks the load balancer to route this client to a new server,
// retrying until a server can be dialed or most servers are reported dead.
func (pc *pacClient) ReconnectToLB() error {
	fmt.Println("reconnect called")
	args := &loadbalancerrpc.RouteArgs{Attempt: loadbalancerrpc.RETRY, HostPort: pc.serverHostPort}
	// reply is already a pointer; pass it directly (the original passed
	// &reply, a **RouteReply).
	reply := new(loadbalancerrpc.RouteReply)
	pc.loadBalancer.Call("LoadBalancer.RouteToServer", args, reply)
	if reply.Status == loadbalancerrpc.MOSTFAIL {
		fmt.Println("SERVER ALL FAILED")
		return errors.New("reconnect fail, most servers dead")
	}
	serverConn, err := rpc.DialHTTP("tcp", reply.HostPort)
	for err != nil {
		fmt.Println("trying to get new server")
		args := &loadbalancerrpc.RouteArgs{Attempt: loadbalancerrpc.RETRY, HostPort: reply.HostPort}
		pc.loadBalancer.Call("LoadBalancer.RouteToServer", args, reply)
		if reply.Status == loadbalancerrpc.MOSTFAIL {
			fmt.Println("SERVER ALL FAILED")
			return errors.New("reconnect fail, most servers dead")
		}
		serverConn, err = rpc.DialHTTP("tcp", reply.HostPort)
	}
	pc.serverHostPort = reply.HostPort
	pc.serverConn = serverConn
	return nil
}
// AcceptByNode sends a Paxos accept request to the given node, reporting
// 1 on acceptance or 0 on rejection, RPC error, or timeout via retChan.
func (pn *paxosNode) AcceptByNode(nodeID int, hostPort string, args *paxosrpc.ProposeArgs, retChan chan int) {
	pn.nodeMutex.Lock()
	client, ok := pn.nodeClientMap[nodeID]
	var err error
	if !ok {
		client, err = rpc.DialHTTP("tcp", pn.hostMap[nodeID])
		for i := 0; i < 5 && err != nil; i++ {
			client, err = rpc.DialHTTP("tcp", pn.hostMap[nodeID])
		}
		if err == nil {
			pn.nodeClientMap[nodeID] = client
		} else {
			pn.nodeMutex.Unlock()
			retChan <- 0
			return
		}
	}
	pn.nodeMutex.Unlock()
	acceptArgs := paxosrpc.AcceptArgs{
		Key: args.Key,
		N:   args.N,
		V:   args.V,
	}
	acceptReply := paxosrpc.AcceptReply{}
	// Issue the RPC asynchronously so we can bound how long we wait for it.
	acceptCall := client.Go("PaxosNode.RecvAccept", acceptArgs, &acceptReply, nil)
	select {
	case <-acceptCall.Done:
		if acceptCall.Error != nil {
			retChan <- 0
			return
		}
		switch acceptReply.Status {
		case paxosrpc.OK:
			retChan <- 1
		case paxosrpc.Reject:
			retChan <- 0
		}
	case <-time.After(10 * time.Millisecond):
		// Treat a slow node as a rejection rather than blocking the proposer.
		retChan <- 0
		return
	}
}
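// The Go-then-select timeout pattern above generalizes into a small helper.
// A minimal, self-contained sketch (package name and error text are
// illustrative; note that net/rpc offers no cancellation, so the underlying
// call may still complete after the timeout fires):
package rpcutil

import (
	"errors"
	"net/rpc"
	"time"
)

// callWithTimeout issues an asynchronous RPC via client.Go and fails fast
// if no reply arrives within d.
func callWithTimeout(client *rpc.Client, method string, args, reply interface{}, d time.Duration) error {
	call := client.Go(method, args, reply, nil)
	select {
	case <-call.Done:
		return call.Error
	case <-time.After(d):
		return errors.New("rpc: " + method + " timed out")
	}
}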
// Connect to the daemon for RPC communication.
// Starts the daemon if it is not yet running.
func dialDaemon() *rpc.Client {
	// Try to call the daemon.
	client, err := rpc.DialHTTP("tcp", "localhost"+Port)
	// If the daemon does not seem to be running, start it.
	if SpawnDaemon {
		const SLEEP = 10e6 // nanoseconds (10ms)
		if err != nil {
			forkDaemon()
			time.Sleep(SLEEP)
		}
		// Retry the connection, giving the daemon some time to come up.
		trials := 0
		for err != nil && trials < 10 {
			client, err = rpc.DialHTTP("tcp", "localhost"+Port)
			time.Sleep(SLEEP)
			trials++
		}
	}
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	return client
}
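// forkDaemon is referenced above but not shown. A minimal sketch, assuming
// the daemon is this same binary re-executed with a hypothetical -daemon
// flag; the real implementation may differ.
package daemon

import (
	"log"
	"os"
	"os/exec"
)

func forkDaemon() {
	// Re-exec ourselves in daemon mode (the -daemon flag is an assumption).
	cmd := exec.Command(os.Args[0], "-daemon")
	if err := cmd.Start(); err != nil {
		log.Fatalf("failed to spawn daemon: %v", err)
	}
	// Intentionally not waiting: the child keeps running after we return.
}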
// Connects to the rpc server. A socket path beginning with '/' selects a
// Unix domain socket; anything else is treated as a TCP host:port.
func (self *Client) Connect() (err error) {
	if len(self.socket) > 0 && self.socket[0] != '/' {
		self.Client, err = rpc.DialHTTP("tcp", self.socket)
	} else {
		self.Client, err = rpc.DialHTTP("unix", self.socket)
	}
	return
}
// connect dials the RPC server at addr, retrying once per second until it
// succeeds.
func connect(addr string) *rpc.Client {
	client, err := rpc.DialHTTP("tcp", addr)
	for err != nil {
		logrus.Error(err.Error())
		time.Sleep(1 * time.Second)
		client, err = rpc.DialHTTP("tcp", addr)
	}
	return client
}
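// The fixed one-second retry above can hammer a recovering server. A minimal
// sketch of the same loop with capped exponential backoff (the package name
// and bounds are illustrative assumptions, not from the original):
package rpcutil

import (
	"net/rpc"
	"time"
)

func connectBackoff(addr string) *rpc.Client {
	delay := 100 * time.Millisecond
	const maxDelay = 10 * time.Second
	for {
		client, err := rpc.DialHTTP("tcp", addr)
		if err == nil {
			return client
		}
		time.Sleep(delay)
		// Double the delay after each failure, up to maxDelay.
		if delay *= 2; delay > maxDelay {
			delay = maxDelay
		}
	}
}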
func NewPacClient(loadHostPort string, port int, ID string) (PacClient, error) {
	pac := new(pacClient)
	pac.loadHostPort = loadHostPort
	pac.ID = ID
	pac.logs = make(map[int]string)
	cli, err := rpc.DialHTTP("tcp", loadHostPort)
	if err != nil {
		return nil, err
	}
	pac.loadBalancer = cli
	args := &loadbalancerrpc.RouteArgs{Attempt: loadbalancerrpc.INIT, HostPort: ""}
	var reply loadbalancerrpc.RouteReply
	cli.Call("LoadBalancer.RouteToServer", args, &reply)
	for reply.Status == loadbalancerrpc.NotReady {
		fmt.Println("retrying to connect")
		time.Sleep(1000 * time.Millisecond)
		cli.Call("LoadBalancer.RouteToServer", args, &reply)
	}
	if reply.Status == loadbalancerrpc.MOSTFAIL {
		// The original returned a possibly nil err here; report the actual
		// condition instead.
		return nil, errors.New("most servers failed")
	}
	// Connect to the server the load balancer chose; fall back to
	// ReconnectToLB if it cannot be reached.
	cli2, err := rpc.DialHTTP("tcp", reply.HostPort)
	pac.serverConn = cli2
	pac.serverHostPort = reply.HostPort
	if err != nil {
		if err1 := pac.ReconnectToLB(); err1 != nil {
			fmt.Println("SERVER ALL FAILED")
			return nil, errors.New("reconnect fail, most servers dead")
		}
	}
	fmt.Println("Server connected", reply.HostPort)
	pac.GetLogs()
	go pac.RefreshTimer()
	return pac, nil
}
func NewLoadBalancer(port int, monitorhostport string) *LoadBalancer {
	lb := new(LoadBalancer)
	lsplog.Vlogf(1, "[NewLoadBalancer] Dialing master... %v", monitorhostport)
	// LSP server to communicate with workers.
	lb.server, _ = lsp12.NewLspServer(port+10, &lsp12.LspParams{5, 2000})
	// Connection to the monitor.
	lb.monitor, _ = rpc.DialHTTP("tcp", monitorhostport)
	name, _ := os.Hostname()
	addrs, _ := net.LookupHost(name)
	lb.myAddress = addrs[0]
	args := &commproto.RegisterLoadBalancerArgs{fmt.Sprintf("%s:%v", addrs[0], port)}
	var reply commproto.RegisterLoadBalancerReply
	lb.monitor.Call("MonitorRPC.RegisterLoadBalancer", args, &reply)
	lsplog.Vlogf(1, "[NewLoadBalancer] Completed registration %s %+v", reply.Buddy, reply.LoadBalancersHostPort)
	lb.buddy = reply.Buddy
	lb.loadBalancers = reply.LoadBalancersHostPort
	// Get the workers.
	lb.workers = reply.Workers
	lb.numberOfWorkers = len(lb.workers)
	lsplog.Vlogf(2, "[LoadBalancer] Establishing conn to workers")
	// Establish an RPC connection to each worker.
	lb.workersRPC = make(map[string]*rpc.Client)
	for i := 0; i < len(lb.workers); i++ {
		lb.workersRPC[lb.workers[i]], _ = rpc.DialHTTP("tcp", lb.workers[i])
	}
	lsplog.Vlogf(2, "[LoadBalancer] Establishing conn to LB")
	// Connections to the other load balancers are established lazily.
	lb.LbsRPC = make(map[string]*rpc.Client)
	lsplog.Vlogf(2, "[LoadBalancer] Established connections")
	lb.clientDict = make(map[uint16]*commproto.ClientS)
	lb.replicatedInformation = make(map[string]int)
	// Connection to the switch.
	addr, errResolve := lspnet.ResolveUDPAddr("udp", fmt.Sprintf(":%d", port))
	if errResolve != nil {
		return nil
	}
	connexion, errDial := lspnet.ListenUDP("udp", addr)
	if errDial != nil {
		return nil
	}
	lb.connSwitch = connexion
	lsplog.Vlogf(1, "[LoadBalancer] Received buddy: %s loadBalancers: %s", reply.Buddy, lb.loadBalancers)
	go lb.runLoadBalancer()
	go lb.buddyHeartbeat()
	return lb
}
// getConnection returns a cached RPC connection for the given callback
// address, dialing (and retrying until success) on first use.
func (ss *Storageserver) getConnection(clientCallback string) *rpc.Client {
	ss.callbackLocker.Lock()
	defer ss.callbackLocker.Unlock()
	cli := ss.callbackConnections[clientCallback]
	var e error
	if cli == nil {
		cli, e = rpc.DialHTTP("tcp", clientCallback)
		for e != nil {
			cli, e = rpc.DialHTTP("tcp", clientCallback)
		}
		ss.callbackConnections[clientCallback] = cli
	}
	return cli
}
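// The loop above spins without sleeping while holding callbackLocker, so one
// unreachable callback address can stall every caller forever. A minimal
// bounded-retry sketch that returns an error instead (attempt count, delay,
// and package name are illustrative assumptions):
package rpcutil

import (
	"fmt"
	"net/rpc"
	"time"
)

func dialWithRetries(addr string, attempts int, delay time.Duration) (*rpc.Client, error) {
	var lastErr error
	for i := 0; i < attempts; i++ {
		client, err := rpc.DialHTTP("tcp", addr)
		if err == nil {
			return client, nil
		}
		lastErr = err
		time.Sleep(delay)
	}
	return nil, fmt.Errorf("dial %s failed after %d attempts: %v", addr, attempts, lastErr)
}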
// findConnection returns a cached connection to the given server, dialing
// (and retrying until success) if none exists yet.
func (ls *libstore) findConnection(serverHostPort string) *rpc.Client {
	client, exist := ls.connectionMap[serverHostPort]
	if exist {
		return client
	}
	client, err := rpc.DialHTTP("tcp", serverHostPort)
	for err != nil {
		client, err = rpc.DialHTTP("tcp", serverHostPort)
	}
	ls.connectionMap[serverHostPort] = client
	return client
}
// Join the group by finding our successor and getting all the required data from it.
func (self *Ring) JoinGroup(address string) (err error) {
	client, err := rpc.DialHTTP("tcp", address)
	if err != nil {
		log.Fatal("dialing:", err)
	}
	// Get successor.
	hostPort := net.JoinHostPort(self.Address, self.Port)
	hashedKey := data.Hasher(hostPort + time.Now().String()) // TODO this is a hack
	successor := self.callForSuccessor(hashedKey, address)
	argi := data.NewLocationStore(hashedKey, hostPort)
	client, err = rpc.DialHTTP("tcp", successor.Address)
	if err != nil {
		log.Fatal("dialing:", err)
	}
	fmt.Println(successor)
	// Get the smallest key less than our key and initiate the data transfer.
	var data_t []*data.DataStore
	err = client.Call("Ring.GetEntryData", argi, &data_t)
	// TODO: iterate through the array and add the items all at once.
	length := len(data_t)
	for i := 0; i < length; i++ {
		// Insert the key into my table.
		self.KeyValTable.Insert(*(data_t[i]))
		// Insert the value of the key as my ID.
		newMember := data.NewGroupMember(data_t[i].Key, hostPort, 0, Joining)
		self.updateMember(newMember)
		// Start gossiping.
		if !self.isGossiping {
			go self.Gossip()
		}
	}
	if !self.isGossiping {
		go self.Gossip()
	}
	// Make the hashed key my ID.
	finalMember := data.NewGroupMember(hashedKey, hostPort, 0, Stable)
	self.updateMember(finalMember)
	return
}
// NewLibstore creates a new instance of a TribServer's libstore. masterServerHostPort
// is the master storage server's host:port. myHostPort is this Libstore's host:port
// (i.e. the callback address that the storage servers should use to send back
// notifications when leases are revoked).
//
// The mode argument is a debugging flag that determines how the Libstore should
// request/handle leases. If mode is Never, then the Libstore should never request
// leases from the storage server (i.e. the GetArgs.WantLease field should always
// be set to false). If mode is Always, then the Libstore should always request
// leases from the storage server (i.e. the GetArgs.WantLease field should always
// be set to true). If mode is Normal, then the Libstore should make its own
// decisions on whether or not a lease should be requested from the storage server,
// based on the requirements specified in the project PDF handout. Note that the
// value of the mode flag may also determine whether or not the Libstore should
// register to receive RPCs from the storage servers.
//
// To register the Libstore to receive RPCs from the storage servers, the following
// line of code should suffice:
//
//	rpc.RegisterName("LeaseCallbacks", librpc.Wrap(libstore))
//
// Note that unlike in the NewTribServer and NewStorageServer functions, there is no
// need to create a brand new HTTP handler to serve the requests (the Libstore may
// simply reuse the TribServer's HTTP handler since the two run in the same process).
func NewLibstore(masterServerHostPort, myHostPort string, mode LeaseMode) (Libstore, error) {
	master_server, err := rpc.DialHTTP("tcp", masterServerHostPort)
	if err != nil {
		return nil, errors.New("Cannot connect to the master server")
	}
	// Call GetServers to get the storage servers' information.
	var args storagerpc.GetServersArgs
	var reply storagerpc.GetServersReply
	master_server.Call("StorageServer.GetServers", args, &reply)
	if reply.Status == storagerpc.NotReady {
		// Retry a few times while the ring is still assembling.
		for i := 0; i < 5; i++ {
			time.Sleep(1 * time.Second)
			master_server.Call("StorageServer.GetServers", args, &reply)
			if reply.Status == storagerpc.OK {
				break
			}
		}
	}
	master_server.Close()
	if reply.Status == storagerpc.NotReady {
		return nil, errors.New("Storage Server is not ready yet")
	}
	// Register an RPC connection for each storage server.
	ls := &libstore{}
	// Sort the servers by NodeID.
	sort.Sort(SortNodeByNodeID(reply.Servers))
	ls.servers = reply.Servers
	ls.rpc_connection = make([]*rpc.Client, len(ls.servers))
	ls.host_port = myHostPort
	ls.lease_mode = mode
	ls.query_record = make(map[string]*list.List)
	ls.value_cache = make(map[string]*ValueCacheElement)
	ls.list_cache = make(map[string]*ListCacheElement)
	ls.query_record_locker = new(sync.Mutex)
	ls.value_cache_locker = new(sync.Mutex)
	ls.list_cache_locker = new(sync.Mutex)
	go ls.CacheCleaner()
	err = rpc.RegisterName("LeaseCallbacks", librpc.Wrap(ls))
	if err != nil {
		return nil, errors.New("Could not register Libstore")
	}
	for i, server := range ls.servers {
		// The original discarded dial errors, leaving nil clients behind.
		ls.rpc_connection[i], err = rpc.DialHTTP("tcp", server.HostPort)
		if err != nil {
			return nil, err
		}
	}
	return ls, nil
}
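// SortNodeByNodeID is used by the sort.Sort call above but not defined in
// these snippets. A minimal sketch of a sort.Interface implementation
// consistent with that call; the real definition may differ.
type SortNodeByNodeID []storagerpc.Node

func (s SortNodeByNodeID) Len() int           { return len(s) }
func (s SortNodeByNodeID) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s SortNodeByNodeID) Less(i, j int) bool { return s[i].NodeID < s[j].NodeID }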
// NewLibstore creates a new instance of a TribServer's libstore. masterServerHostPort
// is the master storage server's host:port. myHostPort is this Libstore's host:port
// (i.e. the callback address that the storage servers should use to send back
// notifications when leases are revoked).
//
// The mode argument is a debugging flag that determines how the Libstore should
// request/handle leases. If mode is Never, then the Libstore should never request
// leases from the storage server (i.e. the GetArgs.WantLease field should always
// be set to false). If mode is Always, then the Libstore should always request
// leases from the storage server (i.e. the GetArgs.WantLease field should always
// be set to true). If mode is Normal, then the Libstore should make its own
// decisions on whether or not a lease should be requested from the storage server,
// based on the requirements specified in the project PDF handout. Note that the
// value of the mode flag may also determine whether or not the Libstore should
// register to receive RPCs from the storage servers.
//
// To register the Libstore to receive RPCs from the storage servers, the following
// line of code should suffice:
//
//	rpc.RegisterName("LeaseCallbacks", librpc.Wrap(libstore))
//
// Note that unlike in the NewTribServer and NewStorageServer functions, there is no
// need to create a brand new HTTP handler to serve the requests (the Libstore may
// simply reuse the TribServer's HTTP handler since the two run in the same process).
func NewLibstore(masterServerHostPort, myHostPort string, mode LeaseMode) (Libstore, error) {
	ls := &libstore{
		myHostPort:        myHostPort,
		mode:              mode,
		storageServers:    make(map[uint32]*storagerpc.Node),
		cache:             newCache(),
		storageRPCHandler: make(map[uint32]*rpc.Client),
		accessInfoHub:     newAccessInfoHub(),
	}
	// Connect to the master server and get the server list.
	master, err := rpc.DialHTTP("tcp", masterServerHostPort)
	if err != nil {
		return nil, err
	}
	var args storagerpc.GetServersArgs
	var reply storagerpc.GetServersReply
	ok := false
	for i := 0; i < maximumTrials; i++ {
		err = master.Call("StorageServer.GetServers", &args, &reply)
		if reply.Status == storagerpc.OK {
			ok = true
			break
		}
		time.Sleep(time.Second)
	}
	if !ok {
		return nil, errors.New("Cannot get servers after " + strconv.Itoa(maximumTrials) + " trials")
	}
	// Add the server list. Copy the loop variable before taking its address:
	// with the original &s, every map entry aliased the same iteration
	// variable (pre-Go 1.22 range semantics).
	for _, s := range reply.Servers {
		s := s
		ls.storageServers[s.NodeID] = &s
		ls.storageRPCHandler[s.NodeID], err = rpc.DialHTTP("tcp", s.HostPort)
		if err != nil {
			return nil, err
		}
	}
	// Register the lease callbacks.
	rpc.RegisterName("LeaseCallbacks", librpc.Wrap(ls))
	go ls.gc()
	return ls, nil
}
func SendStore(k *Kademlia, key ID, value []byte, nodeID ID) error {
	c, ok := LookupContact(k, nodeID)
	if !ok {
		return errors.New("node not found")
	}
	address := c.Address()
	client, err := rpc.DialHTTP("tcp", address)
	if err != nil {
		// Unreachable node: drop it from the routing table. Note that the
		// dial failure is deliberately not propagated to the caller.
		k.removeContact(c.NodeID)
		return nil
	}
	// Close on every exit path (the original deferred this after the Call,
	// leaking the connection when the Call failed).
	defer client.Close()
	msgID := NewRandomID()
	req := StoreRequest{k.Self, msgID, key, value}
	var res StoreResult
	err = client.Call("Kademlia.Store", req, &res)
	if err != nil {
		return err
	}
	return res.Err
}
// NewLBClient creates a new LBClient connected to the given address.
func NewLBClient(addr string) (s *LBClient, err error) {
	s = new(LBClient)
	s.addr = addr
	s.rpcClient, err = rpc.DialHTTP("tcp", s.addr)
	return s, err
}
// NewStorageServer creates and starts a new StorageServer. masterServerHostPort
// is the master storage server's host:port address. If empty, then this server
// is the master; otherwise, this server is a slave. numNodes is the total number of
// servers in the ring. port is the port number that this server should listen on.
// nodeID is a random, unsigned 32-bit ID identifying this server.
//
// This function should return only once all storage servers have joined the ring,
// and should return a non-nil error if the storage server could not be started.
func NewStorageServer(masterServerHostPort string, numNodes, port int, nodeID uint32) (StorageServer, error) {
	// Set up this server's info.
	serverInfo := storagerpc.Node{HostPort: fmt.Sprintf("localhost:%d", port), NodeID: nodeID}
	var ss storageServer
	if masterServerHostPort == "" {
		// If this is the master server, set up the list of servers.
		var servers = make([]storagerpc.Node, numNodes)
		servers[0] = serverInfo
		// Create the master server.
		ss = storageServer{topMap: make(map[string]interface{}), nodeID: nodeID,
			servers: servers, count: 1, countLock: sync.Mutex{}, keyLocks: make(map[string]chan int)}
	} else {
		// Try to connect to the master at most five times.
		args := storagerpc.RegisterArgs{ServerInfo: serverInfo}
		var reply storagerpc.RegisterReply
		var err error
		var master *rpc.Client
		for try := 1; try <= 5; try++ {
			master, err = rpc.DialHTTP("tcp", masterServerHostPort)
			if err == nil {
				break
			}
			if try == 5 {
				return nil, err
			}
			time.Sleep(time.Millisecond * 20)
		}
		for i := 1; i <= 5; i++ {
			master.Call("StorageServer.RegisterServer", args, &reply)
			if reply.Status == storagerpc.OK {
				// All servers are connected; create this slave server.
				ss = storageServer{topMap: make(map[string]interface{}), nodeID: nodeID,
					servers: reply.Servers, count: numNodes, countLock: sync.Mutex{}, keyLocks: make(map[string]chan int)}
				break
			}
			// Wait briefly, then try to register with the master again.
			if i == 5 {
				return nil, errors.New("couldn't connect to master")
			}
			time.Sleep(time.Millisecond * 20)
		}
	}
	// Start listening for connections from other storage servers and libstores.
	rpc.RegisterName("StorageServer", &ss)
	rpc.HandleHTTP()
	l, e := net.Listen("tcp", serverInfo.HostPort)
	if e != nil {
		return nil, errors.New("Storage server couldn't start listening")
	}
	go http.Serve(l, nil)
	return &ss, nil
}
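// The registration loop above polls StorageServer.RegisterServer until the
// master reports OK. A minimal sketch of the matching master-side handler,
// reusing the storageServer fields shown above; the real handler may differ.
func (ss *storageServer) RegisterServer(args *storagerpc.RegisterArgs, reply *storagerpc.RegisterReply) error {
	ss.countLock.Lock()
	defer ss.countLock.Unlock()
	// Record this slave if it has not registered before.
	seen := false
	for _, n := range ss.servers[:ss.count] {
		if n.NodeID == args.ServerInfo.NodeID {
			seen = true
			break
		}
	}
	if !seen {
		ss.servers[ss.count] = args.ServerInfo
		ss.count++
	}
	// Reply OK (with the full ring) only once every node has registered.
	if ss.count == len(ss.servers) {
		reply.Status = storagerpc.OK
		reply.Servers = ss.servers
	} else {
		reply.Status = storagerpc.NotReady
	}
	return nil
}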
// NewProxy validates the requests going into a PaxosNode and the responses
// coming out of it. It logs errors that occur during a test.
func NewProxy(nodePort, myPort int) (Proxy, error) {
	p := new(proxy)
	p.prop = new(proposal)
	p.prop.status = UNSET
	p.prop.num = 0
	p.prop.key = ""
	p.prop.val = 0
	p.err = make([]string, 0)
	// Start the server.
	l, err := net.Listen("tcp", fmt.Sprintf(":%d", myPort))
	if err != nil {
		LOGE.Println("Failed to listen:", err)
		return nil, err
	}
	// Create an RPC connection to the paxos node.
	srv, err := rpc.DialHTTP("tcp", fmt.Sprintf("localhost:%d", nodePort))
	if err != nil {
		// Printf, not Println: the original passed a format verb to Println.
		LOGE.Printf("Failed to dial node %d", nodePort)
		return nil, err
	}
	p.srv = srv
	// Register the RPC handlers.
	rpc.RegisterName("PaxosNode", paxosrpc.Wrap(p))
	rpc.HandleHTTP()
	go http.Serve(l, nil)
	return p, nil
}
func main() {
	flag.Parse()
	var err error
	master, err = rpc.DialHTTP("tcp", *masterHostPort)
	if err != nil {
		log.Fatalln("Failed to connect to the master server")
	}
	tests := []testFunc{
		{"testPaxosBasic1", testPaxosBasic1},
		{"testPaxosBasic2", testPaxosBasic2},
		{"testPaxosBasic3", testPaxosBasic3},
		{"testPaxosDuelingLeaders", testPaxosDuelingLeaders},
	}
	if *testType == "dead" {
		*numNodes--
	} else if *testType == "replace" {
		tests = []testFunc{
			{"testPaxosReplaceNode", testPaxosReplaceNode},
		}
	}
	// Run the tests.
	rand.Seed(time.Now().Unix())
	myHostPort = "localhost:" + strconv.Itoa(10000+(rand.Int()%10000))
	t = tester{make(chan string, 1000), myHostPort, nil, 0}
	go t.acceptConnections()
	// Use a distinct loop variable name to avoid shadowing the tester t above.
	for _, test := range tests {
		if b, err := regexp.MatchString(*testRegex, test.name); b && err == nil {
			log.Printf("Running %s:\n", test.name)
			test.f()
			time.Sleep(time.Millisecond * 100)
		}
	}
	log.Printf("Passed (%d/%d) tests\n", passCount, passCount+failCount)
}
func main() {
	flag.Usage = printUsage
	flag.Parse()
	if flag.NArg() != 1 {
		printUsage()
		os.Exit(2)
	}
	clientName := fmt.Sprintf("%s:%d", *subHostname, *subPortNum)
	client, err := rpc.DialHTTP("tcp", clientName)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error dialing\t%s\n", err)
		os.Exit(1)
	}
	for _, subcommand := range subcommands {
		if flag.Arg(0) == subcommand.command {
			if flag.NArg()-1 != subcommand.numArgs {
				printUsage()
				os.Exit(2)
			}
			subcommand.cmdFunc(client, flag.Args()[1:])
			os.Exit(3)
		}
	}
	printUsage()
	os.Exit(2)
}
// connect dials the remote HTTP RPC server at host:port.
func connect(host string, port string) *rpc.Client {
	client, err := rpc.DialHTTP("tcp", host+":"+port)
	if err != nil {
		log.Fatal("dialing:", err)
	}
	return client
}
func (c *Client) rpc() *rpc.Client {
	client, err := rpc.DialHTTP("tcp", c.url)
	if err != nil {
		FatalCli("Error connecting: %s", err)
	}
	return client
}
func SendFindValue(k *Kademlia, key ID, nodeID ID) (ret *FindValueResult, err error) {
	contact, _ := LookupContact(k, nodeID)
	client, err := rpc.DialHTTP("tcp", contact.Address())
	if err != nil {
		k.removeContact(nodeID)
		return
	}
	// Close on every exit path (the original deferred this after the Call,
	// leaking the connection when the Call failed).
	defer client.Close()
	req := new(FindValueRequest)
	req.MsgID = NewRandomID()
	req.Sender = k.Self
	req.Key = key
	err = client.Call("Kademlia.FindValue", req, &ret)
	if err != nil {
		k.removeContact(nodeID)
		return
	}
	// The reply must echo the request's MsgID (the original compared the
	// reply's MsgID against itself, which is always true).
	if !ret.MsgID.Equals(req.MsgID) {
		err = errors.New("FindValue MsgID didn't match SendFindValue MsgID")
	}
	// Reject a returned value whose hash does not match the key (the
	// original condition appears inverted).
	if ret.Value != nil && !CorrectHash(key[:], ret.Value) {
		err = errors.New("Bad hash")
	}
	return
}
func main() { client, err := rpc.DialHTTP("tcp", "127.0.0.1:1234") if err != nil { log.Fatal("dialing:", err) } var str = "hello rpc" var reply string err = client.Call("Echo.Hi", str, &reply) if err != nil { log.Fatal("arith error:", err) } fmt.Printf("Echo: %s\n", reply) var args = &Args{7, 8} var sum int err = client.Call("Sum.Sum", args, &sum) if err != nil { log.Fatal("sum error:", err) } fmt.Printf("Sum: %d + %d = %d\n", args.A, args.B, sum) }
func iNewLibstore(server, myhostport string, flags int) (*Libstore, error) {
	ls := new(Libstore)
	lsplog.Vlogf(3, "[iNewLibstore] Libstore flag %d", flags)
	ls.cache = make(map[string]*Leased)
	ls.cacheLocker = new(sync.Mutex)
	ls.cacheCallbackHostPort = myhostport
	if flags == NONE {
		ls.requests = make(map[string]*Request)
		ls.requestsLocker = new(sync.Mutex)
	}
	if myhostport != "" {
		go ls.revokeLeasesListenerInit(myhostport)
		go ls.cleanCache()
	}
	ls.flags = flags
	masterConnection, e := rpc.DialHTTP("tcp", server)
	if e != nil {
		lsplog.Vlogf(3, "[iNewLibstore] Error while connecting to master node")
		return nil, e
	}
	ls.connections = make(map[string]*rpc.Client)
	ls.connections[server] = masterConnection
	e = ls.getServers(masterConnection)
	if e != nil {
		return nil, e
	}
	return ls, nil
}
func main() { client, err := rpc.DialHTTP("tcp", ":8080") if err != nil { log.Fatal("dialing:", err) } defer client.Close() var metrics messages.MetricList err = client.Call("MetricsServer.ListMetrics", "", &metrics) if err != nil { log.Fatal("Calling:", err) } printAsJson("All metrics", metrics) err = client.Call("MetricsServer.ListMetrics", "/aaa/bbb", &metrics) if err != nil { log.Fatal("Calling:", err) } printAsJson("aaa/bbb metrics", metrics) var single messages.Metric err = client.Call("MetricsServer.GetMetric", "/proc/foo/bar/baz", &single) if err != nil { log.Fatal("Calling:", err) } printAsJson("/proc/foo/bar/baz metric", single) err = client.Call("MetricsServer.GetMetric", "/proc/foo/ddd", &single) if err != nil { log.Println("Got error for /proc/foo/ddd:", err) } else { printAsJson("/proc/foo/ddd metric", single) } time.Sleep(5 * time.Second) }
// StartHTTP starts the external HTTP listeners and backend connections.
func StartHTTP() {
	// External 1.0 handlers.
	httpServeMux := http.NewServeMux()
	httpServeMux.HandleFunc("/public/send", SendPublicMsg)
	httpServeMux.HandleFunc("/sub/send", SendSubMsg)
	httpServeMux.HandleFunc("/get/connectserver", GetAvalConnSrv)
	httpServeMux.HandleFunc("/get/offlinemsg", GetOfflineMsgs)
	for _, bind := range Conf.HttpBind {
		fmt.Printf("start http listen addr:\"%s\"", bind)
		go httpListen(httpServeMux, bind)
	}
	// Start the send-server RPC client. Log before panicking: in the
	// original, the Printf and return after panic were unreachable.
	client, err := rpc.DialHTTP("tcp", Conf.Ms)
	if err != nil {
		fmt.Printf("web start rpc failed, connect %s failed\n", Conf.Ms)
		panic(err.Error())
	}
	msClient = client
	conn, err := redis.Dial("tcp", Conf.Redis)
	if err != nil {
		panic(err.Error())
	}
	redClient = conn
	zkConn = common.ZkConnect(Conf.ZooKeeper)
}
func (lc *Libconn) GetServerWithAddress(entityId string) (*rpc.Client, string, error) {
	// Get the entity info.
	args := &storageproto.GetArgs{entityId, false, ""}
	var reply storageproto.GetListReply
	err := lc.conn.Call("StorageRPC.GetList", args, &reply)
	if err != nil {
		log.Printf("[%s:%s] Cannot connect to master storage\n", lc.entityid, lc.myhostport)
		return nil, "", lsplog.MakeErr("Connect master storage failed")
	}
	if reply.Status != storageproto.OK {
		log.Printf("[%s:%s] Cannot find address for: %s\n", lc.entityid, lc.myhostport, entityId)
		return nil, "", lsplog.MakeErr("Get agency info failed")
	}
	if len(reply.Value) == 0 {
		return nil, "", lsplog.MakeErr("GetServer: Empty list from master for airline:" + entityId)
	}
	// Create an RPC connection to the airline server, picking a replica at
	// random unless a preference is set.
	pos := 0
	if lc.prefer == 0 {
		pos = int(time.Now().UnixNano()) % len(reply.Value)
	} else {
		pos = lc.prefer % len(reply.Value)
	}
	server := reply.Value[pos]
	cli, err := rpc.DialHTTP("tcp", server)
	if err != nil {
		log.Printf("[%s:%s] Cannot connect to: %s\n", lc.entityid, lc.myhostport, entityId)
		return nil, "", err
	}
	return cli, server, nil
}
// Broadcast a message to all replicas.
func (lp *Libpaxos) broadcast(PacketMsg *Msg) {
	if PacketMsg == nil {
		return
	}
	p := Packet{}
	p.PacketFrom = lp.self
	p.PacketMsg = *PacketMsg
	var reply Reply
	for _, r := range lp.replicas {
		// Note: a fresh connection is dialed per replica on every broadcast,
		// and the message is also delivered to this node itself.
		client, err := rpc.DialHTTP("tcp", r)
		if lsplog.CheckReport(6, err) {
			lsplog.Vlogf(6, "[Libpaxos] Broadcast to %s failed", r)
			continue
		}
		err = client.Call("Libpaxos.ReceiveMessage", p, &reply)
		if lsplog.CheckReport(1, err) {
			lsplog.Vlogf(6, "[Libpaxos] Broadcast call to %s failed", r)
		}
		client.Close()
	}
}
func getRpcClient(c *cli.Context) *rpc.Client {
	client, err := rpc.DialHTTP("tcp", fmt.Sprintf("%s:%d", c.GlobalString("host"), c.GlobalInt("port")))
	if err != nil {
		log.Fatal("dialing:", err)
	}
	return client
}
func main() {
	if len(os.Args) != 2 {
		fmt.Println("Usage: ", os.Args[0], "server")
		os.Exit(1)
	}
	serverAddress := os.Args[1]
	client, err := rpc.DialHTTP("tcp", serverAddress+":1234")
	if err != nil {
		log.Fatal("dialing:", err)
	}
	// Synchronous call
	args := rpc_common.Args{17, 8}
	var reply int
	err = client.Call("Arith.Multiply", args, &reply)
	if err != nil {
		log.Fatal("arith error:", err)
	}
	fmt.Printf("Arith: %d*%d=%d\n", args.A, args.B, reply)
	var quot rpc_common.Quotient
	// The original passed a garbled argument here; the quotient struct is
	// the reply value.
	err = client.Call("Arith.Divide", args, &quot)
	if err != nil {
		log.Fatal("arith error:", err)
	}
	fmt.Printf("Arith: %d/%d=%d remainder %d\n", args.A, args.B, quot.Quo, quot.Rem)
}
func TestRPCTerminate(t *testing.T) {
	c, err := rpc.DialHTTP("tcp", ":9027")
	if err != nil {
		t.Fatal(err)
	}
	dir, err := ioutil.TempDir(os.TempDir(), "etcd-agent")
	if err != nil {
		t.Fatal(err)
	}
	var pid int
	err = c.Call("Agent.RPCStart", []string{"--data-dir", dir}, &pid)
	if err != nil {
		t.Fatal(err)
	}
	err = c.Call("Agent.RPCTerminate", struct{}{}, nil)
	if err != nil {
		t.Fatal(err)
	}
	// The agent is expected to remove its data directory on termination.
	if _, err := os.Stat(dir); !os.IsNotExist(err) {
		t.Fatal(err)
	}
}