// This serves a single RPC connection on the given RPC server on
// a random port.
func serve(server *rpc.Server) (err error) {
	if os.Getenv(MagicCookieKey) != MagicCookieValue {
		return errors.New(
			"Please do not execute plugins directly. Packer will execute these for you.")
	}

	// If there is no explicit number of Go threads to use, then set it
	if os.Getenv("GOMAXPROCS") == "" {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	minPort, err := strconv.ParseInt(os.Getenv("PACKER_PLUGIN_MIN_PORT"), 10, 32)
	if err != nil {
		return
	}

	maxPort, err := strconv.ParseInt(os.Getenv("PACKER_PLUGIN_MAX_PORT"), 10, 32)
	if err != nil {
		return
	}

	log.Printf("Plugin minimum port: %d\n", minPort)
	log.Printf("Plugin maximum port: %d\n", maxPort)

	// Set the RPC port range
	packrpc.PortRange(int(minPort), int(maxPort))

	var address string
	var listener net.Listener
	for port := minPort; port <= maxPort; port++ {
		address = fmt.Sprintf("127.0.0.1:%d", port)
		listener, err = net.Listen("tcp", address)
		if err != nil {
			err = nil
			continue
		}

		break
	}

	// If no port in the range could be bound, report it instead of
	// dereferencing a nil listener below.
	if listener == nil {
		return errors.New("Couldn't bind plugin TCP listener")
	}
	defer listener.Close()

	// Output the address to stdout
	log.Printf("Plugin address: %s\n", address)
	fmt.Println(address)
	os.Stdout.Sync()

	// Accept a connection
	log.Println("Waiting for connection...")
	conn, err := listener.Accept()
	if err != nil {
		log.Printf("Error accepting connection: %s\n", err.Error())
		return
	}

	// Serve a single connection
	log.Println("Serving a plugin connection...")
	server.ServeConn(conn)
	return
}
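// Hypothetical usage sketch (not part of the sources above): a plugin binary
// builds an rpc.Server, registers its component, and hands it to serve. The
// Greeter service is purely illustrative; the MagicCookieKey/MagicCookieValue
// and PACKER_PLUGIN_*_PORT environment checks inside serve still apply.
type Greeter struct{}

func (Greeter) Hello(name string, reply *string) error {
	*reply = "hello, " + name
	return nil
}

func servePluginExample() error {
	server := rpc.NewServer()
	if err := server.RegisterName("Greeter", Greeter{}); err != nil {
		return err
	}
	return serve(server)
}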
func (c *ConsumerServer) RegistFunctions(server *rpc.Server) error {
	err := server.Register(new(Call))
	if err != nil {
		log.Printf("Consumer failed to register Call object! err: %s\n", err)
		return err
	}
	return nil
}
func ConnectServer(addr string, s *rpc.Server) error {
	if conn, err := net.Dial("tcp", addr); err == nil {
		s.ServeCodec(NewServerCodec(conn))
	} else {
		return err
	}
	return nil
}
func RegisterServices(server *rpc.Server) {
	for _, s := range services {
		// TODO: the service type is int for now; need to find a way to
		// get the type of an object
		server.Register(s)
	}
}
// Create a new DryMartini object with its own kademlia and RPC server
func NewDryMartini(listenStr string, keylen int) *DryMartini {
	var err error
	var s *rpc.Server
	var dm *DryMartini
	dm = new(DryMartini)

	dm.EasyNewFlowIndex = 0

	// Initialize key pair
	dm.KeyPair, err = rsa.GenerateKey(rand.Reader, keylen)
	if err != nil {
		dbg.Printf("Failed to generate key! %s", true, err)
		panic(1)
	}

	// Initialize flow structs
	dm.Bartender = make(map[UUID]MartiniPick)
	dm.Momento = make(map[UUID][]FlowIDSymmKeyPair)
	dm.MapFlowIndexToFlowID = make(map[int]FlowInfo)

	var host net.IP
	var port uint16
	host, port, err = kademlia.AddrStrToHostPort(listenStr)

	// Initialize our Kademlia
	//portStr := strconv.FormatUint(uint64(port), 10)
	//var rpcPathStr string = kademlia.RpcPath+portStr
	var rpcPathStr = "junk"
	dbg.Printf("making new Kademlia with listenStr:%s, rpcPath:%s\n", Verbose, listenStr, rpcPathStr)
	dm.KademliaInst, s = kademlia.NewKademlia(listenStr, &rpcPathStr)
	kademlia.BucketsAsArray(dm.KademliaInst)

	// myMartiniContact <- ip, port, public key
	dm.myMartiniContact.NodeIP = host.String()
	dm.myMartiniContact.NodePort = port
	dm.myMartiniContact.PubKey = dm.KeyPair.PublicKey.N.String()
	dm.myMartiniContact.PubExp = dm.KeyPair.PublicKey.E

	dbg.Printf("NewDryMartini: making new Kademlia with NodeIP: %s. NodePort:%d\n", Verbose, dm.myMartiniContact.NodeIP, dm.myMartiniContact.NodePort)
	/*
		if Verbose {
			dbg.Printf("NodeIP: %s\n", dm.myMartiniContact.NodeIP)
			dbg.Printf("NodePort: %d\n", dm.myMartiniContact.NodePort)
			dbg.Printf("PubKey: %s\n", dm.myMartiniContact.PubKey)
			dbg.Printf("PubExp: %d\n", dm.myMartiniContact.PubExp)
		}
	*/

	// Register DryMartini with the RPC server
	err = s.Register(dm)
	if err != nil {
		dbg.Printf("Failed to register DryMartini! %s", true, err)
		panic(1)
	}

	return dm
}
func Serve(s *rpc.Server, l net.Listener) {
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal("rpc.Serve: accept:", err.Error())
		}
		go s.ServeCodec(NewServerCodec(conn))
	}
}
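// Hypothetical usage sketch (not part of the sources above): wire Serve to a
// TCP listener. Echo is an illustrative service; NewServerCodec is the custom
// codec constructor this package is assumed to provide.
type Echo struct{}

func (Echo) Ping(msg string, reply *string) error {
	*reply = msg
	return nil
}

func serveExample() error {
	s := rpc.NewServer()
	if err := s.Register(Echo{}); err != nil {
		return err
	}
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return err
	}
	Serve(s, l) // blocks, serving each accepted connection on its own goroutine
	return nil
}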
// registerComponent registers a single Packer RPC component onto
// the RPC server. If id is true, then a unique ID number will be appended
// onto the end of the endpoint.
//
// The endpoint name is returned.
func registerComponent(server *rpc.Server, name string, rcvr interface{}, id bool) string {
	endpoint := name
	if id {
		endpoint = fmt.Sprintf("%s.%d", endpoint, atomic.AddUint64(&endpointId, 1))
	}

	server.RegisterName(endpoint, rcvr)
	return endpoint
}
func waitForConnExit(c net.Conn, server *rpc.Server) (ret chan bool) {
	ret = make(chan bool)
	go func() {
		tcpConn := c.(*net.TCPConn)
		tcpConn.SetKeepAlive(true)
		server.ServeConn(c)
		ret <- true
	}()
	return ret
}
func ServeRedis(l net.Listener, s *rpc.Server) error {
	for {
		conn, err := l.Accept()
		if err != nil {
			return err
		}
		codec := NewRedisServerCodec(conn)
		go s.ServeCodec(codec)
	}
}
func ServeMsgpack(l net.Listener, s *rpc.Server) error {
	for {
		conn, err := l.Accept()
		if err != nil {
			return err
		}
		codec := codec.MsgpackSpecRpc.ServerCodec(conn, &msgpackHandle)
		go s.ServeCodec(codec)
	}
}
func DispatchForever(connch <-chan net.Conn, srv *rpc.Server, clientch chan<- *rpc.Client) {
	for conn := range connch {
		muxed, err := muxconn.Split(conn, 2)
		if err != nil {
			log.Println("birpc: Failed to mux incoming connection from",
				conn.RemoteAddr().String(), "to", conn.LocalAddr().String(), ", dropping")
			continue
		}
		// Server on first muxed conn, client on second
		go srv.ServeConn(muxed[0])
		clientch <- rpc.NewClient(muxed[1])
	}
}
// NewJSONRPCHandler makes a JSON-RPC handler for s.
func NewJSONRPCHandler(s *rpc.Server) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		w.Header().Set("Content-Type", "application/json")
		var b bytes.Buffer
		var codec = jsonrpc.NewServerCodec(&readWriteCloser{r: r.Body, w: &b})
		if err := s.ServeRequest(codec); err != nil {
			panic(err)
		}
		codec.Close()
		io.Copy(w, &b)
	})
}
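// Hypothetical usage sketch (not part of the sources above): mount the
// JSON-RPC handler on an HTTP mux. Arith and MulArgs are illustrative
// placeholders; the path and listen address are arbitrary.
type MulArgs struct{ A, B int }

type Arith struct{}

func (Arith) Multiply(args MulArgs, reply *int) error {
	*reply = args.A * args.B
	return nil
}

func listenJSONRPCExample() error {
	s := rpc.NewServer()
	if err := s.Register(Arith{}); err != nil {
		return err
	}
	http.Handle("/rpc", NewJSONRPCHandler(s))
	return http.ListenAndServe("127.0.0.1:8080", nil)
}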
// NewServerCodec returns a new rpc.ServerCodec using JSON-RPC 2.0 on conn,
// which will use srv to execute batch requests.
//
// If srv is nil then rpc.DefaultServer will be used.
func NewServerCodec(conn io.ReadWriteCloser, srv *rpc.Server) rpc.ServerCodec {
	if srv == nil {
		srv = rpc.DefaultServer
	}
	srv.Register(JSONRPC2{})
	return &serverCodec{
		dec:     json.NewDecoder(conn),
		enc:     json.NewEncoder(conn),
		c:       conn,
		srv:     srv,
		pending: make(map[uint64]*json.RawMessage),
	}
}
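// Hypothetical usage sketch (not part of the sources above): an accept loop
// that serves each connection with the JSON-RPC 2.0 codec, passing srv so the
// codec can route batch requests back through the same server.
func serveJSONRPC2Example(l net.Listener, srv *rpc.Server) error {
	for {
		conn, err := l.Accept()
		if err != nil {
			return err
		}
		go srv.ServeCodec(NewServerCodec(conn, srv))
	}
}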
func (p *ProducerServer) RegistFunctions(server *rpc.Server) error {
	err := server.Register(new(Call))
	if err != nil {
		log.Printf("register Call failed! err: %s\n", err)
		return err
	}
	err = server.Register(new(PMSync))
	if err != nil {
		log.Printf("register PMSync failed! err: %s\n", err)
		return err
	}
	return nil
}
//
// the application wants to create a paxos peer.
// the ports of all the paxos peers (including this one)
// are in peers[]. this server's port is peers[me].
//
func Make(peers []string, me int, rpcs *rpc.Server) *Paxos {
	px := &Paxos{}
	px.peers = peers
	px.me = me

	// Your initialization code here.

	if rpcs != nil {
		// caller will create socket &c
		rpcs.Register(px)
	} else {
		rpcs = rpc.NewServer()
		rpcs.Register(px)

		// prepare to receive connections from clients.
		// change "unix" to "tcp" to use over a network.
		os.Remove(peers[me]) // only needed for "unix"
		l, e := net.Listen("unix", peers[me])
		if e != nil {
			log.Fatal("listen error: ", e)
		}
		px.l = l

		// please do not change any of the following code,
		// or do anything to subvert it.

		// create a thread to accept RPC connections
		go func() {
			for px.dead == false {
				conn, err := px.l.Accept()
				if err == nil && px.dead == false {
					if px.unreliable && (rand.Int63()%1000) < 100 {
						// discard the request.
						conn.Close()
					} else if px.unreliable && (rand.Int63()%1000) < 200 {
						// process the request but force discard of reply.
						c1 := conn.(*net.UnixConn)
						f, _ := c1.File()
						err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
						if err != nil {
							fmt.Printf("shutdown: %v\n", err)
						}
						px.rpcCount++
						go rpcs.ServeConn(conn)
					} else {
						px.rpcCount++
						go rpcs.ServeConn(conn)
					}
				} else if err == nil {
					conn.Close()
				}
				if err != nil && px.dead == false {
					fmt.Printf("Paxos(%v) accept: %v\n", me, err.Error())
				}
			}
		}()
	}

	return px
}
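// Hypothetical usage sketch (not part of the sources above): start three
// local Paxos peers over unix sockets, letting Make create the listener for
// each by passing rpcs == nil. The socket paths are arbitrary.
func makePeersExample() []*Paxos {
	peers := []string{"/tmp/px-0", "/tmp/px-1", "/tmp/px-2"}
	pxa := make([]*Paxos, len(peers))
	for i := range peers {
		pxa[i] = Make(peers, i, nil)
	}
	return pxa
}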
func (lt *localRPCTransport) accept(server *rpc.Server, listener net.Listener) {
	for {
		conn, err := listener.Accept()
		if err != nil {
			if opError, ok := err.(*net.OpError); ok {
				if opError.Err.Error() == "use of closed network connection" {
					return
				}
			}
			glog.Errorf("rpc.Serve: accept: %s", err.Error())
			// Skip this iteration so we never hand a nil conn to ServeConn.
			continue
		}
		go server.ServeConn(conn)
	}
}
// function for handling RPC connection
func handle(server *rpc.Server, conn net.Conn) {
	fmt.Println("start handle")

	//defer conn.Close() //make sure connection gets closed
	remote := conn.RemoteAddr().String() + " --> " + conn.LocalAddr().String()
	fmt.Println("==conn " + remote)

	//requests
	//doRequests(conn)
	// time.Sleep(3 * time.Second)
	fmt.Println("==conn " + remote)

	server.ServeCodec(jsonrpc.NewServerCodec(conn))
	fmt.Println("==discon " + remote)
	fmt.Println("end handle")
}
// Serve announces an RPC service on the client using the given name
// (which must currently be unique amongst all clients).
func (c *Client) Serve(clientName string, rpcServer *rpc.Server) error {
	var clientId string
	rpcServer.RegisterName("ClientRPC", clientRPC{}) // TODO better name
	if err := c.Server.Call("Ncnet-publisher.Publish", &clientName, &clientId); err != nil {
		return err
	}
	clientconn, err := ncnet.Dial(c.Importer, clientId)
	if err != nil {
		return err
	}
	go rpcServer.ServeConn(clientconn)
	return nil
}
// NewPolicyAgent creates a new policy agent
func NewPolicyAgent(agent *OfnetAgent, rpcServ *rpc.Server) *PolicyAgent {
	policyAgent := new(PolicyAgent)

	// initialize
	policyAgent.agent = agent
	policyAgent.Rules = make(map[string]*PolicyRule)
	policyAgent.DstGrpFlow = make(map[string]*ofctrl.Flow)

	// Register for Master add/remove events
	rpcServ.Register(policyAgent)

	// done
	return policyAgent
}
func Dial(url string, srv *rpc.Server) (client *rpc.Client, err error) {
	conn, err := direct.Dial(url)
	if err != nil {
		return
	}
	muxed, err := muxconn.Split(conn, 2)
	if err != nil {
		return
	}
	// Server on second, client on first (reverse of above)
	client = rpc.NewClient(muxed[0])
	go srv.ServeConn(muxed[1])
	return
}
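// Hypothetical usage sketch (not part of the sources above): the dialing side
// of the birpc pairing. srv answers callbacks from the remote peer over one
// mux lane while client issues calls over the other. The URL format and the
// "Peer.Hello" method name are assumptions for illustration only.
func birpcDialExample(srv *rpc.Server) error {
	client, err := Dial("tcp://127.0.0.1:4000", srv)
	if err != nil {
		return err
	}
	defer client.Close()

	var reply string
	return client.Call("Peer.Hello", "ping", &reply)
}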
// Register registers an Otto thing with the RPC server and returns
// the name it is registered under.
func Register(server *rpc.Server, thing interface{}) (name string, err error) {
	nextLock.Lock()
	defer nextLock.Unlock()

	switch t := thing.(type) {
	case app.App:
		name = fmt.Sprintf("Otto%d", nextId)
		err = server.RegisterName(name, &AppServer{App: t})
	default:
		return "", errors.New("Unknown type to register for RPC server.")
	}

	nextId += 1
	return
}
func serveSingleConn(s *rpc.Server) string {
	l := netListenerInRange(portRangeMin, portRangeMax)

	// Accept a single connection in a goroutine and then exit
	go func() {
		defer l.Close()
		conn, err := l.Accept()
		if err != nil {
			panic(err)
		}
		s.ServeConn(conn)
	}()

	return l.Addr().String()
}
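// Hypothetical usage sketch (not part of the sources above): serveSingleConn
// returns the listen address, which a client can dial directly. Pinger is an
// illustrative service; portRangeMin/portRangeMax are assumed package-level
// settings consumed by netListenerInRange.
type Pinger struct{}

func (Pinger) Ping(msg string, reply *string) error {
	*reply = msg
	return nil
}

func callSingleConnExample() error {
	s := rpc.NewServer()
	if err := s.Register(Pinger{}); err != nil {
		return err
	}
	addr := serveSingleConn(s)

	client, err := rpc.Dial("tcp", addr)
	if err != nil {
		return err
	}
	defer client.Close()

	var reply string
	return client.Call("Pinger.Ping", "hello", &reply)
}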
// serve starts listening for RPC calls, and creates a new thread for
// each incoming connection.
func serve(address string, rpcs *rpc.Server) {
	l, err := net.Listen("tcp", address)
	if err != nil {
		panic(err)
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			panic(err)
		}
		go rpcs.ServeConn(conn)
	}
}
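// Hypothetical usage sketch (not part of the sources above): register a
// service and hand a listen address to serve, which blocks, so it runs on its
// own goroutine. KV and its Get method are illustrative placeholders.
type KV struct{ data map[string]string }

func (kv *KV) Get(key string, value *string) error {
	*value = kv.data[key]
	return nil
}

func startKVExample() {
	rpcs := rpc.NewServer()
	rpcs.Register(&KV{data: map[string]string{"greeting": "hello"}})
	go serve("127.0.0.1:9999", rpcs)
}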
// Create a new vrouter instance
func NewVrouter(agent *OfnetAgent, rpcServ *rpc.Server) *Vrouter {
	vrouter := new(Vrouter)

	// Keep a reference to the agent
	vrouter.agent = agent

	// Create a route table and my router mac
	vrouter.routeTable = make(map[string]*OfnetRoute)
	vrouter.flowDb = make(map[string]*ofctrl.Flow)
	vrouter.portVlanFlowDb = make(map[uint32]*ofctrl.Flow)
	vrouter.myRouterMac, _ = net.ParseMAC("00:00:11:11:11:11")

	// Register for Route rpc callbacks
	rpcServ.Register(vrouter)

	return vrouter
}
func (c *Simple) waitForConnections(rpcs *rpc.Server) {
	for {
		conn, err := c.listener.Accept()
		if err == nil {
			if *use_codec {
				//rpcCodec := codec.GoRpc.ServerCodec(conn, &mh)
				rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, &mh)
				go rpcs.ServeCodec(rpcCodec)
			} else {
				go rpcs.ServeConn(conn)
			}
		} else {
			// handle error
			//fmt.Println("ERROR: ", err)
		}
	}
}
// Register registers a Terraform thing with the RPC server and returns
// the name it is registered under.
func Register(server *rpc.Server, thing interface{}) (name string, err error) {
	nextLock.Lock()
	defer nextLock.Unlock()

	switch t := thing.(type) {
	case terraform.ResourceProvider:
		name = fmt.Sprintf("Terraform%d", nextId)
		err = server.RegisterName(name, &ResourceProviderServer{Provider: t})
	case terraform.ResourceProvisioner:
		name = fmt.Sprintf("Terraform%d", nextId)
		err = server.RegisterName(name, &ResourceProvisionerServer{Provisioner: t})
	default:
		return "", errors.New("Unknown type to register for RPC server.")
	}

	nextId += 1
	return
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func FinishStartServer(sm *ShardMaster, servers []string, me int, rpcs *rpc.Server) *ShardMaster {
	os.Remove(servers[me])
	l, e := net.Listen("unix", servers[me])
	if e != nil {
		log.Fatal("listen error: ", e)
	}
	sm.l = l

	go sm.LogWalker()

	// please do not change any of the following code,
	// or do anything to subvert it.
	go func() {
		for sm.dead == false {
			conn, err := sm.l.Accept()
			if err == nil && sm.dead == false {
				if sm.unreliable && (rand.Int63()%1000) < 100 {
					// discard the request.
					conn.Close()
				} else if sm.unreliable && (rand.Int63()%1000) < 200 {
					// process the request but force discard of reply.
					c1 := conn.(*net.UnixConn)
					f, _ := c1.File()
					err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
					if err != nil {
						fmt.Printf("shutdown: %v\n", err)
					}
					go rpcs.ServeConn(conn)
				} else {
					go rpcs.ServeConn(conn)
				}
			} else if err == nil {
				conn.Close()
			}
			if err != nil && sm.dead == false {
				fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
				sm.Kill()
			}
		}
	}()

	return sm
}
// RPCAccept accepts connections on the listener and dispatches them to the
// RPC server for service. Unfortunately the native Go rpc.Accept function
// fatals on any accept error, including temporary failures and closure of
// the listener.
func RPCAccept(ln net.Listener, server *rpc.Server) error {
	errClosing := errors.New("use of closed network connection")
	for {
		conn, err := ln.Accept()
		if err != nil {
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				log.Warningf("RPC accept temporary error: %v", err)
				time.Sleep(1 * time.Second)
				continue
			}
			if oe, ok := err.(*net.OpError); ok && oe.Err.Error() == errClosing.Error() {
				log.Infoln("RPC accept connection closed")
				return nil
			}
			log.Errorf("RPC accept error: %v", err)
			return err
		}
		go server.ServeConn(conn)
	}
}
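// Hypothetical usage sketch (not part of the sources above): run RPCAccept on
// its own goroutine; closing the returned listener ends the loop cleanly. The
// log helpers used here are assumed to be the same ones RPCAccept relies on.
func acceptExample(server *rpc.Server) (net.Listener, error) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}
	go func() {
		if err := RPCAccept(ln, server); err != nil {
			log.Errorf("RPC accept loop ended: %v", err)
		}
	}()
	return ln, nil // caller closes ln to stop the accept loop
}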
func (s *Server) handleConnection(conn net.Conn) {
	// We create a new server each time so that we can have access to the
	// underlying connection. The standard rpc package does not give us access
	// to the calling connection :/
	var server *rpc.Server = rpc.NewServer()

	// Get a free service from the pool.
	var service *Discovery
	select {
	case service = <-s.servicePool:
		// Success
	default:
		service = newDiscoveryService(s)
	}

	// Set up the service variables.
	service.init(conn, atomic.AddInt32(&s.nextConnId, 1))

	// If debugging is enabled, log all rpc traffic.
	var rwc io.ReadWriteCloser = conn
	if *debug {
		rwc = &debugInput{conn}
	}

	// Set up the rpc service and start serving the connection.
	server.Register(service)
	server.ServeCodec(jsonrpc.NewServerCodec(rwc))

	// Connection has disconnected. Remove any registered services.
	s.removeAll(service)

	// Reset the service state.
	service.init(nil, -1)
	select {
	case s.servicePool <- service:
		// Success
	default:
		// Buffer is full
	}
}
// Create a new vxlan instance
func NewVxlan(agent *OfnetAgent, rpcServ *rpc.Server) *Vxlan {
	vxlan := new(Vxlan)

	// Keep a reference to the agent
	vxlan.agent = agent

	// init DBs
	vxlan.macRouteDb = make(map[string]*MacRoute)
	vxlan.vlanDb = make(map[uint16]*Vlan)
	vxlan.macFlowDb = make(map[string]*ofctrl.Flow)
	vxlan.portVlanFlowDb = make(map[uint32]*ofctrl.Flow)

	log.Infof("Registering vxlan RPC calls")

	// Register for Route rpc callbacks
	err := rpcServ.Register(vxlan)
	if err != nil {
		log.Fatalf("Error registering vxlan RPC")
	}

	return vxlan
}