// InitSocket connects the component's PULL reader to localhost:pull and
// binds its PUSH writer on localhost:push, each on its own context.
func (this *Spout) InitSocket(pull, push string) (err error) {
	var reader, writer *zmq.Context
	reader, err = zmq.NewContext()
	if err != nil {
		return
	}
	this.Component.Reader, err = reader.NewSocket(zmq.PULL)
	if err != nil {
		return
	}
	err = this.Component.Reader.Connect("tcp://127.0.0.1:" + pull)
	if err != nil {
		return
	}
	writer, err = zmq.NewContext()
	if err != nil {
		return
	}
	this.Component.Writer, err = writer.NewSocket(zmq.PUSH)
	if err != nil {
		return
	}
	err = this.Component.Writer.Bind("tcp://127.0.0.1:" + push)
	return
}
// BindPublisher creates a PUB socket connected to the given endpoint.
func BindPublisher(endpoint string) *GlcZmq {
	context, err := zmq.NewContext()
	if err != nil {
		log.Fatal(err)
	}
	socket, err := context.NewSocket(zmq.PUB)
	if err != nil {
		log.Fatal(err)
	}
	if err = socket.Connect(endpoint); err != nil {
		log.Fatal(err)
	}
	return &GlcZmq{socket}
}
// BindSubscriber connects a SUB socket to the given endpoint, subscribes it
// to the given filter, and delivers incoming messages to callback from a
// background goroutine.
func BindSubscriber(endpoint string, filter string, callback func(message string)) *GlcZmq {
	context, err := zmq.NewContext()
	if err != nil {
		log.Fatal(err)
	}
	// A subscriber needs a SUB socket, and it receives nothing until a
	// subscription filter is set.
	socket, err := context.NewSocket(zmq.SUB)
	if err != nil {
		log.Fatal(err)
	}
	err = socket.Connect(endpoint)
	if err != nil {
		log.Fatal(err)
	}
	err = socket.SetSubscribe(filter)
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		defer socket.Close()
		for {
			// Block for the next message instead of busy-polling
			// with zmq.DONTWAIT.
			msg, err := socket.Recv(0)
			if err != nil {
				return
			}
			callback(msg)
		}
	}()
	return &GlcZmq{socket}
}
// Initiate connections to all replicas
func NewCommunicator(name string, replicas map[string]*ReplicaInfo) (*Communicator, error) {
	communicator := new(Communicator)
	context, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}
	communicator.context = context
	communicator.pubSocket, err = context.NewSocket(zmq.PUB)
	if err != nil {
		return nil, err
	}
	communicator.subSocket, err = context.NewSocket(zmq.SUB)
	if err != nil {
		return nil, err
	}
	communicator.chunkSocket, err = context.NewSocket(zmq.REP)
	if err != nil {
		return nil, err
	}
	for _, val := range replicas {
		if val.Name == name {
			// This replica: bind the publishing and chunk-serving sockets.
			communicator.pubSocket.Bind("tcp://*:" + val.Port)
			communicator.chunkSocket.Bind("tcp://*:" + val.DemandPort)
			communicator.tag = val.Pid
		} else {
			// Remote replica: subscribe to its publications, filtered by Pid.
			communicator.subSocket.Connect("tcp://" + val.IpAddr + ":" + val.Port)
			communicator.subSocket.SetSubscribe(val.Pid)
		}
	}
	communicator.replicas = replicas
	return communicator, nil
}
func BindProxy(frontend string, backend string) {
	context, err := zmq.NewContext()
	if err != nil {
		log.Fatal(err)
	}
	// create XSUB for publishers to connect to
	xSub, err := context.NewSocket(zmq.XSUB)
	if err != nil {
		log.Fatal(err)
	}
	defer xSub.Close()
	if err = xSub.Bind(frontend); err != nil {
		log.Fatal(err)
	}
	// create XPUB for subscribers to connect to
	xPub, err := context.NewSocket(zmq.XPUB)
	if err != nil {
		log.Fatal(err)
	}
	defer xPub.Close()
	if err = xPub.Bind(backend); err != nil {
		log.Fatal(err)
	}
	// zmq.Proxy blocks until the context is terminated.
	err = zmq.Proxy(xSub, xPub, nil)
	log.Fatalln("Proxy interrupted:", err)
}
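// The three helpers above fit together as an XPUB/XSUB forwarder: BindProxy
// binds both proxy ends, BindPublisher connects a PUB socket to the XSUB
// frontend, and BindSubscriber connects a SUB socket to the XPUB backend.
// A minimal wiring sketch; the ports are illustrative and the GlcZmq send
// API is defined elsewhere in the package:
func proxyUsageSketch() {
	go BindProxy("tcp://*:5550", "tcp://*:5551")

	// Subscribe first so the subscription can propagate through the
	// proxy before anything is published; PUB drops unmatched messages.
	BindSubscriber("tcp://127.0.0.1:5551", "logs.", func(message string) {
		log.Println("received:", message)
	})
	publisher := BindPublisher("tcp://127.0.0.1:5550")
	_ = publisher // publish "logs."-prefixed messages via GlcZmq
}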
// createSockets sets up the 0MQ sockets through which the kernel will
// communicate.
func createSockets(connInfo *ConnectionInfo) (*zmq.Context, *sockets, error) {
	context, err := zmq.NewContext()
	if err != nil {
		return nil, nil, err
	}
	bindSocket := func(t zmq.Type, port int) (*zmq.Socket, error) {
		addr := fmt.Sprintf(
			"%s://%s:%v",
			connInfo.Transport,
			connInfo.IP,
			port,
		)
		socket, err := context.NewSocket(t)
		if err != nil {
			return nil, err
		}
		if err := socket.Bind(addr); err != nil {
			socket.Close()
			return nil, err
		}
		return socket, nil
	}

	var sockets sockets
	var heartbeatSocket *zmq.Socket
	socketPorts := []struct {
		Name   string
		Port   int
		Type   zmq.Type
		Socket **zmq.Socket
	}{
		{"heartbeat", connInfo.HeartbeatPort, zmq.REP, &heartbeatSocket},
		{"shell", connInfo.ShellPort, zmq.ROUTER, &sockets.Shell},
		{"control", connInfo.ControlPort, zmq.ROUTER, &sockets.Control},
		{"stdin", connInfo.StdinPort, zmq.ROUTER, &sockets.Stdin},
		{"iopub", connInfo.IOPubPort, zmq.PUB, &sockets.IOPub},
	}
	for _, socketPort := range socketPorts {
		socket, err := bindSocket(socketPort.Type, socketPort.Port)
		if err != nil {
			// TODO(axw) do we need to close all sockets if one
			// fails? Is terminating the context good enough?
			if err := context.Term(); err != nil {
				log.Printf("terminating context: %v", err)
			}
			return nil, nil, fmt.Errorf(
				"creating %v socket: %v",
				socketPort.Name, err,
			)
		}
		*socketPort.Socket = socket
	}

	go zmq.Proxy(heartbeatSocket, heartbeatSocket, nil)
	return context, &sockets, nil
}
/******************************************************************************
 * Overview:    Zmq initialization
 * Function:    Init
 * Returns:     error
 * Parameters:  name    type    range    description
 *
 ******************************************************************************/
func (this *Zmq) Init(dataCache *DataCache, event *Event) error {
	this.MdataCache = dataCache
	this.Mevent = event
	if this.Mcontext != nil {
		// context already created; Init is idempotent
		return nil
	}
	context, err := zmq4.NewContext()
	if err != nil {
		return err
	}
	this.Mcontext = context
	return nil
}
func New(addr string) (*Router, error) {
	ctx, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}
	router := &Router{
		ctx:       ctx,
		receivers: make(map[*zmq.Socket]*receiver),
		poller:    zmq.NewPoller(),
		addr:      addr,
	}
	return router, nil
}
func (this *sock) setSock(pattern zmq.Type) {
	var err error
	this.ctx, err = zmq.NewContext()
	if err != nil {
		panic(err)
	}
	// Create the socket on the context made above; the package-level
	// zmq.NewSocket would use the default context instead.
	this.zmqSock, err = this.ctx.NewSocket(pattern)
	if err != nil {
		panic(err)
	}
	this.initialiser.init(this.zmqSock)
}
func main() {
	context, _ := zmq.NewContext()
	server, _ := context.NewSocket(zmq.REP)
	defer server.Close()
	server.Bind(SERVER_END)
	for cycles := 1; ; cycles++ {
		request, _ := server.Recv(0)
		fmt.Printf("Request (%d) : (%s)\n", cycles, request)
		server.Send(request, 0)
	}
}
// createSockets sets up the 0MQ sockets through which the kernel will
// communicate.
func createSockets(connInfo *ConnectionInfo) (*zmq.Context, *sockets, error) {
	context, err := zmq.NewContext()
	if err != nil {
		return nil, nil, err
	}
	bindSocket := func(t zmq.Type, port int) (*zmq.Socket, error) {
		addr := fmt.Sprintf(
			"%s://%s:%v",
			connInfo.Transport,
			connInfo.IP,
			port,
		)
		socket, err := context.NewSocket(t)
		if err != nil {
			return nil, err
		}
		if err := socket.Bind(addr); err != nil {
			socket.Close()
			return nil, err
		}
		return socket, nil
	}

	sockets := sockets{
		Heartbeat: socket{Name: "heartbeat", Port: connInfo.HeartbeatPort, Type: zmq.REP},
		Shell:     socket{Name: "shell", Port: connInfo.ShellPort, Type: zmq.ROUTER},
		Control:   socket{Name: "control", Port: connInfo.ControlPort, Type: zmq.ROUTER},
		Stdin:     socket{Name: "stdin", Port: connInfo.StdinPort, Type: zmq.ROUTER},
		IOPub:     socket{Name: "iopub", Port: connInfo.IOPubPort, Type: zmq.PUB},
	}
	for _, socketPtr := range sockets.sockets() {
		socket, err := bindSocket(socketPtr.Type, socketPtr.Port)
		if err == nil {
			socketPtr.Socket = socket
			err = socket.SetLinger(0)
		}
		if err != nil {
			sockets.tryClose()
			if err := context.Term(); err != nil {
				log.Printf("error terminating context: %v", err)
			}
			return nil, nil, fmt.Errorf(
				"creating %v socket: %v",
				socketPtr.Name, err,
			)
		}
	}

	go zmq.Proxy(sockets.Heartbeat.Socket, sockets.Heartbeat.Socket, nil)
	return context, &sockets, nil
}
// NewQueue acts as a queue constructor.
func NewQueue(uri string) (Queue, error) {
	u := "tcp://" + uri
	c, err := zmq.NewContext()
	if err != nil {
		return Queue{}, fmt.Errorf("context initialization failed: %v", err)
	}
	pullSoc, pullErr := c.NewSocket(zmq.PULL)
	if pullErr != nil {
		return Queue{}, fmt.Errorf("pull socket initialization failed: %v", pullErr)
	}
	pushSoc, pushErr := c.NewSocket(zmq.PUSH)
	if pushErr != nil {
		return Queue{}, fmt.Errorf("push socket initialization failed: %v", pushErr)
	}
	q := Queue{u, pushSoc, pullSoc}
	return q, nil
}
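// PUSH/PULL is a one-way pipeline: PUSH round-robins messages across
// connected PULL peers, and PULL fair-queues across connected PUSH peers.
// A minimal generic sketch of the socket pair Queue wraps, with an
// illustrative endpoint:
func pushPullSketch() error {
	ctx, err := zmq.NewContext()
	if err != nil {
		return err
	}
	pull, err := ctx.NewSocket(zmq.PULL)
	if err != nil {
		return err
	}
	defer pull.Close()
	if err := pull.Bind("tcp://*:5561"); err != nil {
		return err
	}

	push, err := ctx.NewSocket(zmq.PUSH)
	if err != nil {
		return err
	}
	defer push.Close()
	if err := push.Connect("tcp://127.0.0.1:5561"); err != nil {
		return err
	}

	if _, err := push.Send("work item", 0); err != nil {
		return err
	}
	msg, err := pull.Recv(0) // blocks until the item arrives
	if err != nil {
		return err
	}
	fmt.Println(msg)
	return nil
}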
func createPushServer(impl *server) error {
	ctx, err := zmq.NewContext()
	if err != nil {
		return err
	}
	// Despite the name, notifications are fanned out over a PUB socket,
	// so every connected subscriber receives each message.
	socket, err := ctx.NewSocket(zmq.PUB)
	if err != nil {
		return err
	}
	if err = socket.Bind(fmt.Sprintf("tcp://*:%d", *pushPort)); err != nil {
		return err
	}
	impl.socket = socket
	log.Printf("PUSH service on %d", *pushPort)
	impl.start()
	return nil
}
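// Because the service above publishes over PUB, a client must use a SUB
// socket and set a subscription before it receives anything. A minimal
// hypothetical client sketch; the port is illustrative, not taken from
// the server's configuration:
func subscribeToPushServer() error {
	sub, err := zmq.NewSocket(zmq.SUB)
	if err != nil {
		return err
	}
	defer sub.Close()
	if err := sub.Connect("tcp://127.0.0.1:5556"); err != nil {
		return err
	}
	if err := sub.SetSubscribe(""); err != nil { // empty prefix: all messages
		return err
	}
	for {
		msg, err := sub.Recv(0)
		if err != nil {
			return err
		}
		log.Printf("notification: %s", msg)
	}
}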
func main() {
	context, _ := zmq.NewContext()
	fmt.Println("Connecting to server .... ")
	client, _ := context.NewSocket(zmq.REQ)
	client.Connect(SERVER_ENDPOINT)
	for i := 0; i < 10; i++ {
		msg := fmt.Sprintf("msg %d", i)
		client.Send(msg, 0)
		fmt.Println("Sending Message ", msg)
		client.Recv(0)
	}
}
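// The REQ client above pairs with the REP echo server shown earlier: REQ
// enforces a strict send/recv lockstep with exactly one reply per request.
// A self-contained sketch of the pattern over an inproc transport (the
// endpoint name is illustrative):
func reqRepLockstep() {
	ctx, _ := zmq.NewContext()

	rep, _ := ctx.NewSocket(zmq.REP)
	defer rep.Close()
	rep.Bind("inproc://echo") // inproc requires bind before connect

	req, _ := ctx.NewSocket(zmq.REQ)
	defer req.Close()
	req.Connect("inproc://echo")

	go func() {
		for {
			msg, err := rep.Recv(0)
			if err != nil {
				return
			}
			rep.Send(msg, 0) // echo each request back
		}
	}()

	req.Send("ping", 0)
	reply, _ := req.Recv(0) // blocks until the echo arrives
	fmt.Println(reply)
}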
func newDealer() *dealer {
	ctx, _ := zmq.NewContext()
	msgCh := make(chan string)
	doneCh := make(chan bool)
	errCh := make(chan error)
	client, err := ctx.NewSocket(zmq.DEALER)
	if err != nil {
		logger.Error.Println("Error opening DEALER socket", err)
		os.Exit(1)
	}
	return &dealer{
		ctx:    ctx,
		msgCh:  msgCh,
		doneCh: doneCh,
		errCh:  errCh,
		client: client,
	}
}
// Dial connects to a zrpc server at the specified network address.
// Protocol is limited to tcp.
func Dial(address ...string) (*Client, error) {
	// Don't use the global context to avoid package level confusion
	ctx, err := zmq.NewContext()
	if err != nil {
		glog.Fatal(err)
	}
	// A dealer socket handles the actual connection
	socket, err := ctx.NewSocket(zmq.DEALER)
	if err != nil {
		glog.Fatal(err)
	}
	client := NewClientWithConnection(ctx, socket)
	for _, addr := range address {
		if err := client.endpoints.add(addr); err != nil {
			return nil, err
		}
	}
	return client, nil
}
// Initiate connections to all replicas
func NewCommunicator(name string, replicas map[string]*ReplicaInfo) (*Communicator, error) {
	communicator := new(Communicator)
	context, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}
	communicator.context = context
	// Create all sockets, bailing out on the first failure.
	for _, s := range []struct {
		sock **zmq.Socket
		typ  zmq.Type
	}{
		{&communicator.pubSocket, zmq.PUB},
		{&communicator.subSocket, zmq.SUB},
		{&communicator.chunkSocket, zmq.REP},
		{&communicator.raftPubSocket, zmq.PUB},
		{&communicator.raftSubSocket, zmq.SUB},
		{&communicator.raftPointToPointSocket, zmq.REP},
		{&communicator.raftClientSocket, zmq.REP},
	} {
		if *s.sock, err = context.NewSocket(s.typ); err != nil {
			return nil, err
		}
	}
	found := false
	for _, val := range replicas {
		if !found && ((name == "auto" && (val.IpAddr == "localhost" || IsOurIpAddr(val.IpAddr))) || val.Name == name) {
			communicator.pubSocket.Bind("tcp://*:" + val.Port)
			communicator.chunkSocket.Bind("tcp://*:" + val.DemandPort)
			// raft
			communicator.raftPubSocket.Bind("tcp://*:" + val.RaftPubPort)
			communicator.raftPointToPointSocket.Bind("tcp://*:" + val.RaftP2PPort)
			communicator.raftClientSocket.Bind("tcp://*:" + val.RaftClientPort)
			communicator.tag = val.Pid
			found = true
		} else {
			communicator.subSocket.Connect("tcp://" + val.IpAddr + ":" + val.Port)
			communicator.subSocket.SetSubscribe(val.Pid)
			communicator.raftSubSocket.Connect("tcp://" + val.IpAddr + ":" + val.RaftPubPort)
			communicator.raftSubSocket.SetSubscribe(val.Pid)
		}
	}
	communicator.replicas = replicas
	return communicator, nil
}
func TestServiceDispatcher(t *testing.T) {
	numPeers := 10
	numServices := 10
	var c *zmq.Context
	var sd *svcrouter.ServiceDispatcher
	var socks []*zmq.Socket = make([]*zmq.Socket, numPeers)
	var addr []string = make([]string, numPeers)
	var killReflectors []chan bool = make([]chan bool, numPeers)
	var err error
	var wg sync.WaitGroup

	defer func() {
		for _, s := range socks {
			if s != nil {
				s.Close()
			}
		}
		if sd != nil {
			sd.Close()
		}
		if c != nil {
			c.Term()
		}
	}()

	if c, err = zmq.NewContext(); err != nil {
		t.Fatalf("Failed to create ZMQ context: %v", err)
	}
	if sd, err = svcrouter.NewServiceDispatcher(c, 1); err != nil {
		t.Fatalf("Failed to create a new service dispatcher: %v", err)
	}
	sd.NoEnvRouterPeer = dummyPeerName(0)
	sd.EnvRouterPeer = dummyPeerName(2)

	for np := 0; np < numPeers; np++ {
		if np%2 == 0 {
			name := fmt.Sprintf("C%04d st=ST%d si=x", np, (np/2)%2)
			if socks[np], addr[np], err = svcrouter.NewSocketAndAddress(c, true, zmq.REQ, name); err != nil {
				t.Fatalf("Failed to create half pair A: %v", err)
			}
			if err = sd.AddPeer(svcrouter.PeerDefinition{
				Name:    dummyPeerName(np),
				ZmqType: zmq.ROUTER,
				Address: addr[np],
				Bind:    false,
			}, svcrouter.PT_CLIENTSNOENV); err != nil {
				t.Fatalf("Could not add peer %d: %v", np, err)
			}
		} else {
			name := fmt.Sprintf("C%04d st=ST%d si=x", np, ((np-1)/2)%2)
			if socks[np], addr[np], err = svcrouter.NewSocketAndAddress(c, true, zmq.REP, name); err != nil {
				t.Fatalf("Failed to create half pair A: %v", err)
			}
			if err = sd.AddPeer(svcrouter.PeerDefinition{
				Name:    dummyPeerName(np),
				ZmqType: zmq.DEALER,
				Address: addr[np],
				Bind:    false,
			}, svcrouter.PT_DOWNSTREAMENVREP); err != nil {
				t.Fatalf("Could not add peer %d: %v", np, err)
			}
			killReflectors[np] = reflector(t, socks[np], np, &wg)
		}
	}
	svcrouter.Barrier()

	for np := 1; np < numPeers; np += 2 {
		for ns := 0; ns < numServices; ns++ {
			serviceType := fmt.Sprintf("ST%d", ns)
			serviceId := "x"
			e := svcrouter.NewEndpointEnvelope(serviceType, serviceId, "")
			if err := sd.AddService(sd.RouterElement, dummyPeerName(np), e); err != nil {
				t.Fatalf("Could not add service %d to peer %d: %v", ns, np, err)
			}
		}
	}

	msg := [][]byte{[]byte("Hello"), []byte("World")}
	for it := 0; it < 2; it++ {
		if _, err := socks[0].SendMessage(msg); err != nil {
			t.Fatalf("socks send error: %v", err)
		}
		if msg2, err := socks[0].RecvMessageBytes(0); err != nil {
			t.Fatalf("socks receive error: %v", err)
		} else {
			svcrouter.DumpMsg("FINAL", msg2)
			if !msgEqual(msg, msg2) {
				t.Fatalf("socks messages differ")
			}
		}
	}

	for _, v := range killReflectors {
		if v != nil {
			close(v)
		}
	}
	wg.Wait()
}
func TestMultipleContexts(t *testing.T) {
	chQuit := make(chan interface{})
	chErr := make(chan error, 2)
	needQuit := false
	var sock1, sock2, serv1, serv2 *zmq.Socket
	var serv_ctx1, serv_ctx2, ctx1, ctx2 *zmq.Context
	var err error
	defer func() {
		if needQuit {
			chQuit <- true
			chQuit <- true
			<-chErr
			<-chErr
		}
		for _, s := range []*zmq.Socket{sock1, sock2, serv1, serv2} {
			if s != nil {
				s.SetLinger(0)
				s.Close()
			}
		}
		for _, c := range []*zmq.Context{serv_ctx1, serv_ctx2, ctx1, ctx2} {
			if c != nil {
				c.Term()
			}
		}
	}()

	addr1 := "tcp://127.0.0.1:9997"
	addr2 := "tcp://127.0.0.1:9998"

	serv_ctx1, err = zmq.NewContext()
	if err != nil {
		t.Fatal("NewContext:", err)
	}
	serv1, err = serv_ctx1.NewSocket(zmq.REP)
	if err != nil {
		t.Fatal("NewSocket:", err)
	}
	err = serv1.Bind(addr1)
	if err != nil {
		t.Fatal("Bind:", err)
	}

	serv_ctx2, err = zmq.NewContext()
	if err != nil {
		t.Fatal("NewContext:", err)
	}
	serv2, err = serv_ctx2.NewSocket(zmq.REP)
	if err != nil {
		t.Fatal("NewSocket:", err)
	}
	err = serv2.Bind(addr2)
	if err != nil {
		t.Fatal("Bind:", err)
	}

	new_service := func(sock *zmq.Socket, addr string) {
		socket_handler := func(state zmq.State) error {
			msg, err := sock.RecvMessage(0)
			if err != nil {
				return err
			}
			_, err = sock.SendMessage(addr, msg)
			return err
		}
		quit_handler := func(interface{}) error {
			return errors.New("quit")
		}
		reactor := zmq.NewReactor()
		reactor.AddSocket(sock, zmq.POLLIN, socket_handler)
		reactor.AddChannel(chQuit, 1, quit_handler)
		err = reactor.Run(100 * time.Millisecond)
		chErr <- err
	}

	go new_service(serv1, addr1)
	go new_service(serv2, addr2)
	needQuit = true

	time.Sleep(time.Second)

	// default context

	sock1, err = zmq.NewSocket(zmq.REQ)
	if err != nil {
		t.Fatal("NewSocket:", err)
	}
	sock2, err = zmq.NewSocket(zmq.REQ)
	if err != nil {
		t.Fatal("NewSocket:", err)
	}
	err = sock1.Connect(addr1)
	if err != nil {
		t.Fatal("sock1.Connect:", err)
	}
	err = sock2.Connect(addr2)
	if err != nil {
		t.Fatal("sock2.Connect:", err)
	}
	_, err = sock1.SendMessage(addr1)
	if err != nil {
		t.Fatal("sock1.SendMessage:", err)
	}
	_, err = sock2.SendMessage(addr2)
	if err != nil {
		t.Fatal("sock2.SendMessage:", err)
	}

	msg, err := sock1.RecvMessage(0)
	expected := []string{addr1, addr1}
	if err != nil || !arrayEqual(msg, expected) {
		t.Errorf("sock1.RecvMessage: expected %v %v, got %v %v", nil, expected, err, msg)
	}
	msg, err = sock2.RecvMessage(0)
	expected = []string{addr2, addr2}
	if err != nil || !arrayEqual(msg, expected) {
		t.Errorf("sock2.RecvMessage: expected %v %v, got %v %v", nil, expected, err, msg)
	}

	err = sock1.Close()
	sock1 = nil
	if err != nil {
		t.Fatal("sock1.Close:", err)
	}
	err = sock2.Close()
	sock2 = nil
	if err != nil {
		t.Fatal("sock2.Close:", err)
	}

	// non-default contexts

	ctx1, err = zmq.NewContext()
	if err != nil {
		t.Fatal("NewContext:", err)
	}
	ctx2, err = zmq.NewContext()
	if err != nil {
		t.Fatal("NewContext:", err)
	}
	sock1, err = ctx1.NewSocket(zmq.REQ)
	if err != nil {
		t.Fatal("ctx1.NewSocket:", err)
	}
	sock2, err = ctx2.NewSocket(zmq.REQ)
	if err != nil {
		t.Fatal("ctx2.NewSocket:", err)
	}
	err = sock1.Connect(addr1)
	if err != nil {
		t.Fatal("sock1.Connect:", err)
	}
	err = sock2.Connect(addr2)
	if err != nil {
		t.Fatal("sock2.Connect:", err)
	}
	_, err = sock1.SendMessage(addr1)
	if err != nil {
		t.Fatal("sock1.SendMessage:", err)
	}
	_, err = sock2.SendMessage(addr2)
	if err != nil {
		t.Fatal("sock2.SendMessage:", err)
	}

	msg, err = sock1.RecvMessage(0)
	expected = []string{addr1, addr1}
	if err != nil || !arrayEqual(msg, expected) {
		t.Errorf("sock1.RecvMessage: expected %v %v, got %v %v", nil, expected, err, msg)
	}
	msg, err = sock2.RecvMessage(0)
	expected = []string{addr2, addr2}
	if err != nil || !arrayEqual(msg, expected) {
		t.Errorf("sock2.RecvMessage: expected %v %v, got %v %v", nil, expected, err, msg)
	}

	err = sock1.Close()
	sock1 = nil
	if err != nil {
		t.Fatal("sock1.Close:", err)
	}
	err = sock2.Close()
	sock2 = nil
	if err != nil {
		t.Fatal("sock2.Close:", err)
	}

	err = ctx1.Term()
	ctx1 = nil
	if err != nil {
		t.Fatal("ctx1.Term:", err)
	}
	err = ctx2.Term()
	ctx2 = nil
	if err != nil {
		t.Fatal("ctx2.Term:", err)
	}

	needQuit = false
	for i := 0; i < 2; i++ {
		// close(chQuit) doesn't work because the reactor removes closed channels, instead of acting on them
		chQuit <- true
		err = <-chErr
		if err.Error() != "quit" {
			t.Errorf("Expected error value quit, got %v", err)
		}
	}
}
func Example_multiple_contexts() {
	chQuit := make(chan interface{})
	chReactor := make(chan bool)

	addr1 := "tcp://127.0.0.1:9997"
	addr2 := "tcp://127.0.0.1:9998"

	serv_ctx1, err := zmq.NewContext()
	if checkErr(err) {
		return
	}
	serv1, err := serv_ctx1.NewSocket(zmq.REP)
	if checkErr(err) {
		return
	}
	err = serv1.Bind(addr1)
	if checkErr(err) {
		return
	}
	defer func() {
		serv1.Close()
		serv_ctx1.Term()
	}()

	serv_ctx2, err := zmq.NewContext()
	if checkErr(err) {
		return
	}
	serv2, err := serv_ctx2.NewSocket(zmq.REP)
	if checkErr(err) {
		return
	}
	err = serv2.Bind(addr2)
	if checkErr(err) {
		return
	}
	defer func() {
		serv2.Close()
		serv_ctx2.Term()
	}()

	new_service := func(sock *zmq.Socket, addr string) {
		socket_handler := func(state zmq.State) error {
			msg, err := sock.RecvMessage(0)
			if checkErr(err) {
				return err
			}
			_, err = sock.SendMessage(addr, msg)
			if checkErr(err) {
				return err
			}
			return nil
		}
		quit_handler := func(interface{}) error {
			return errors.New("quit")
		}

		defer func() {
			chReactor <- true
		}()

		reactor := zmq.NewReactor()
		reactor.AddSocket(sock, zmq.POLLIN, socket_handler)
		reactor.AddChannel(chQuit, 1, quit_handler)
		err = reactor.Run(100 * time.Millisecond)
		fmt.Println(err)
	}

	go new_service(serv1, addr1)
	go new_service(serv2, addr2)

	time.Sleep(time.Second)

	// default context

	sock1, err := zmq.NewSocket(zmq.REQ)
	if checkErr(err) {
		return
	}
	sock2, err := zmq.NewSocket(zmq.REQ)
	if checkErr(err) {
		return
	}
	err = sock1.Connect(addr1)
	if checkErr(err) {
		return
	}
	err = sock2.Connect(addr2)
	if checkErr(err) {
		return
	}
	_, err = sock1.SendMessage(addr1)
	if checkErr(err) {
		return
	}
	_, err = sock2.SendMessage(addr2)
	if checkErr(err) {
		return
	}
	msg, err := sock1.RecvMessage(0)
	fmt.Println(err, msg)
	msg, err = sock2.RecvMessage(0)
	fmt.Println(err, msg)
	err = sock1.Close()
	if checkErr(err) {
		return
	}
	err = sock2.Close()
	if checkErr(err) {
		return
	}

	// non-default contexts

	ctx1, err := zmq.NewContext()
	if checkErr(err) {
		return
	}
	ctx2, err := zmq.NewContext()
	if checkErr(err) {
		return
	}
	sock1, err = ctx1.NewSocket(zmq.REQ)
	if checkErr(err) {
		return
	}
	sock2, err = ctx2.NewSocket(zmq.REQ)
	if checkErr(err) {
		return
	}
	err = sock1.Connect(addr1)
	if checkErr(err) {
		return
	}
	err = sock2.Connect(addr2)
	if checkErr(err) {
		return
	}
	_, err = sock1.SendMessage(addr1)
	if checkErr(err) {
		return
	}
	_, err = sock2.SendMessage(addr2)
	if checkErr(err) {
		return
	}
	msg, err = sock1.RecvMessage(0)
	fmt.Println(err, msg)
	msg, err = sock2.RecvMessage(0)
	fmt.Println(err, msg)
	err = sock1.Close()
	if checkErr(err) {
		return
	}
	err = sock2.Close()
	if checkErr(err) {
		return
	}

	err = ctx1.Term()
	if checkErr(err) {
		return
	}
	err = ctx2.Term()
	if checkErr(err) {
		return
	}

	// close(chQuit) doesn't work because the reactor removes closed channels, instead of acting on them
	chQuit <- true
	<-chReactor
	chQuit <- true
	<-chReactor

	fmt.Println("Done")
	// Output:
	// <nil> [tcp://127.0.0.1:9997 tcp://127.0.0.1:9997]
	// <nil> [tcp://127.0.0.1:9998 tcp://127.0.0.1:9998]
	// <nil> [tcp://127.0.0.1:9997 tcp://127.0.0.1:9997]
	// <nil> [tcp://127.0.0.1:9998 tcp://127.0.0.1:9998]
	// quit
	// quit
	// Done
}
func TestServiceRouter(t *testing.T) {
	numPeers := 50
	numServices := 100
	var c *zmq.Context
	var m *Mirror
	var socks []*zmq.Socket = make([]*zmq.Socket, numPeers)
	var addr []string = make([]string, numPeers)
	var err error

	defer func() {
		for _, s := range socks {
			if s != nil {
				s.Close()
			}
		}
		if m != nil {
			m.Close()
		}
		if c != nil {
			c.Term()
		}
	}()

	if c, err = zmq.NewContext(); err != nil {
		t.Fatalf("Failed to create ZMQ context: %v", err)
	}
	if m, err = NewMirror(t, c, 1); err != nil {
		t.Fatalf("Failed to create a new mirror: %v", err)
	}
	for np := 0; np < numPeers; np++ {
		if socks[np], addr[np], err = svcrouter.NewHalfPair(c, true); err != nil {
			t.Fatalf("Failed to create half pair A: %v", err)
		}
		if err = m.AddPeer(svcrouter.PeerDefinition{
			Name:     dummyPeerName(np),
			ZmqType:  zmq.PAIR,
			Address:  addr[np],
			Bind:     false,
			PeerImpl: &MirrorPeerImpl{},
		}); err != nil {
			t.Fatalf("Could not add peer %d: %v", np, err)
		}
	}
	svcrouter.Barrier()

	r := svcrouter.NewServiceRouter()

	t.Log("Adding by Peer/Service")
	for np := 0; np < numPeers; np++ {
		peerName := dummyPeerName(np)
		for ns := 0; ns < numServices; ns++ {
			serviceType, serviceId := dummyServiceName(ns)
			e := svcrouter.NewEndpointEnvelope(serviceType, serviceId, "")
			if err := r.AddService(m.RouterElement, peerName, e); err != nil {
				t.Fatalf("Could not add service %d to peer %d: %v", ns, np, err)
			}
		}
	}
	if err := r.Validate(); err != nil {
		t.Fatalf("Failed validation 1: %v", err)
	}
	for np := 0; np < numPeers; np++ {
		peerName := dummyPeerName(np)
		if l := r.LenPeerEntries(m.RouterElement, peerName); l != numServices {
			t.Fatalf("Peer %d has service element mismatch: %d != %d", np, l, numServices)
		}
	}

	t.Log("Removing by Service/Peer")
	for ns := 0; ns < numServices; ns++ {
		serviceType, serviceId := dummyServiceName(ns)
		for np := 0; np < numPeers; np++ {
			peerName := dummyPeerName(np)
			e := svcrouter.NewEndpointEnvelope(serviceType, serviceId, "")
			if err := r.DeleteService(m.RouterElement, peerName, e); err != nil {
				t.Fatalf("Could not delete service %d from peer %d: %v", ns, np, err)
			}
		}
		if ns%(numServices/10) == 0 {
			if err := r.Validate(); err != nil {
				t.Fatalf("Failed validation 2: %v", err)
			}
		}
	}
	for np := 0; np < numPeers; np++ {
		peerName := dummyPeerName(np)
		if l := r.LenPeerEntries(m.RouterElement, peerName); l != 0 {
			t.Fatalf("Peer %d has service element mismatch: %d != 0", np, l)
		}
	}
	if err := r.Validate(); err != nil {
		t.Fatalf("Failed validation 3: %v", err)
	}

	t.Log("Adding by Service/Peer")
	for ns := 0; ns < numServices; ns++ {
		serviceType, serviceId := dummyServiceName(ns)
		for np := 0; np < numPeers; np++ {
			peerName := dummyPeerName(np)
			e := svcrouter.NewEndpointEnvelope(serviceType, serviceId, "")
			if err := r.AddService(m.RouterElement, peerName, e); err != nil {
				t.Fatalf("Could not add service %d to peer %d: %v", ns, np, err)
			}
		}
	}
	if err := r.Validate(); err != nil {
		t.Fatalf("Failed validation 4: %v", err)
	}
	for np := 0; np < numPeers; np++ {
		peerName := dummyPeerName(np)
		if l := r.LenPeerEntries(m.RouterElement, peerName); l != numServices {
			t.Fatalf("Peer %d has service element mismatch: %d != %d", np, l, numServices)
		}
	}

	t.Log("Removing by Peer/Service")
	for np := 0; np < numPeers; np++ {
		peerName := dummyPeerName(np)
		for ns := 0; ns < numServices; ns++ {
			serviceType, serviceId := dummyServiceName(ns)
			e := svcrouter.NewEndpointEnvelope(serviceType, serviceId, "")
			if err := r.DeleteService(m.RouterElement, peerName, e); err != nil {
				t.Fatalf("Could not delete service %d from peer %d: %v", ns, np, err)
			}
		}
		if np%10 == 0 {
			if err := r.Validate(); err != nil {
				t.Fatalf("Failed validation 5: %v", err)
			}
		}
	}
	if err := r.Validate(); err != nil {
		t.Fatalf("Failed validation 6: %v", err)
	}
	for np := 0; np < numPeers; np++ {
		peerName := dummyPeerName(np)
		if l := r.LenPeerEntries(m.RouterElement, peerName); l != 0 {
			t.Fatalf("Peer %d has service element mismatch: %d != 0", np, l)
		}
	}

	t.Log("Adding by Service/Peer")
	for ns := 0; ns < numServices; ns++ {
		serviceType, serviceId := dummyServiceName(ns)
		for np := 0; np < numPeers; np++ {
			peerName := dummyPeerName(np)
			e := svcrouter.NewEndpointEnvelope(serviceType, serviceId, "")
			if err := r.AddService(m.RouterElement, peerName, e); err != nil {
				t.Fatalf("Could not add service %d to peer %d: %v", ns, np, err)
			}
		}
	}
	if err := r.Validate(); err != nil {
		t.Fatalf("Failed validation 7: %v", err)
	}

	for it := 0; it < 10; it++ {
		// How exciting. Let's try some routing!
		serviceType, serviceId := dummyServiceName(0)
		d := make(map[string]int, numPeers)
		for np := 0; np < numPeers; np++ {
			se := r.GetServerEntry(serviceType, serviceId, "")
			d[se.Peer.Name] += 1
		}
		if len(d) != numPeers {
			t.Fatalf("Failed load balance test 1")
		}
		for np := 0; np < numPeers; np++ {
			se := r.GetServerEntry(serviceType, serviceId, "")
			if d[se.Peer.Name] != 1 {
				t.Fatalf("Failed load balance test 2")
			}
			d[se.Peer.Name] += 1
		}
		if len(d) != numPeers {
			t.Fatalf("Failed load balance test 3")
		}
	}
	for np := 0; np < numPeers; np++ {
		peerName := dummyPeerName(np)
		if l := r.LenPeerEntries(m.RouterElement, peerName); l != numServices {
			t.Fatalf("Peer %d has service element mismatch: %d != %d", np, l, numServices)
		}
	}

	t.Log("Removing by Peer (all entries)")
	for np := 0; np < numPeers; np++ {
		peerName := dummyPeerName(np)
		if err := r.DeleteAllPeerEntries(m.RouterElement, peerName); err != nil {
			t.Fatalf("Could not delete all services from peer %d: %v", np, err)
		}
	}
	for np := 0; np < numPeers; np++ {
		peerName := dummyPeerName(np)
		if l := r.LenPeerEntries(m.RouterElement, peerName); l != 0 {
			t.Fatalf("Peer %d has service element mismatch: %d != 0", np, l)
		}
	}
	if err := r.Validate(); err != nil {
		t.Fatalf("Failed validation final: %v", err)
	}
}
func NewSockets(cinfo ConnectionFile) (Sockets, error) {
	context, err := zmq4.NewContext()
	if err != nil {
		return Sockets{}, err
	}
	s := Sockets{
		context: context,
		Key:     []byte(cinfo.Key),
	}

	address := func(port int) string {
		return fmt.Sprintf("%s://%s:%d", cinfo.Transport, cinfo.Ip, port)
	}

	// setup heartbeat
	heartBeat, err := context.NewSocket(zmq4.REP)
	if err != nil {
		log.Println("Error in setting up heart beat")
		log.Fatal(err)
	}
	err = heartBeat.Bind(address(cinfo.HbPort))
	if err != nil {
		log.Println("Error in setting up heart beat")
		log.Fatal(err)
	}

	// setup sockets
	s.ShellSocket, err = context.NewSocket(zmq4.ROUTER)
	if err != nil {
		return s, err
	}
	s.ControlSocket, err = context.NewSocket(zmq4.ROUTER)
	if err != nil {
		return s, err
	}
	s.StdinSocket, err = context.NewSocket(zmq4.ROUTER)
	if err != nil {
		return s, err
	}
	// IOPub is a broadcast channel, so it needs a PUB socket rather than
	// a ROUTER.
	s.IOPubSocket, err = context.NewSocket(zmq4.PUB)
	if err != nil {
		return s, err
	}

	err = s.ShellSocket.Bind(address(cinfo.ShellPort))
	if err != nil {
		return s, err
	}
	err = s.ControlSocket.Bind(address(cinfo.ControlPort))
	if err != nil {
		return s, err
	}
	err = s.StdinSocket.Bind(address(cinfo.StdinPort))
	if err != nil {
		return s, err
	}
	err = s.IOPubSocket.Bind(address(cinfo.IOpubPort))
	if err != nil {
		return s, err
	}

	// echo every heartbeat message straight back to the client
	go func(heartBeat *zmq4.Socket) {
		if err := zmq4.Proxy(heartBeat, heartBeat, nil); err != nil {
			log.Fatal(err)
		}
	}(heartBeat)
	return s, nil
}
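// The heartbeat trick above proxies a REP socket onto itself, so whatever a
// frontend sends comes straight back. A minimal hypothetical client-side
// check, assuming (for illustration only) the heartbeat is bound on port
// 5557:
func pingHeartbeat() bool {
	ping, err := zmq4.NewSocket(zmq4.REQ)
	if err != nil {
		return false
	}
	defer ping.Close()
	if err := ping.Connect("tcp://127.0.0.1:5557"); err != nil {
		return false
	}
	if _, err := ping.Send("ping", 0); err != nil {
		return false
	}
	reply, err := ping.Recv(0) // echoed back verbatim by the proxy
	return err == nil && reply == "ping"
}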
// Start registers a zmq endpoint at passed address answering requests for registered services
func (server *Server) Start(addr string) {
	// Don't use the global context to avoid package level confusion
	ctx, err := zmq.NewContext()
	if err != nil {
		glog.Fatal(err)
	}

	// A router socket handles the actual connection
	sock, err := ctx.NewSocket(zmq.ROUTER)
	if err != nil {
		glog.Fatal(err)
	}
	server.conn = sock

	// If no prefix is passed, default to tcp
	if !strings.HasPrefix(addr, "tcp://") {
		addr = "tcp://" + addr
	}
	server.conn.Bind(addr)
	glog.Info("Server listening on ", addr)

	// Socket monitor
	monitorURL := "inproc://monitor"
	if err := server.conn.Monitor(monitorURL, zmq.EVENT_ACCEPTED|zmq.EVENT_DISCONNECTED); err != nil {
		glog.Fatal(err)
	}
	go server.monitor(ctx, monitorURL)

	// A dealer socket multiplexes requests to workers
	mux, err := ctx.NewSocket(zmq.DEALER)
	if err != nil {
		glog.Fatal(err)
	}
	defer mux.Close()
	mux.Bind("inproc://mux")

	// Start backing worker processes
	for i := 0; i < server.numWorkers; i++ {
		go func(i int) {
			worker, err := ctx.NewSocket(zmq.REP)
			if err != nil {
				glog.Fatal(err)
			}
			defer worker.Close()
			worker.Connect("inproc://mux")
			glog.V(2).Infof("Started worker #%d", i)
			for {
				if server.closing {
					glog.Warning(ErrShutdown)
					break
				}

				reqBytes, err := worker.RecvBytes(0)
				if err != nil {
					switch zmq.AsErrno(err) {
					// If was interrupted there is no need to log as an error
					case zmq.Errno(zmq.ETERM):
						glog.Info(err)
					default:
						// Error receiving is usually fatal
						glog.Error(err)
					}
					break
				}

				// Decode the request envelope
				req := &Request{}
				if err := proto.Unmarshal(reqBytes, req); err != nil {
					glog.Error(err)
					sendError(worker, nil, err)
					continue
				}

				// Make sure it's not expired on arrival
				if req.Expires != nil {
					if time.Unix(*req.Expires, 0).Before(time.Now()) {
						glog.Infof("discarding expired message: '%s'", req.UUID)
						sendError(worker, req, NewExpiredError("message expired on arrival"))
						continue
					}
				}

				serviceName := path.Dir(strings.TrimPrefix(req.GetPath(), "zrpc://"))
				methodName := path.Base(req.GetPath())

				// Make sure a handler for this request exists
				server.mu.RLock()
				service, ok := server.serviceMap[serviceName]
				server.mu.RUnlock()
				if !ok {
					err := fmt.Sprintf("service '%s' is not served", serviceName)
					if serviceName == "." {
						err = "no service name passed"
					}
					glog.Warning(err)
					sendError(worker, req, errors.New(err))
					continue
				}

				// Make sure the message is registered for this server
				if mType, ok := service.method[methodName]; ok {
					// Decode the incoming request message
					var argv reflect.Value
					argIsValue := false // if true, need to indirect before calling.
					if mType.ArgType.Kind() == reflect.Ptr {
						argv = reflect.New(mType.ArgType.Elem())
					} else {
						argv = reflect.New(mType.ArgType)
						argIsValue = true
					}
					// argv guaranteed to be a pointer now.
					if err := proto.Unmarshal(req.Payload, argv.Interface().(proto.Message)); err != nil {
						glog.Error(err)
						sendError(worker, req, err)
						continue
					}
					if argIsValue {
						argv = reflect.Indirect(argv)
					}
					glog.V(3).Infof("Received '%s' (%s)", argv.Type().Elem(), req.UUID)

					// Invoke the method, providing a new value for the reply (if expected)
					var (
						returnValues []reflect.Value
						replyv       reflect.Value
					)
					if mType.ReplyType != nil {
						replyv = reflect.New(mType.ReplyType.Elem())
						returnValues = mType.method.Func.Call([]reflect.Value{service.rcvr, argv, replyv})
					} else {
						returnValues = mType.method.Func.Call([]reflect.Value{service.rcvr, argv})
					}

					// The return value for the method is an error.
					errInter := returnValues[0].Interface()
					if errInter != nil {
						err := errInter.(error)
						sendError(worker, req, err)
						continue
					}

					// Envelope the response message
					envelope := &Response{
						Path: req.Path,
						UUID: req.UUID,
					}

					// Marshal the response message (if exists)
					if mType.ReplyType != nil {
						replyBytes, err := proto.Marshal(replyv.Interface().(proto.Message))
						if err != nil {
							glog.Error(err)
							sendError(worker, req, err)
							continue
						}
						envelope.Payload = replyBytes
					}

					// Marshal the envelope
					envBytes, err := proto.Marshal(envelope)
					if err != nil {
						glog.Error(err)
						sendError(worker, req, err)
						continue
					}

					// Send the response
					if _, err := worker.SendBytes(envBytes, 0); err != nil {
						// Since we could not send, we could not send an error either, just log
						glog.Error(err)
					}
					if mType.ReplyType != nil {
						glog.V(3).Infof("Replied '%s' (%s)", mType.ReplyType.Elem(), envelope.UUID)
					} else {
						glog.V(3).Infof("Replied nil (%s)", envelope.UUID)
					}
				} else {
					// If reached here, the message was not handled by the server
					glog.V(1).Infof("message '%s' is not handled by this service", methodName)
					sendError(worker, req, fmt.Errorf("message '%s' is not handled by this service", methodName))
				}
			}
			glog.Infof("Closing worker #%d", i)
		}(i + 1)
	}

	// This is blocking so we put it last
	if err := zmq.Proxy(sock, mux, nil); err != nil {
		switch zmq.AsErrno(err) {
		// If was interrupted there is no need to log as an error
		case zmq.Errno(syscall.EINTR):
			glog.Info(err)
		case zmq.Errno(zmq.ETERM):
			glog.Info(err)
		default:
			glog.Error(err)
		}
	}
	// Since it was blocking we could safely close the server if reached here
	server.Close()
}
/*
InitZMQTransport creates ZeroMQ transport.

It multiplexes incoming connections which are then processed in separate
go routines (workers). Multiplexer spawns go routines as needed, but 10
worker routines are created on startup. Every request times out after
provided timeout duration.

ZMQ pattern is:
	zmq.ROUTER(incoming) -> proxy -> zmq.DEALER -> [zmq.REP(worker), zmq.REP...]
*/
func InitZMQTransport(hostname string, timeout time.Duration, logger *log.Logger) (Transport, error) {
	// use default logger if one is not provided
	if logger == nil {
		logger = log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile)
	}

	// initialize ZMQ Context
	context, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}

	// setup router and bind() to tcp address for clients to connect to
	router_sock, err := context.NewSocket(zmq.ROUTER)
	if err != nil {
		return nil, err
	}
	err = router_sock.Bind("tcp://" + hostname)
	if err != nil {
		return nil, err
	}

	// setup dealer
	dealer_sock, err := context.NewSocket(zmq.DEALER)
	if err != nil {
		return nil, err
	}
	err = dealer_sock.Bind("inproc://dendrite-zmqdealer")
	if err != nil {
		return nil, err
	}

	poller := zmq.NewPoller()
	poller.Add(router_sock, zmq.POLLIN)
	poller.Add(dealer_sock, zmq.POLLIN)

	transport := &ZMQTransport{
		lock:              new(sync.Mutex),
		clientTimeout:     timeout,
		ClientTimeout:     timeout,
		minHandlers:       10,
		maxHandlers:       1024,
		incrHandlers:      10,
		activeRequests:    0,
		workerIdleTimeout: 10 * time.Second,
		table:             make(map[string]*localHandler),
		control_c:         make(chan *workerComm),
		dealer_sock:       dealer_sock,
		router_sock:       router_sock,
		zmq_context:       context,
		ZMQContext:        context,
		hooks:             make([]TransportHook, 0),
		Logger:            logger,
	}

	go zmq.Proxy(router_sock, dealer_sock, nil)

	// Scheduler goroutine keeps track of running workers.
	// It spawns new ones if needed, and cancels ones that are idling.
	go func() {
		sched_ticker := time.NewTicker(60 * time.Second)
		workers := make(map[*workerComm]bool)
		// fire up initial set of workers
		for i := 0; i < transport.minHandlers; i++ {
			go transport.zmq_worker()
		}
		for {
			select {
			case comm := <-transport.control_c:
				// worker sent something...
				msg := <-comm.worker_out
				switch {
				case msg == workerRegisterReq:
					if len(workers) == transport.maxHandlers {
						comm.worker_in <- workerRegisterDenied
						logger.Println("[DENDRITE][INFO]: TransportListener - max number of workers reached")
						continue
					}
					if _, ok := workers[comm]; ok {
						// worker already registered
						continue
					}
					comm.worker_in <- workerRegisterAllowed
					workers[comm] = true
					logger.Println("[DENDRITE][INFO]: TransportListener - registered new worker, total:", len(workers))
				case msg == workerShutdownReq:
					//logger.Println("Got shutdown req")
					if len(workers) > transport.minHandlers {
						comm.worker_in <- workerShutdownAllowed
						for range comm.worker_out {
							// wait until worker closes the channel
						}
						delete(workers, comm)
					} else {
						comm.worker_in <- workerShutdownDenied
					}
				}
			case <-sched_ticker.C:
				// check if requests are piling up and start more workers if that's the case
				if transport.activeRequests > 3*len(workers) {
					for i := 0; i < transport.incrHandlers; i++ {
						go transport.zmq_worker()
					}
				}
			}
		}
	}()
	return transport, nil
}
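// A self-contained sketch of the ROUTER -> proxy -> DEALER -> REP pattern
// the doc comment above describes, reduced to a single worker. Endpoints
// and message contents are illustrative, not taken from dendrite:
func routerDealerSketch() {
	ctx, _ := zmq.NewContext()

	router, _ := ctx.NewSocket(zmq.ROUTER)
	defer router.Close()
	router.Bind("tcp://127.0.0.1:6000") // clients connect here with REQ

	dealer, _ := ctx.NewSocket(zmq.DEALER)
	defer dealer.Close()
	dealer.Bind("inproc://workers")

	// One REP worker; real code would start a pool of these.
	go func() {
		rep, _ := ctx.NewSocket(zmq.REP)
		defer rep.Close()
		rep.Connect("inproc://workers")
		for {
			msg, err := rep.RecvMessage(0)
			if err != nil {
				return
			}
			rep.SendMessage("reply to", msg)
		}
	}()

	// The proxy shuttles frames between the ROUTER and the DEALER,
	// blocking until the context terminates.
	zmq.Proxy(router, dealer, nil)
}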
func TestIO(t *testing.T) {
	const (
		addr1 = "tcp://127.0.0.1:12345"
		addr2 = "tcp://127.0.0.1:12346"
	)
	ctx, err := zmq.NewContext()
	if err != nil {
		t.Fatal(err)
	}
	defer ctx.Term()
	go func() {
		// t.Fatal must not be called from a non-test goroutine;
		// report errors with t.Error and return instead.
		s, err := ctx.NewSocket(zmq.REQ)
		if err != nil {
			t.Error(err)
			return
		}
		defer s.Close()
		if err := s.Connect(addr1); err != nil {
			t.Error(err)
			return
		}
		if err := s.Connect(addr2); err != nil {
			t.Error(err)
			return
		}
		for {
			t.Logf("0 sending")
			if _, err := s.SendMessage("hello", "world"); err != nil {
				t.Logf("0 %s", err)
				return
			}
			t.Logf("0 receiving")
			msg, err := s.RecvMessage(0)
			if err != nil {
				t.Logf("0 %s", err)
				return
			}
			t.Logf("0 %s", msg)
		}
	}()
	ctx1, err := zmq.NewContext()
	if err != nil {
		t.Fatal(err)
	}
	defer ctx1.Term()
	ctx2, err := zmq.NewContext()
	if err != nil {
		t.Fatal(err)
	}
	defer ctx2.Term()
	s1, err := ctx1.NewSocket(zmq.REP)
	if err != nil {
		t.Fatal(err)
	}
	defer io.Remove(s1)
	s2, err := ctx2.NewSocket(zmq.REP)
	if err != nil {
		t.Fatal(err)
	}
	defer io.Remove(s2)
	if err := s1.Bind(addr1); err != nil {
		t.Fatal(err)
	}
	if err := s2.Bind(addr2); err != nil {
		t.Fatal(err)
	}
	send1 := make(chan zmqchan.Data)
	recv1 := make(chan zmqchan.Data)
	if err := io.Add(s1, send1, recv1); err != nil {
		t.Fatal(err)
	}
	send2 := make(chan zmqchan.Data)
	recv2 := make(chan zmqchan.Data)
	if err := io.Add(s2, send2, recv2); err != nil {
		t.Fatal(err)
	}
	var (
		send1buf   zmqchan.Data
		send2buf   zmqchan.Data
		recv1count = 0
		recv2count = 0
	)
	for recv1 != nil || send1 != nil || recv2 != nil || send2 != nil {
		var (
			recv1chan <-chan zmqchan.Data
			recv2chan <-chan zmqchan.Data
			send1chan chan<- zmqchan.Data
			send2chan chan<- zmqchan.Data
		)
		if send1buf.Bytes == nil {
			recv1chan = recv1
		} else {
			send1chan = send1
		}
		if send2buf.Bytes == nil {
			recv2chan = recv2
		} else {
			send2chan = send2
		}
		select {
		case data, ok := <-recv1chan:
			if !ok {
				t.Fatal("1 closed")
			}
			t.Logf("1 '%s'", data)
			if !data.More {
				if recv1count++; recv1count == 10 {
					t.Logf("1 no more")
					recv1 = nil
				}
				send1buf.Bytes = []byte("okie")
			}
		case send1chan <- send1buf:
			send1buf.Bytes = nil
			if recv1 == nil {
				close(send1)
				send1 = nil
			}
		case b, ok := <-recv2chan:
			if !ok {
				t.Fatal("2 closed")
			}
			t.Logf("2 '%s'", b)
			if !b.More {
				if recv2count++; recv2count == 10 {
					t.Logf("2 no more")
					recv2 = nil
				}
				send2buf.Bytes = []byte("dokie")
			}
		case send2chan <- send2buf:
			send2buf.Bytes = nil
			if recv2 == nil {
				close(send2)
				send2 = nil
			}
		}
	}
}
func TestRouterElement(t *testing.T) {
	var c *zmq.Context
	var m *Mirror
	var sa, sb *zmq.Socket
	var err error
	var addra string
	var addrb string

	defer func() {
		if sa != nil {
			sa.Close()
		}
		if sb != nil {
			sb.Close()
		}
		if m != nil {
			m.Close()
		}
		if c != nil {
			c.Term()
		}
	}()

	if c, err = zmq.NewContext(); err != nil {
		t.Fatalf("Failed to create ZMQ context: %v", err)
	}
	if m, err = NewMirror(t, c, 1); err != nil {
		t.Fatalf("Failed to create a new mirror: %v", err)
	}
	if sa, addra, err = svcrouter.NewHalfPair(c, true); err != nil {
		t.Fatalf("Failed to create half pair A: %v", err)
	}
	if sb, addrb, err = svcrouter.NewHalfPair(c, true); err != nil {
		t.Fatalf("Failed to create half pair B: %v", err)
	}
	if err = m.AddPeer(svcrouter.PeerDefinition{
		Name:     "ab",
		ZmqType:  zmq.PAIR,
		Address:  addra,
		Bind:     false,
		PeerImpl: &MirrorPeerImpl{},
	}); err != nil {
		t.Fatalf("Could not add peer A: %v", err)
	}
	if err = m.AddPeer(svcrouter.PeerDefinition{
		Name:     "ba",
		ZmqType:  zmq.PAIR,
		Address:  addrb,
		Bind:     false,
		PeerImpl: &MirrorPeerImpl{},
	}); err != nil {
		t.Fatalf("Could not add peer B: %v", err)
	}
	svcrouter.Barrier()

	num := 100
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < num; i++ {
			if _, err := sb.RecvMessage(0); err != nil {
				// t.Fatalf must not be called from a non-test
				// goroutine; report and bail out instead.
				t.Errorf("sb receive error: %v", err)
				return
			}
		}
	}()
	msg := [][]byte{[]byte("Hello"), []byte("World")}
	for i := 0; i < num; i++ {
		if _, err := sa.SendMessage(msg); err != nil {
			t.Fatalf("sa send error: %v", err)
		}
	}
	wg.Wait()
}
// connectToStore connects to the Store service using the following two endpoints:
// * API: push notifications from master to store
// * subscription service: notifications from store to nodes
func connectToStore(apiAddr string, subscriptionAddr string) (*store, error) {
	ctx, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}

	// frontend subscribes to all topics on the Store subscriptions service
	frontend, err := ctx.NewSocket(zmq.SUB)
	if err != nil {
		return nil, err
	}
	err = frontend.Connect(fmt.Sprintf("tcp://%s", subscriptionAddr))
	if err != nil {
		return nil, err
	}
	// a SUB socket delivers nothing until a filter is set; the empty
	// prefix subscribes to every topic
	err = frontend.SetSubscribe("")
	if err != nil {
		return nil, err
	}

	// backend is a local publisher that will help distribute topics locally
	backend, err := ctx.NewSocket(zmq.PUB)
	if err != nil {
		return nil, err
	}
	err = backend.Bind(updateEndpoint)
	if err != nil {
		return nil, err
	}

	// zmq.Proxy blocks for the lifetime of the proxy, so it must run in
	// its own goroutine; otherwise nothing below would ever execute
	go zmq.Proxy(frontend, backend, nil)

	mux, err := router.New(updateEndpoint)
	if err != nil {
		return nil, err
	}
	conn, err := grpc.Dial(apiAddr)
	if err != nil {
		return nil, err
	}
	client := storepb.NewStoreServiceClient(conn)
	s := &store{
		conn:      conn,
		client:    client,
		schedules: make(chan []byte),
		jobs:      make(chan []byte),
		mux:       mux,
		done:      make(chan struct{}),
	}
	if err = mux.Add(service.ScheduleUpdateKey, s.schedules); err != nil {
		return nil, err
	}
	if err = mux.Add(service.JobUpdateKey, s.jobs); err != nil {
		return nil, err
	}
	go s.loop()
	go mux.Run()
	return s, nil
}