func runZmqStream() {
	var context zmq.Context
	var socket zmq.Socket

	// connect to zmq
	var err error
	if context, err = zmq.NewContext(); err != nil {
		panic("No ZMQ Context?")
	}
	defer context.Close()

	if socket, err = context.NewSocket(zmq.SUB); err != nil {
		panic("No ZMQ Socket Outbound??")
	}
	defer socket.Close()

	socket.Connect("tcp://localhost:5558")
	socket.SetSockOptString(zmq.SUBSCRIBE, "")

	for {
		// block here, waiting for inbound requests
		msg, _ := socket.Recv(0)
		if len(msg) > 0 {
			parts := strings.Split(string(msg), "\n\n")
			process(parts[0])
		}
	}
}
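// A minimal sketch of the publishing side the stream reader above could attach
// to, assuming the same binding: a PUB socket bound on port 5558 sending
// messages whose sections are separated by "\n\n", so that process() above
// receives the first section. The function name, payload, and send interval
// are illustrative; the real message format is not shown here.
func runZmqPublisherSketch() {
	context, err := zmq.NewContext()
	if err != nil {
		panic("No ZMQ Context?")
	}
	defer context.Close()

	socket, err := context.NewSocket(zmq.PUB)
	if err != nil {
		panic("No ZMQ Socket?")
	}
	defer socket.Close()

	// Bind where runZmqStream connects; the subscriber above subscribes to everything.
	socket.Bind("tcp://*:5558")

	for {
		// parts[0] in the reader is "payload"; everything after "\n\n" is ignored there.
		socket.Send([]byte("payload\n\nmetadata"), 0)
		time.Sleep(time.Second)
	}
}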
func proxyRouting(status chan bool) {
	// Initialize the ZeroMQ context.
	context, err := zmq.NewContext()
	if err != nil {
		status <- false
		log.Fatal("Failed to initialize the ZeroMQ context.\n")
	}
	defer context.Close()

	var subscriber, publisher *zmq.Socket

	subscriber, err = context.NewSocket(zmq.XSUB)
	if err != nil {
		status <- false
		log.Fatal("Failed to initialize the subscriber socket.\n")
	}
	defer subscriber.Close()

	var (
		sub_address, pub_address = "*", "*"
		subPort, pubPort         = 6001, 6000
	)

	// Bind the subscriber
	address := fmt.Sprintf("tcp://%s:%v", sub_address, subPort)
	err = subscriber.Bind(address)
	if err != nil {
		status <- false
		log.Fatalf("Subscriber bind on address %s failed.\n", address)
	}
	log.Printf("Subscriber bound on address %s.\n", address)

	publisher, err = context.NewSocket(zmq.XPUB)
	if err != nil {
		status <- false
		log.Fatal("Failed to initialize the publisher socket.\n")
	}
	defer publisher.Close()

	// Bind the publisher
	address = fmt.Sprintf("tcp://%s:%v", pub_address, pubPort)
	err = publisher.Bind(address)
	if err != nil {
		status <- false
		log.Fatalf("Publisher bind on address %s failed.\n", address)
	}
	log.Printf("Publisher bound on address %s.\n", address)

	log.Println("Proxy successfully launched...")

	// Run the proxy: forward messages between the XSUB and XPUB sockets.
	zmq.Proxy(subscriber, publisher, nil)
}
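// A minimal sketch of endpoints talking through the proxy above, assuming the
// same binding: publishers connect to the XSUB side (port 6001) and
// subscribers connect to the XPUB side (port 6000). The function name, host,
// payload, and send interval are illustrative.
func proxyEndpointsSketch() {
	context, err := zmq.NewContext()
	if err != nil {
		log.Fatal("Failed to initialize the ZeroMQ context.")
	}
	defer context.Close()

	// Publisher side runs in the background with its own socket, sending into
	// the proxy's XSUB socket.
	go func() {
		publisher, err := context.NewSocket(zmq.PUB)
		if err != nil {
			log.Fatal("Failed to initialize the publisher socket.")
		}
		publisher.Connect("tcp://localhost:6001")

		for {
			publisher.Send([]byte("topic update"), 0)
			time.Sleep(time.Second)
		}
	}()

	// Subscriber side: receives everything the proxy forwards out of the XPUB socket.
	subscriber, err := context.NewSocket(zmq.SUB)
	if err != nil {
		log.Fatal("Failed to initialize the subscriber socket.")
	}
	defer subscriber.Close()
	subscriber.Connect("tcp://localhost:6000")
	subscriber.SetSockOptString(zmq.SUBSCRIBE, "")

	for {
		msg, err := subscriber.Recv(0)
		if err != nil {
			log.Println("Subscriber receive error:", err)
			continue
		}
		log.Printf("Received via proxy: %s", msg)
	}
}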
func (t *TransportZmq) bridge(bridge_in *zmq.Socket) {
	var message interface{}

	// Wait on channel, passing into socket
	// This keeps the socket in a single thread, otherwise we have to lock the entire publisher
	runtime.LockOSThread()

BridgeLoop:
	for {
		select {
		case notify := <-t.bridge_chan:
			bridge_in.Send(notify, 0)

			// Shutdown?
			if string(notify) == zmq_signal_shutdown {
				break BridgeLoop
			}
		case message = <-t.recv_bridge_chan:
			// The reason we flush recv through the bridge and not directly to recv_chan is so that if
			// the poller was quick and had to cache a receive as the channel was full, it will stop
			// polling - flushing through bridge allows us to signal poller to start polling again
			// It is not the publisher's responsibility to do this, and TLS wouldn't need it
			bridge_in.Send([]byte(zmq_signal_input), 0)

			// Keep trying to forward on the message
		ForwardLoop:
			for {
				select {
				case notify := <-t.bridge_chan:
					bridge_in.Send(notify, 0)

					// Shutdown?
					if string(notify) == zmq_signal_shutdown {
						break BridgeLoop
					}
				case t.recv_chan <- message:
					break ForwardLoop
				}
			}
		}
	}

	// We should linger by default to ensure shutdown is transmitted
	bridge_in.Close()
	runtime.UnlockOSThread()
	t.wait.Done()
}
func (t *TransportZmq) poller(bridge_out *zmq.Socket) {
	// ZMQ sockets are not thread-safe, so we have to send/receive on the same thread.
	// Thus, we cannot use a sender/receiver thread pair like we can with TLS, so we use a single-threaded poller instead.
	// In order to asynchronously send and receive we just poll and take the necessary actions.
	// When data is ready to send we'll get a channel ping that is bridged to ZMQ so we can then send data.
	// For receiving, we receive here and bridge it to the channels, then receive more once that's through.
	runtime.LockOSThread()

	t.poll_items = make([]zmq.PollItem, 3)

	t.poll_items[0].Socket = bridge_out
	t.poll_items[0].Events = zmq.POLLIN | zmq.POLLOUT

	t.poll_items[1].Socket = t.dealer
	t.poll_items[1].Events = zmq.POLLIN | zmq.POLLOUT

	t.poll_items[2].Socket = t.monitor
	t.poll_items[2].Events = zmq.POLLIN

	for {
		// Poll for events
		if _, err := zmq.Poll(t.poll_items, -1); err != nil {
			// Retry on EINTR
			if err == syscall.EINTR {
				continue
			}

			// Failure
			t.recv_chan <- fmt.Errorf("zmq.Poll failure %s", err)
			break
		}

		// Process control channel
		if t.poll_items[0].REvents&zmq.POLLIN != 0 {
			if !t.processControlIn(bridge_out) {
				break
			}
		}

		// Process dealer send
		if t.poll_items[1].REvents&zmq.POLLOUT != 0 {
			if !t.processDealerOut() {
				break
			}
		}

		// Process dealer receive
		if t.poll_items[1].REvents&zmq.POLLIN != 0 {
			if !t.processDealerIn() {
				break
			}
		}

		// Process monitor receive
		if t.poll_items[2].REvents&zmq.POLLIN != 0 {
			if !t.processMonitorIn() {
				break
			}
		}
	}

	bridge_out.Close()
	runtime.UnlockOSThread()
	t.wait.Done()
}
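// A stripped-down sketch of the same poll pattern outside the transport
// struct: a single already-connected socket polled with an infinite timeout,
// retrying on EINTR exactly as the poller above does. The function name is
// illustrative, and it assumes the socket type exposes Recv(flags) as in the
// other examples here.
func pollLoopSketch(socket *zmq.Socket) {
	items := make([]zmq.PollItem, 1)
	items[0].Socket = socket
	items[0].Events = zmq.POLLIN

	for {
		// Block until the socket is readable (or the call is interrupted).
		if _, err := zmq.Poll(items, -1); err != nil {
			if err == syscall.EINTR {
				continue
			}
			log.Println("zmq.Poll failure:", err)
			break
		}

		// Drain one message per poll wake-up, mirroring the receive branch above.
		if items[0].REvents&zmq.POLLIN != 0 {
			msg, err := socket.Recv(0)
			if err != nil {
				log.Println("Receive error:", err)
				continue
			}
			log.Printf("Polled message: %s", msg)
		}
	}
}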
// The listen-and-serve loop for Mongrel2; expects an address like this:
// @addr = string config parameter like this:
// m2go.ListenAndServe("tcp://127.0.0.1:9555|tcp://127.0.0.1:9556|54c6755b-9628-40a4-9a2d-cc82a816345e", handler)
func ListenAndServe(addr string, handler http.Handler) {
	var Context zmq.Context
	var SocketIn zmq.Socket
	var SocketOut zmq.Socket
	var hasExited bool
	var err error

	m2addr := strings.Split(addr, "|")
	// log.Printf("m2go serving %s\n", addr)

	/* Connection to ZMQ setup */
	connect := func() {
		if Context, err = zmq.NewContext(); err != nil {
			panic("No ZMQ Context?")
		}

		// listen for incoming requests
		if SocketIn, err = Context.NewSocket(zmq.PULL); err != nil {
			panic("No ZMQ Socket?")
		}
		SocketIn.Connect(m2addr[0])

		if SocketOut, err = Context.NewSocket(zmq.PUB); err != nil {
			panic("No ZMQ Socket Outbound??")
		}
		// outbound response on a different channel
		SocketOut.SetSockOptString(zmq.IDENTITY, m2addr[2])
		//socket.SetSockOptString(zmq.SUBSCRIBE, filter)
		SocketOut.Connect(m2addr[1])
	}
	connect()

	handleResponse := func(response []byte) {
		SocketOut.Send(response, 0)
	}

	stopper := func() {
		if !hasExited {
			hasExited = true
			SocketOut.Close()
			SocketIn.Close()
			Context.Close()
		}
	}
	defer stopper()

	for {
		// each inbound request
		m2data, err := SocketIn.Recv(0)
		//log.Println(string(m2data))
		if err != nil {
			log.Println("ZMQ Socket Input accept error ", err.Error())
		} else {
			go HandleM2Request(m2data, handleResponse, handler)
		}
	}

	// log.Print("after close of runner") // unreachable: the loop above never exits
}
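// A hypothetical caller of ListenAndServe, wiring a plain net/http handler to
// a Mongrel2 backend from inside the same package (an external caller would
// use m2go.ListenAndServe). The three "|"-separated parts are the address the
// PULL socket connects to for requests, the address the PUB socket connects to
// for responses, and the sender identity set on that PUB socket; the values
// shown are taken from the comment above and are illustrative.
func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello from m2go"))
	})

	// Blocks forever: ListenAndServe loops on SocketIn.Recv and never returns.
	ListenAndServe("tcp://127.0.0.1:9555|tcp://127.0.0.1:9556|54c6755b-9628-40a4-9a2d-cc82a816345e", handler)
}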