func main() {
	context, _ := zmq.NewContext()
	defer context.Close()

	frontend, _ := context.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	frontend.Bind("tcp://*:5555") // For clients

	backend, _ := context.NewSocket(zmq.ROUTER)
	defer backend.Close()
	backend.Bind("tcp://*:5556") // For workers

	// Queue of available workers
	workers := make([][]byte, 0)

	for {
		items := zmq.PollItems{
			zmq.PollItem{Socket: backend, Events: zmq.POLLIN},
			zmq.PollItem{Socket: frontend, Events: zmq.POLLIN},
		}

		// Poll frontend only if we have available workers
		if len(workers) > 0 {
			zmq.Poll(items, -1)
		} else {
			zmq.Poll(items[:1], -1)
		}

		// Handle worker activity on backend
		if items[0].REvents&zmq.POLLIN != 0 {
			// Use worker identity for load-balancing
			msg, err := backend.RecvMultipart(0)
			if err != nil {
				panic(err) // Interrupted
			}
			address := msg[0]
			workers = append(workers, address)

			// Forward message to client if it's not a READY
			if reply := msg[2:]; string(reply[0]) != LRU_READY {
				frontend.SendMultipart(reply, 0)
			}
		}

		if items[1].REvents&zmq.POLLIN != 0 {
			// Get client request, route to the most recently ready worker
			msg, err := frontend.RecvMultipart(0)
			if err != nil {
				panic(err) // Interrupted
			}
			last := workers[len(workers)-1]
			workers = workers[:len(workers)-1]
			request := append([][]byte{last, nil}, msg...)
			backend.SendMultipart(request, 0)
		}
	}
}
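// A broker like the one above expects workers that announce themselves with
// a READY frame and then echo the reply envelope back. A minimal sketch of
// such a worker, assuming the LRU_READY constant and backend endpoint used
// above (the "OK" payload is illustrative, not from the original code):
func worker_task() {
	context, _ := zmq.NewContext()
	defer context.Close()

	worker, _ := context.NewSocket(zmq.REQ)
	defer worker.Close()
	worker.Connect("tcp://localhost:5556")

	// Tell the broker we're ready for work
	worker.Send([]byte(LRU_READY), 0)
	for {
		// Expect [client address][empty][request]
		msg, err := worker.RecvMultipart(0)
		if err != nil {
			break // Interrupted
		}
		msg[len(msg)-1] = []byte("OK") // replace the payload with a reply
		worker.SendMultipart(msg, 0)
	}
}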
func (s *FFS) Send(data []byte, flags zmq.SendRecvOption) (err error) {
	for {
		s.ensure_connect()

		pi := zmq.PollItems{zmq.PollItem{Socket: s.socket, Events: zmq.POLLOUT}}
		var count int
		count, err = zmq.Poll(pi, s.SendTimeout)
		if count == 0 {
			// Not ready in time (any error here came from Poll itself);
			// fail the socket and try again.
			log.Printf("%s: timed out waiting to Send(): %s\n", s.endpoint, err)
			s.fail_socket()
		} else {
			//log.Printf("%s: sending %d payload\n", s.endpoint, len(data))
			err = s.socket.Send(data, flags)
			if err != nil {
				log.Printf("%s: Failed to Send() %d byte message: %s\n",
					s.endpoint, len(data), err)
				s.fail_socket()
			} else {
				// Success!
				break
			}
		}
	}
	return
}
func (s *FFS) Recv(flags zmq.SendRecvOption) (data []byte, err error) {
	s.ensure_connect()

	pi := zmq.PollItems{zmq.PollItem{Socket: s.socket, Events: zmq.POLLIN}}
	count, err := zmq.Poll(pi, s.RecvTimeout)
	if count == 0 {
		// Not ready in time; fail the socket so the next call
		// reconnects, and report a timeout to the caller.
		s.fail_socket()

		err = syscall.ETIMEDOUT
		log.Printf("%s: timed out waiting to Recv(): %s\n", s.endpoint, err)
		return nil, err
	}

	data, err = s.socket.Recv(flags)
	if err != nil {
		log.Printf("%s: Failed to Recv() %d byte message: %s\n",
			s.endpoint, len(data), err)
		s.fail_socket()
		return nil, err
	}
	return
}
func broker_task() {
	context, _ := zmq.NewContext()
	frontend, _ := context.NewSocket(zmq.ROUTER)
	backend, _ := context.NewSocket(zmq.ROUTER)
	defer context.Close()
	defer frontend.Close()
	defer backend.Close()
	frontend.Bind("tcp://*:5555")
	backend.Bind("tcp://*:5556")

	// Initialize poll set
	items := zmq.PollItems{
		zmq.PollItem{Socket: frontend, Events: zmq.POLLIN},
		zmq.PollItem{Socket: backend, Events: zmq.POLLIN},
	}
	for {
		zmq.Poll(items, -1)
		switch {
		case items[0].REvents&zmq.POLLIN != 0:
			// Rewrite the first byte of the identity frame so the
			// message is routed to the matching peer on the other side
			msg, _ := frontend.RecvMultipart(0)
			msg[0][0] = 'W'
			backend.SendMultipart(msg, 0)
		case items[1].REvents&zmq.POLLIN != 0:
			msg, _ := backend.RecvMultipart(0)
			msg[0][0] = 'C'
			frontend.SendMultipart(msg, 0)
		}
	}
}
func main() {
	context, _ := zmq.NewContext()
	defer context.Close()

	frontend, _ := context.NewSocket(zmq.ROUTER)
	backend, _ := context.NewSocket(zmq.DEALER)
	defer frontend.Close()
	defer backend.Close()
	frontend.Bind("tcp://*:5559")
	backend.Bind("tcp://*:5560")

	// Initialize poll set
	toPoll := zmq.PollItems{
		zmq.PollItem{Socket: frontend, Events: zmq.POLLIN},
		zmq.PollItem{Socket: backend, Events: zmq.POLLIN},
	}
	for {
		_, _ = zmq.Poll(toPoll, -1)
		switch {
		case toPoll[0].REvents&zmq.POLLIN != 0:
			parts, _ := frontend.RecvMultipart(0)
			backend.SendMultipart(parts, 0)
		case toPoll[1].REvents&zmq.POLLIN != 0:
			parts, _ := backend.RecvMultipart(0)
			frontend.SendMultipart(parts, 0)
		}
	}
}
func (self *mdClient) Send(service []byte, request [][]byte) (reply [][]byte) {
	// Prefix request with protocol frames
	// Frame 1: "MDPCxy" (six bytes, MDP/Client x.y)
	// Frame 2: Service name (printable string)
	frame := append([][]byte{[]byte(MDPC_CLIENT), service}, request...)
	if self.verbose {
		log.Printf("I: send request to '%s' service:", service)
		Dump(request)
	}

	for retries := self.retries; retries > 0; {
		self.client.SendMultipart(frame, 0)
		items := zmq.PollItems{
			zmq.PollItem{Socket: self.client, Events: zmq.POLLIN},
		}

		_, err := zmq.Poll(items, self.timeout)
		if err != nil {
			panic(err) // Interrupted
		}

		if item := items[0]; item.REvents&zmq.POLLIN != 0 {
			msg, _ := self.client.RecvMultipart(0)
			if self.verbose {
				log.Println("I: received reply: ")
				Dump(msg)
			}
			// We would handle malformed replies better in real code
			if len(msg) < 3 {
				panic("Error msg len")
			}

			header := msg[0]
			if string(header) != MDPC_CLIENT {
				panic("Error header")
			}

			replyService := msg[1]
			if string(service) != string(replyService) {
				panic("Error reply service")
			}

			reply = msg[2:]
			break
		} else if retries--; retries > 0 {
			if self.verbose {
				log.Println("W: no reply, reconnecting...")
			}
			self.reconnect()
		} else {
			if self.verbose {
				log.Println("W: permanent error, abandoning")
			}
			break
		}
	}
	return
}
func (af *AtFrame) SendCall(dsc string, data interface{}, timeout int) (interface{}, error) {
	// Lazily (re)create the REQ socket when the destination changes
	if dsc != af.lastREQId {
		if af.cmdREQ != nil {
			af.cmdREQ.Close()
		}
		af.lastREQId = dsc
		af.cmdREQ, _ = af.ZmqContext.NewSocket(zmq.REQ)
		af.cmdREQ.Connect(AF_ZMQ_BASE_REP + af.lastREQId)
	}

	af_cmd := AtFrameCommandJson{Src: af.id, Dsc: dsc, Cmd: AF_CMD_CALL, Data: data}
	af_cmd_json, err := json.Marshal(af_cmd)
	if err != nil {
		return nil, err
	}

	err = af.cmdREQ.Send(af_cmd_json, 0)
	if err != nil {
		return nil, err
	}

	// Wait for the reply, but no longer than the caller's timeout
	pi := []zmq.PollItem{zmq.PollItem{Socket: af.cmdREQ, Events: zmq.POLLIN}}
	event_count, err := zmq.Poll(pi, time.Millisecond*time.Duration(timeout))
	if err != nil {
		return nil, err
	}
	if event_count == 0 {
		return nil, errors.New("af call wait timeout")
	}

	buf, rx_err := af.cmdREQ.Recv(0)
	if rx_err != nil {
		return nil, rx_err
	}

	err = json.Unmarshal(buf, &af.cmdJSON)
	if err != nil {
		return nil, err
	}
	return af.cmdJSON.Data, err
}
// WaitForSend polls a ZMQ socket until it's writable. After this returns true,
// you should be able to write to the socket immediately. Note that this often
// returns true while a socket is still being connected -- ZMQ likes to buffer.
func WaitForSend(sock *zmq.Socket, timeout int) bool {
	pi := make([]zmq.PollItem, 1)
	pi[0] = zmq.PollItem{Socket: sock, Events: zmq.POLLOUT}
	zmq.Poll(pi, time.Duration(timeout)*time.Second)
	// Test the POLLOUT bit rather than comparing the whole event mask,
	// in case other event bits are also set.
	return pi[0].REvents&zmq.POLLOUT != 0
}
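// A minimal usage sketch for WaitForSend. The five-second budget and the
// trySend wrapper are illustrative, not part of the original API:
func trySend(sock *zmq.Socket, payload []byte) error {
	// Bound the wait instead of blocking indefinitely in Send()
	if !WaitForSend(sock, 5) {
		return errors.New("socket not writable after 5s")
	}
	return sock.Send(payload, 0)
}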
func (self *mdClient) Send(service string, request [][]byte) (reply [][]byte, err error) {
	frame := append([][]byte{[]byte(MDPC_CLIENT), []byte(service)}, request...)
	for retries := self.retries; retries > 0; retries-- {
		// Reconnect before retrying if the previous attempt failed
		if err != nil {
			err = self.connectToBroker()
			if err != nil {
				continue
			}
		}

		err = self.client.SendMultipart(frame, 0)
		if err != nil {
			continue
		}

		items := zmq.PollItems{
			zmq.PollItem{Socket: self.client, Events: zmq.POLLIN},
		}
		_, err = zmq.Poll(items, self.timeout)
		if err != nil {
			continue
		}

		if item := items[0]; item.REvents&zmq.POLLIN != 0 {
			msg, e := self.client.RecvMultipart(0)
			if e != nil {
				err = e
				continue
			}
			if len(msg) < 3 {
				err = fmt.Errorf("Invalid msg length %d", len(msg))
				continue
			}
			header := msg[0]
			if string(header) != MDPC_CLIENT {
				err = fmt.Errorf("Incorrect header: %s, expected: %s", header, MDPC_CLIENT)
				continue
			}
			replyService := msg[1]
			if service != string(replyService) {
				err = fmt.Errorf("Incorrect reply service: %s, expected: %s", service, replyService)
				continue
			}
			reply = msg[2:]
			err = nil
			return
		}
		err = fmt.Errorf("Poll timeout")
	}
	return
}
// Poll polls, with the specified timeout, all sockets for all events that have
// been registered with event handlers.
//
// A negative timeout means forever; otherwise, timeout will be truncated to
// millisecond precision.
//
// Execution will halt and return the first error encountered from polling
// or handling.
func (p *Poller) Poll(timeout time.Duration) (err error) {
	p.locker.Lock()
	defer p.locker.Unlock()

	// This PollItems construction may become inefficient for large
	// numbers of handlers.
	baseItems := make(zmq.PollItems, 0, len(p.items))
	for s, item := range p.items {
		baseItems = append(baseItems, zmq.PollItem{
			Socket: s,
			Events: item.events,
		})
	}

	p.logf("poller: polling %d sockets for %s", len(baseItems), timeout)
	n, err := zmq.Poll(baseItems, timeout)

	// Possible errors returned from Poll() are: ETERM, meaning a
	// context was closed; EFAULT, meaning a mistake was made in
	// setting up the PollItems list; and EINTR, meaning a signal
	// was delivered before any events were available. Here, we
	// treat all errors the same:
	if err != nil {
		p.logf("poller: error while polling: %s", err)
		return err
	}

	if n > 0 {
		p.logf("poller: events detected.")
		// Dispatch on the returned events (REvents), draining each
		// readable socket into its associated channels:
		for _, base := range baseItems {
			item := p.items[base.Socket]
			if (base.REvents&zmq.POLLIN) != 0 && item.handleIn != nil {
				for {
					m, err := base.Socket.RecvMultipart(zmq.DONTWAIT)
					if err == syscall.EAGAIN {
						break
					} else if err != nil {
						if item.handleErr != nil {
							item.handleErr(err)
						}
						break
					}
					item.handleIn(m)
				}
			}
			if (base.REvents&zmq.POLLOUT) != 0 && item.handleOut != nil {
				item.handleOut()
			}
		}
	}
	return nil
}
func SleepCheckMsg(sleep_time int) {
	ad.Println("SleepCheckMsg() start")

	// Check every millisecond whether we have been asked to stop.
	start_time := time.Now()
	time_out_msec := time.Duration(sleep_time) * time.Millisecond
	for !ThreadCheckMsgReqEnd {
		ThreadCheckMsgLive++

		current_time := time.Now()
		pass_time := current_time.Sub(start_time)
		if pass_time > time_out_msec {
			break
		}

		pi := zmq.PollItems{
			zmq.PollItem{Socket: PortInAsciiSUB, Events: zmq.POLLIN},
		}
		event_count, err := zmq.Poll(pi, 1*time.Millisecond)
		if err != nil {
			ad.Println("poll failed [%s]", err)
			reason := fmt.Sprintf("poll failed [%s]", err)
			ar.SetResultError(reason)
			break
		}
		if event_count > 0 && pi[0].REvents&zmq.POLLIN != 0 {
			buf, err := pi[0].Socket.Recv(0)
			if err != nil {
				ad.Println("read failed [%s]", err)
				reason := fmt.Sprintf("read failed [%s]", err)
				ar.SetResultError(reason)
				break
			}
			str := string(buf)
			ad.Println("IN ASCII : [%s]\n", str)
		}
	}
	ad.Println("SleepCheckMsg() end")
}
func (conn *Connection) Recv(timeout float64) (message Message, err error) {
	pi := zmq.PollItem{Socket: conn.sock, Events: zmq.POLLIN}
	pis := zmq.PollItems{pi}
	// timeout is in seconds; this gozmq Poll variant takes microseconds
	_, err = zmq.Poll(pis, int64(timeout*1e6))
	if err != nil {
		// fall through and return the poll error as-is
	} else if i := pis[0]; i.REvents&zmq.POLLIN != 0 {
		message, err = conn.sock.RecvMultipart(0)
	} else {
		err = timeoutError{"Connection.Recv() timeout"}
	}
	return
}
func (conn *Connection) Send(message Message, timeout float64) (err error) {
	pi := zmq.PollItem{Socket: conn.sock, Events: zmq.POLLOUT}
	pis := zmq.PollItems{pi}
	// timeout is in seconds; this gozmq Poll variant takes microseconds
	_, err = zmq.Poll(pis, int64(timeout*1e6))
	if err != nil {
		// fall through and return the poll error as-is
	} else if i := pis[0]; i.REvents&zmq.POLLOUT != 0 {
		err = conn.sock.SendMultipart(message, 0)
	} else {
		err = timeoutError{"Connection.Send() timeout"}
	}
	return
}
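// Taken together, Recv and Send give point-to-point calls with a deadline.
// A hypothetical round-trip helper (assuming Message is the multipart
// [][]byte type used above; the 2.5-second budget is illustrative):
func roundTrip(conn *Connection, req Message) (Message, error) {
	// Allow 2.5 seconds for each direction; a timeoutError comes back
	// if the peer isn't ready in time.
	if err := conn.Send(req, 2.5); err != nil {
		return nil, err
	}
	return conn.Recv(2.5)
}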
func ThreadRS232Tx() {
	ad.Println("ThreadRS232Tx() start")

	ThreadRS232TxReqEnd = false
	ThreadRS232TxRun = true
	for !ThreadRS232TxReqEnd {
		ThreadRS232Live++

		// ad.Println("wait read PortTxSUB")
		pi := zmq.PollItems{
			zmq.PollItem{Socket: PortTxSUB, Events: zmq.POLLIN},
		}
		event_count, err := zmq.Poll(pi, 1*time.Millisecond)
		if err != nil {
			ad.Println("poll failed [%s]", err)
			reason := fmt.Sprintf("poll failed [%s]", err)
			ar.SetResultError(reason)
			break
		}
		if event_count > 0 && pi[0].REvents&zmq.POLLIN != 0 {
			buf, err := pi[0].Socket.Recv(0)
			if err != nil {
				ad.Println("read failed [%s]", err)
				reason := fmt.Sprintf("read failed [%s]", err)
				ar.SetResultError(reason)
				break
			}
			// Forward the received frame to the serial port
			RS232Port.Write(buf)
			str := string(buf)
			ad.Println("OUT ASCII : [%s]\n", str)
		}
	}

	ad.Println("ThreadRS232Tx() End")
	ThreadRS232TxRun = false
}
// RunKernel is the main entry point to start the kernel. This is what is called by the
// igo executable.
func RunKernel(connection_file string, logwriter io.Writer) {
	logger = log.New(logwriter, "igopkg ", log.LstdFlags)
	SetupExecutionEnvironment()

	var conn_info ConnectionInfo
	bs, err := ioutil.ReadFile(connection_file)
	if err != nil {
		log.Fatalln(err)
	}
	err = json.Unmarshal(bs, &conn_info)
	if err != nil {
		log.Fatalln(err)
	}
	logger.Printf("%+v\n", conn_info)

	sockets := PrepareSockets(conn_info)

	pi := zmq.PollItems{
		zmq.PollItem{Socket: sockets.Shell_socket, Events: zmq.POLLIN},
		zmq.PollItem{Socket: sockets.Stdin_socket, Events: zmq.POLLIN},
		zmq.PollItem{Socket: sockets.Control_socket, Events: zmq.POLLIN},
	}

	var msgparts [][]byte
	// Message receiving loop:
	for {
		_, err = zmq.Poll(pi, -1)
		if err != nil {
			log.Fatalln(err)
		}
		switch {
		case pi[0].REvents&zmq.POLLIN != 0: // shell socket
			msgparts, _ = pi[0].Socket.RecvMultipart(0)
			msg, ids, err := WireMsgToComposedMsg(msgparts, sockets.Key)
			if err != nil {
				fmt.Println(err)
				return
			}
			HandleShellMsg(MsgReceipt{msg, ids, sockets})
		case pi[1].REvents&zmq.POLLIN != 0: // stdin socket - not implemented.
			pi[1].Socket.RecvMultipart(0)
		case pi[2].REvents&zmq.POLLIN != 0: // control socket - treat like shell socket.
			msgparts, _ = pi[2].Socket.RecvMultipart(0)
			msg, ids, err := WireMsgToComposedMsg(msgparts, sockets.Key)
			if err != nil {
				fmt.Println(err)
				return
			}
			HandleShellMsg(MsgReceipt{msg, ids, sockets})
		}
	}
}
// Polls a bunch of ZeroMQ sockets and notifies the result through a
// channel. This makes it possible to combine ZeroMQ polling with Go's
// own built-in channels.
func asyncPoll(notifier chan zmqPollResult, items zmq.PollItems, stop chan bool) {
	for {
		timeout := time.Duration(1) * time.Second
		count, err := zmq.Poll(items, timeout)
		if count > 0 || err != nil {
			notifier <- zmqPollResult{err}
		}
		select {
		case <-stop:
			stop <- true
			return
		default:
		}
	}
}
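// A sketch of how a caller might drive asyncPoll, multiplexing socket
// readiness with an application quit channel via select. zmqPollResult is
// assumed to carry just the poll error, and the consumer is assumed to
// drain the socket promptly (ZeroMQ sockets are not thread-safe, so a
// production design would need more care here):
func consumeLoop(socket *zmq.Socket, items zmq.PollItems, quit chan bool) {
	notifier := make(chan zmqPollResult)
	stop := make(chan bool, 1) // buffered so signalling stop never blocks
	go asyncPoll(notifier, items, stop)

	for {
		select {
		case result := <-notifier:
			if result.err != nil {
				stop <- true
				return
			}
			// Drain everything that is ready right now
			for {
				data, err := socket.Recv(zmq.DONTWAIT)
				if err != nil {
					break // nothing left to read
				}
				fmt.Println("received:", string(data))
			}
		case <-quit:
			stop <- true
			// Keep draining notifier until asyncPoll echoes the stop,
			// so it never blocks forever on a final notification
			for {
				select {
				case <-notifier:
				case <-stop:
					return
				}
			}
		}
	}
}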
func (af *AtFrame) MainLoop() (int, error) {
	af.ReqEnd = false
	for !af.ReqEnd {
		pi := af.ZmqPollItems
		event_count, err := zmq.Poll(pi, af.Period)
		if err != nil {
			// Surface poll errors instead of silently stopping the loop
			return 0, err
		}

		if event_count == 0 {
			// Timeout: run the periodic callback, if any
			if af.OnPeriod != nil {
				if af.OnPeriod(af) {
					af.ReqEnd = true
				}
			}
			continue
		}

		// Slots 0 and 1 are handled by CmdMain; the rest by RxIn
		if pi[0].REvents&zmq.POLLIN != 0 {
			if _, err = af.CmdMain(0); err != nil {
				af.ReqEnd = true
			}
		}
		if pi[1].REvents&zmq.POLLIN != 0 {
			if _, err = af.CmdMain(1); err != nil {
				af.ReqEnd = true
			}
		}
		for i := 2; i < len(pi); i++ {
			if pi[i].REvents&zmq.POLLIN != 0 {
				if _, err = af.RxIn(i); err != nil {
					af.ReqEnd = true
				}
			}
		}
	}
	return 0, nil
}
func (server *Server) runBroker() {
	context, _ := zmq.NewContext()
	defer context.Close()

	clientAddress := fmt.Sprintf(clientAddressTemplate, server.port)
	frontend := newBoundSocket(context, clientAddress, zmq.ROUTER)
	defer frontend.Close()

	toPoll := zmq.PollItems{
		zmq.PollItem{Socket: frontend, Events: zmq.POLLIN},
	}

	socketByName := make(map[string]zmq.Socket)
	for name := range server.services {
		serviceAddress := fmt.Sprintf(serviceAddressTemplate, name)
		serviceSocket := newBoundSocket(context, serviceAddress, zmq.DEALER)
		defer serviceSocket.Close()
		socketByName[name] = serviceSocket
		toPoll = append(toPoll,
			zmq.PollItem{Socket: serviceSocket, Events: zmq.POLLIN},
		)
	}

	numSockets := len(toPoll)
	for {
		zmq.Poll(toPoll, noTimeOut)
		if toPoll[0].REvents&zmq.POLLIN != 0 {
			// The last frame names the target service
			messages, _ := toPoll[0].Socket.RecvMultipart(0)
			serviceName := string(messages[len(messages)-1])
			println("Request for service:", serviceName)
			if serviceSocket, found := socketByName[serviceName]; found {
				messages = messages[:len(messages)-1]
				println("forwarding to service socket")
				serviceSocket.SendMultipart(messages, 0)
			}
		} else {
			// Otherwise relay the first ready service reply to the client
			for i := 1; i < numSockets; i++ {
				if toPoll[i].REvents&zmq.POLLIN != 0 {
					messages, _ := toPoll[i].Socket.RecvMultipart(0)
					frontend.SendMultipart(messages, 0)
					break
				}
			}
		}
	}
}
func main() {
	context, _ := zmq.NewContext()
	defer context.Close()

	// Socket to receive messages on
	receiver, _ := context.NewSocket(zmq.PULL)
	defer receiver.Close()
	receiver.Connect("tcp://localhost:5557")

	// Socket to send messages to task sink
	sender, _ := context.NewSocket(zmq.PUSH)
	defer sender.Close()
	sender.Connect("tcp://localhost:5558")

	// Socket for control input
	controller, _ := context.NewSocket(zmq.SUB)
	defer controller.Close()
	controller.Connect("tcp://localhost:5559")
	controller.SetSockOptString(zmq.SUBSCRIBE, "")

	items := zmq.PollItems{
		zmq.PollItem{Socket: receiver, Events: zmq.POLLIN},
		zmq.PollItem{Socket: controller, Events: zmq.POLLIN},
	}

	// Process tasks forever
	for {
		zmq.Poll(items, -1)
		switch {
		case items[0].REvents&zmq.POLLIN != 0:
			msgbytes, _ := receiver.Recv(0)
			fmt.Printf("%s.", string(msgbytes))

			// Do the work
			msec, _ := strconv.ParseInt(string(msgbytes), 10, 64)
			time.Sleep(time.Duration(msec) * time.Millisecond)

			// Send results to sink
			sender.Send([]byte(""), 0)
		case items[1].REvents&zmq.POLLIN != 0:
			// Any message on the controller acts as a kill signal
			fmt.Println("stopping")
			return
		}
	}
}
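// The controller SUB socket above subscribes to everything, so any broadcast
// works as the kill signal. A sketch of the sending side, following the
// zguide's sink convention (the endpoint matches the Connect above; the
// "KILL" payload and the flush delay are illustrative):
func sendKill() {
	context, _ := zmq.NewContext()
	defer context.Close()

	controller, _ := context.NewSocket(zmq.PUB)
	defer controller.Close()
	controller.Bind("tcp://*:5559")

	// Workers subscribe to everything, so any frame stops them
	controller.Send([]byte("KILL"), 0)

	// Give 0MQ time to flush the message before the context closes
	time.Sleep(time.Second)
}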
func server_task() {
	context, _ := zmq.NewContext()
	defer context.Close()

	// Frontend socket talks to clients over ipc
	frontend, _ := context.NewSocket(zmq.ROUTER)
	frontend.Bind("ipc://frontend.ipc")
	defer frontend.Close()

	// Backend socket talks to workers over ipc
	backend, _ := context.NewSocket(zmq.DEALER)
	backend.Bind("ipc://backend.ipc")
	defer backend.Close()

	// Launch pool of worker threads, precise number is not critical
	for i := 0; i < 5; i++ {
		go server_worker()
	}

	// Connect backend to frontend via a proxy
	items := zmq.PollItems{
		zmq.PollItem{Socket: frontend, Events: zmq.POLLIN},
		zmq.PollItem{Socket: backend, Events: zmq.POLLIN},
	}
	for {
		_, err := zmq.Poll(items, -1)
		if err != nil {
			fmt.Println("Server exited with error:", err)
			break
		}
		if items[0].REvents&zmq.POLLIN != 0 {
			parts, _ := frontend.RecvMultipart(0)
			backend.SendMultipart(parts, 0)
		}
		if items[1].REvents&zmq.POLLIN != 0 {
			parts, _ := backend.RecvMultipart(0)
			frontend.SendMultipart(parts, 0)
		}
	}
}
func main() {
	if len(os.Args) < 2 {
		fmt.Println("syntax: peering1 me {you}...")
		return
	}
	myself := os.Args[1]
	fmt.Printf("I: preparing broker at %s...\n", myself)
	rand.Seed(time.Now().UnixNano())

	context, _ := zmq.NewContext()
	statebe, _ := context.NewSocket(zmq.PUB)
	defer context.Close()
	defer statebe.Close()

	// Bind state backend to endpoint
	bindAddress := fmt.Sprintf("ipc://%s-state.ipc", myself)
	statebe.Bind(bindAddress)

	// Connect statefe to all peers
	statefe, _ := context.NewSocket(zmq.SUB)
	defer statefe.Close()
	statefe.SetSubscribe("")
	for i := 2; i < len(os.Args); i++ {
		peer := os.Args[i]
		fmt.Printf("I: connecting to state backend at '%s'\n", peer)
		statefe.Connect(fmt.Sprintf("ipc://%s-state.ipc", peer))
	}

	items := zmq.PollItems{
		zmq.PollItem{Socket: statefe, Events: zmq.POLLIN},
	}
	for {
		zmq.Poll(items, time.Second)

		// Handle incoming status messages
		if items[0].REvents&zmq.POLLIN != 0 {
			msg, _ := statefe.RecvMultipart(0)
			fmt.Printf("%s - %s workers free\n", string(msg[0]), string(msg[1]))
		} else {
			// Send random values for worker availability
			statebe.SendMultipart([][]byte{
				[]byte(myself),
				[]byte(fmt.Sprintf("%d", rand.Intn(10))),
			}, 0)
		}
	}
}
// Main broker working loop
func (self *mdBroker) Run() {
	for {
		items := zmq.PollItems{
			zmq.PollItem{Socket: self.socket, Events: zmq.POLLIN},
		}

		_, err := zmq.Poll(items, HEARTBEAT_INTERVAL)
		if err != nil {
			panic(err) // Interrupted
		}

		if item := items[0]; item.REvents&zmq.POLLIN != 0 {
			msg, _ := self.socket.RecvMultipart(0)
			if self.verbose {
				log.Printf("I: received message:")
				Dump(msg)
			}

			sender := msg[0]
			header := msg[2]
			msg = msg[3:]

			if string(header) == MDPC_CLIENT {
				self.processClient(sender, msg)
			} else if string(header) == MDPW_WORKER {
				self.processWorker(sender, msg)
			} else {
				log.Println("E: invalid message:")
				Dump(msg)
			}
		}

		// Purge expired workers and heartbeat the waiting ones
		if self.heartbeatAt.Before(time.Now()) {
			self.purgeWorkers()
			for elem := self.waiting.Front(); elem != nil; elem = elem.Next() {
				worker, _ := elem.Value.(*mdbWorker)
				self.sendToWorker(worker, MDPW_HEARTBEAT, nil, nil)
			}
			self.heartbeatAt = time.Now().Add(HEARTBEAT_INTERVAL)
		}
	}
}
func client_task(name string, i int) {
	context, _ := zmq.NewContext()
	client, _ := context.NewSocket(zmq.REQ)
	monitor, _ := context.NewSocket(zmq.PUSH)
	defer context.Close()
	defer client.Close()
	defer monitor.Close()

	client.SetIdentity(fmt.Sprintf("Client-%s-%d", name, i))
	client.Connect(fmt.Sprintf("ipc://%s-localfe.ipc", name))
	monitor.Connect(fmt.Sprintf("ipc://%s-monitor.ipc", name))

	for {
		time.Sleep(time.Duration(rand.Intn(5)) * time.Second)

		burst := rand.Intn(15)
		for burst > 0 {
			burst--
			task_id := fmt.Sprintf("%04X", rand.Intn(0x10000))

			// Send request with random hex ID
			client.Send([]byte(task_id), 0)

			// Wait max ten seconds for a reply, then complain
			pollset := zmq.PollItems{
				zmq.PollItem{Socket: client, Events: zmq.POLLIN},
			}
			zmq.Poll(pollset, 10*time.Second)

			if pollset[0].REvents&zmq.POLLIN != 0 {
				reply, err := client.Recv(0)
				if err != nil {
					break
				}
				if string(reply) != task_id {
					panic("Worker is supposed to answer us with our task id")
				}
				monitor.Send(reply, 0)
			} else {
				monitor.Send([]byte(fmt.Sprintf("E: CLIENT EXIT - lost task %s", task_id)), 0)
			}
		}
	}
}
func main() {
	context, _ := zmq.NewContext()
	defer context.Close()

	// Connect to task ventilator
	receiver, _ := context.NewSocket(zmq.PULL)
	defer receiver.Close()
	receiver.Connect("tcp://localhost:5557")

	// Connect to weather server
	subscriber, _ := context.NewSocket(zmq.SUB)
	defer subscriber.Close()
	subscriber.Connect("tcp://localhost:5556")
	subscriber.SetSockOptString(zmq.SUBSCRIBE, "10001")

	pi := zmq.PollItems{
		zmq.PollItem{Socket: receiver, Events: zmq.POLLIN},
		zmq.PollItem{Socket: subscriber, Events: zmq.POLLIN},
	}

	// Process messages from both sockets
	for {
		_, _ = zmq.Poll(pi, -1)
		switch {
		case pi[0].REvents&zmq.POLLIN != 0:
			// Process task
			pi[0].Socket.Recv(0) // eat the incoming message
		case pi[1].REvents&zmq.POLLIN != 0:
			// Process weather update
			pi[1].Socket.Recv(0) // eat the incoming message
		}
	}

	// We never get here
	fmt.Println("done")
}
func client_task() {
	context, _ := zmq.NewContext()
	defer context.Close()

	// Set random identity to make tracing easier
	identity := "Client-" + randomString()
	client, _ := context.NewSocket(zmq.DEALER)
	client.SetIdentity(identity)
	client.Connect("ipc://frontend.ipc")
	defer client.Close()

	items := zmq.PollItems{
		zmq.PollItem{Socket: client, Events: zmq.POLLIN},
	}

	reqs := 0
	for {
		// Read for a response 100 times for every message we send out
		for i := 0; i < 100; i++ {
			_, err := zmq.Poll(items, time.Millisecond*10)
			if err != nil {
				break // Interrupted
			}
			if items[0].REvents&zmq.POLLIN != 0 {
				reply, _ := client.Recv(0)
				fmt.Println(identity, "received", string(reply))
			}
		}
		reqs += 1
		req_str := "Request #" + strconv.Itoa(reqs)
		client.Send([]byte(req_str), 0)
	}
}
func (self *mdWorker) Recv(reply [][]byte) (msg [][]byte) {
	// Format and send the reply if we were provided one
	if len(reply) == 0 && self.expectReply {
		panic("Error reply")
	}

	if len(reply) > 0 {
		if len(self.replyTo) == 0 {
			panic("Error replyTo")
		}
		reply = append([][]byte{self.replyTo, nil}, reply...)
		self.sendToBroker(MDPW_REPLY, nil, reply)
	}

	self.expectReply = true

	for {
		items := zmq.PollItems{
			zmq.PollItem{Socket: self.worker, Events: zmq.POLLIN},
		}

		_, err := zmq.Poll(items, self.heartbeat)
		if err != nil {
			panic(err) // Interrupted
		}

		if item := items[0]; item.REvents&zmq.POLLIN != 0 {
			msg, _ = self.worker.RecvMultipart(0)
			if self.verbose {
				log.Println("I: received message from broker: ")
				Dump(msg)
			}
			self.liveness = HEARTBEAT_LIVENESS
			if len(msg) < 3 {
				panic("Invalid msg")
			}

			header := msg[1]
			if string(header) != MDPW_WORKER {
				panic("Invalid header")
			}

			switch command := string(msg[2]); command {
			case MDPW_REQUEST:
				// We should pop and save as many addresses as there are
				// up to a null part, but for now, just save one...
				self.replyTo = msg[3]
				msg = msg[5:]
				return
			case MDPW_HEARTBEAT:
				// do nothing
			case MDPW_DISCONNECT:
				self.reconnectToBroker()
			default:
				log.Println("E: invalid input message:")
				Dump(msg)
			}
		} else if self.liveness--; self.liveness <= 0 {
			if self.verbose {
				log.Println("W: disconnected from broker - retrying...")
			}
			time.Sleep(self.reconnect)
			self.reconnectToBroker()
		}

		// Send HEARTBEAT if it's time
		if self.heartbeatAt.Before(time.Now()) {
			self.sendToBroker(MDPW_HEARTBEAT, nil, nil)
			self.heartbeatAt = time.Now().Add(self.heartbeat)
		}
	}
}
// This is the main task. It starts the clients and workers, and then
// routes requests between the two layers. Workers signal READY when
// they start; after that we treat them as ready when they reply with
// a response back to a client. The load-balancing data structure is
// just a queue of next available workers.
func main() {
	context, _ := zmq.NewContext()
	defer context.Close()

	frontend, _ := context.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	frontend.Bind("ipc://frontend.ipc")

	backend, _ := context.NewSocket(zmq.ROUTER)
	defer backend.Close()
	backend.Bind("ipc://backend.ipc")

	var client_nbr int
	var worker_nbr int

	for client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++ {
		go client_task()
	}
	for worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++ {
		go worker_task()
	}

	// Here is the main loop for the least-recently-used queue. It has two
	// sockets; a frontend for clients and a backend for workers. It polls
	// the backend in all cases, and polls the frontend only when there are
	// one or more workers ready. This is a neat way to use 0MQ's own queues
	// to hold messages we're not ready to process yet. When we get a client
	// request, we pop the next available worker, and send the request to it,
	// including the originating client identity. When a worker replies, we
	// re-queue that worker, and we forward the reply to the original client,
	// using the reply envelope.

	// Queue of available workers
	available_workers := 0
	var worker_queue []string = make([]string, 0)

	for {
		items := zmq.PollItems{
			zmq.PollItem{Socket: backend, Events: zmq.POLLIN},
			zmq.PollItem{Socket: frontend, Events: zmq.POLLIN},
		}

		// Poll frontend only if we have available workers
		var err error
		if available_workers > 0 {
			_, err = zmq.Poll(items, -1)
		} else {
			_, err = zmq.Poll(items[:1], -1)
		}
		if err != nil {
			break // Interrupted
		}

		// Handle worker activity on backend
		if items[0].REvents&zmq.POLLIN != 0 {
			parts, _ := backend.RecvMultipart(0)

			// Queue worker identity for load-balancing
			worker_id := string(parts[0])
			worker_queue = append(worker_queue, worker_id)
			available_workers++

			// Second frame is empty
			empty := parts[1]

			// Third frame is READY or else a client reply identity
			client_id := parts[2]

			// If client reply, send rest back to frontend
			if string(client_id) != "READY" {
				empty = parts[3]
				reply := parts[4]
				frontend.SendMultipart([][]byte{client_id, empty, reply}, 0)
				client_nbr--
				if client_nbr == 0 {
					// Exit after N messages
					break
				}
			}
		}

		// Here is how we handle a client request:
		if items[1].REvents&zmq.POLLIN != 0 {
			// Get the next client request, route to the least recently
			// used worker. Client request is [identity][empty][request]
			parts, _ := frontend.RecvMultipart(0)
			client_id := parts[0]
			empty := parts[1]
			request := parts[2]

			backend.SendMultipart([][]byte{
				[]byte(worker_queue[0]), empty, client_id, empty, request,
			}, 0)
			worker_queue = worker_queue[1:]
			available_workers--
		}
	}
}
func main() {
	src := rand.NewSource(time.Now().UnixNano())
	random := rand.New(src)

	context, _ := zmq.NewContext()
	defer context.Close()

	worker := WorkerSocket(context)
	liveness := HEARTBEAT_LIVENESS
	interval := INTERVAL_INIT
	heartbeatAt := time.Now().Add(HEARTBEAT_INTERVAL)

	cycles := 0
	for {
		items := zmq.PollItems{
			zmq.PollItem{Socket: worker, Events: zmq.POLLIN},
		}
		zmq.Poll(items, HEARTBEAT_INTERVAL)

		if items[0].REvents&zmq.POLLIN != 0 {
			frames, err := worker.RecvMultipart(0)
			if err != nil {
				panic(err)
			}

			if len(frames) == 3 {
				// A real request: simulate occasional problems
				cycles++
				if cycles > 3 {
					switch r := random.Intn(5); r {
					case 0:
						fmt.Println("I: Simulating a crash")
						worker.Close()
						return
					case 1:
						fmt.Println("I: Simulating CPU overload")
						time.Sleep(3 * time.Second)
					}
				}
				fmt.Println("I: Normal reply")
				worker.SendMultipart(frames, 0)
				liveness = HEARTBEAT_LIVENESS
				time.Sleep(1 * time.Second)
			} else if len(frames) == 1 && string(frames[0]) == PPP_HEARTBEAT {
				fmt.Println("I: Queue heartbeat")
				liveness = HEARTBEAT_LIVENESS
			} else {
				fmt.Println("E: Invalid message")
			}
			interval = INTERVAL_INIT
		} else if liveness--; liveness == 0 {
			fmt.Println("W: Heartbeat failure, can't reach queue")
			fmt.Printf("W: Reconnecting in %ds...\n", interval/time.Second)
			time.Sleep(interval)

			if interval < INTERVAL_MAX {
				interval *= 2
			}
			worker.Close()
			worker = WorkerSocket(context)
			liveness = HEARTBEAT_LIVENESS
		}

		if heartbeatAt.Before(time.Now()) {
			heartbeatAt = time.Now().Add(HEARTBEAT_INTERVAL)
			worker.Send([]byte(PPP_HEARTBEAT), 0)
		}
	}
}
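// WorkerSocket is not shown in this example. A plausible sketch, following
// the Paranoid Pirate pattern the loop above implements (the endpoint, the
// identity scheme, and a PPP_READY constant are assumptions about the
// surrounding code, not taken from it):
func WorkerSocket(context *zmq.Context) *zmq.Socket {
	worker, _ := context.NewSocket(zmq.DEALER)
	worker.SetIdentity(fmt.Sprintf("worker-%04X", rand.Intn(0x10000)))
	worker.Connect("tcp://localhost:5556")

	// Tell the queue we're ready for work
	worker.Send([]byte(PPP_READY), 0)
	return worker
}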
// RunKernel is the main entry point to start the kernel.
func RunKernel(connectionFile string, logwriter io.Writer) {
	logger = log.New(logwriter, "gophernotes ", log.LstdFlags)

	// Set up the "Session" with the replpkg.
	SetupExecutionEnvironment()

	var connInfo ConnectionInfo
	bs, err := ioutil.ReadFile(connectionFile)
	if err != nil {
		log.Fatalln(err)
	}
	if err = json.Unmarshal(bs, &connInfo); err != nil {
		log.Fatalln(err)
	}
	logger.Printf("%+v\n", connInfo)

	// Set up the ZMQ sockets through which the kernel will communicate.
	sockets, err := PrepareSockets(connInfo)
	if err != nil {
		log.Fatalln(err)
	}

	pi := zmq.PollItems{
		zmq.PollItem{Socket: sockets.ShellSocket, Events: zmq.POLLIN},
		zmq.PollItem{Socket: sockets.StdinSocket, Events: zmq.POLLIN},
		zmq.PollItem{Socket: sockets.ControlSocket, Events: zmq.POLLIN},
	}

	// Start a message receiving loop.
	var msgparts [][]byte
	for {
		if _, err = zmq.Poll(pi, -1); err != nil {
			log.Fatalln(err)
		}
		switch {
		case pi[0].REvents&zmq.POLLIN != 0: // shell socket
			msgparts, _ = pi[0].Socket.RecvMultipart(0)
			msg, ids, err := WireMsgToComposedMsg(msgparts, sockets.Key)
			if err != nil {
				log.Println(err)
				return
			}
			HandleShellMsg(MsgReceipt{msg, ids, sockets})
		case pi[1].REvents&zmq.POLLIN != 0: // stdin socket - not implemented.
			pi[1].Socket.RecvMultipart(0)
		case pi[2].REvents&zmq.POLLIN != 0: // control socket - treat like shell socket.
			msgparts, err = pi[2].Socket.RecvMultipart(0)
			if err != nil {
				log.Println(err)
				return
			}
			msg, ids, err := WireMsgToComposedMsg(msgparts, sockets.Key)
			if err != nil {
				log.Println(err)
				return
			}
			HandleShellMsg(MsgReceipt{msg, ids, sockets})
		}
	}
}
func (sub *Subscription) loop() {
	defer sub.Done()
	defer close(sub.Ch)

	ctx, err := GetGlobalContext()
	if err != nil {
		sub.Kill(err)
		return
	}

	// Establish a connection and subscription filter
	socket, err := ctx.NewSocket(zmq.SUB)
	if err != nil {
		sub.Kill(err)
		return
	}
	for _, filter := range sub.filters {
		err = socket.SetSockOptString(zmq.SUBSCRIBE, filter)
		if err != nil {
			sub.Kill(err)
			return
		}
	}
	err = socket.Connect(sub.addr)
	if err != nil {
		sub.Killf("Couldn't connect to %s: %s", sub.addr, err)
		return
	}
	defer socket.Close()

	// Read and stream the results in a channel
	pollItems := []zmq.PollItem{
		zmq.PollItem{Socket: socket, Events: zmq.POLLIN},
	}
	for {
		n, err := zmq.Poll(pollItems, time.Duration(1)*time.Second)
		if err != nil {
			sub.Kill(err)
			return
		}

		select {
		case <-sub.Dying():
			return
		default:
		}

		if n > 0 {
			data, err := socket.Recv(zmq.DONTWAIT)
			if err != nil {
				sub.Kill(err)
				return
			}
			select {
			case sub.Ch <- NewMessage(data):
			case <-sub.Dying():
				return
			}
		}
	}
}