// main starts the load-balancing broker (reactor version): a ROUTER
// frontend for clients and a ROUTER backend for workers, with the
// queue of idle workers kept in lbbroker.workers. Only the backend
// handler is registered here; presumably handle_backend registers a
// frontend handler once workers are available — TODO confirm, the
// rest of the file is not visible in this chunk.
func main() {
	lbbroker := &lbbroker_t{}
	lbbroker.frontend, _ = zmq.NewSocket(zmq.ROUTER)
	lbbroker.backend, _ = zmq.NewSocket(zmq.ROUTER)
	defer lbbroker.frontend.Close()
	defer lbbroker.backend.Close()
	lbbroker.frontend.Bind("ipc://frontend.ipc")
	lbbroker.backend.Bind("ipc://backend.ipc")

	// Start the simulated clients and workers
	for client_nbr := 0; client_nbr < NBR_CLIENTS; client_nbr++ {
		go client_task()
	}
	for worker_nbr := 0; worker_nbr < NBR_WORKERS; worker_nbr++ {
		go worker_task()
	}

	// Queue of available workers
	lbbroker.workers = make([]string, 0, 10)

	// Prepare reactor and fire it up
	lbbroker.reactor = zmq.NewReactor()
	lbbroker.reactor.AddSocket(lbbroker.backend, zmq.POLLIN,
		func(e zmq.State) error { return handle_backend(lbbroker) })
	lbbroker.reactor.Run(-1)
}
func New(primary bool, local, remote string) (bstar *Bstar, err error) { bstar = &Bstar{} // Initialize the Binary Star bstar.Reactor = zmq.NewReactor() if primary { bstar.state = state_PRIMARY } else { bstar.state = state_BACKUP } // Create publisher for state going to peer bstar.statepub, err = zmq.NewSocket(zmq.PUB) bstar.statepub.Bind(local) // Create subscriber for state coming from peer bstar.statesub, err = zmq.NewSocket(zmq.SUB) bstar.statesub.SetSubscribe("") bstar.statesub.Connect(remote) // Set-up basic reactor events bstar.Reactor.AddChannelTime(time.Tick(bstar_HEARTBEAT), 1, func(i interface{}) error { return bstar.send_state() }) bstar.Reactor.AddSocket(bstar.statesub, zmq.POLLIN, func(e zmq.State) error { return bstar.recv_state() }) return }
func main() { // Socket to receive messages on receiver, _ := zmq.NewSocket(zmq.PULL) defer receiver.Close() receiver.Connect("tcp://localhost:5557") // Socket to send messages to sender, _ := zmq.NewSocket(zmq.PUSH) defer sender.Close() sender.Connect("tcp://localhost:5558") // Process tasks forever for { s, _ := receiver.Recv(0) // Simple progress indicator for the viewer fmt.Print(s + ".") // Do the work msec, _ := strconv.Atoi(s) time.Sleep(time.Duration(msec) * time.Millisecond) // Send results to sink sender.Send("", 0) } }
// main reads from two sockets at once using a poller: task messages
// from the ventilator on a PULL socket, and updates matching the
// "10001 " filter from the weather server on a SUB socket.
func main() {
	// Connect to task ventilator
	receiver, _ := zmq.NewSocket(zmq.PULL)
	defer receiver.Close()
	receiver.Connect("tcp://localhost:5557")

	// Connect to weather server
	subscriber, _ := zmq.NewSocket(zmq.SUB)
	defer subscriber.Close()
	subscriber.Connect("tcp://localhost:5556")
	subscriber.SetSubscribe("10001 ")

	// Initialize poll set
	poller := zmq.NewPoller()
	poller.Add(receiver, zmq.POLLIN)
	poller.Add(subscriber, zmq.POLLIN)

	// Process messages from both sockets
	for {
		sockets, _ := poller.Poll(-1)
		for _, socket := range sockets {
			switch s := socket.Socket; s {
			case receiver:
				task, _ := s.Recv(0)
				// Process task
				fmt.Println("Got task:", task)
			case subscriber:
				update, _ := s.Recv(0)
				// Process weather update
				fmt.Println("Got weather update:", update)
			}
		}
	}
}
func main() { // Socket to send messages on sender, _ := zmq.NewSocket(zmq.PUSH) defer sender.Close() sender.Bind("tcp://*:5557") // Socket to send start of batch message on sink, _ := zmq.NewSocket(zmq.PUSH) defer sink.Close() sink.Connect("tcp://localhost:5558") fmt.Print("Press Enter when the workers are ready: ") var line string fmt.Scanln(&line) fmt.Println("Sending tasks to workers...") // The first message is "0" and signals start of batch sink.Send("0", 0) // Initialize random number generator rand.Seed(time.Now().UnixNano()) // Send 100 tasks total_msec := 0 for task_nbr := 0; task_nbr < 100; task_nbr++ { // Random workload from 1 to 100msecs workload := rand.Intn(100) + 1 total_msec += workload s := fmt.Sprintf("%d", workload) sender.Send(s, 0) } fmt.Println("Total expected cost:", time.Duration(total_msec)*time.Millisecond) time.Sleep(time.Second) // Give 0MQ time to deliver }
// main is the sink with a kill signal: it collects 100 task
// confirmations, reports the elapsed time for the batch, and then
// broadcasts "KILL" on a PUB socket so the workers shut down.
func main() {
	// Socket to receive messages on
	receiver, _ := zmq.NewSocket(zmq.PULL)
	defer receiver.Close()
	receiver.Bind("tcp://*:5558")

	// Socket for worker control
	controller, _ := zmq.NewSocket(zmq.PUB)
	defer controller.Close()
	controller.Bind("tcp://*:5559")

	// Wait for start of batch
	receiver.Recv(0)

	// Start our clock now
	start_time := time.Now()

	// Process 100 confirmations, printing ":" every tenth one
	for task_nbr := 0; task_nbr < 100; task_nbr++ {
		receiver.Recv(0)
		if task_nbr%10 == 0 {
			fmt.Print(":")
		} else {
			fmt.Print(".")
		}
	}
	fmt.Println("\nTotal elapsed time:", time.Since(start_time))

	// Send kill signal to workers
	controller.Send("KILL", 0)

	// Finished
	time.Sleep(time.Second) // Give 0MQ time to deliver
}
// main publishes an endless stream of random key-value updates to
// subscribers on port 5557, mirroring every update over an inproc
// PAIR pipe to the state manager goroutine (which serves snapshots
// to late joiners). A send error on either socket ends the loop.
func main() {
	// Prepare our context and sockets
	publisher, _ := zmq.NewSocket(zmq.PUB)
	publisher.Bind("tcp://*:5557")
	sequence := int64(0)
	rand.Seed(time.Now().UnixNano())

	// Start state manager and wait for synchronization signal
	updates, _ := zmq.NewSocket(zmq.PAIR)
	updates.Bind("inproc://pipe")
	go state_manager()
	updates.RecvMessage(0) // "READY"

	for {
		// Distribute as key-value message
		sequence++
		kvmsg := kvsimple.NewKvmsg(sequence)
		kvmsg.SetKey(fmt.Sprint(rand.Intn(10000)))
		kvmsg.SetBody(fmt.Sprint(rand.Intn(1000000)))
		if kvmsg.Send(publisher) != nil {
			break
		}
		if kvmsg.Send(updates) != nil {
			break
		}
	}
	fmt.Printf("Interrupted\n%d messages out\n", sequence)
}
// main runs the clone server: a ROUTER socket on the base port for
// snapshot requests, a PUB on port+1 broadcasting updates, and a
// PULL on port+2 collecting updates from clients. All three are
// driven by one reactor, with a 1-second timer flushing expired
// (TTL) entries.
func main() {
	srv := &clonesrv_t{
		port:  5556,
		kvmap: make(map[string]*kvmsg.Kvmsg),
	}

	// Set up our clone server sockets
	srv.snapshot, _ = zmq.NewSocket(zmq.ROUTER)
	srv.snapshot.Bind(fmt.Sprint("tcp://*:", srv.port))
	srv.publisher, _ = zmq.NewSocket(zmq.PUB)
	srv.publisher.Bind(fmt.Sprint("tcp://*:", srv.port+1))
	srv.collector, _ = zmq.NewSocket(zmq.PULL)
	srv.collector.Bind(fmt.Sprint("tcp://*:", srv.port+2))

	// Register our handlers with reactor
	reactor := zmq.NewReactor()
	reactor.AddSocket(srv.snapshot, zmq.POLLIN,
		func(e zmq.State) error { return snapshots(srv) })
	reactor.AddSocket(srv.collector, zmq.POLLIN,
		func(e zmq.State) error { return collector(srv) })
	reactor.AddChannelTime(time.Tick(1000*time.Millisecond), 1,
		func(v interface{}) error { return flush_ttl(srv) })
	log.Println(reactor.Run(100 * time.Millisecond)) // precision: .1 seconds
}
// main is the load-balancing broker: clients connect to the ROUTER
// frontend on 5555, workers to the ROUTER backend on 5556. Idle
// worker identities are queued; the frontend is only polled while at
// least one worker is available (hence the two pollers).
func main() {
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	backend, _ := zmq.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	defer backend.Close()
	frontend.Bind("tcp://*:5555") // For clients
	backend.Bind("tcp://*:5556")  // For workers

	// Queue of available workers
	workers := make([]string, 0)

	// poller1 watches the backend only; poller2 watches both sockets
	poller1 := zmq.NewPoller()
	poller1.Add(backend, zmq.POLLIN)
	poller2 := zmq.NewPoller()
	poller2.Add(backend, zmq.POLLIN)
	poller2.Add(frontend, zmq.POLLIN)

	// The body of this example is exactly the same as lbbroker2.
LOOP:
	for {
		// Poll frontend only if we have available workers
		var sockets []zmq.Polled
		var err error
		if len(workers) > 0 {
			sockets, err = poller2.Poll(-1)
		} else {
			sockets, err = poller1.Poll(-1)
		}
		if err != nil {
			break // Interrupted
		}
		for _, socket := range sockets {
			switch s := socket.Socket; s {
			case backend:
				// Handle worker activity on backend
				// Use worker identity for load-balancing
				msg, err := s.RecvMessage(0)
				if err != nil {
					break LOOP // Interrupted
				}
				var identity string
				identity, msg = unwrap(msg)
				// The worker is idle again — requeue it
				workers = append(workers, identity)

				// Forward message to client if it's not a READY
				if msg[0] != WORKER_READY {
					frontend.SendMessage(msg)
				}

			case frontend:
				// Get client request, route to first available worker
				msg, err := s.RecvMessage(0)
				if err == nil {
					// Envelope: worker identity, empty delimiter, request
					backend.SendMessage(workers[0], "", msg)
					workers = workers[1:]
				}
			}
		}
	}
}
func main() { // Socket to receive messages on receiver, _ := zmq.NewSocket(zmq.PULL) defer receiver.Close() receiver.Connect("tcp://localhost:5557") // Socket to send messages to sender, _ := zmq.NewSocket(zmq.PUSH) defer sender.Close() sender.Connect("tcp://localhost:5558") // Socket for control input controller, _ := zmq.NewSocket(zmq.SUB) defer controller.Close() controller.Connect("tcp://localhost:5559") controller.SetSubscribe("") // Process messages from receiver and controller chReceive := make(chan string) chControle := make(chan string) go func() { for { msg, e := receiver.Recv(0) if e != nil { break } chReceive <- msg } }() go func() { for { msg, e := controller.Recv(0) if e != nil { break } chControle <- msg } }() // Process messages from both sockets for run := true; run; { select { case msg := <-chReceive: // Do the work t, _ := strconv.Atoi(msg) time.Sleep(time.Duration(t) * time.Millisecond) // Send results to sink sender.Send(msg, 0) // Simple progress indicator for the viewer fmt.Printf(".") case <-chControle: // Any controller command acts as 'KILL' run = false } } fmt.Println() }
func BrokerTask() { // Prepare our sockets frontend, _ := zmq.NewSocket(zmq.DEALER) frontend.Bind("tcp://*:5555") backend, _ := zmq.NewSocket(zmq.DEALER) backend.Bind("tcp://*:5556") zmq.Proxy(frontend, backend, nil) }
// main is a request-reply broker built from goroutines and channels
// instead of a poller: one goroutine per socket feeds each frame
// (plus its "more" flag) into a channel, and the main loop relays a
// complete multipart message to the opposite socket, frame by frame,
// before servicing the other direction.
func main() {
	// Prepare our sockets
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	backend, _ := zmq.NewSocket(zmq.DEALER)
	defer backend.Close()
	frontend.Bind("tcp://*:5559")
	backend.Bind("tcp://*:5560")

	chFront := make(chan *Msg)
	chBack := make(chan *Msg)
	go func() {
		for {
			msg, e := frontend.Recv(0)
			if e != nil {
				break
			}
			more, _ := frontend.GetRcvmore()
			chFront <- &Msg{msg, more}
		}
	}()
	go func() {
		for {
			msg, e := backend.Recv(0)
			if e != nil {
				break
			}
			more, _ := backend.GetRcvmore()
			chBack <- &Msg{msg, more}
		}
	}()
	for {
		select {
		case msg := <-chFront:
			// Relay the whole multipart message to the backend:
			// keep pulling frames until one has no "more" flag
			for {
				if msg.more {
					backend.Send(msg.s, zmq.SNDMORE)
				} else {
					backend.Send(msg.s, 0)
					break
				}
				msg = <-chFront
			}
		case msg := <-chBack:
			// Same relay in the reply direction
			for {
				if msg.more {
					frontend.Send(msg.s, zmq.SNDMORE)
				} else {
					frontend.Send(msg.s, 0)
					break
				}
				msg = <-chBack
			}
		}
	}
}
// TestKvmsg round-trips a Kvmsg over a DEALER-DEALER ipc pair:
// build a message, send it, receive it back, and check the key
// survives. Store is exercised on both the sent and received copies.
func TestKvmsg(t *testing.T) {
	// Prepare our context and sockets
	output, err := zmq.NewSocket(zmq.DEALER)
	if err != nil {
		t.Error(err)
	}
	err = output.Bind("ipc://kvmsg_selftest.ipc")
	if err != nil {
		t.Error(err)
	}
	input, err := zmq.NewSocket(zmq.DEALER)
	if err != nil {
		t.Error(err)
	}
	err = input.Connect("ipc://kvmsg_selftest.ipc")
	if err != nil {
		t.Error(err)
	}

	kvmap := make(map[string]*Kvmsg)

	// Test send and receive of simple message
	kvmsg := NewKvmsg(1)
	kvmsg.SetKey("key")
	kvmsg.SetBody("body")
	kvmsg.Dump()
	err = kvmsg.Send(output)
	kvmsg.Store(kvmap)
	if err != nil {
		t.Error(err)
	}
	kvmsg, err = RecvKvmsg(input)
	if err != nil {
		t.Error(err)
	}
	kvmsg.Dump()
	key, err := kvmsg.GetKey()
	if err != nil {
		t.Error(err)
	}
	if key != "key" {
		t.Error("Expected \"key\", got \"" + key + "\"")
	}
	kvmsg.Store(kvmap)

	// Clean up sockets and the ipc endpoint file
	input.Close()
	output.Close()
	os.Remove("kvmsg_selftest.ipc")
}
// main implements a last-value-cache proxy: it remembers the latest
// message per topic arriving on the SUB frontend and forwards all
// traffic to the XPUB backend; when the backend reports a new
// subscription, the cached value for that topic is replayed so the
// subscriber doesn't wait for the next update.
func main() {
	frontend, _ := zmq.NewSocket(zmq.SUB)
	frontend.Bind("tcp://*:5557")
	backend, _ := zmq.NewSocket(zmq.XPUB)
	backend.Bind("tcp://*:5558")

	// Subscribe to every single topic from publisher
	frontend.SetSubscribe("")

	// Store last instance of each topic in a cache
	cache := make(map[string]string)

	// We route topic updates from frontend to backend, and
	// we handle subscriptions by sending whatever we cached,
	// if anything:
	poller := zmq.NewPoller()
	poller.Add(frontend, zmq.POLLIN)
	poller.Add(backend, zmq.POLLIN)
LOOP:
	for {
		polled, err := poller.Poll(1000 * time.Millisecond)
		if err != nil {
			break // Interrupted
		}
		for _, item := range polled {
			switch socket := item.Socket; socket {
			case frontend:
				// Any new topic data we cache and then forward
				msg, err := frontend.RecvMessage(0)
				if err != nil {
					break LOOP
				}
				cache[msg[0]] = msg[1]
				backend.SendMessage(msg)
			case backend:
				// When we get a new subscription we pull data from the cache:
				msg, err := backend.RecvMessage(0)
				if err != nil {
					break LOOP
				}
				frame := msg[0]
				// Event is one byte 0=unsub or 1=sub, followed by topic
				if frame[0] == 1 {
					topic := frame[1:]
					fmt.Println("Sending cached topic", topic)
					previous, ok := cache[topic]
					if ok {
						backend.SendMessage(topic, previous)
					}
				}
			}
		}
	}
}
func agent_new() (agent *agent_t) { agent = &agent_t{ servers: make(map[string]*server_t), actives: make([]*server_t, 0), request: make([]string, 0), reply: make([]string, 0), } agent.pipe, _ = zmq.NewSocket(zmq.PAIR) agent.pipe.Connect("inproc://pipe") agent.router, _ = zmq.NewSocket(zmq.ROUTER) return }
// main is a clone client: it requests a full state snapshot over a
// DEALER socket, loads it into kvmap, then applies the live update
// stream from the SUB socket, discarding anything at or below the
// snapshot's sequence number.
func main() {
	snapshot, _ := zmq.NewSocket(zmq.DEALER)
	snapshot.Connect("tcp://localhost:5556")

	subscriber, _ := zmq.NewSocket(zmq.SUB)
	subscriber.SetRcvhwm(100000) // or messages between snapshot and next are lost
	subscriber.SetSubscribe("")
	subscriber.Connect("tcp://localhost:5557")
	time.Sleep(time.Second) // or messages between snapshot and next are lost

	kvmap := make(map[string]*kvsimple.Kvmsg)

	// Get state snapshot
	sequence := int64(0)
	snapshot.SendMessage("ICANHAZ?")
	for {
		kvmsg, err := kvsimple.RecvKvmsg(snapshot)
		if err != nil {
			fmt.Println(err)
			break // Interrupted
		}
		// "KTHXBAI" marks the end of the snapshot
		if key, _ := kvmsg.GetKey(); key == "KTHXBAI" {
			sequence, _ = kvmsg.GetSequence()
			fmt.Printf("Received snapshot=%d\n", sequence)
			break // Done
		}
		kvmsg.Store(kvmap)
	}
	snapshot.Close()

	first := true
	// Now apply pending updates, discard out-of-sequence messages
	for {
		kvmsg, err := kvsimple.RecvKvmsg(subscriber)
		if err != nil {
			fmt.Println(err)
			break // Interrupted
		}
		if seq, _ := kvmsg.GetSequence(); seq > sequence {
			sequence, _ = kvmsg.GetSequence()
			kvmsg.Store(kvmap)
			if first {
				// Show what the first regular update is after the snapshot,
				// to see if we missed updates.
				first = false
				fmt.Println("Next:", sequence)
			}
		}
	}
}
func server_new(address string, port int, subtree string) (server *server_t) { server = &server_t{} fmt.Printf("I: adding server %s:%d...\n", address, port) server.address = address server.port = port server.snapshot, _ = zmq.NewSocket(zmq.DEALER) server.snapshot.Connect(fmt.Sprintf("%s:%d", address, port)) server.subscriber, _ = zmq.NewSocket(zmq.SUB) server.subscriber.Connect(fmt.Sprintf("%s:%d", address, port+1)) server.subscriber.SetSubscribe(subtree) return }
func main() { // This is where the weather server sits frontend, _ := zmq.NewSocket(zmq.XSUB) defer frontend.Close() frontend.Connect("tcp://192.168.55.210:5556") // This is our public endpoint for subscribers backend, _ := zmq.NewSocket(zmq.XPUB) defer backend.Close() backend.Bind("tcp://10.1.1.0:8100") // Run the proxy until the user interrupts us zmq.Proxy(frontend, backend, nil) }
// main exchanges availability state between peer brokers: it
// publishes its own (random) worker count on an ipc PUB socket named
// after this broker, subscribes to every peer's state socket, and
// uses the 1-second poll timeout as its heartbeat — if nothing
// arrived, it is our turn to send.
func main() {
	// First argument is this broker's name
	// Other arguments are our peers' names
	//
	if len(os.Args) < 2 {
		fmt.Println("syntax: peering1 me {you}...")
		os.Exit(1)
	}
	self := os.Args[1]
	fmt.Printf("I: preparing broker at %s...\n", self)
	rand.Seed(time.Now().UnixNano())

	// Bind state backend to endpoint
	statebe, _ := zmq.NewSocket(zmq.PUB)
	defer statebe.Close()
	statebe.Bind("ipc://" + self + "-state.ipc")

	// Connect statefe to all peers
	statefe, _ := zmq.NewSocket(zmq.SUB)
	defer statefe.Close()
	statefe.SetSubscribe("")
	for _, peer := range os.Args[2:] {
		fmt.Printf("I: connecting to state backend at '%s'\n", peer)
		statefe.Connect("ipc://" + peer + "-state.ipc")
	}

	// The main loop sends out status messages to peers, and collects
	// status messages back from peers. The zmq_poll timeout defines
	// our own heartbeat:
	poller := zmq.NewPoller()
	poller.Add(statefe, zmq.POLLIN)
	for {
		// Poll for activity, or 1 second timeout
		sockets, err := poller.Poll(time.Second)
		if err != nil {
			break
		}

		// Handle incoming status messages
		if len(sockets) == 1 {
			msg, _ := statefe.RecvMessage(0)
			peer_name := msg[0]
			available := msg[1]
			fmt.Printf("%s - %s workers free\n", peer_name, available)
		} else {
			// Timeout: broadcast our own (simulated) availability
			statebe.SendMessage(self, rand.Intn(10))
		}
	}
}
func worker_task() { worker, _ := zmq.NewSocket(zmq.DEALER) defer worker.Close() set_id(worker) // Set a printable identity worker.Connect("tcp://localhost:5671") total := 0 for { // Tell the broker we're ready for work worker.Send("", zmq.SNDMORE) worker.Send("Hi Boss", 0) // Get workload from broker, until finished worker.Recv(0) // Envelope delimiter workload, _ := worker.Recv(0) if workload == "Fired!" { fmt.Printf("Completed: %d tasks\n", total) break } total++ // Do some random work time.Sleep(time.Duration(rand.Intn(500)+1) * time.Millisecond) } }
func main() { publisher, _ := zmq.NewSocket(zmq.PUB) if len(os.Args) == 2 { publisher.Connect(os.Args[1]) } else { publisher.Bind("tcp://*:5556") } // Ensure subscriber connection has time to complete time.Sleep(time.Second) // Send out all 1,000 topic messages for topic_nbr := 0; topic_nbr < 1000; topic_nbr++ { _, err := publisher.SendMessage(fmt.Sprintf("%03d", topic_nbr), "Save Roger") if err != nil { fmt.Println(err) } } // Send one random update per second rand.Seed(time.Now().UnixNano()) for { time.Sleep(time.Second) _, err := publisher.SendMessage(fmt.Sprintf("%03d", rand.Intn(1000)), "Off with his head!") if err != nil { fmt.Println(err) } } }
func worker_task() { worker, _ := zmq.NewSocket(zmq.REQ) defer worker.Close() // set_id(worker) worker.Connect("ipc://backend.ipc") // Tell broker we're ready for work worker.Send("READY", 0) for { // Read and save all frames until we get an empty frame // In this example there is only 1 but it could be more identity, _ := worker.Recv(0) empty, _ := worker.Recv(0) if empty != "" { panic(fmt.Sprintf("empty is not \"\": %q", empty)) } // Get request, send reply request, _ := worker.Recv(0) fmt.Println("Worker:", request) worker.Send(identity, zmq.SNDMORE) worker.Send("", zmq.SNDMORE) worker.Send("OK", 0) } }
func NewSubscriber(endpoint string) (Subscriber, error) { sub, err := zmq3.NewSocket(zmq3.SUB) if err != nil { return nil, err } return v3Subscriber{Socket: sub}, nil }
// Connect or reconnect to broker. In this asynchronous class we use a
// DEALER socket instead of a REQ socket; this lets us send any number
// of requests without waiting for a reply.
//
// Any existing client socket is closed first, and a fresh poller is
// built around the new socket. Returns the error from socket
// creation or connection, logging it when verbose is on.
func (mdcli2 *Mdcli2) ConnectToBroker() (err error) {
	// Drop the old socket before reconnecting
	if mdcli2.client != nil {
		mdcli2.client.Close()
		mdcli2.client = nil
	}
	mdcli2.client, err = zmq.NewSocket(zmq.DEALER)
	if err != nil {
		if mdcli2.verbose {
			log.Println("E: ConnectToBroker() creating socket failed")
		}
		return
	}
	mdcli2.poller = zmq.NewPoller()
	mdcli2.poller.Add(mdcli2.client, zmq.POLLIN)
	if mdcli2.verbose {
		log.Printf("I: connecting to broker at %s...", mdcli2.broker)
	}
	err = mdcli2.client.Connect(mdcli2.broker)
	if err != nil && mdcli2.verbose {
		log.Println("E: ConnectToBroker() failed to connect to broker", mdcli2.broker)
	}
	return
}
func New() (flcliapi *Flcliapi) { flcliapi = &Flcliapi{} flcliapi.pipe, _ = zmq.NewSocket(zmq.PAIR) flcliapi.pipe.Bind("inproc://pipe") go flcliapi_agent() return }
// client_task is one asynchronous client: a goroutine sends one
// request per second while the main loop polls non-blockingly for
// replies every 10ms. The mutex serializes access to the socket,
// since zmq sockets are not safe for concurrent use from two
// goroutines.
func client_task() {
	var mu sync.Mutex

	client, _ := zmq.NewSocket(zmq.DEALER)
	defer client.Close()

	// Set random identity to make tracing easier
	set_id(client)
	client.Connect("tcp://localhost:5570")

	// Sender goroutine: one numbered request per second
	go func() {
		for request_nbr := 1; true; request_nbr++ {
			time.Sleep(time.Second)
			mu.Lock()
			client.SendMessage(fmt.Sprintf("request #%d", request_nbr))
			mu.Unlock()
		}
	}()

	// Receiver loop: non-blocking poll for replies
	for {
		time.Sleep(10 * time.Millisecond)
		mu.Lock()
		msg, err := client.RecvMessage(zmq.DONTWAIT)
		if err == nil {
			id, _ := client.GetIdentity()
			fmt.Println(msg[0], id)
		}
		mu.Unlock()
	}
}
// main is a worker that deliberately misbehaves: after a few normal
// cycles it randomly either crashes (exits) or simulates a CPU
// overload (3-second stall), so the supervising broker/queue can be
// tested for failure handling.
func main() {
	worker, _ := zmq.NewSocket(zmq.REQ)
	defer worker.Close()

	// Set random identity to make tracing easier
	rand.Seed(time.Now().UnixNano())
	identity := fmt.Sprintf("%04X-%04X", rand.Intn(0x10000), rand.Intn(0x10000))
	worker.SetIdentity(identity)
	worker.Connect("tcp://localhost:5556")

	// Tell broker we're ready for work
	fmt.Printf("I: (%s) worker ready\n", identity)
	worker.Send(WORKER_READY, 0)

	for cycles := 0; true; {
		msg, err := worker.RecvMessage(0)
		if err != nil {
			break // Interrupted
		}

		// Simulate various problems, after a few cycles
		cycles++
		if cycles > 3 && rand.Intn(5) == 0 {
			fmt.Printf("I: (%s) simulating a crash\n", identity)
			break
		} else if cycles > 3 && rand.Intn(5) == 0 {
			fmt.Printf("I: (%s) simulating CPU overload\n", identity)
			time.Sleep(3 * time.Second)
		}
		fmt.Printf("I: (%s) normal reply\n", identity)
		time.Sleep(time.Second) // Do some heavy work
		worker.SendMessage(msg)
	}
}
func main() { // Prepare our socket receiver, _ := zmq.NewSocket(zmq.PULL) defer receiver.Close() receiver.Bind("tcp://*:5558") // Wait for start of batch receiver.Recv(0) // Start our clock now start_time := time.Now() // Process 100 confirmations for task_nbr := 0; task_nbr < 100; task_nbr++ { receiver.Recv(0) if task_nbr%10 == 0 { fmt.Print(":") } else { fmt.Print(".") } } // Calculate and report duration of batch fmt.Println("\nTotal elapsed time:", time.Since(start_time)) }
func TestConnectResolve(t *testing.T) { sock, err := zmq.NewSocket(zmq.PUB) if err != nil { t.Fatal("NewSocket:", err) } defer func() { if sock != nil { sock.SetLinger(0) sock.Close() } }() err = sock.Connect("tcp://localhost:1234") if err != nil { t.Error("sock.Connect:", err) } fails := []string{ "tcp://localhost:invalid", "tcp://in val id:1234", "invalid://localhost:1234", } for _, fail := range fails { if err = sock.Connect(fail); err == nil { t.Errorf("Connect %s, expected fail, got success", fail) } } err = sock.Close() sock = nil if err != nil { t.Error("sock.Close:", err) } }
func main() { broker, _ := zmq.NewSocket(zmq.ROUTER) defer broker.Close() broker.Bind("tcp://*:5671") rand.Seed(time.Now().Unix()) for worker_nbr := 0; worker_nbr < NBR_WORKERS; worker_nbr++ { go worker_task() } // Run for five seconds and then tell workers to end start_time := time.Now() workers_fired := 0 for { // Next message gives us least recently used worker identity, _ := broker.Recv(0) broker.Send(identity, zmq.SNDMORE) broker.Recv(0) // Envelope delimiter broker.Recv(0) // Response from worker broker.Send("", zmq.SNDMORE) // Encourage workers until it's time to fire them if time.Since(start_time) < 5*time.Second { broker.Send("Work harder", 0) } else { broker.Send("Fired!", 0) workers_fired++ if workers_fired == NBR_WORKERS { break } } } time.Sleep(time.Second) }