Example #1
func main() {
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	backend, _ := zmq.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	defer backend.Close()
	frontend.Bind("tcp://*:5555") //  For clients
	backend.Bind("tcp://*:5556")  //  For workers

	//  Queue of available workers
	workers := make([]string, 0)

	poller1 := zmq.NewPoller()
	poller1.Add(backend, zmq.POLLIN)
	poller2 := zmq.NewPoller()
	poller2.Add(backend, zmq.POLLIN)
	poller2.Add(frontend, zmq.POLLIN)

	//  The body of this example is exactly the same as lbbroker2.
LOOP:
	for {
		//  Poll frontend only if we have available workers
		var sockets []zmq.Polled
		var err error
		if len(workers) > 0 {
			sockets, err = poller2.Poll(-1)
		} else {
			sockets, err = poller1.Poll(-1)
		}
		if err != nil {
			break //  Interrupted
		}
		for _, socket := range sockets {
			switch s := socket.Socket; s {
			case backend: //  Handle worker activity on backend
				//  Use worker identity for load-balancing
				msg, err := s.RecvMessage(0)
				if err != nil {
					break LOOP //  Interrupted
				}
				var identity string
				identity, msg = unwrap(msg)
				workers = append(workers, identity)

				//  Forward message to client if it's not a READY
				if msg[0] != WORKER_READY {
					frontend.SendMessage(msg)
				}

			case frontend:
				//  Get client request, route to first available worker
				msg, err := s.RecvMessage(0)
				if err == nil {
					backend.SendMessage(workers[0], "", msg)
					workers = workers[1:]
				}
			}
		}
	}
}
Example #2
//  Connect or reconnect to broker. In this asynchronous class we use a
//  DEALER socket instead of a REQ socket; this lets us send any number
//  of requests without waiting for a reply.
func (mdcli2 *Mdcli2) ConnectToBroker() (err error) {
	if mdcli2.client != nil {
		mdcli2.client.Close()
		mdcli2.client = nil
	}
	mdcli2.client, err = zmq.NewSocket(zmq.DEALER)
	if err != nil {
		if mdcli2.verbose {
			log.Println("E: ConnectToBroker() creating socket failed")
		}
		return
	}
	mdcli2.poller = zmq.NewPoller()
	mdcli2.poller.Add(mdcli2.client, zmq.POLLIN)

	if mdcli2.verbose {
		log.Printf("I: connecting to broker at %s...", mdcli2.broker)
	}
	err = mdcli2.client.Connect(mdcli2.broker)
	if err != nil && mdcli2.verbose {
		log.Println("E: ConnectToBroker() failed to connect to broker", mdcli2.broker)
	}

	return
}
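The comment above is the point of this example: because the client talks to the broker over a DEALER socket rather than REQ, it can pipeline several requests before reading any replies. A minimal sketch of that pattern, assuming the pebbe zmq binding used throughout these examples plus the standard fmt and time packages (the endpoint and payloads are illustrative, not part of the Mdcli2 API):

func dealerPipelineSketch() {
	client, _ := zmq.NewSocket(zmq.DEALER)
	defer client.Close()
	client.Connect("tcp://localhost:5555") //  illustrative endpoint

	//  Fire off several requests without waiting for any reply;
	//  the empty first frame mimics the envelope a REQ socket would add
	for i := 0; i < 10; i++ {
		client.SendMessage("", fmt.Sprintf("request #%d", i))
	}

	//  Collect whatever replies arrive within one second
	poller := zmq.NewPoller()
	poller.Add(client, zmq.POLLIN)
	deadline := time.Now().Add(time.Second)
	for {
		remaining := deadline.Sub(time.Now())
		if remaining <= 0 {
			break //  Done collecting
		}
		polled, err := poller.Poll(remaining)
		if err != nil || len(polled) == 0 {
			break //  Interrupted or timed out
		}
		reply, _ := client.RecvMessage(0)
		fmt.Println("I: reply:", reply)
	}
}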
Example #3
func main() {

	//  Connect to task ventilator
	receiver, _ := zmq.NewSocket(zmq.PULL)
	defer receiver.Close()
	receiver.Connect("tcp://localhost:5557")

	//  Connect to weather server
	subscriber, _ := zmq.NewSocket(zmq.SUB)
	defer subscriber.Close()
	subscriber.Connect("tcp://localhost:5556")
	subscriber.SetSubscribe("10001 ")

	//  Initialize poll set
	poller := zmq.NewPoller()
	poller.Add(receiver, zmq.POLLIN)
	poller.Add(subscriber, zmq.POLLIN)
	//  Process messages from both sockets
	for {
		sockets, _ := poller.Poll(-1)
		for _, socket := range sockets {
			switch s := socket.Socket; s {
			case receiver:
				task, _ := s.Recv(0)
				//  Process task
				fmt.Println("Got task:", task)
			case subscriber:
				update, _ := s.Recv(0)
				//  Process weather update
				fmt.Println("Got weather update:", update)
			}
		}
	}
}
Example #4
func flcliapi_agent() {

	agent := agent_new()

	poller := zmq.NewPoller()
	poller.Add(agent.pipe, zmq.POLLIN)
	poller.Add(agent.router, zmq.POLLIN)
	for {
		//  Calculate tickless timer, up to 1 hour
		tickless := time.Now().Add(time.Hour)
		if len(agent.request) > 0 && tickless.After(agent.expires) {
			tickless = agent.expires
		}
		for key := range agent.servers {
			tickless = agent.servers[key].tickless(tickless)
		}

		polled, err := poller.Poll(tickless.Sub(time.Now()))
		if err != nil {
			break //  Context has been shut down
		}

		for _, item := range polled {
			switch item.Socket {
			case agent.pipe:
				agent.control_message()
			case agent.router:
				agent.router_message()
			}
		}

		//  If we're processing a request, dispatch to next server
		if len(agent.request) > 0 {
			if time.Now().After(agent.expires) {
				//  Request expired, kill it
				agent.pipe.SendMessage("FAILED")
				agent.request = agent.request[0:0]
			} else {
				//  Find server to talk to, remove any expired ones
				for len(agent.actives) > 0 {
					server := agent.actives[0]
					if time.Now().After(server.expires) {
						agent.actives = agent.actives[1:]
						server.alive = false
					} else {
						agent.router.SendMessage(server.endpoint, agent.request)
						break
					}
				}
			}
		}
		//  --(Disconnect and delete any expired servers)--
		//  Send heartbeats to idle servers if needed
		for key := range agent.servers {
			agent.servers[key].ping(agent.router)
		}
	}
}
Example #5
func main() {
	frontend, _ := zmq.NewSocket(zmq.SUB)
	frontend.Bind("tcp://*:5557")
	backend, _ := zmq.NewSocket(zmq.XPUB)
	backend.Bind("tcp://*:5558")

	//  Subscribe to every single topic from publisher
	frontend.SetSubscribe("")

	//  Store last instance of each topic in a cache
	cache := make(map[string]string)

	//  We route topic updates from frontend to backend, and
	//  we handle subscriptions by sending whatever we cached,
	//  if anything:
	poller := zmq.NewPoller()
	poller.Add(frontend, zmq.POLLIN)
	poller.Add(backend, zmq.POLLIN)
LOOP:
	for {
		polled, err := poller.Poll(1000 * time.Millisecond)
		if err != nil {
			break //  Interrupted
		}

		for _, item := range polled {
			switch socket := item.Socket; socket {
			case frontend:
				//  Any new topic data we cache and then forward
				msg, err := frontend.RecvMessage(0)
				if err != nil {
					break LOOP
				}
				cache[msg[0]] = msg[1]
				backend.SendMessage(msg)
			case backend:
				//  When we get a new subscription we pull data from the cache:
				msg, err := backend.RecvMessage(0)
				if err != nil {
					break LOOP
				}
				frame := msg[0]
				//  Event is one byte 0=unsub or 1=sub, followed by topic
				if frame[0] == 1 {
					topic := frame[1:]
					fmt.Println("Sending cached topic", topic)
					previous, ok := cache[topic]
					if ok {
						backend.SendMessage(topic, previous)
					}
				}
			}
		}
	}
}
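To see the cache in action, a late-joining subscriber only has to connect to the backend and subscribe; the proxy above replays the last cached value for that topic as soon as the subscription arrives. A rough sketch of such a subscriber, assuming the same zmq binding (endpoint and topic are illustrative):

func lvcSubscriberSketch(topic string) {
	subscriber, _ := zmq.NewSocket(zmq.SUB)
	defer subscriber.Close()
	subscriber.Connect("tcp://localhost:5558") //  the XPUB backend bound above
	subscriber.SetSubscribe(topic)

	for {
		//  Each message is [topic][value], whether cached or live
		msg, err := subscriber.RecvMessage(0)
		if err != nil {
			break //  Interrupted
		}
		if len(msg) >= 2 {
			fmt.Println("I:", msg[0], "=", msg[1])
		}
	}
}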
Example #6
func (src ZmqSource) Run() error {
	//  The socket has to run from the same goroutine because it is not thread-safe;
	//  a memory barrier is executed when goroutines are moved between threads.
	//  Reference: https://groups.google.com/forum/#!topic/golang-nuts/eABYrBA5LEk
	defer close(src.Out())

	socket, err := zmqapi.NewSocket(zmqapi.PULL)
	if err != nil {
		log.Fatal(err)
	}
	defer socket.Close()

	socket.SetRcvhwm(src.hwm)
	err = socket.Bind(src.addr)
	if err != nil {
		log.Fatal(err)
		return err
	}

	/* Use a poller to allow for a timeout, alternating between polling the
	   zmq socket and checking the close channel. This introduces a lag on
	   close, but that's probably OK. pebbe/zmq3 (zmqapi) has a Reactor that
	   does something similar, but it has the same lag problem and is far
	   more complex than we need. */
	poller := zmqapi.NewPoller()
	poller.Add(socket, zmqapi.POLLIN)
	count := 0
	sent := 0
	for {
		count++
		sockets, err := poller.Poll(time.Second)
		if err != nil {
			return err
		}

		if len(sockets) > 0 {
			buf, err := socket.RecvBytes(0)
			if err != nil {
				return err
			}
			sent++
			src.Out() <- buf
		}
		select {
		case <-src.StopNotifier:
			log.Println("Closing: count ", count, "Sent:", sent)
			return nil
		default:
		}
	}
}
Example #7
func main() {
	verbose := false
	if len(os.Args) > 1 && os.Args[1] == "-v" {
		verbose = true
	}

	broker, _ := NewBroker(verbose)
	broker.Bind("tcp://*:5555")

	poller := zmq.NewPoller()
	poller.Add(broker.socket, zmq.POLLIN)

	//  Get and process messages forever or until interrupted
	for {
		polled, err := poller.Poll(HEARTBEAT_INTERVAL)
		if err != nil {
			break //  Interrupted
		}

		//  Process next input message, if any
		if len(polled) > 0 {
			msg, err := broker.socket.RecvMessage(0)
			if err != nil {
				break //  Interrupted
			}
			if broker.verbose {
				log.Printf("I: received message: %q\n", msg)
			}
			sender, msg := popStr(msg)
			_, msg = popStr(msg)
			header, msg := popStr(msg)

			switch header {
			case mdapi.MDPC_CLIENT:
				broker.ClientMsg(sender, msg)
			case mdapi.MDPW_WORKER:
				broker.WorkerMsg(sender, msg)
			default:
				log.Printf("E: invalid message: %q\n", msg)
			}
		}
		//  Disconnect and delete any expired workers
		//  Send heartbeats to idle workers if needed
		if time.Now().After(broker.heartbeat_at) {
			broker.Purge()
			for _, worker := range broker.waiting {
				worker.Send(mdapi.MDPW_HEARTBEAT, "", []string{})
			}
			broker.heartbeat_at = time.Now().Add(HEARTBEAT_INTERVAL)
		}
	}
	log.Println("W: interrupt received, shutting down...")
}
Example #8
func s_worker_socket() (*zmq.Socket, *zmq.Poller) {
	worker, _ := zmq.NewSocket(zmq.DEALER)
	worker.Connect("tcp://localhost:5556")

	//  Tell queue we're ready for work
	fmt.Println("I: worker ready")
	worker.Send(PPP_READY, 0)

	poller := zmq.NewPoller()
	poller.Add(worker, zmq.POLLIN)

	return worker, poller
}
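A rough sketch of how the socket/poller pair returned here would be driven. This is not the full Paranoid Pirate worker (no liveness tracking or reconnection), and HEARTBEAT_INTERVAL is assumed to be defined alongside PPP_READY:

func workerLoopSketch() {
	worker, poller := s_worker_socket()
	defer worker.Close()

	for {
		sockets, err := poller.Poll(HEARTBEAT_INTERVAL)
		if err != nil {
			break //  Interrupted
		}
		if len(sockets) == 1 {
			//  Got a request from the queue; echo it back as the "work"
			msg, err := worker.RecvMessage(0)
			if err != nil {
				break //  Interrupted
			}
			worker.SendMessage(msg)
		}
		//  A real worker would send PPP_HEARTBEAT here and track queue liveness
	}
}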
Example #9
func main() {
	//  First argument is this broker's name
	//  Other arguments are our peers' names
	//
	if len(os.Args) < 2 {
		fmt.Println("syntax: peering1 me {you}...")
		os.Exit(1)
	}
	self := os.Args[1]
	fmt.Printf("I: preparing broker at %s...\n", self)
	rand.Seed(time.Now().UnixNano())

	//  Bind state backend to endpoint
	statebe, _ := zmq.NewSocket(zmq.PUB)
	defer statebe.Close()
	statebe.Bind("ipc://" + self + "-state.ipc")

	//  Connect statefe to all peers
	statefe, _ := zmq.NewSocket(zmq.SUB)
	defer statefe.Close()
	statefe.SetSubscribe("")
	for _, peer := range os.Args[2:] {
		fmt.Printf("I: connecting to state backend at '%s'\n", peer)
		statefe.Connect("ipc://" + peer + "-state.ipc")
	}

	//  The main loop sends out status messages to peers, and collects
	//  status messages back from peers. The zmq_poll timeout defines
	//  our own heartbeat:

	poller := zmq.NewPoller()
	poller.Add(statefe, zmq.POLLIN)
	for {
		//  Poll for activity, or 1 second timeout
		sockets, err := poller.Poll(time.Second)
		if err != nil {
			break
		}

		//  Handle incoming status messages
		if len(sockets) == 1 {
			msg, _ := statefe.RecvMessage(0)
			peer_name := msg[0]
			available := msg[1]
			fmt.Printf("%s - %s workers free\n", peer_name, available)
		} else {
			statebe.SendMessage(self, rand.Intn(10))
		}
	}
}
Example #10
func main() {
	//  Socket to receive messages on
	receiver, _ := zmq.NewSocket(zmq.PULL)
	defer receiver.Close()
	receiver.Connect("tcp://localhost:5557")

	//  Socket to send messages to
	sender, _ := zmq.NewSocket(zmq.PUSH)
	defer sender.Close()
	sender.Connect("tcp://localhost:5558")

	//  Socket for control input
	controller, _ := zmq.NewSocket(zmq.SUB)
	defer controller.Close()
	controller.Connect("tcp://localhost:5559")
	controller.SetSubscribe("")

	//  Process messages from receiver and controller
	poller := zmq.NewPoller()
	poller.Add(receiver, zmq.POLLIN)
	poller.Add(controller, zmq.POLLIN)
	//  Process messages from both sockets
LOOP:
	for {
		sockets, _ := poller.Poll(-1)
		for _, socket := range sockets {
			switch s := socket.Socket; s {
			case receiver:
				msg, _ := s.Recv(0)

				//  Do the work
				t, _ := strconv.Atoi(msg)
				time.Sleep(time.Duration(t) * time.Millisecond)

				//  Send results to sink
				sender.Send(msg, 0)

				//  Simple progress indicator for the viewer
				fmt.Printf(".")
			case controller:
				//  Any controller command acts as 'KILL'
				break LOOP //  Exit loop
			}
		}
	}
	fmt.Println()
}
Example #11
File: intface.go Project: yangzhao28/zmq3
func (iface *Intface) agent() {
	//  Create agent instance to pass around
	agent := new_agent()

	//  Send first beacon immediately
	ping_at := time.Now()

	poller := zmq.NewPoller()
	poller.Add(agent.pipe, zmq.POLLIN)
	poller.Add(agent.udp, zmq.POLLIN)

	bcast := &net.UDPAddr{Port: PING_PORT_NUMBER, IP: net.IPv4bcast}
	for {
		timeout := ping_at.Add(time.Millisecond).Sub(time.Now())
		if timeout < 0 {
			timeout = 0
		}
		polled, err := poller.Poll(timeout)
		if err != nil {
			break
		}

		for _, item := range polled {
			switch socket := item.Socket; socket {
			case agent.pipe:
				//  If we had activity on the pipe, go handle the control
				//  message. Current code never sends control messages.
				agent.control_message()

			case agent.udp:
				//  If we had input on the UDP socket, go process that
				agent.handle_beacon()
			}
		}

		//  If we passed the 1-second mark, broadcast our beacon
		now := time.Now()
		if now.After(ping_at) {
			agent.conn.WriteTo(agent.uuid_bytes, bcast)
			ping_at = now.Add(PING_INTERVAL)
		}
		//  Delete and report any expired peers
		for _, peer := range agent.peers {
			agent.reap_peer(peer)
		}
	}
}
Example #12
func try_request(endpoint string, request []string) (reply []string, err error) {
	fmt.Printf("I: trying echo service at %s...\n", endpoint)
	client, _ := zmq.NewSocket(zmq.REQ)
	client.Connect(endpoint)

	//  Send request, wait safely for reply
	client.SendMessage(request)
	poller := zmq.NewPoller()
	poller.Add(client, zmq.POLLIN)
	polled, err := poller.Poll(REQUEST_TIMEOUT)
	reply = []string{}
	if len(polled) == 1 {
		reply, err = client.RecvMessage(0)
	} else {
		err = errors.New("Time out")
	}
	return
}
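A hedged sketch of how try_request is meant to be called in the Freelance pattern: walk a list of candidate endpoints until one answers within REQUEST_TIMEOUT (the endpoints below are illustrative):

func requestSketch() {
	endpoints := []string{"tcp://localhost:5555", "tcp://localhost:5556"} //  illustrative
	for _, endpoint := range endpoints {
		reply, err := try_request(endpoint, []string{"Hello world"})
		if err == nil {
			fmt.Printf("I: service is running OK: %q\n", reply)
			return
		}
		fmt.Printf("W: no response from %s, trying next endpoint\n", endpoint)
	}
	fmt.Println("E: no endpoint responded")
}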
Example #13
func main() {
	//  Prepare our sockets
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	backend, _ := zmq.NewSocket(zmq.DEALER)
	defer backend.Close()
	frontend.Bind("tcp://*:5559")
	backend.Bind("tcp://*:5560")

	//  Initialize poll set
	poller := zmq.NewPoller()
	poller.Add(frontend, zmq.POLLIN)
	poller.Add(backend, zmq.POLLIN)

	//  Switch messages between sockets
	for {
		sockets, _ := poller.Poll(-1)
		for _, socket := range sockets {
			switch s := socket.Socket; s {
			case frontend:
				for {
					msg, _ := s.Recv(0)
					if more, _ := s.GetRcvmore(); more {
						backend.Send(msg, zmq.SNDMORE)
					} else {
						backend.Send(msg, 0)
						break
					}
				}
			case backend:
				for {
					msg, _ := s.Recv(0)
					if more, _ := s.GetRcvmore(); more {
						frontend.Send(msg, zmq.SNDMORE)
					} else {
						frontend.Send(msg, 0)
						break
					}
				}
			}
		}
	}
}
Example #14
File: peering3.go Project: yangzhao28/zmq3
func client_task(i int) {
	client, _ := zmq.NewSocket(zmq.REQ)
	defer client.Close()
	client.Connect("ipc://" + self + "-localfe.ipc")
	monitor, _ := zmq.NewSocket(zmq.PUSH)
	defer monitor.Close()
	monitor.Connect("ipc://" + self + "-monitor.ipc")

	poller := zmq.NewPoller()
	poller.Add(client, zmq.POLLIN)
	for {
		time.Sleep(time.Duration(rand.Intn(5000)) * time.Millisecond)
		for burst := rand.Intn(15); burst > 0; burst-- {
			task_id := fmt.Sprintf("%04X-%s-%d", rand.Intn(0x10000), self, i)

			//  Send request with random hex ID
			client.Send(task_id, 0)

			//  Wait max ten seconds for a reply, then complain
			sockets, err := poller.Poll(10 * time.Second)
			if err != nil {
				break //  Interrupted
			}

			if len(sockets) == 1 {
				reply, err := client.Recv(0)
				if err != nil {
					break //  Interrupted
				}
				//  Worker is supposed to answer us with our task id
				id := strings.Fields(reply)[0]
				if id != task_id {
					panic("id != task_id")
				}
				monitor.Send(reply, 0)
			} else {
				monitor.Send("E: CLIENT EXIT - lost task "+task_id, 0)
				return
			}
		}
	}
}
Example #15
func ProcessEvents(self string, statebe, statefe *zmq.Socket) {
	poller := zmq.NewPoller()
	poller.Add(statefe, zmq.POLLIN)
	for {
		//  Poll for activity, or 1 second timeout
		sockets, err := poller.Poll(time.Second)
		if err != nil {
			break
		}
		//  Handle incoming status messages
		if len(sockets) == 1 {
			msg, _ := statefe.RecvMessage(0)
			peerName := msg[0]
			available := msg[1]
			fmt.Printf("%s - %s workers free\n", peerName, available)
		} else {
			statebe.SendMessage(self, rand.Intn(10))
		}
	}
}
Example #16
//  Connect or reconnect to broker.
func (mdwrk *Mdwrk) ConnectToBroker() (err error) {
	if mdwrk.worker != nil {
		mdwrk.worker.Close()
		mdwrk.worker = nil
	}
	mdwrk.worker, err = zmq.NewSocket(zmq.DEALER)
	err = mdwrk.worker.Connect(mdwrk.broker)
	if mdwrk.verbose {
		log.Printf("I: connecting to broker at %s...\n", mdwrk.broker)
	}
	mdwrk.poller = zmq.NewPoller()
	mdwrk.poller.Add(mdwrk.worker, zmq.POLLIN)

	//  Register service with broker
	err = mdwrk.SendToBroker(MDPW_READY, mdwrk.service, []string{})

	//  If liveness hits zero, queue is considered disconnected
	mdwrk.liveness = heartbeat_liveness
	mdwrk.heartbeat_at = time.Now().Add(mdwrk.heartbeat)

	return
}
Example #17
File: ser.go Project: yangzhao28/rtb
func main() {
	//  Socket to talk to clients
	responder, _ := zmq.NewSocket(zmq.REP)
	defer responder.Close()
	responder.Bind("tcp://*:5555")

	poller := zmq.NewPoller()
	poller.Add(responder, zmq.POLLIN)

	for {
		fmt.Println("wait")
		polled, err := poller.Poll(time.Second)
		if err != nil {
			fmt.Println(err.Error())
			continue
		}
		for _, p := range polled {
			msg, _ := p.Socket.RecvMessage(0)
			fmt.Println("Received ", msg)
			p.Socket.Send("1", 0)
		}
	}
}
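For completeness, the matching client is just a REQ socket that sends a request and blocks on the single-frame reply. A minimal sketch (this client is assumed; it is not part of ser.go):

func clientSketch() {
	requester, _ := zmq.NewSocket(zmq.REQ)
	defer requester.Close()
	requester.Connect("tcp://localhost:5555")

	requester.SendMessage("hello")
	reply, _ := requester.RecvMessage(0)
	fmt.Println("Received", reply)
}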
Example #18
func (client *flclient_t) request(request ...string) (reply []string, err error) {
	reply = []string{}

	//  Prefix request with sequence number and empty envelope
	client.sequence++

	//  Blast the request to all connected servers
	for server := 0; server < client.servers; server++ {
		client.socket.SendMessage("", client.sequence, request)
	}
	//  Wait for a matching reply to arrive from anywhere
	//  Since we can poll several times, calculate each one
	endtime := time.Now().Add(GLOBAL_TIMEOUT)
	poller := zmq.NewPoller()
	poller.Add(client.socket, zmq.POLLIN)
	for time.Now().Before(endtime) {
		polled, err := poller.Poll(endtime.Sub(time.Now()))
		if err == nil && len(polled) > 0 {
			//  Reply is [empty][sequence][OK]
			reply, _ = client.socket.RecvMessage(0)
			if len(reply) != 3 {
				panic("len(reply) != 3")
			}
			sequence := reply[1]
			reply = reply[2:]
			sequence_nbr, _ := strconv.Atoi(sequence)
			if sequence_nbr == client.sequence {
				break
			}
		}
	}
	if len(reply) == 0 {
		err = errors.New("No reply")
	}
	return
}
Example #19
func state_manager() {
	kvmap := make(map[string]*kvsimple.Kvmsg)

	pipe, _ := zmq.NewSocket(zmq.PAIR)
	pipe.Connect("inproc://pipe")
	pipe.SendMessage("READY")
	snapshot, _ := zmq.NewSocket(zmq.ROUTER)
	snapshot.Bind("tcp://*:5556")

	poller := zmq.NewPoller()
	poller.Add(pipe, zmq.POLLIN)
	poller.Add(snapshot, zmq.POLLIN)
	sequence := int64(0) //  Current snapshot version number
LOOP:
	for {
		polled, err := poller.Poll(-1)
		if err != nil {
			break //  Context has been shut down
		}
		for _, item := range polled {
			switch socket := item.Socket; socket {
			case pipe:
				//  Apply state update from main thread
				kvmsg, err := kvsimple.RecvKvmsg(pipe)
				if err != nil {
					break LOOP //  Interrupted
				}
				sequence, _ = kvmsg.GetSequence()
				kvmsg.Store(kvmap)
			case snapshot:
				//  Execute state snapshot request
				msg, err := snapshot.RecvMessage(0)
				if err != nil {
					break LOOP //  Interrupted
				}
				identity := msg[0]
				//  Request is in second frame of message
				request := msg[1]
				if request != "ICANHAZ?" {
					fmt.Println("E: bad request, aborting")
					break LOOP
				}
				//  Send state snapshot to client

				//  For each entry in kvmap, send kvmsg to client
				for _, kvmsg := range kvmap {
					snapshot.Send(identity, zmq.SNDMORE)
					kvmsg.Send(snapshot)
				}

				// Give client some time to deal with it.
				// This reduces the risk that the client won't see
				// the END message, but it doesn't eliminate the risk.
				time.Sleep(100 * time.Millisecond)

				//  Now send END message with sequence number
				fmt.Printf("Sending state shapshot=%d\n", sequence)
				snapshot.Send(identity, zmq.SNDMORE)
				kvmsg := kvsimple.NewKvmsg(sequence)
				kvmsg.SetKey("KTHXBAI")
				kvmsg.SetBody("")
				kvmsg.Send(snapshot)
			}
		}
	}
}
Example #20
File: ppqueue.go Project: pebbe/zmq3
func main() {
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	backend, _ := zmq.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	defer backend.Close()
	frontend.Bind("tcp://*:5555") //  For clients
	backend.Bind("tcp://*:5556")  //  For workers

	//  List of available workers
	workers := make([]worker_t, 0)

	//  Send out heartbeats at regular intervals
	heartbeat_at := time.Tick(HEARTBEAT_INTERVAL)

	poller1 := zmq.NewPoller()
	poller1.Add(backend, zmq.POLLIN)
	poller2 := zmq.NewPoller()
	poller2.Add(backend, zmq.POLLIN)
	poller2.Add(frontend, zmq.POLLIN)

	for {
		//  Poll frontend only if we have available workers
		var sockets []zmq.Polled
		var err error
		if len(workers) > 0 {
			sockets, err = poller2.Poll(HEARTBEAT_INTERVAL)
		} else {
			sockets, err = poller1.Poll(HEARTBEAT_INTERVAL)
		}
		if err != nil {
			break //  Interrupted
		}

		for _, socket := range sockets {
			switch socket.Socket {
			case backend:
				//  Handle worker activity on backend
				//  Use worker identity for load-balancing
				msg, err := backend.RecvMessage(0)
				if err != nil {
					break //  Interrupted
				}

				//  Any sign of life from worker means it's ready
				identity, msg := unwrap(msg)
				workers = s_worker_ready(s_worker_new(identity), workers)

				//  Validate control message, or return reply to client
				if len(msg) == 1 {
					if msg[0] != PPP_READY && msg[0] != PPP_HEARTBEAT {
						fmt.Println("E: invalid message from worker", msg)
					}
				} else {
					frontend.SendMessage(msg)
				}
			case frontend:
				//  Now get next client request, route to next worker
				msg, err := frontend.RecvMessage(0)
				if err != nil {
					break //  Interrupted
				}
				backend.SendMessage(workers[0].identity, msg)
				workers = workers[1:]
			}
		}

		//  We handle heartbeating after any socket activity. First we send
		//  heartbeats to any idle workers if it's time. Then we purge any
		//  dead workers:

		select {
		case <-heartbeat_at:
			for _, worker := range workers {
				backend.SendMessage(worker.identity, PPP_HEARTBEAT)
			}
		default:
		}
		workers = s_workers_purge(workers)
	}
}
Example #21
func main() {
	//  Prepare our sockets
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	backend, _ := zmq.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	defer backend.Close()
	frontend.Bind("ipc://frontend.ipc")
	backend.Bind("ipc://backend.ipc")

	for client_nbr := 0; client_nbr < NBR_CLIENTS; client_nbr++ {
		go client_task()
	}
	for worker_nbr := 0; worker_nbr < NBR_WORKERS; worker_nbr++ {
		go worker_task()
	}

	//  Queue of available workers
	workers := make([]string, 0, 10)

	poller1 := zmq.NewPoller()
	poller1.Add(backend, zmq.POLLIN)
	poller2 := zmq.NewPoller()
	poller2.Add(backend, zmq.POLLIN)
	poller2.Add(frontend, zmq.POLLIN)

LOOP:
	for {
		//  Poll frontend only if we have available workers
		var sockets []zmq.Polled
		var err error
		if len(workers) > 0 {
			sockets, err = poller2.Poll(-1)
		} else {
			sockets, err = poller1.Poll(-1)
		}
		if err != nil {
			break //  Interrupted
		}
		for _, socket := range sockets {
			switch socket.Socket {
			case backend:
				//  Handle worker activity on backend

				//  Use worker identity for load-balancing
				msg, err := backend.RecvMessage(0)
				if err != nil {
					break LOOP //  Interrupted
				}
				identity, msg := unwrap(msg)
				workers = append(workers, identity)

				//  Forward message to client if it's not a READY
				if msg[0] != WORKER_READY {
					frontend.SendMessage(msg)
				}

			case frontend:
				//  Get client request, route to first available worker
				msg, err := frontend.RecvMessage(0)
				if err == nil {
					backend.SendMessage(workers[0], "", msg)
					workers = workers[1:]
				}
			}
		}
	}

	time.Sleep(100 * time.Millisecond)
}
Example #22
func main() {
	snapshot, _ := zmq.NewSocket(zmq.DEALER)
	snapshot.Connect("tcp://localhost:5556")
	subscriber, _ := zmq.NewSocket(zmq.SUB)
	subscriber.SetSubscribe("")
	subscriber.Connect("tcp://localhost:5557")
	publisher, _ := zmq.NewSocket(zmq.PUSH)
	publisher.Connect("tcp://localhost:5558")

	kvmap := make(map[string]*kvsimple.Kvmsg)
	rand.Seed(time.Now().UnixNano())

	//  We first request a state snapshot:
	sequence := int64(0)
	snapshot.SendMessage("ICANHAZ?")
	for {
		kvmsg, err := kvsimple.RecvKvmsg(snapshot)
		if err != nil {
			break //  Interrupted
		}
		if key, _ := kvmsg.GetKey(); key == "KTHXBAI" {
			sequence, _ = kvmsg.GetSequence()
			fmt.Println("I: received snapshot =", sequence)
			break //  Done
		}
		kvmsg.Store(kvmap)
	}
	snapshot.Close()

	//  Now we wait for updates from the server, and every so often, we
	//  send a random key-value update to the server:

	poller := zmq.NewPoller()
	poller.Add(subscriber, zmq.POLLIN)
	alarm := time.Now().Add(1000 * time.Millisecond)
	for {
		tickless := alarm.Sub(time.Now())
		if tickless < 0 {
			tickless = 0
		}
		polled, err := poller.Poll(tickless)
		if err != nil {
			break //  Context has been shut down
		}
		if len(polled) == 1 {
			kvmsg, err := kvsimple.RecvKvmsg(subscriber)
			if err != nil {
				break //  Interrupted
			}

			//  Discard out-of-sequence kvmsgs, incl. heartbeats
			if seq, _ := kvmsg.GetSequence(); seq > sequence {
				sequence = seq
				kvmsg.Store(kvmap)
				fmt.Println("I: received update =", sequence)
			}
		}
		//  If we timed-out, generate a random kvmsg
		if time.Now().After(alarm) {
			kvmsg := kvsimple.NewKvmsg(0)
			kvmsg.SetKey(fmt.Sprint(rand.Intn(10000)))
			kvmsg.SetBody(fmt.Sprint(rand.Intn(1000000)))
			kvmsg.Send(publisher)
			alarm = time.Now().Add(1000 * time.Millisecond)
		}
	}
	fmt.Printf("Interrupted\n%d messages in\n", sequence)
}
Example #23
func main() {
	//  Prepare our sockets
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	backend, _ := zmq.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	defer backend.Close()
	frontend.Bind("ipc://frontend.ipc")
	backend.Bind("ipc://backend.ipc")

	client_nbr := 0
	for ; client_nbr < NBR_CLIENTS; client_nbr++ {
		go client_task()
	}
	for worker_nbr := 0; worker_nbr < NBR_WORKERS; worker_nbr++ {
		go worker_task()
	}

	//  Here is the main loop for the least-recently-used queue. It has two
	//  sockets; a frontend for clients and a backend for workers. It polls
	//  the backend in all cases, and polls the frontend only when there are
	//  one or more workers ready. This is a neat way to use 0MQ's own queues
	//  to hold messages we're not ready to process yet. When we get a client
	//  request, we pop the next available worker, and send the request to it,
	//  including the originating client identity. When a worker replies, we
	//  re-queue that worker, and we forward the reply to the original client,
	//  using the reply envelope.

	//  Queue of available workers
	worker_queue := make([]string, 0, 10)

	poller1 := zmq.NewPoller()
	poller1.Add(backend, zmq.POLLIN)
	poller2 := zmq.NewPoller()
	poller2.Add(backend, zmq.POLLIN)
	poller2.Add(frontend, zmq.POLLIN)

	for client_nbr > 0 {
		//  Poll frontend only if we have available workers
		var sockets []zmq.Polled
		if len(worker_queue) > 0 {
			sockets, _ = poller2.Poll(-1)
		} else {
			sockets, _ = poller1.Poll(-1)
		}
		for _, socket := range sockets {
			switch socket.Socket {
			case backend:

				//  Handle worker activity on backend
				//  Queue worker identity for load-balancing
				worker_id, _ := backend.Recv(0)
				if !(len(worker_queue) < NBR_WORKERS) {
					panic("!(len(worker_queue) < NBR_WORKERS)")
				}
				worker_queue = append(worker_queue, worker_id)

				//  Second frame is empty
				empty, _ := backend.Recv(0)
				if empty != "" {
					panic(fmt.Sprintf("empty is not \"\": %q", empty))
				}

				//  Third frame is READY or else a client reply identity
				client_id, _ := backend.Recv(0)

				//  If client reply, send rest back to frontend
				if client_id != "READY" {
					empty, _ := backend.Recv(0)
					if empty != "" {
						panic(fmt.Sprintf("empty is not \"\": %q", empty))
					}
					reply, _ := backend.Recv(0)
					frontend.Send(client_id, zmq.SNDMORE)
					frontend.Send("", zmq.SNDMORE)
					frontend.Send(reply, 0)
					client_nbr--
				}

			case frontend:
				//  Here is how we handle a client request:

				//  Now get next client request, route to last-used worker
				//  Client request is [identity][empty][request]
				client_id, _ := frontend.Recv(0)
				empty, _ := frontend.Recv(0)
				if empty != "" {
					panic(fmt.Sprintf("empty is not \"\": %q", empty))
				}
				request, _ := frontend.Recv(0)

				backend.Send(worker_queue[0], zmq.SNDMORE)
				backend.Send("", zmq.SNDMORE)
				backend.Send(client_id, zmq.SNDMORE)
				backend.Send("", zmq.SNDMORE)
				backend.Send(request, 0)

				//  Dequeue and drop the next worker identity
				worker_queue = worker_queue[1:]

			}
		}
	}

	time.Sleep(100 * time.Millisecond)
}
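The worker side of this queue is not shown above. A rough sketch of what worker_task might look like, assuming REQ workers: the REQ socket adds and strips its own envelope, so the broker sees [worker][empty][client][empty][reply], which is exactly what the backend handler above expects:

func workerTaskSketch() {
	worker, _ := zmq.NewSocket(zmq.REQ)
	defer worker.Close()
	worker.Connect("ipc://backend.ipc")

	//  Tell the broker we're ready for work
	worker.Send("READY", 0)

	for {
		//  The broker relays [client identity][empty][request]
		msg, err := worker.RecvMessage(0)
		if err != nil {
			return //  Interrupted
		}
		//  Do some "work", then reply through the same envelope
		msg[len(msg)-1] = "OK"
		worker.SendMessage(msg)
	}
}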
Example #24
func main() {
	snapshot, _ := zmq.NewSocket(zmq.ROUTER)
	snapshot.Bind("tcp://*:5556")
	publisher, _ := zmq.NewSocket(zmq.PUB)
	publisher.Bind("tcp://*:5557")
	collector, _ := zmq.NewSocket(zmq.PULL)
	collector.Bind("tcp://*:5558")

	//  The body of the main task collects updates from clients and
	//  publishes them back out to clients:

	sequence := int64(0)
	kvmap := make(map[string]*kvsimple.Kvmsg)

	poller := zmq.NewPoller()
	poller.Add(collector, zmq.POLLIN)
	poller.Add(snapshot, zmq.POLLIN)
LOOP:
	for {
		polled, err := poller.Poll(1000 * time.Millisecond)
		if err != nil {
			break
		}
		for _, item := range polled {
			switch socket := item.Socket; socket {
			case collector:
				//  Apply state update sent from client
				kvmsg, err := kvsimple.RecvKvmsg(collector)
				if err != nil {
					break LOOP //  Interrupted
				}
				sequence++
				kvmsg.SetSequence(sequence)
				kvmsg.Send(publisher)
				kvmsg.Store(kvmap)
				fmt.Println("I: publishing update", sequence)
			case snapshot:
				//  Execute state snapshot request
				msg, err := snapshot.RecvMessage(0)
				if err != nil {
					break LOOP
				}
				identity := msg[0]

				//  Request is in second frame of message
				request := msg[1]
				if request != "ICANHAZ?" {
					fmt.Println("E: bad request, aborting")
					break LOOP
				}
				//  Send state snapshot to client

				//  For each entry in kvmap, send kvmsg to client
				for _, kvmsg := range kvmap {
					snapshot.Send(identity, zmq.SNDMORE)
					kvmsg.Send(snapshot)
				}

				//  Now send END message with sequence number
				fmt.Println("I: sending shapshot =", sequence)
				snapshot.Send(identity, zmq.SNDMORE)
				kvmsg := kvsimple.NewKvmsg(sequence)
				kvmsg.SetKey("KTHXBAI")
				kvmsg.SetBody("")
				kvmsg.Send(snapshot)
			}
		}
	}
	fmt.Printf("Interrupted\n%d messages handled\n", sequence)
}
Example #25
func main() {
	//  Arguments can be either of:
	//      -p  primary server, at tcp://localhost:5001
	//      -b  backup server, at tcp://localhost:5002
	statepub, _ := zmq.NewSocket(zmq.PUB)
	statesub, _ := zmq.NewSocket(zmq.SUB)
	statesub.SetSubscribe("")
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	fsm := &bstar_t{peer_expiry: time.Now().Add(2 * HEARTBEAT)}

	if len(os.Args) == 2 && os.Args[1] == "-p" {
		fmt.Println("I: Primary active, waiting for backup (passive)")
		frontend.Bind("tcp://*:5001")
		statepub.Bind("tcp://*:5003")
		statesub.Connect("tcp://localhost:5004")
		fsm.state = STATE_PRIMARY
	} else if len(os.Args) == 2 && os.Args[1] == "-b" {
		fmt.Println("I: Backup passive, waiting for primary (active)")
		frontend.Bind("tcp://*:5002")
		statepub.Bind("tcp://*:5004")
		statesub.Connect("tcp://localhost:5003")
		fsm.state = STATE_BACKUP
	} else {
		fmt.Println("Usage: bstarsrv { -p | -b }")
		return
	}
	//  We now process events on our two input sockets, and process these
	//  events one at a time via our finite-state machine. Our "work" for
	//  a client request is simply to echo it back:

	//  Set timer for next outgoing state message
	send_state_at := time.Now().Add(HEARTBEAT)

	poller := zmq.NewPoller()
	poller.Add(frontend, zmq.POLLIN)
	poller.Add(statesub, zmq.POLLIN)

LOOP:
	for {
		time_left := send_state_at.Sub(time.Now())
		if time_left < 0 {
			time_left = 0
		}
		polled, err := poller.Poll(time_left)
		if err != nil {
			break //  Context has been shut down
		}
		for _, socket := range polled {
			switch socket.Socket {
			case frontend:
				//  Have a client request
				msg, _ := frontend.RecvMessage(0)
				fsm.event = CLIENT_REQUEST
				if !StateMachine(fsm) {
					//  Answer client by echoing request back
					frontend.SendMessage(msg)
				}
			case statesub:
				//  Have state from our peer, execute as event
				message, _ := statesub.RecvMessage(0)
				i, _ := strconv.Atoi(message[0])
				fsm.event = event_t(i)
				if StateMachine(fsm) {
					break LOOP //  Error, so exit
				}
				fsm.peer_expiry = time.Now().Add(2 * HEARTBEAT)
			}
		}
		//  If we timed-out, send state to peer
		if time.Now().After(send_state_at) {
			statepub.SendMessage(int(fsm.state))
			send_state_at = time.Now().Add(HEARTBEAT)
		}
	}
	fmt.Println("W: interrupted")
}
Example #26
func main() {
	fmt.Println("I: connecting to server...")
	client, err := zmq.NewSocket(zmq.REQ)
	if err != nil {
		panic(err)
	}
	client.Connect(SERVER_ENDPOINT)

	poller := zmq.NewPoller()
	poller.Add(client, zmq.POLLIN)

	sequence := 0
	retries_left := REQUEST_RETRIES
	for retries_left > 0 {
		//  We send a request, then we work to get a reply
		sequence++
		client.SendMessage(sequence)

		for expect_reply := true; expect_reply; {
			//  Poll socket for a reply, with timeout
			sockets, err := poller.Poll(REQUEST_TIMEOUT)
			if err != nil {
				break //  Interrupted
			}

			//  Here we process a server reply and exit our loop if the
			//  reply is valid. If we didn't get a reply, we close the client
			//  socket and resend the request. We try a number of times
			//  before finally abandoning:

			if len(sockets) > 0 {
				//  We got a reply from the server, must match sequence
				reply, err := client.RecvMessage(0)
				if err != nil {
					break //  Interrupted
				}
				seq, _ := strconv.Atoi(reply[0])
				if seq == sequence {
					fmt.Printf("I: server replied OK (%s)\n", reply[0])
					retries_left = REQUEST_RETRIES
					expect_reply = false
				} else {
					fmt.Printf("E: malformed reply from server: %s\n", reply)
				}
			} else {
				retries_left--
				if retries_left == 0 {
					fmt.Println("E: server seems to be offline, abandoning")
					break
				} else {
					fmt.Println("W: no response from server, retrying...")
					//  Old socket is confused; close it and open a new one
					client.Close()
					client, _ = zmq.NewSocket(zmq.REQ)
					client.Connect(SERVER_ENDPOINT)
					// Recreate poller for new client
					poller = zmq.NewPoller()
					poller.Add(client, zmq.POLLIN)
					//  Send request again, on new socket
					client.SendMessage(sequence)
				}
			}
		}
	}
	client.Close()
}
Example #27
func main() {
	//  First argument is this broker's name
	//  Other arguments are our peers' names
	//
	if len(os.Args) < 2 {
		fmt.Println("syntax: peering2 me {you}...")
		os.Exit(1)
	}
	for _, peer := range os.Args[2:] {
		peers[peer] = true
	}

	self := os.Args[1]
	fmt.Println("I: preparing broker at", self)
	rand.Seed(time.Now().UnixNano())

	//  Bind cloud frontend to endpoint
	cloudfe, _ := zmq.NewSocket(zmq.ROUTER)
	defer cloudfe.Close()
	cloudfe.SetIdentity(self)
	cloudfe.Bind("ipc://" + self + "-cloud.ipc")

	//  Connect cloud backend to all peers
	cloudbe, _ := zmq.NewSocket(zmq.ROUTER)
	defer cloudbe.Close()
	cloudbe.SetIdentity(self)
	for _, peer := range os.Args[2:] {
		fmt.Println("I: connecting to cloud frontend at", peer)
		cloudbe.Connect("ipc://" + peer + "-cloud.ipc")
	}
	//  Prepare local frontend and backend
	localfe, _ := zmq.NewSocket(zmq.ROUTER)
	defer localfe.Close()
	localfe.Bind("ipc://" + self + "-localfe.ipc")
	localbe, _ := zmq.NewSocket(zmq.ROUTER)
	defer localbe.Close()
	localbe.Bind("ipc://" + self + "-localbe.ipc")

	//  Get user to tell us when we can start...
	fmt.Print("Press Enter when all brokers are started: ")
	var line string
	fmt.Scanln(&line)

	//  Start local workers
	for worker_nbr := 0; worker_nbr < NBR_WORKERS; worker_nbr++ {
		go worker_task(self, worker_nbr)
	}

	//  Start local clients
	for client_nbr := 0; client_nbr < NBR_CLIENTS; client_nbr++ {
		go client_task(self, client_nbr)
	}

	//  Here we handle the request-reply flow. We're using load-balancing
	//  to poll workers at all times, and clients only when there are one or
	//  more workers available.

	//  Least recently used queue of available workers
	workers := make([]string, 0)

	backends := zmq.NewPoller()
	backends.Add(localbe, zmq.POLLIN)
	backends.Add(cloudbe, zmq.POLLIN)
	frontends := zmq.NewPoller()
	frontends.Add(localfe, zmq.POLLIN)
	frontends.Add(cloudfe, zmq.POLLIN)

	msg := []string{}
	number_of_peers := len(os.Args) - 2

	for {
		//  First, route any waiting replies from workers
		//  If we have no workers anyhow, wait indefinitely
		timeout := time.Second
		if len(workers) == 0 {
			timeout = -1
		}
		sockets, err := backends.Poll(timeout)
		if err != nil {
			log.Println(err)
			break //  Interrupted
		}

		msg = msg[0:0]
		if socketInPolled(localbe, sockets) {
			//  Handle reply from local worker
			msg, err = localbe.RecvMessage(0)
			if err != nil {
				log.Println(err)
				break //  Interrupted
			}
			var identity string
			identity, msg = unwrap(msg)
			workers = append(workers, identity)

			//  If it's READY, don't route the message any further
			if msg[0] == WORKER_READY {
				msg = msg[0:0]
			}
		} else if socketInPolled(cloudbe, sockets) {
			//  Or handle reply from peer broker
			msg, err = cloudbe.RecvMessage(0)
			if err != nil {
				log.Println(err)
				break //  Interrupted
			}

			//  We don't use peer broker identity for anything
			_, msg = unwrap(msg)
		}

		if len(msg) > 0 {
			//  Route reply to cloud if it's addressed to a broker
			if peers[msg[0]] {
				cloudfe.SendMessage(msg)
			} else {
				localfe.SendMessage(msg)
			}
		}

		//  Now we route as many client requests as we have worker capacity
		//  for. We may reroute requests from our local frontend, but not from
		//  the cloud frontend. We reroute randomly now, just to test things
		//  out. In the next version we'll do this properly by calculating
		//  cloud capacity:

		for len(workers) > 0 {
			sockets, err := frontends.Poll(0)
			if err != nil {
				log.Println(err)
				break //  Interrupted
			}
			var reroutable bool
			//  We'll do peer brokers first, to prevent starvation
			if socketInPolled(cloudfe, sockets) {
				msg, _ = cloudfe.RecvMessage(0)
				reroutable = false
			} else if socketInPolled(localfe, sockets) {
				msg, _ = localfe.RecvMessage(0)
				reroutable = true
			} else {
				break //  No work, go back to backends
			}

			//  If reroutable, send to cloud 20% of the time
			//  Here we'd normally use cloud status information
			//
			if reroutable && number_of_peers > 0 && rand.Intn(5) == 0 {
				//  Route to random broker peer
				random_peer := os.Args[2+rand.Intn(number_of_peers)]
				cloudbe.SendMessage(random_peer, "", msg)
			} else {
				localbe.SendMessage(workers[0], "", msg)
				workers = workers[1:]
			}
		}
	}
	fmt.Println("Exit")
}
Example #28
File: zmq3_test.go Project: pebbe/zmq3
func TestPoller(t *testing.T) {

	var sb, sc *zmq.Socket

	defer func() {
		for _, s := range []*zmq.Socket{sb, sc} {
			if s != nil {
				s.SetLinger(0)
				s.Close()
			}
		}
	}()

	sb, err := zmq.NewSocket(zmq.PAIR)
	if err != nil {
		t.Fatal("NewSocket:", err)
	}

	err = sb.Bind("tcp://127.0.0.1:9737")
	if err != nil {
		t.Fatal("sb.Bind:", err)
	}

	sc, err = zmq.NewSocket(zmq.PAIR)
	if err != nil {
		t.Fatal("NewSocket:", err)
	}

	err = sc.Connect("tcp://127.0.0.1:9737")
	if err != nil {
		t.Fatal("sc.Connect:", err)
	}

	poller := zmq.NewPoller()
	idxb := poller.Add(sb, 0)
	idxc := poller.Add(sc, 0)
	if idxb != 0 || idxc != 1 {
		t.Errorf("idxb=%d idxc=%d", idxb, idxc)
	}

	if pa, err := poller.PollAll(100 * time.Millisecond); err != nil {
		t.Error("PollAll 1:", err)
	} else if len(pa) != 2 {
		t.Errorf("PollAll 1 len = %d", len(pa))
	} else if pa[0].Events != 0 || pa[1].Events != 0 {
		t.Errorf("PollAll 1 events = %v, %v", pa[0], pa[1])
	}

	poller.Update(idxb, zmq.POLLOUT)
	poller.UpdateBySocket(sc, zmq.POLLIN)

	if pa, err := poller.PollAll(100 * time.Millisecond); err != nil {
		t.Error("PollAll 2:", err)
	} else if len(pa) != 2 {
		t.Errorf("PollAll 2 len = %d", len(pa))
	} else if pa[0].Events != zmq.POLLOUT || pa[1].Events != 0 {
		t.Errorf("PollAll 2 events = %v, %v", pa[0], pa[1])
	}

	poller.UpdateBySocket(sb, 0)

	content := "12345678ABCDEFGH12345678ABCDEFGH"

	//  Send message from client to server
	if rc, err := sb.Send(content, zmq.DONTWAIT); err != nil {
		t.Error("sb.Send DONTWAIT:", err)
	} else if rc != 32 {
		t.Error("sb.Send DONTWAIT:", err32)
	}

	if pa, err := poller.PollAll(100 * time.Millisecond); err != nil {
		t.Error("PollAll 3:", err)
	} else if len(pa) != 2 {
		t.Errorf("PollAll 3 len = %d", len(pa))
	} else if pa[0].Events != 0 || pa[1].Events != zmq.POLLIN {
		t.Errorf("PollAll 3 events = %v, %v", pa[0], pa[1])
	}

	//  Receive message
	if msg, err := sc.Recv(zmq.DONTWAIT); err != nil {
		t.Error("sb.Recv DONTWAIT:", err)
	} else if msg != content {
		t.Error("sb.Recv msg != content")
	}

	poller.UpdateBySocket(sb, zmq.POLLOUT)
	poller.Update(idxc, zmq.POLLIN)

	if pa, err := poller.PollAll(100 * time.Millisecond); err != nil {
		t.Error("PollAll 4:", err)
	} else if len(pa) != 2 {
		t.Errorf("PollAll 4 len = %d", len(pa))
	} else if pa[0].Events != zmq.POLLOUT || pa[1].Events != 0 {
		t.Errorf("PollAll 4 events = %v, %v", pa[0], pa[1])
	}

	err = sc.Close()
	sc = nil
	if err != nil {
		t.Error("sc.Close:", err)
	}

	err = sb.Close()
	sb = nil
	if err != nil {
		t.Error("sb.Close:", err)
	}
}
Example #29
File: peering3.go Project: yangzhao28/zmq3
func main() {
	//  First argument is this broker's name
	//  Other arguments are our peers' names
	//
	if len(os.Args) < 2 {
		fmt.Println("syntax: peering1 me {you}...")
		os.Exit(1)
	}
	self = os.Args[1]
	fmt.Printf("I: preparing broker at %s...\n", self)
	rand.Seed(time.Now().UnixNano())

	//  Prepare local frontend and backend
	localfe, _ := zmq.NewSocket(zmq.ROUTER)
	defer localfe.Close()
	localfe.Bind("ipc://" + self + "-localfe.ipc")

	localbe, _ := zmq.NewSocket(zmq.ROUTER)
	defer localbe.Close()
	localbe.Bind("ipc://" + self + "-localbe.ipc")

	//  Bind cloud frontend to endpoint
	cloudfe, _ := zmq.NewSocket(zmq.ROUTER)
	defer cloudfe.Close()
	cloudfe.SetIdentity(self)
	cloudfe.Bind("ipc://" + self + "-cloud.ipc")

	//  Connect cloud backend to all peers
	cloudbe, _ := zmq.NewSocket(zmq.ROUTER)
	defer cloudbe.Close()
	cloudbe.SetIdentity(self)
	for _, peer := range os.Args[2:] {
		fmt.Printf("I: connecting to cloud frontend at '%s'\n", peer)
		cloudbe.Connect("ipc://" + peer + "-cloud.ipc")
	}
	//  Bind state backend to endpoint
	statebe, _ := zmq.NewSocket(zmq.PUB)
	defer statebe.Close()
	statebe.Bind("ipc://" + self + "-state.ipc")

	//  Connect state frontend to all peers
	statefe, _ := zmq.NewSocket(zmq.SUB)
	defer statefe.Close()
	statefe.SetSubscribe("")
	for _, peer := range os.Args[2:] {
		fmt.Printf("I: connecting to state backend at '%s'\n", peer)
		statefe.Connect("ipc://" + peer + "-state.ipc")
	}
	//  Prepare monitor socket
	monitor, _ := zmq.NewSocket(zmq.PULL)
	defer monitor.Close()
	monitor.Bind("ipc://" + self + "-monitor.ipc")

	//  After binding and connecting all our sockets, we start our child
	//  tasks - workers and clients:

	for worker_nbr := 0; worker_nbr < NBR_WORKERS; worker_nbr++ {
		go worker_task(worker_nbr)
	}

	//  Start local clients
	for client_nbr := 0; client_nbr < NBR_CLIENTS; client_nbr++ {
		go client_task(client_nbr)
	}

	//  Queue of available workers
	local_capacity := 0
	cloud_capacity := 0
	workers := make([]string, 0)

	primary := zmq.NewPoller()
	primary.Add(localbe, zmq.POLLIN)
	primary.Add(cloudbe, zmq.POLLIN)
	primary.Add(statefe, zmq.POLLIN)
	primary.Add(monitor, zmq.POLLIN)

	secondary1 := zmq.NewPoller()
	secondary1.Add(localfe, zmq.POLLIN)
	secondary2 := zmq.NewPoller()
	secondary2.Add(localfe, zmq.POLLIN)
	secondary2.Add(cloudfe, zmq.POLLIN)

	msg := make([]string, 0)
	for {

		//  If we have no workers ready, wait indefinitely
		timeout := time.Duration(time.Second)
		if local_capacity == 0 {
			timeout = -1
		}
		sockets, err := primary.PollAll(timeout)
		if err != nil {
			break //  Interrupted
		}

		//  Track if capacity changes during this iteration
		previous := local_capacity

		//  Handle reply from local worker
		msg = msg[0:0]

		if sockets[0].Events&zmq.POLLIN != 0 { // 0 == localbe
			msg, err = localbe.RecvMessage(0)
			if err != nil {
				break //  Interrupted
			}
			var identity string
			identity, msg = unwrap(msg)
			workers = append(workers, identity)
			local_capacity++

			//  If it's READY, don't route the message any further
			if msg[0] == WORKER_READY {
				msg = msg[0:0]
			}
		} else if sockets[1].Events&zmq.POLLIN != 0 { // 1 == cloudbe
			//  Or handle reply from peer broker
			msg, err = cloudbe.RecvMessage(0)
			if err != nil {
				break //  Interrupted
			}
			//  We don't use peer broker identity for anything
			_, msg = unwrap(msg)
		}

		if len(msg) > 0 {

			//  Route reply to cloud if it's addressed to a broker
			to_broker := false
			for _, peer := range os.Args[2:] {
				if peer == msg[0] {
					to_broker = true
					break
				}
			}
			if to_broker {
				cloudfe.SendMessage(msg)
			} else {
				localfe.SendMessage(msg)
			}
		}

		//  If we have input messages on our statefe or monitor sockets we
		//  can process these immediately:

		if sockets[2].Events&zmq.POLLIN != 0 { // 2 == statefe
			var status string
			m, _ := statefe.RecvMessage(0)
			_, m = unwrap(m) // peer
			status, _ = unwrap(m)
			cloud_capacity, _ = strconv.Atoi(status)
		}
		if sockets[3].Events&zmq.POLLIN != 0 { // 3 == monitor
			status, _ := monitor.Recv(0)
			fmt.Println(status)
		}
		//  Now route as many clients requests as we can handle. If we have
		//  local capacity we poll both localfe and cloudfe. If we have cloud
		//  capacity only, we poll just localfe. We route any request locally
		//  if we can, else we route to the cloud.

		for local_capacity+cloud_capacity > 0 {
			var sockets []zmq.Polled
			var err error
			if local_capacity > 0 {
				sockets, err = secondary2.PollAll(0)
			} else {
				sockets, err = secondary1.PollAll(0)
			}
			if err != nil {
				panic(err)
			}

			if sockets[0].Events&zmq.POLLIN != 0 { // 0 == localfe
				msg, _ = localfe.RecvMessage(0)
			} else if len(sockets) > 1 && sockets[1].Events&zmq.POLLIN != 0 { // 1 == cloudfe
				msg, _ = cloudfe.RecvMessage(0)
			} else {
				break //  No work, go back to primary
			}

			if local_capacity > 0 {
				localbe.SendMessage(workers[0], "", msg)
				workers = workers[1:]
				local_capacity--
			} else {
				//  Route to random broker peer
				random_peer := rand.Intn(len(os.Args)-2) + 2
				cloudbe.SendMessage(os.Args[random_peer], "", msg)
			}
		}
		//  We broadcast capacity messages to other peers; to reduce chatter
		//  we do this only if our capacity changed.

		if local_capacity != previous {
			//  We stick our own identity onto the envelope
			//  Broadcast new capacity
			statebe.SendMessage(self, "", local_capacity)
		}
	}
}
Example #30
func main() {

	server := []string{"tcp://localhost:5001", "tcp://localhost:5002"}
	server_nbr := 0

	fmt.Printf("I: connecting to server at %s...\n", server[server_nbr])
	client, _ := zmq.NewSocket(zmq.REQ)
	client.Connect(server[server_nbr])

	poller := zmq.NewPoller()
	poller.Add(client, zmq.POLLIN)

	sequence := 0
LOOP:
	for {
		//  We send a request, then we work to get a reply
		sequence++
		client.SendMessage(sequence)

		for expect_reply := true; expect_reply; {
			//  Poll socket for a reply, with timeout
			polled, err := poller.Poll(REQUEST_TIMEOUT)
			if err != nil {
				break LOOP //  Interrupted
			}

			//  We use a Lazy Pirate strategy in the client. If there's no
			//  reply within our timeout, we close the socket and try again.
			//  In Binary Star, it's the client vote which decides which
			//  server is primary; the client must therefore try to connect
			//  to each server in turn:

			if len(polled) == 1 {
				//  We got a reply from the server, must match sequence
				reply, _ := client.RecvMessage(0)
				seq, _ := strconv.Atoi(reply[0])
				if seq == sequence {
					fmt.Printf("I: server replied OK (%s)\n", reply[0])
					expect_reply = false
					time.Sleep(time.Second) //  One request per second
				} else {
					fmt.Printf("E: bad reply from server: %q\n", reply)
				}

			} else {
				fmt.Println("W: no response from server, failing over")

				//  Old socket is confused; close it and open a new one
				client.Close()
				server_nbr = 1 - server_nbr
				time.Sleep(SETTLE_DELAY)
				fmt.Printf("I: connecting to server at %s...\n", server[server_nbr])
				client, _ = zmq.NewSocket(zmq.REQ)
				client.Connect(server[server_nbr])

				poller = zmq.NewPoller()
				poller.Add(client, zmq.POLLIN)

				//  Send request again, on new socket
				client.SendMessage(sequence)
			}
		}
	}
}