Example #1
// recv receives marshaled data from a 0MQ socket and unmarshals it into a Transit value.
func recv(socket *zmq.Socket, flag zmq.Flag) (t Transit, err error) {
	// Read all frames
	frames, err := socket.RecvMessageBytes(flag)
	if err != nil {
		return nil, err
	}

	sType, err := socket.GetType()
	if err != nil {
		return nil, err
	}

	var routingId []byte
	// If the message came from a ROUTER socket, the first frame is the routingId
	if sType == zmq.ROUTER {
		if len(frames) <= 1 {
			return nil, errors.New("no routingId")
		}
		routingId = frames[0]
		frames = frames[1:]
	}

	t, err = Unmarshal(frames...)
	if err != nil {
		return nil, err
	}

	if sType == zmq.ROUTER {
		t.SetRoutingId(routingId)
	}
	return t, err
}
Example #2
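// run owns the connection's socket for its lifetime: it forwards queued
// requests, services connection requests, and reads responses until quit
// is signalled.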
func (c *WorkerConnection) run(socket *zmq.Socket) {
	defer socket.Close()
	socket.SetRcvtimeo(c.activeTimeout)
	for {
		select {
		case <-c.quit:
			close(c.wait)
			return
		case request := <-c.requestChannel:
			if _, err := socket.SendMessage(request); err != nil {
				log.Println("Failed to send request:", err)
			}
		case request := <-c.connectionChan:
			go c.handleConnectionRequest(socket, request)
		default:
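			// No channel work pending: attempt a receive, bounded by the
			// socket's receive timeout.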
			message, err := socket.RecvMessageBytes(0)
			if err != nil {
				// Needed to yield to goroutines when GOMAXPROCS is 1.
				// Note: The 1.3 preemptive scheduler doesn't seem to work here,
				// so this is still required.
				runtime.Gosched()
				break
			}
			socket.SetRcvtimeo(c.activeTimeout)
			go c.handleResponse(message)
		}
	}
}
Example #3
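// reflector echoes every message it receives back on the same socket until
// the returned channel is signalled; wg is released when the goroutine exits.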
func reflector(t *testing.T, s *zmq.Socket, n int, wg *sync.WaitGroup) chan bool {
	s.SetRcvtimeo(time.Second)
	s.SetSndtimeo(time.Second)
	svcrouter.Barrier()
	wg.Add(1)
	c := make(chan bool)
	go func() {
		/* this is a nasty hack but is OK for a test program I guess */
		for {
			if msg, err := s.RecvMessageBytes(0); err == nil {
				svcrouter.DumpMsg(fmt.Sprintf("reflector %d", n), msg)
				if _, err := s.SendMessage(msg); err != nil {
					wg.Done()
					t.Fatal(err)
					return
				}
			}
			select {
			case <-c:
				wg.Done()
				return
			default:
			}
		}
	}()
	return c
}
Example #4
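// run binds the worker socket and dispatches incoming requests to registered
// worker functions, capping concurrency at maxWorkers and switching the
// receive timeout between idle (passive) and busy (active) states.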
func (w *Worker) run(socket *zmq.Socket) {
	defer close(w.wait)
	defer socket.Close()
	socket.SetRcvtimeo(w.passiveTimeout)
	socket.Bind(w.address)
	atomic.StoreInt32(&w.runningWorkers, 0)
	responseChannel := make(chan *messageContext)
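	// sendResponse writes a finished response back to the socket and, once no
	// handlers remain in flight, drops back to the passive receive timeout.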
	sendResponse := func(response [][]byte) {
		atomic.AddInt32(&w.runningWorkers, -1)
		if _, err := socket.SendMessage(response); err != nil {
			log.Println("Failed to send response:", err)
		}
		if atomic.LoadInt32(&w.runningWorkers) == 0 {
			socket.SetRcvtimeo(w.passiveTimeout)
		}
	}
	for {
		if atomic.LoadInt32(&w.runningWorkers) == w.maxWorkers {
			// We're already running maxWorkers so block until a response is ready
			select {
			case response := <-responseChannel:
				w.logFinish(response)
				sendResponse(response.data)
			case <-w.quit:
				return
			}
		}
		select {
		case <-w.quit:
			return
		case response := <-responseChannel:
			w.logFinish(response)
			sendResponse(response.data)
		default:
			message, err := socket.RecvMessageBytes(0)
			if err != nil {
				// Needed to yield to goroutines when GOMAXPROCS is 1.
				// Note: The 1.3 preemptive scheduler doesn't seem to work here,
				// so this is still required.
				runtime.Gosched()
				break
			}
			request, err := w.unmarshal(message[len(message)-1])
			if err != nil {
				log.Println("Received invalid message", err)
				break
			}
			workerFunction := w.registeredWorkerFunctions[request.Method]
			if workerFunction == nil {
				log.Println("Unregistered worker function:", request.Method)
				break
			}
			context := messageContext{data: message, startTime: time.Now(), method: request.Method}
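			// First request going in flight: switch from the passive to the
			// active receive timeout.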
			if atomic.LoadInt32(&w.runningWorkers) == 0 {
				socket.SetRcvtimeo(w.activeTimeout)
			}
			atomic.AddInt32(&w.runningWorkers, 1)
			go w.runFunction(responseChannel, &context, request.Parameters, workerFunction)
		}
	}
}
Example #5
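// run is the router element's event loop: it polls every peer socket plus a
// control socket pair, forwards buffered frames, routes incoming messages via
// doRouting, and tears everything down when the control channel is closed.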
func (re *RouterElement) run(doRouting func(*RouterElement, *Peer, [][]byte) (*Peer, [][]byte, error), controlIn, controlOut *zmq.Socket) {
	var wg sync.WaitGroup
	controlChanInternal := make(chan *controlMessage, 1)
	wg.Add(1)

	// Start a goroutine to copy from the control channel to the internal
	// control channel and wake up the control socket pair on each message.
	// Quit the goroutine when the control channel itself is closed
	go func() {
		defer func() {
			controlIn.SetLinger(0)
			controlIn.Close()
			wg.Done()
		}()
		msg := [][]byte{[]byte{0}}
		for {
			select {
			case m, ok := <-re.controlChan:
				if !ok {
					close(controlChanInternal)
					// send a message to the control socket to wake it up
					controlIn.SendMessage(msg)
					// ensure we get a message back before closing it
					controlIn.RecvMessageBytes(0)
					return
				}
				controlChanInternal <- m
				controlIn.SendMessageDontwait(msg)
			}
		}
	}()

	defer func() {
		wg.Wait()
		controlOut.SetLinger(0)
		controlOut.Close()
		// Close() should already remove all the peers, so this is just belt & braces
		for _, p := range re.peers {
			p.sock.SetLinger(0)
			p.sock.Close()
			close(p.buffer)
		}
		re.wg.Done()
	}()

	for {
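		// Rebuild the poller each pass from the current peer set; if any
		// peer's outgoing buffer is full, stop polling for input until it
		// drains.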
		poller := zmq.NewPoller()
		pollSource := make([]*Peer, len(re.peers), len(re.peers))
		blockInput := false
		i := 0
		for _, p := range re.peers {
			pollSource[i] = p
			i++
			if p.xmit == nil {
				select {
				case p.xmit = <-p.buffer:
				default:
				}
			}
			buffered := len(p.buffer)
			if p.xmit != nil {
				buffered++
			}
			if buffered >= cap(p.buffer) {
				log.Printf("Blocking input as %s cannot take more data", p.Name)
				blockInput = true
			}
		}
		for _, p := range pollSource {
			flags := zmq.State(0)
			if !blockInput {
				flags |= zmq.POLLIN
			}
			if p.xmit != nil {
				flags |= zmq.POLLOUT
			}
			poller.Add(p.sock, flags)
		}
		idxControl := poller.Add(controlOut, zmq.POLLIN)

		if polled, err := poller.PollAll(-1); err != nil {
			log.Printf("Cannot poll: %v", err)
		} else {
			for i, p := range pollSource {
				if (polled[i].Events&zmq.POLLOUT != 0) && (p.xmit != nil) {
					if _, err := p.sock.SendMessageDontwait(p.xmit); err != nil {
						log.Printf("Peer %s cannot send message: %v", p.Name, err)
					}
					p.xmit = nil
				}
			}
			for i, p := range pollSource {
				if polled[i].Events&zmq.POLLIN != 0 {
					if recv, err := p.sock.RecvMessageBytes(0); err != nil {
						log.Printf("Peer %s cannot receive message: %v", p.Name, err)
					} else {
						if dest, fwd, err := doRouting(re, p, recv); err != nil {
							log.Printf("Peer %s cannot route message: %v", p.Name, err)
						} else {
							if dest != nil && fwd != nil {
								dest.buffer <- fwd
							}
						}
					}
				}
			}
			if polled[idxControl].Events&zmq.POLLIN != 0 {
				// read from the control socket and discard. We don't care
				// if this errors as it's really only to make us read from our
				// own control socket
				controlOut.RecvMessageBytes(0)
				select {
				case msg, ok := <-controlChanInternal:
					if !ok {
						// control channel is closed
						// send a dummy message to handshake
						msg := [][]byte{[]byte{0}}
						controlOut.SendMessage(msg)
						return
					}
					msg.reply <- re.processControlMessage(msg)
				default:
				}
			}
		}
	}
}