Example #1
func main() {
	file := new(ZMQConfigFile)

	fileReader, err := os.Open("config.json")
	if err != nil {
		log.Panicf("Error: %v", err)
	}
	jsonReader := json.NewDecoder(fileReader)
	err = jsonReader.Decode(&file)
	fileReader.Close()
	if err != nil {
		log.Panicf("Error: %v", err)
	}

	serverToMe, _ := zmq.NewSocket(zmq.PULL)
	defer serverToMe.Close()
	applyZMQConfig(serverToMe, file.ServerToMe)
	meToLink, _ := zmq.NewSocket(zmq.PUSH)
	defer meToLink.Close()
	applyZMQConfig(meToLink, file.MeToLink)

	linkToMe, _ := zmq.NewSocket(zmq.XSUB)
	defer linkToMe.Close()
	applyZMQConfig(linkToMe, file.LinkToMe)
	meToServer, _ := zmq.NewSocket(zmq.XPUB)
	defer meToServer.Close()
	applyZMQConfig(meToServer, file.MeToServer)

	go zmq.Proxy(serverToMe, meToLink, nil)
	zmq.Proxy(linkToMe, meToServer, nil)
}
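The ZMQConfigFile type and the applyZMQConfig helper are not shown in this example. A minimal sketch of what they might look like, assuming each config entry carries an endpoint plus a bind/connect flag (field names and JSON tags are illustrative, not taken from the original project):

type ZMQSocketConfig struct {
	Endpoint string `json:"endpoint"`
	Bind     bool   `json:"bind"`
}

type ZMQConfigFile struct {
	ServerToMe ZMQSocketConfig `json:"server_to_me"`
	MeToLink   ZMQSocketConfig `json:"me_to_link"`
	LinkToMe   ZMQSocketConfig `json:"link_to_me"`
	MeToServer ZMQSocketConfig `json:"me_to_server"`
}

// applyZMQConfig binds or connects the socket according to its config entry.
func applyZMQConfig(sock *zmq.Socket, cfg ZMQSocketConfig) {
	var err error
	if cfg.Bind {
		err = sock.Bind(cfg.Endpoint)
	} else {
		err = sock.Connect(cfg.Endpoint)
	}
	if err != nil {
		log.Panicf("Error: %v", err)
	}
}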
Example #2
func BindProxy(frontend string, backend string) {
	context, err := zmq.NewContext()

	if err != nil {
		log.Fatal(err)
	}

	// create XSUB for publishers to connect to
	xSub, _ := context.NewSocket(zmq.XSUB)
	defer xSub.Close()
	err = xSub.Bind(frontend)
	if err != nil {
		log.Fatal(err)
	}

	// create XPUB for subscribers to connect to
	xPub, _ := context.NewSocket(zmq.XPUB)
	defer xPub.Close()

	err = xPub.Bind(backend)
	if err != nil {
		log.Fatal(err)
	}

	err = zmq.Proxy(xSub, xPub, nil)

	log.Fatalln("Proxy interrupted:", err)
}
Example #3
func router(log func(level int, format string, a ...interface{})) {
	// inproc socket for receiving requests from frontend
	frontend, error := zmq.NewSocket(zmq.ROUTER)
	if error != nil {
		log(3, "error creating frontend socket: %s\n", error)
	} else {
		defer frontend.Close()
		log(0, "created frontend socket\n")
	}
	error = frontend.Bind("ipc:///tmp/feeds/upstream")
	if error != nil {
		log(3, "error binding [ipc:///tmp/feeds/upstream] frontend socket: %s\n", error)
	} else {
		log(0, "bound frontend socket to [ipc:///tmp/feeds/upstream]\n")
	}

	//  inproc socket for sending requests to downstream
	backend, error := zmq.NewSocket(zmq.DEALER)
	if error != nil {
		log(3, "error creating backend socket: %s\n", error)
	} else {
		defer backend.Close()
		log(0, "created backend socket\n")
	}
	error = backend.Bind("ipc:///tmp/feeds/downstream")
	if error != nil {
		log(3, "error binding [ipc:///tmp/feeds/downstream] backend socket: %s\n", error)
	} else {
		log(0, "bound backend socket to [ipc:///tmp/feeds/downstream]\n")
	}

	//  Connect backend to frontend via a proxy
	err := zmq.Proxy(frontend, backend, nil)
	log(0, "Proxy interrupted: %s\n", err)
}
Example #4
// see explanation here http://api.zeromq.org/4-0:zmq-proxy#toc2
func RunForwarderProxy(pubPort string, subPort string) {
	var err error

	glog.V(1).Info("pub port :", pubPort, " sub port :", subPort)
	//  Socket facing clients

	frontend, _ := zmq.NewSocket(zmq.XSUB)
	defer frontend.Close()
	err = frontend.Bind("tcp://*:" + pubPort)
	if err != nil {
		glog.Error("Error Binding frontend:", err)
	}

	//  Socket facing services

	backend, _ := zmq.NewSocket(zmq.XPUB)
	defer backend.Close()
	err = backend.Bind("tcp://*:" + subPort)
	if err != nil {
		glog.Error("Error Binding backend:", err)
	}

	//  Start the proxy
	err = zmq.Proxy(frontend, backend, nil)

	glog.Error("Proxy interrupted:", err)

}
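The linked zmq-proxy documentation calls this the forwarder pattern: publishers connect to the XSUB frontend, subscribers connect to the XPUB backend, and subscriptions flow back through the proxy. A hedged usage sketch, assuming the proxy was started on ports 6000/6001 (the ports, topic, and sleep are illustrative only):

func forwarderUsageSketch() {
	go RunForwarderProxy("6000", "6001")

	// A publisher connects to the XSUB frontend.
	pub, _ := zmq.NewSocket(zmq.PUB)
	defer pub.Close()
	pub.Connect("tcp://localhost:6000")

	// A subscriber connects to the XPUB backend and subscribes to a topic.
	sub, _ := zmq.NewSocket(zmq.SUB)
	defer sub.Close()
	sub.Connect("tcp://localhost:6001")
	sub.SetSubscribe("weather")

	// Give the subscription time to travel back through the proxy
	// before publishing, to avoid the usual slow-joiner drop.
	time.Sleep(100 * time.Millisecond)

	pub.Send("weather sunny", 0)
	msg, _ := sub.Recv(0)
	glog.V(1).Info("received: ", msg)
}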
Example #5
// see explanation here http://api.zeromq.org/4-0:zmq-proxy#toc2
func RunStreamerProxy(pushPort string, pullPort string) {
	var err error

	glog.V(1).Info("push port :", pushPort, " pull port :", pullPort)
	//  Socket facing web servers (front end)

	frontend, _ := zmq.NewSocket(zmq.PULL)
	defer frontend.Close()
	err = frontend.Bind("tcp://*:" + pushPort)
	if err != nil {
		glog.Error("Binding frontend:", err)
	}

	//  Socket facing backend workers

	backend, _ := zmq.NewSocket(zmq.PUSH)
	defer backend.Close()
	err = backend.Bind("tcp://*:" + pullPort)
	if err != nil {
		glog.Error("Binding backend:", err)
	}

	//  Start the proxy
	err = zmq.Proxy(frontend, backend, nil)

	glog.Error("Proxy interrupted:", err)

}
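RunStreamerProxy is the PUSH/PULL counterpart of the forwarder: producers push into the PULL frontend and workers pull from the PUSH backend, with the proxy load-balancing between them. A sketch of both ends, again with illustrative port numbers:

func streamerUsageSketch() {
	go RunStreamerProxy("7000", "7001")

	// A producer pushes work into the PULL frontend.
	producer, _ := zmq.NewSocket(zmq.PUSH)
	defer producer.Close()
	producer.Connect("tcp://localhost:7000")

	// A worker pulls work from the PUSH backend.
	worker, _ := zmq.NewSocket(zmq.PULL)
	defer worker.Close()
	worker.Connect("tcp://localhost:7001")

	producer.Send("job-1", 0)
	job, _ := worker.Recv(0)
	glog.V(1).Info("worker got: ", job)
}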
Example #6
func StartZeromqJsonRouterDealerServer(port string) {
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	defer frontend.Close()

	backend, _ := zmq.NewSocket(zmq.DEALER)
	defer backend.Close()

	frontend.Bind("tcp://0.0.0.0:" + port)
	backend.Bind("inproc://backend")

	// start one request processor per CPU
	for i := 0; i < runtime.NumCPU(); i++ {
		go func() {
			responder, _ := zmq.NewSocket(zmq.REP)
			defer responder.Close()

			responder.Connect("inproc://backend")

			for {
				request, _ := responder.RecvBytes(0)

				var body Request

				json.Unmarshal(request, &body)

				response, _ := json.Marshal(Response{Method: body.Method, PayloadLength: len(body.Payload)})

				responder.Send(string(response), 0)
			}
		}()
	}

	err := zmq.Proxy(frontend, backend, nil)
	log.Fatalln(err)
}
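The Request and Response types are not shown; judging by how the handler uses them, they might look roughly like this (field names, JSON tags, and the Payload type are assumptions):

// Hypothetical message types for the JSON ROUTER/DEALER server above.
type Request struct {
	Method  string `json:"method"`
	Payload string `json:"payload"`
}

type Response struct {
	Method        string `json:"method"`
	PayloadLength int    `json:"payload_length"`
}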
Example #7
func main() {

	go listener_thread()
	time.Sleep(time.Millisecond * 5)

	//  This is where the weather server sits
	frontend, _ := zmq.NewSocket(zmq.XSUB)
	defer frontend.Close()
	frontend.Bind(proxy_frontend_url_port)

	//  This is our public endpoint for subscribers
	backend, _ := zmq.NewSocket(zmq.XPUB)
	defer backend.Close()
	backend.Bind(proxy_backend_url_port)

	listener, _ := zmq.NewSocket(zmq.PAIR)
	listener.Connect("inproc://pipe")

	log.Println("0MQ proxy started!")
	log.Println("Frontend protocl/url/port:", proxy_frontend_url_port)
	log.Println("Backend protocol/url/port:", proxy_backend_url_port)

	//  Run the proxy until the user interrupts us
	err := zmq.Proxy(frontend, backend, listener)
	log.Fatalln("Proxy interrupted:", err)
}
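Here the third argument to zmq.Proxy is a capture socket: every message passing through the proxy is also sent to listener. The listener_thread itself is not shown; a minimal sketch of what it could look like, binding the inproc://pipe PAIR endpoint and logging captured traffic (the zguide's espresso example does something similar):

func listener_thread() {
	pipe, _ := zmq.NewSocket(zmq.PAIR)
	defer pipe.Close()
	pipe.Bind("inproc://pipe")

	//  Print everything the proxy captures
	for {
		msg, err := pipe.RecvMessage(0)
		if err != nil {
			break
		}
		log.Println("captured:", msg)
	}
}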
Example #8
func BrokerTask() {
	//  Prepare our sockets
	frontend, _ := zmq.NewSocket(zmq.DEALER)
	frontend.Bind("tcp://*:5555")
	backend, _ := zmq.NewSocket(zmq.DEALER)
	backend.Bind("tcp://*:5556")
	zmq.Proxy(frontend, backend, nil)
}
Example #9
// createSockets sets up the 0MQ sockets through which the kernel will
// communicate.
func createSockets(connInfo *ConnectionInfo) (*zmq.Context, *sockets, error) {
	context, err := zmq.NewContext()
	if err != nil {
		return nil, nil, err
	}

	bindSocket := func(t zmq.Type, port int) (*zmq.Socket, error) {
		addr := fmt.Sprintf(
			"%s://%s:%v", connInfo.Transport, connInfo.IP, port,
		)
		socket, err := context.NewSocket(t)
		if err != nil {
			return nil, err
		}
		if err := socket.Bind(addr); err != nil {
			socket.Close()
			return nil, err
		}
		return socket, nil
	}

	var sockets sockets
	var heartbeatSocket *zmq.Socket

	socketPorts := []struct {
		Name   string
		Port   int
		Type   zmq.Type
		Socket **zmq.Socket
	}{
		{"heartbeat", connInfo.HeartbeatPort, zmq.REP, &heartbeatSocket},
		{"shell", connInfo.ShellPort, zmq.ROUTER, &sockets.Shell},
		{"control", connInfo.ControlPort, zmq.ROUTER, &sockets.Control},
		{"stdin", connInfo.StdinPort, zmq.ROUTER, &sockets.Stdin},
		{"iopub", connInfo.IOPubPort, zmq.PUB, &sockets.IOPub},
	}
	for _, socketPort := range socketPorts {
		socket, err := bindSocket(socketPort.Type, socketPort.Port)
		if err != nil {
			// TODO(axw) do we need to close all sockets if one
			// fails? Is terminating the context good enough?
			if err := context.Term(); err != nil {
				log.Printf("terminating context: %v", err)
			}
			return nil, nil, fmt.Errorf(
				"creating %v socket: %v", socketPort.Name, err,
			)
		}
		*socketPort.Socket = socket
	}

	go zmq.Proxy(heartbeatSocket, heartbeatSocket, nil)
	return context, &sockets, nil
}
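Passing the same socket as both frontend and backend turns zmq.Proxy into an echo service: whatever arrives on the heartbeat REP socket is proxied straight back to the sender, which is exactly what a Jupyter-style heartbeat needs. A sketch of a hypothetical client-side ping against such a heartbeat socket (function name and endpoint are placeholders):

func pingHeartbeatSketch(endpoint string) error {
	ping, err := zmq.NewSocket(zmq.REQ)
	if err != nil {
		return err
	}
	defer ping.Close()

	if err := ping.Connect(endpoint); err != nil {
		return err
	}
	// The proxy echoes the payload back unchanged.
	if _, err := ping.Send("ping", 0); err != nil {
		return err
	}
	reply, err := ping.Recv(0)
	if err != nil {
		return err
	}
	if reply != "ping" {
		return fmt.Errorf("unexpected heartbeat reply: %q", reply)
	}
	return nil
}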
Example #10
// createSockets sets up the 0MQ sockets through which the kernel will
// communicate.
func createSockets(connInfo *ConnectionInfo) (*zmq.Context, *sockets, error) {
	context, err := zmq.NewContext()
	if err != nil {
		return nil, nil, err
	}

	bindSocket := func(t zmq.Type, port int) (*zmq.Socket, error) {
		addr := fmt.Sprintf(
			"%s://%s:%v", connInfo.Transport, connInfo.IP, port,
		)
		socket, err := context.NewSocket(t)
		if err != nil {
			return nil, err
		}
		if err := socket.Bind(addr); err != nil {
			socket.Close()
			return nil, err
		}
		return socket, nil
	}

	sockets := sockets{
		Heartbeat: socket{Name: "heartbeat", Port: connInfo.HeartbeatPort, Type: zmq.REP},
		Shell:     socket{Name: "shell", Port: connInfo.ShellPort, Type: zmq.ROUTER},
		Control:   socket{Name: "control", Port: connInfo.ControlPort, Type: zmq.ROUTER},
		Stdin:     socket{Name: "stdin", Port: connInfo.StdinPort, Type: zmq.ROUTER},
		IOPub:     socket{Name: "iopub", Port: connInfo.IOPubPort, Type: zmq.PUB},
	}

	for _, socketPtr := range sockets.sockets() {
		socket, err := bindSocket(socketPtr.Type, socketPtr.Port)
		if err == nil {
			socketPtr.Socket = socket
			err = socket.SetLinger(0)
		}
		if err != nil {
			sockets.tryClose()
			if err := context.Term(); err != nil {
				log.Printf("error terminating context: %v", err)
			}
			return nil, nil, fmt.Errorf(
				"creating %v socket: %v", socketPtr.Name, err,
			)
		}
	}

	go zmq.Proxy(sockets.Heartbeat.Socket, sockets.Heartbeat.Socket, nil)
	return context, &sockets, nil
}
Example #11
func main() {
	//  This is where the weather server sits
	frontend, _ := zmq.NewSocket(zmq.XSUB)
	defer frontend.Close()
	frontend.Connect("tcp://192.168.55.210:5556")

	//  This is our public endpoint for subscribers
	backend, _ := zmq.NewSocket(zmq.XPUB)
	defer backend.Close()
	backend.Bind("tcp://10.1.1.0:8100")

	//  Run the proxy until the user interrupts us
	err := zmq.Proxy(frontend, backend, nil)
	log.Fatalln("Proxy interrupted:", err)
}
Example #12
// NewClientWithConnection returns a new Client to handle requests to the
// set of services at the other end of the connection.
// It reuses an existing connection in the form of a zmq socket, together
// with the zmq context it was created with.
func NewClientWithConnection(ctx *zmq.Context, conn *zmq.Socket) *Client {
	// A router socket is the middle-man between client requests and actually sending/receiving on the wire
	router, err := ctx.NewSocket(zmq.ROUTER)
	if err != nil {
		glog.Fatal(err)
	}
	if err := router.Bind(RouterURL); err != nil {
		glog.Fatal(err)
	}

	client := &Client{
		conn: conn,
		endpoints: endpoints{
			socket: conn,
		},
		router: router,
		ctx:    ctx,
	}

	// Start the proxy in its own goroutine since it is blocking
	go func() {
		if err := zmq.Proxy(conn, router, nil); err != nil {
			switch zmq.AsErrno(err) {
			case zmq.Errno(zmq.ETERM):
				glog.Info(err)
			case zmq.Errno(syscall.EINTR):
				glog.Info(err)
			default:
				glog.Info(zmq.AsErrno(err))
				glog.Info(err)
			}
			client.Close()
		}
	}()

	// Socket monitor for connect event
	monitorURL := "inproc://monitor"
	if err := conn.Monitor(monitorURL, zmq.EVENT_CONNECTED|zmq.EVENT_DISCONNECTED); err != nil {
		client.Close()
		glog.Fatal(err)
	}
	go client.monitor(monitorURL)

	return client
}
Example #13
func main() {
	//  Start child threads
	go publisher_thread()
	go subscriber_thread()
	go listener_thread()

	time.Sleep(100 * time.Millisecond)

	subscriber, _ := zmq.NewSocket(zmq.XSUB)
	subscriber.Connect("tcp://localhost:6000")
	publisher, _ := zmq.NewSocket(zmq.XPUB)
	publisher.Bind("tcp://*:6001")
	listener, _ := zmq.NewSocket(zmq.PAIR)
	listener.Connect("inproc://pipe")
	zmq.Proxy(subscriber, publisher, listener)

	fmt.Println("interrupted")
}
Example #14
func main() {
	//  Socket to talk to clients
	clients, _ := zmq.NewSocket(zmq.ROUTER)
	defer clients.Close()
	clients.Bind("tcp://*:5555")

	//  Socket to talk to workers
	workers, _ := zmq.NewSocket(zmq.DEALER)
	defer workers.Close()
	workers.Bind("inproc://workers")

	//  Launch pool of worker goroutines
	for thread_nbr := 0; thread_nbr < 5; thread_nbr++ {
		go worker_routine()
	}
	//  Connect work threads to client threads via a queue proxy
	err := zmq.Proxy(clients, workers, nil)
	log.Fatalln("Proxy interrupted:", err)
}
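worker_routine is not shown here; in the zguide's multithreaded server example it is a REP socket that connects to the inproc://workers endpoint and answers each request. A sketch along those lines (the one-second "work" delay is illustrative):

func worker_routine() {
	//  Socket to talk to the dispatcher
	receiver, _ := zmq.NewSocket(zmq.REP)
	defer receiver.Close()
	receiver.Connect("inproc://workers")

	for {
		msg, err := receiver.Recv(0)
		if err != nil {
			break
		}
		log.Println("Received request:", msg)
		//  Do some 'work', then reply
		time.Sleep(time.Second)
		receiver.Send("World", 0)
	}
}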
Example #15
func server_task() {

	//  Frontend socket talks to clients over TCP
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	frontend.Bind("tcp://*:5570")

	//  Backend socket talks to workers over inproc
	backend, _ := zmq.NewSocket(zmq.DEALER)
	defer backend.Close()
	backend.Bind("inproc://backend")

	//  Launch pool of worker threads, precise number is not critical
	for i := 0; i < 5; i++ {
		go server_worker()
	}

	//  Connect backend to frontend via a proxy
	err := zmq.Proxy(frontend, backend, nil)
	log.Fatalln("Proxy interrupted:", err)
}
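server_worker is not shown either; in the zguide's asynchronous-server example it is a DEALER socket connected to inproc://backend that receives the client identity plus payload as a multipart message and replies the same way. A simplified sketch that just echoes each request back to its client:

func server_worker() {
	worker, _ := zmq.NewSocket(zmq.DEALER)
	defer worker.Close()
	worker.Connect("inproc://backend")

	for {
		//  First frame is the client identity, the rest is the request
		msg, err := worker.RecvMessage(0)
		if err != nil {
			break
		}
		identity, content := msg[0], msg[1:]
		//  Echo the content back to the same client
		worker.SendMessage(identity, content)
	}
}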
Example #16
func main() {
	var err error

	//  Socket facing clients
	frontend, _ := zmq.NewSocket(zmq.ROUTER)
	defer frontend.Close()
	err = frontend.Bind("tcp://*:5559")
	if err != nil {
		log.Fatalln("Binding frontend:", err)
	}

	//  Socket facing services
	backend, _ := zmq.NewSocket(zmq.DEALER)
	defer backend.Close()
	err = backend.Bind("tcp://*:5560")
	if err != nil {
		log.Fatalln("Binding backend:", err)
	}

	//  Start the proxy
	err = zmq.Proxy(frontend, backend, nil)
	log.Fatalln("Proxy interrupted:", err)
}
Example #17
/*
	InitZMQTransport creates the ZeroMQ transport.

	It multiplexes incoming connections, which are then processed in separate goroutines (workers).
	The multiplexer spawns goroutines as needed, with 10 worker routines created on startup.
	Every request times out after the provided timeout duration. The ZMQ pattern is:
		zmq.ROUTER(incoming) -> proxy -> zmq.DEALER -> [zmq.REP(worker), zmq.REP...]
*/
func InitZMQTransport(hostname string, timeout time.Duration, logger *log.Logger) (Transport, error) {
	// use default logger if one is not provided
	if logger == nil {
		logger = log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile)
	}
	// initialize ZMQ Context
	context, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}

	// setup router and bind() to tcp address for clients to connect to
	router_sock, err := context.NewSocket(zmq.ROUTER)
	if err != nil {
		return nil, err
	}
	err = router_sock.Bind("tcp://" + hostname)
	if err != nil {
		return nil, err
	}

	// setup dealer
	dealer_sock, err := context.NewSocket(zmq.DEALER)
	if err != nil {
		return nil, err
	}
	err = dealer_sock.Bind("inproc://dendrite-zmqdealer")
	if err != nil {
		return nil, err
	}
	poller := zmq.NewPoller()
	poller.Add(router_sock, zmq.POLLIN)
	poller.Add(dealer_sock, zmq.POLLIN)

	transport := &ZMQTransport{
		lock:              new(sync.Mutex),
		clientTimeout:     timeout,
		ClientTimeout:     timeout,
		minHandlers:       10,
		maxHandlers:       1024,
		incrHandlers:      10,
		activeRequests:    0,
		workerIdleTimeout: 10 * time.Second,
		table:             make(map[string]*localHandler),
		control_c:         make(chan *workerComm),
		dealer_sock:       dealer_sock,
		router_sock:       router_sock,
		zmq_context:       context,
		ZMQContext:        context,
		hooks:             make([]TransportHook, 0),
		Logger:            logger,
	}

	go zmq.Proxy(router_sock, dealer_sock, nil)
	// Scheduler goroutine keeps track of running workers
	// It spawns new ones if needed, and cancels ones that are idling
	go func() {
		sched_ticker := time.NewTicker(60 * time.Second)
		workers := make(map[*workerComm]bool)
		// fire up initial set of workers
		for i := 0; i < transport.minHandlers; i++ {
			go transport.zmq_worker()
		}
		for {
			select {
			case comm := <-transport.control_c:
				// worker sent something...
				msg := <-comm.worker_out
				switch {
				case msg == workerRegisterReq:
					if len(workers) == transport.maxHandlers {
						comm.worker_in <- workerRegisterDenied
						logger.Println("[DENDRITE][INFO]: TransportListener - max number of workers reached")
						continue
					}
					if _, ok := workers[comm]; ok {
						// worker already registered
						continue
					}
					comm.worker_in <- workerRegisterAllowed
					workers[comm] = true
					logger.Println("[DENDRITE][INFO]: TransportListener - registered new worker, total:", len(workers))
				case msg == workerShutdownReq:
					//logger.Println("Got shutdown req")
					if len(workers) > transport.minHandlers {
						comm.worker_in <- workerShutdownAllowed
						for _ = range comm.worker_out {
							// wait until worker closes the channel
						}
						delete(workers, comm)
					} else {
						comm.worker_in <- workerShutdownDenied
					}
				}
			case <-sched_ticker.C:
				// check if requests are piling up and start more workers if that's the case
				if transport.activeRequests > 3*len(workers) {
					for i := 0; i < transport.incrHandlers; i++ {
						go transport.zmq_worker()
					}
				}
			}
		}
	}()
	return transport, nil
}
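The zmq_worker method is not included in this excerpt. Following the pattern in the doc comment (ROUTER -> proxy -> DEALER -> REP workers), a stripped-down stand-in would be a REP socket connected to the dealer's inproc endpoint; the real worker additionally registers with the scheduler via control_c and honours the idle timeout, which is omitted here:

// Simplified sketch only; not the project's actual zmq_worker implementation.
func (transport *ZMQTransport) zmq_worker_sketch() {
	sock, err := transport.zmq_context.NewSocket(zmq.REP)
	if err != nil {
		transport.Logger.Println("[DENDRITE][ERROR]: worker socket:", err)
		return
	}
	defer sock.Close()
	if err := sock.Connect("inproc://dendrite-zmqdealer"); err != nil {
		transport.Logger.Println("[DENDRITE][ERROR]: worker connect:", err)
		return
	}

	for {
		req, err := sock.RecvBytes(0)
		if err != nil {
			return
		}
		// A real worker would decode req, run the transport hooks/handlers,
		// and reply with an encoded response; here we just echo.
		sock.SendBytes(req, 0)
	}
}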
Example #18
// connectToStore connects to the Store service using the following two endpoints:
//  * API: push notifications from master to store
//  * subscription service: notifications from store to nodes
func connectToStore(apiAddr string, subscriptionAddr string) (*store, error) {
	ctx, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}

	// frontend subscribes to all topics on the Store subscriptions service
	frontend, err := ctx.NewSocket(zmq.SUB)
	if err != nil {
		return nil, err
	}
	err = frontend.Connect(fmt.Sprintf("tcp://%s", subscriptionAddr))
	if err != nil {
		return nil, err
	}

	// backend is a local publisher that will help distribute topics locally
	backend, err := ctx.NewSocket(zmq.PUB)
	if err != nil {
		return nil, err
	}
	err = backend.Bind(updateEndpoint)
	if err != nil {
		return nil, err
	}

	// zmq.Proxy blocks until the context terminates, so run it in its own
	// goroutine; otherwise connectToStore would never get past this point.
	go zmq.Proxy(frontend, backend, nil)

	mux, err := router.New(updateEndpoint)
	if err != nil {
		return nil, err
	}

	conn, err := grpc.Dial(apiAddr)
	if err != nil {
		return nil, err
	}
	client := storepb.NewStoreServiceClient(conn)

	s := &store{
		conn:      conn,
		client:    client,
		schedules: make(chan []byte),
		jobs:      make(chan []byte),
		mux:       mux,
		done:      make(chan struct{}),
	}

	if err = mux.Add(service.ScheduleUpdateKey, s.schedules); err != nil {
		return nil, err
	}
	if err = mux.Add(service.JobUpdateKey, s.jobs); err != nil {
		return nil, err
	}

	go s.loop()
	go mux.Run()
	return s, nil
}
Example #19
// Start registers a zmq endpoint at the passed address, answering requests for registered services
func (server *Server) Start(addr string) {
	// Don't use the global context to avoid package level confusion
	ctx, err := zmq.NewContext()
	if err != nil {
		glog.Fatal(err)
	}
	// A router socket handles the actual connection
	sock, _ := ctx.NewSocket(zmq.ROUTER)
	server.conn = sock

	// If no prefix is passed, default to tcp
	if !strings.HasPrefix(addr, "tcp://") {
		addr = "tcp://" + addr
	}
	server.conn.Bind(addr)
	glog.Info("Server listening on ", addr)

	// Socket monitor
	monitorURL := "inproc://monitor"
	if err := server.conn.Monitor(monitorURL, zmq.EVENT_ACCEPTED|zmq.EVENT_DISCONNECTED); err != nil {
		glog.Fatal(err)
	}
	go server.monitor(ctx, monitorURL)

	// A dealer socket multiplexes requests to workers
	mux, _ := ctx.NewSocket(zmq.DEALER)
	defer mux.Close()
	mux.Bind("inproc://mux")

	// Start backing worker processes
	for i := 0; i < server.numWorkers; i++ {
		go func(i int) {
			worker, _ := ctx.NewSocket(zmq.REP)
			defer worker.Close()
			worker.Connect("inproc://mux")
			glog.V(2).Infof("Started worker #%d", i)

			for {
				if server.closing {
					glog.Warning(ErrShutdown)
					break
				}
				reqBytes, err := worker.RecvBytes(0)
				if err != nil {
					switch zmq.AsErrno(err) {
					// If it was interrupted, there is no need to log it as an error
					case zmq.Errno(zmq.ETERM):
						glog.Info(err)
					default:
						// Error receiving is usually fatal
						glog.Error(err)
					}
					break
				}

				// Decode the request envelope
				req := &Request{}
				if err := proto.Unmarshal(reqBytes, req); err != nil {
					glog.Error(err)
					sendError(worker, nil, err)
					continue
				}

				// Make sure it's not expired on arrival
				if req.Expires != nil {
					if time.Unix(*req.Expires, 0).Before(time.Now()) {
						glog.Infof("discarding expired message: '%s'", req.UUID)
						sendError(worker, req, NewExpiredError("message expired on arrival"))
						continue
					}
				}

				serviceName := path.Dir(strings.TrimPrefix(req.GetPath(), "zrpc://"))
				methodName := path.Base(req.GetPath())

				// Make sure a handler for this request exists
				server.mu.RLock()
				service, ok := server.serviceMap[serviceName]
				server.mu.RUnlock()
				if !ok {
					err := fmt.Sprintf("service '%s' is not served", serviceName)
					if serviceName == "." {
						err = "no service name passed"
					}
					glog.Warning(err)
					sendError(worker, req, errors.New(err))
					continue
				}

				// Make sure the message is registered for this server
				if mType, ok := service.method[methodName]; ok {
					// Decode the incoming request message
					var argv reflect.Value
					argIsValue := false // if true, need to indirect before calling.
					if mType.ArgType.Kind() == reflect.Ptr {
						argv = reflect.New(mType.ArgType.Elem())
					} else {
						argv = reflect.New(mType.ArgType)
						argIsValue = true
					}

					// argv guaranteed to be a pointer now.
					if err := proto.Unmarshal(req.Payload, argv.Interface().(proto.Message)); err != nil {
						glog.Error(err)
						sendError(worker, req, err)
						continue
					}

					if argIsValue {
						argv = reflect.Indirect(argv)
					}

					glog.V(3).Infof("Received '%s' (%s)", argv.Type().Elem(), req.UUID)

					// Invoke the method, providing a new value for the reply (if expected)
					var (
						returnValues []reflect.Value
						replyv       reflect.Value
					)
					if mType.ReplyType != nil {
						replyv = reflect.New(mType.ReplyType.Elem())
						returnValues = mType.method.Func.Call([]reflect.Value{service.rcvr, argv, replyv})
					} else {
						returnValues = mType.method.Func.Call([]reflect.Value{service.rcvr, argv})
					}
					// The return value for the method is an error.
					errInter := returnValues[0].Interface()
					if errInter != nil {
						err := errInter.(error)
						sendError(worker, req, err)
						continue
					}

					// Envelope the response message
					envelope := &Response{
						Path: req.Path,
						UUID: req.UUID,
					}

					// Marshal the response message (if exists)
					if mType.ReplyType != nil {
						replyBytes, err := proto.Marshal(replyv.Interface().(proto.Message))
						if err != nil {
							glog.Error(err)
							sendError(worker, req, err)
							continue
						}
						envelope.Payload = replyBytes
					}

					// Marshal the envelope
					envBytes, err := proto.Marshal(envelope)
					if err != nil {
						glog.Error(err)
						sendError(worker, req, err)
						continue
					}

					// Send the response
					if _, err := worker.SendBytes(envBytes, 0); err != nil {
						// Since we could not send, we cannot send an error back either; just log it
						glog.Error(err)
					}
					if mType.ReplyType != nil {
						glog.V(3).Infof("Replied '%s' (%s)", mType.ReplyType.Elem(), envelope.UUID)
					} else {
						glog.V(3).Infof("Replied nil (%s)", envelope.UUID)
					}
				} else {
					// If reached here, the message was not handled by the server
					glog.V(1).Infof("message '%s' is not handled by this service", methodName)
					sendError(worker, req, fmt.Errorf("message '%s' is not handled by this service", methodName))
				}
			}

			glog.Infof("Closing worker #%d", i)
		}(i + 1)
	}

	// This is blocking so we put it last
	if err := zmq.Proxy(sock, mux, nil); err != nil {
		switch zmq.AsErrno(err) {
		// If it was interrupted, there is no need to log it as an error
		case zmq.Errno(syscall.EINTR):
			glog.Info(err)
		case zmq.Errno(zmq.ETERM):
			glog.Info(err)
		default:
			glog.Error(err)
		}
	}

	// Since the proxy call was blocking, it is safe to close the server once we reach this point
	server.Close()
}
Example #20
func NewSockets(cinfo ConnectionFile) (Sockets, error) {
	context, err := zmq4.NewContext()
	if err != nil {
		return Sockets{}, err
	}
	s := Sockets{
		context: context,
		Key:     []byte(cinfo.Key)}

	address := func(port int) string { return fmt.Sprintf("%s://%s:%d", cinfo.Transport, cinfo.Ip, port) }

	// setup heartbeat
	heartBeat, err := context.NewSocket(zmq4.REP)
	if err != nil {
		log.Println("Error in setting up heart beat")
		log.Fatal(err)
	}
	err = heartBeat.Bind(address(cinfo.HbPort))
	if err != nil {
		log.Println("Error in setting up heart beat")
		log.Fatal(err)
	}

	// setup sockets
	s.ShellSocket, err = context.NewSocket(zmq4.ROUTER)
	if err != nil {
		return s, err
	}
	s.ControlSocket, err = context.NewSocket(zmq4.ROUTER)
	if err != nil {
		return s, err
	}
	s.StdinSocket, err = context.NewSocket(zmq4.ROUTER)
	if err != nil {
		return s, err
	}
	s.IOPubSocket, err = context.NewSocket(zmq4.ROUTER)
	if err != nil {
		return s, err
	}
	err = s.ShellSocket.Bind(address(cinfo.ShellPort))
	if err != nil {
		return s, err
	}
	err = s.ControlSocket.Bind(address(cinfo.ControlPort))
	if err != nil {
		return s, err
	}
	err = s.StdinSocket.Bind(address(cinfo.StdinPort))
	if err != nil {
		return s, err
	}
	err = s.IOPubSocket.Bind(address(cinfo.IOpubPort))
	if err != nil {
		return s, err
	}

	go func(heartBeat *zmq4.Socket) {
		// Use a locally scoped error so the goroutine does not race on the
		// outer err after NewSockets has returned.
		if err := zmq4.Proxy(heartBeat, heartBeat, nil); err != nil {
			log.Fatal(err)
		}
	}(heartBeat)

	return s, nil
}