// TODO: connectAsync()
//
// connect dials the route's address, retrying with backoff up to
// routeConnectRetries times, and on success starts the send and receive
// goroutines.
func (ro *Route) connect() error {
	var netconn net.Conn
	var err error
	for i := 0; i < routeConnectRetries; i++ {
		netconn, err = net.Dial("tcp", ro.addr)
		if err == nil {
			break
		}
		log.Debug(err)
		waitBackoff(i)
	}
	if err != nil {
		// TODO: Cancel all queued requests if we give up reconnecting.
		return err
	}
	log.Debug("Connected to ", netconn.RemoteAddr())

	ro.conn = newConn(netconn)
	ro.closed = make(chan struct{})
	ro.exited = make(chan struct{})
	go ro.send()
	go ro.recv()
	return nil
}
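// A possible shape for the connectAsync() variant noted in the TODO above.
// This is only a sketch under the assumption that it should wrap the
// blocking connect() in a goroutine and report the result on a channel;
// it is not part of the current API.
func (ro *Route) connectAsync() <-chan error {
	errc := make(chan error, 1) // buffered so the goroutine never leaks
	go func() {
		errc <- ro.connect()
	}()
	return errc
}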
// test hammers the "com.bar" binding in a loop: it sends "ping<n>" and
// expects "pong" + payload back, counting successful round trips in cntr.
func test(n int, dom *goa.Domain) {
	bind, err := dom.Bind("com.bar")
	if err != nil {
		panic(err)
	}
	payld := []byte("ping" + strconv.Itoa(n))
	rsp2 := "pong" + string(payld)
	for {
		req := bind.NewRequest(payld)
		// XXX req.SetTimeout(time.Second)
		if err := req.Send(); err != nil {
			panic(err)
		}
		log.Debug("SEND ", string(payld))
		rsp, err := req.Recv()
		if err != nil {
			panic(err)
		}
		if rsp2 != string(rsp) {
			panic("unexpected response")
		}
		log.Debug("RECV ", string(rsp))
		atomic.AddUint64(&cntr, 1)
	}
}
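// Hedged sketch of the server side the test above expects: a handler
// registered through Domain.Announce (defined below) that answers every
// payload with "pong" + payload and leaves the sequence number untouched.
// The function name announceEcho is hypothetical; the service string
// "com.bar" reuses what the test already assumes.
func announceEcho(dom *goa.Domain) error {
	return dom.Announce("com.bar", func(payld []byte, seq uint64) ([]byte, uint64) {
		// Build the reply the client checks for: "pong" + original payload.
		return append([]byte("pong"), payld...), seq
	})
}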
// Announce starts serving the given service: it accepts TCP connections
// (currently hard-coded to :8000, ignoring the service name) and invokes
// handler for every request received, sending back the payload and
// sequence number the handler returns.
func (d *Domain) Announce(service string, handler func([]byte, uint64) ([]byte, uint64)) error {
	lis, err := net.Listen("tcp", ":8000")
	if err != nil {
		return err
	}
	//
	// TODO: Optimize with batching like the client side, with two goroutines.
	// The handler callback the user defines can do whatever the user wants:
	// send the request to a worker pool through a channel or handle it
	// directly on the goroutine that receives the messages.
	//
	go func() {
		for {
			netconn, err := lis.Accept()
			if err != nil {
				log.Debug(err)
				continue
			}
			log.Debug("Connection accepted from ", netconn.RemoteAddr())
			conn := newConn(netconn)
			go func() {
				reqs := make([]*Request, 0, maxBatchLen)
				for {
					// Reuse the backing array between batches.
					reqs = reqs[:0]
					for i := 0; i < maxBatchLen; i++ {
						payld, seq, err := conn.recv()
						if err != nil {
							log.Debug(err)
							Exit()
						}
						log.Debug("RECV(", seq, ") ", string(payld))
						payld, seq = handler(payld, seq)
						req := newRequest(payld, seq)
						reqs = append(reqs, req)
					}
					if err := conn.sendBatch(reqs); err != nil {
						log.Debug(err)
					}
				}
			}()
		}
	}()
	return nil
}
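// Hedged illustration of the worker-pool option mentioned in the comment
// inside Announce: the registered handler stays synchronous, but it hands
// each request to a fixed pool over a channel and waits for the reply on a
// per-request channel. All names here (poolJob, newHandlerPool, the pool
// size) are hypothetical and for illustration only.
type poolJob struct {
	payld []byte
	seq   uint64
	done  chan []byte
}

// newHandlerPool starts n workers running work and returns a handler with
// the signature Announce expects that dispatches to them, e.g.:
//
//	dom.Announce("com.bar", newHandlerPool(8, func(p []byte, _ uint64) []byte {
//		return append([]byte("pong"), p...)
//	}))
func newHandlerPool(n int, work func([]byte, uint64) []byte) func([]byte, uint64) ([]byte, uint64) {
	jobs := make(chan poolJob)
	for i := 0; i < n; i++ {
		go func() {
			for j := range jobs {
				j.done <- work(j.payld, j.seq)
			}
		}()
	}
	return func(payld []byte, seq uint64) ([]byte, uint64) {
		j := poolJob{payld: payld, seq: seq, done: make(chan []byte, 1)}
		jobs <- j
		return <-j.done, seq
	}
}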