// TestStopping verifies that the muxer forwards messages in both directions
// while running, and forwards nothing once it has been closed.
func TestStopping(t *testing.T) {
	ctx := context.Background()

	// setup
	p1 := &TestProtocol{Pipe: msg.NewPipe(10)}
	p2 := &TestProtocol{Pipe: msg.NewPipe(10)}
	pid1 := pb.ProtocolID_Test
	pid2 := pb.ProtocolID_Identify
	mux1 := NewMuxer(ctx, ProtocolMap{
		pid1: p1,
		pid2: p2,
	})
	peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa")
	// peer2 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275bbbbbb")

	// test outgoing p1
	for _, s := range []string{"foo1", "bar1", "baz1"} {
		p1.Outgoing <- msg.New(peer1, []byte(s))
		testWrappedMsg(t, <-mux1.Outgoing, pid1, []byte(s))
	}

	// test incoming p1
	for _, s := range []string{"foo2", "bar2", "baz2"} {
		d, err := wrapData([]byte(s), pid1)
		if err != nil {
			t.Error(err)
		}
		mux1.Incoming <- msg.New(peer1, d)
		testMsg(t, <-p1.Incoming, []byte(s))
	}

	mux1.Close() // waits

	// test outgoing p1 after close: nothing should come through
	for _, s := range []string{"foo3", "bar3", "baz3"} {
		p1.Outgoing <- msg.New(peer1, []byte(s))
		select {
		case m := <-mux1.Outgoing:
			t.Errorf("should not have received anything. Got: %v", string(m.Data()))
		case <-time.After(time.Millisecond):
		}
	}

	// test incoming p1 after close: nothing should come through
	for _, s := range []string{"foo4", "bar4", "baz4"} {
		d, err := wrapData([]byte(s), pid1)
		if err != nil {
			t.Error(err)
		}
		mux1.Incoming <- msg.New(peer1, d)
		select {
		case <-p1.Incoming:
			t.Error("should not have received anything.")
		case <-time.After(time.Millisecond):
		}
	}
}

// TestSimpleMuxer checks that messages flow through the muxer in both
// directions for each registered protocol, and are (un)wrapped with the
// correct ProtocolID.
func TestSimpleMuxer(t *testing.T) {
	ctx := context.Background()

	// setup
	p1 := &TestProtocol{Pipe: msg.NewPipe(10)}
	p2 := &TestProtocol{Pipe: msg.NewPipe(10)}
	pid1 := pb.ProtocolID_Test
	pid2 := pb.ProtocolID_Routing
	mux1 := NewMuxer(ctx, ProtocolMap{
		pid1: p1,
		pid2: p2,
	})
	peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa")
	// peer2 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275bbbbbb")

	// test outgoing p1
	for _, s := range []string{"foo", "bar", "baz"} {
		p1.Outgoing <- msg.New(peer1, []byte(s))
		testWrappedMsg(t, <-mux1.Outgoing, pid1, []byte(s))
	}

	// test incoming p1
	for _, s := range []string{"foo", "bar", "baz"} {
		d, err := wrapData([]byte(s), pid1)
		if err != nil {
			t.Error(err)
		}
		mux1.Incoming <- msg.New(peer1, d)
		testMsg(t, <-p1.Incoming, []byte(s))
	}

	// test outgoing p2
	for _, s := range []string{"foo", "bar", "baz"} {
		p2.Outgoing <- msg.New(peer1, []byte(s))
		testWrappedMsg(t, <-mux1.Outgoing, pid2, []byte(s))
	}

	// test incoming p2
	for _, s := range []string{"foo", "bar", "baz"} {
		d, err := wrapData([]byte(s), pid2)
		if err != nil {
			t.Error(err)
		}
		mux1.Incoming <- msg.New(peer1, d)
		testMsg(t, <-p2.Incoming, []byte(s))
	}
}

// NewService creates a service object with the given handler.
// (The signature takes no type ID, so the old comment mentioning one
// was stale.)
func NewService(ctx context.Context, h Handler) Service {
	s := &service{
		Handler:       h,
		Requests:      RequestMap{},
		Pipe:          msg.NewPipe(10),
		ContextCloser: ctxc.NewContextCloser(ctx, nil),
	}

	// account for the child goroutine so Close can wait for it
	s.Children().Add(1)
	go s.handleIncomingMessages()

	return s
}

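// A minimal usage sketch for NewService. The Handler interface is assumed
// here to be a single HandleMessage method that returns a reply message;
// the exact signature may differ, and "echoHandler" is hypothetical, not
// part of the original code:
//
//	type echoHandler struct{}
//
//	func (echoHandler) HandleMessage(ctx context.Context, m msg.NetMessage) msg.NetMessage {
//		return m // reply with the request itself
//	}
//
//	svc := NewService(ctx, echoHandler{})
//	defer svc.Close() // ContextCloser: cancels ctx, waits for children
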
// NewSwarm constructs a Swarm with a message Pipe, and starts listening
// on the given addresses.
func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr, local peer.Peer, ps peer.Peerstore) (*Swarm, error) {
	s := &Swarm{
		Pipe:    msg.NewPipe(10),
		conns:   conn.MultiConnMap{},
		local:   local,
		peers:   ps,
		errChan: make(chan error, 100),
	}

	// ContextCloser for proper child management.
	s.ContextCloser = ctxc.NewContextCloser(ctx, s.close)

	s.Children().Add(1)
	go s.fanOut()

	return s, s.listen(listenAddrs)
}

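// A minimal usage sketch for NewSwarm ("laddrs", "local", and "ps" are
// assumed to be constructed elsewhere; they are not defined here):
//
//	s, err := NewSwarm(ctx, laddrs, local, ps)
//	if err != nil {
//		// handle listen failure
//	}
//	defer s.Close() // invokes s.close and waits for fanOut to exit
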
// NewMuxer constructs a muxer given a protocol map.
func NewMuxer(ctx context.Context, mp ProtocolMap) *Muxer {
	m := &Muxer{
		Protocols:     mp,
		Pipe:          msg.NewPipe(10),
		ContextCloser: ctxc.NewContextCloser(ctx, nil),
	}

	// one child goroutine for the shared incoming side,
	// plus one outgoing handler per registered protocol
	m.Children().Add(1)
	go m.handleIncomingMessages()
	for pid, proto := range m.Protocols {
		m.Children().Add(1)
		go m.handleOutgoingMessages(pid, proto)
	}

	return m
}

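// A minimal wiring sketch, reusing only names that appear in this package
// (TestProtocol, msg.NewPipe, pb.ProtocolID_Test); "somePeer" is assumed
// to exist:
//
//	proto := &TestProtocol{Pipe: msg.NewPipe(10)}
//	mux := NewMuxer(ctx, ProtocolMap{pb.ProtocolID_Test: proto})
//
//	// a message the protocol emits is wrapped with its ProtocolID
//	// and forwarded onto mux.Outgoing for the network layer to send
//	proto.Outgoing <- msg.New(somePeer, []byte("hello"))
//	wrapped := <-mux.Outgoing
//	_ = wrapped
//
//	mux.Close() // waits for the handler goroutines accounted via Children()
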
// TestSimultMuxer hammers the muxer with concurrent producers and consumers
// on both sides of both protocols, and spins until every message has been
// counted (total*8: two protocols x two directions x produced+consumed).
func TestSimultMuxer(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// run muxer
	ctx, cancel := context.WithCancel(context.Background())

	// setup
	p1 := &TestProtocol{Pipe: msg.NewPipe(10)}
	p2 := &TestProtocol{Pipe: msg.NewPipe(10)}
	pid1 := pb.ProtocolID_Test
	pid2 := pb.ProtocolID_Identify
	mux1 := NewMuxer(ctx, ProtocolMap{
		pid1: p1,
		pid2: p2,
	})
	peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa")
	// peer2 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275bbbbbb")

	// counts[pid][in|out][produced|consumed]
	total := 10000
	speed := time.Microsecond * 1
	counts := [2][2][2]int{}
	var countsLock sync.Mutex

	// run producers at every end sending incrementing messages
	produceOut := func(pid pb.ProtocolID, size int) {
		limiter := time.Tick(speed)
		for i := 0; i < size; i++ {
			<-limiter
			s := fmt.Sprintf("proto %v out %v", pid, i)
			m := msg.New(peer1, []byte(s))
			mux1.Protocols[pid].GetPipe().Outgoing <- m
			countsLock.Lock()
			counts[pid][0][0]++
			countsLock.Unlock()
			// log.Debug("sent %v", s)
		}
	}

	produceIn := func(pid pb.ProtocolID, size int) {
		limiter := time.Tick(speed)
		for i := 0; i < size; i++ {
			<-limiter
			s := fmt.Sprintf("proto %v in %v", pid, i)
			d, err := wrapData([]byte(s), pid)
			if err != nil {
				t.Error(err)
			}

			m := msg.New(peer1, d)
			mux1.Incoming <- m
			countsLock.Lock()
			counts[pid][1][0]++
			countsLock.Unlock()
			// log.Debug("sent %v", s)
		}
	}

	consumeOut := func() {
		for {
			select {
			case m := <-mux1.Outgoing:
				data, pid, err := unwrapData(m.Data())
				if err != nil {
					t.Error(err)
				}

				// log.Debug("got %v", string(data))
				_ = data
				countsLock.Lock()
				counts[pid][1][1]++
				countsLock.Unlock()

			case <-ctx.Done():
				return
			}
		}
	}

	consumeIn := func(pid pb.ProtocolID) {
		for {
			select {
			case m := <-mux1.Protocols[pid].GetPipe().Incoming:
				countsLock.Lock()
				counts[pid][0][1]++
				countsLock.Unlock()
				// log.Debug("got %v", string(m.Data()))
				_ = m
			case <-ctx.Done():
				return
			}
		}
	}

	go produceOut(pid1, total)
	go produceOut(pid2, total)
	go produceIn(pid1, total)
	go produceIn(pid2, total)
	go consumeOut()
	go consumeIn(pid1)
	go consumeIn(pid2)

	// poll the counters until all messages have been produced and consumed
	limiter := time.Tick(speed)
	for {
		<-limiter
		countsLock.Lock()
		got := counts[0][0][0] + counts[0][0][1] + counts[0][1][0] + counts[0][1][1] +
			counts[1][0][0] + counts[1][0][1] + counts[1][1][0] + counts[1][1][1]
		countsLock.Unlock()

		if got == total*8 {
			cancel()
			return
		}
	}
}