func setupConn(t *testing.T, ctx context.Context, a1, a2 string) (a, b Conn) { p1, err := setupPeer(a1) if err != nil { t.Fatal("error setting up peer", err) } p2, err := setupPeer(a2) if err != nil { t.Fatal("error setting up peer", err) } laddr := p1.NetAddress("tcp") if laddr == nil { t.Fatal("Listen address is nil.") } ps1 := peer.NewPeerstore() ps2 := peer.NewPeerstore() ps1.Add(p1) ps2.Add(p2) l1, err := Listen(ctx, laddr, p1, ps1) if err != nil { t.Fatal(err) } d2 := &Dialer{ Peerstore: ps2, LocalPeer: p2, } c2, err := d2.Dial(ctx, "tcp", p1) if err != nil { t.Fatal("error dialing peer", err) } c1 := <-l1.Accept() return c1, c2 }
func setupSecureConn(t *testing.T, c Conn) Conn { c, ok := c.(*secureConn) if ok { return c } // shouldn't happen, because dial + listen already return secure conns. s, err := newSecureConn(c.Context(), c, peer.NewPeerstore()) if err != nil { t.Fatal(err) } return s }
func runEncryptBenchmark(b *testing.B) { pstore := peer.NewPeerstore() ctx := context.TODO() bufsize := 1024 * 1024 pa := getPeer(b) pb := getPeer(b) duplexa := pipes.NewDuplex(16) duplexb := pipes.NewDuplex(16) go bindDuplexNoCopy(duplexa, duplexb) var spb *SecurePipe done := make(chan struct{}) go func() { var err error spb, err = NewSecurePipe(ctx, bufsize, pb, pstore, duplexb) if err != nil { b.Fatal(err) } done <- struct{}{} }() spa, err := NewSecurePipe(ctx, bufsize, pa, pstore, duplexa) if err != nil { b.Fatal(err) } <-done go func() { for _ = range spa.In { // Throw it all away, // all of your hopes and dreams // piped out to /dev/null... done <- struct{}{} } }() data := make([]byte, 1024*512) util.NewTimeSeededRand().Read(data) // Begin actual benchmarking b.ResetTimer() for i := 0; i < b.N; i++ { b.SetBytes(int64(len(data))) spb.Out <- data <-done } }
func TestPeerIsLocal(t *testing.T) { t.Log("Ensure that peer is Local after initializing identity") online := false peers := peer.NewPeerstore() cfg := testIdentity p, err := initIdentity(&cfg, peers, online) if err != nil { t.Fatal(err) } if p.GetType() != peer.Local { t.Fail() } }
// NewMockNode constructs an IpfsNode for use in tests. func NewMockNode() (*IpfsNode, error) { nd := new(IpfsNode) // Generate Identity sk, pk, err := ci.GenerateKeyPair(ci.RSA, 1024) if err != nil { return nil, err } p, err := peer.WithKeyPair(sk, pk) if err != nil { return nil, err } nd.Peerstore = peer.NewPeerstore() nd.Identity, err = nd.Peerstore.Add(p) if err != nil { return nil, err } // Temp Datastore dstore := ds.NewMapDatastore() nd.Datastore = util.CloserWrap(syncds.MutexWrap(dstore)) // Routing dht := mdht.NewMockRouter(nd.Identity, nd.Datastore) nd.Routing = dht // Bitswap //?? bserv, err := bs.NewBlockService(nd.Datastore, nil) if err != nil { return nil, err } nd.DAG = mdag.NewDAGService(bserv) // Namespace resolver nd.Namesys = nsys.NewNameSystem(dht) // Path resolver nd.Resolver = &path.Resolver{DAG: nd.DAG} return nd, nil }
func setupDHT(ctx context.Context, t *testing.T, p peer.Peer) *IpfsDHT { peerstore := peer.NewPeerstore() dhts := netservice.NewService(ctx, nil) // nil handler for now, need to patch it net, err := inet.NewIpfsNetwork(ctx, p.Addresses(), p, peerstore, &mux.ProtocolMap{ mux.ProtocolID_Routing: dhts, }) if err != nil { t.Fatal(err) } d := NewDHT(ctx, p, peerstore, net, dhts, ds.NewMapDatastore()) dhts.SetHandler(d) d.Validators["v"] = func(u.Key, []byte) error { return nil } return d }
func makeSwarms(ctx context.Context, t *testing.T, addrs []string) ([]*Swarm, []peer.Peer) { swarms := []*Swarm{} for _, addr := range addrs { local := setupPeer(t, addr) peerstore := peer.NewPeerstore() swarm, err := NewSwarm(ctx, local.Addresses(), local, peerstore) if err != nil { t.Fatal(err) } swarms = append(swarms, swarm) } peers := make([]peer.Peer, len(swarms)) for i, s := range swarms { peers[i] = s.local } return swarms, peers }
// If less than K nodes are in the entire network, it should fail when we make // a GET rpc and nobody has the value func TestLessThanKResponses(t *testing.T) { // t.Skip("skipping test because it makes a lot of output") ctx := context.Background() u.Debug = false fn := &fauxNet{} fs := &fauxSender{} local := makePeer(nil) peerstore := peer.NewPeerstore() peerstore.Add(local) d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore()) var ps []peer.Peer for i := 0; i < 5; i++ { ps = append(ps, _randPeer()) d.Update(ctx, ps[i]) } other := _randPeer() // Reply with random peers to every message fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage { pmes := new(pb.Message) err := proto.Unmarshal(mes.Data(), pmes) if err != nil { t.Fatal(err) } switch pmes.GetType() { case pb.Message_GET_VALUE: resp := &pb.Message{ Type: pmes.Type, CloserPeers: pb.PeersToPBPeers([]peer.Peer{other}), } mes, err := msg.FromObject(mes.Peer(), resp) if err != nil { t.Error(err) } return mes default: panic("Shouldnt recieve this.") } }) ctx, _ = context.WithTimeout(ctx, time.Second*30) _, err := d.GetValue(ctx, u.Key("hello")) if err != nil { switch err { case routing.ErrNotFound: //Success! return case u.ErrTimeout: t.Fatal("Should not have gotten timeout!") default: t.Fatalf("Got unexpected error: %s", err) } } t.Fatal("Expected to recieve an error.") }
func TestNotFound(t *testing.T) { if testing.Short() { t.SkipNow() } ctx := context.Background() fn := &fauxNet{} fs := &fauxSender{} local := makePeer(nil) peerstore := peer.NewPeerstore() peerstore.Add(local) d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore()) var ps []peer.Peer for i := 0; i < 5; i++ { ps = append(ps, _randPeer()) d.Update(ctx, ps[i]) } // Reply with random peers to every message fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage { pmes := new(pb.Message) err := proto.Unmarshal(mes.Data(), pmes) if err != nil { t.Fatal(err) } switch pmes.GetType() { case pb.Message_GET_VALUE: resp := &pb.Message{Type: pmes.Type} peers := []peer.Peer{} for i := 0; i < 7; i++ { peers = append(peers, _randPeer()) } resp.CloserPeers = pb.PeersToPBPeers(peers) mes, err := msg.FromObject(mes.Peer(), resp) if err != nil { t.Error(err) } return mes default: panic("Shouldnt recieve this.") } }) ctx, _ = context.WithTimeout(ctx, time.Second*5) v, err := d.GetValue(ctx, u.Key("hello")) log.Debugf("get value got %v", v) if err != nil { switch err { case routing.ErrNotFound: //Success! return case u.ErrTimeout: t.Fatal("Should not have gotten timeout!") default: t.Fatalf("Got unexpected error: %s", err) } } t.Fatal("Expected to recieve an error.") }
func TestGetFailures(t *testing.T) { if testing.Short() { t.SkipNow() } ctx := context.Background() fn := &fauxNet{} fs := &fauxSender{} peerstore := peer.NewPeerstore() local := makePeer(nil) d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore()) other := makePeer(nil) d.Update(ctx, other) // This one should time out // u.POut("Timout Test\n") ctx1, _ := context.WithTimeout(context.Background(), time.Second) _, err := d.GetValue(ctx1, u.Key("test")) if err != nil { if err != context.DeadlineExceeded { t.Fatal("Got different error than we expected", err) } } else { t.Fatal("Did not get expected error!") } // u.POut("NotFound Test\n") // Reply with failures to every message fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage { pmes := new(pb.Message) err := proto.Unmarshal(mes.Data(), pmes) if err != nil { t.Fatal(err) } resp := &pb.Message{ Type: pmes.Type, } m, err := msg.FromObject(mes.Peer(), resp) return m }) // This one should fail with NotFound ctx2, _ := context.WithTimeout(context.Background(), time.Second) _, err = d.GetValue(ctx2, u.Key("test")) if err != nil { if err != routing.ErrNotFound { t.Fatalf("Expected ErrNotFound, got: %s", err) } } else { t.Fatal("expected error, got none.") } fs.handlers = nil // Now we test this DHT's handleGetValue failure typ := pb.Message_GET_VALUE str := "hello" rec, err := d.makePutRecord(u.Key(str), []byte("blah")) if err != nil { t.Fatal(err) } req := pb.Message{ Type: &typ, Key: &str, Record: rec, } // u.POut("handleGetValue Test\n") mes, err := msg.FromObject(other, &req) if err != nil { t.Error(err) } mes = d.HandleMessage(ctx, mes) pmes := new(pb.Message) err = proto.Unmarshal(mes.Data(), pmes) if err != nil { t.Fatal(err) } if pmes.GetRecord() != nil { t.Fatal("shouldnt have value") } if pmes.GetProviderPeers() != nil { t.Fatal("shouldnt have provider peers") } }
// NewIpfsNode constructs a new IpfsNode based on the given config.
// When online is true it also brings up the network stack: listeners,
// diagnostics, DHT routing, and the bitswap exchange. On any init failure
// the partially-constructed node is closed via the deferred cleanup below.
func NewIpfsNode(cfg *config.Config, online bool) (n *IpfsNode, err error) {
	success := false // flip to true after all sub-system inits succeed

	// Tear down whatever was already wired up if any later init step fails;
	// n may be nil when the failure happens before the node is allocated.
	defer func() {
		if !success && n != nil {
			n.Close()
		}
	}()

	if cfg == nil {
		return nil, debugerror.Errorf("configuration required")
	}

	// derive this from a higher context.
	ctx := context.TODO()
	n = &IpfsNode{
		onlineMode: online,
		Config:     cfg,
	}
	n.ContextCloser = ctxc.NewContextCloser(ctx, n.teardown)

	// setup datastore.
	if n.Datastore, err = makeDatastore(cfg.Datastore); err != nil {
		return nil, debugerror.Wrap(err)
	}

	// setup peerstore + local peer identity
	n.Peerstore = peer.NewPeerstore()
	n.Identity, err = initIdentity(&n.Config.Identity, n.Peerstore, online)
	if err != nil {
		return nil, debugerror.Wrap(err)
	}

	// setup online services
	if online {
		dhtService := netservice.NewService(ctx, nil)      // nil handler for now, need to patch it
		exchangeService := netservice.NewService(ctx, nil) // nil handler for now, need to patch it
		diagService := netservice.NewService(ctx, nil)     // nil handler for now, need to patch it

		muxMap := &mux.ProtocolMap{
			mux.ProtocolID_Routing:    dhtService,
			mux.ProtocolID_Exchange:   exchangeService,
			mux.ProtocolID_Diagnostic: diagService,
			// add protocol services here.
		}

		// setup the network
		// NOTE: `:=` declares a new err scoped to this block, shadowing the
		// named return; harmless here since every return is explicit.
		listenAddrs, err := listenAddresses(cfg)
		if err != nil {
			return nil, debugerror.Wrap(err)
		}
		n.Network, err = inet.NewIpfsNetwork(ctx, listenAddrs, n.Identity, n.Peerstore, muxMap)
		if err != nil {
			return nil, debugerror.Wrap(err)
		}
		n.AddCloserChild(n.Network)

		// setup diagnostics service
		n.Diagnostics = diag.NewDiagnostics(n.Identity, n.Network, diagService)
		diagService.SetHandler(n.Diagnostics)

		// setup routing service
		dhtRouting := dht.NewDHT(ctx, n.Identity, n.Peerstore, n.Network, dhtService, n.Datastore)
		dhtRouting.Validators[IpnsValidatorTag] = namesys.ValidateIpnsRecord

		// TODO(brian): perform this inside NewDHT factory method
		dhtService.SetHandler(dhtRouting) // wire the handler to the service.
		n.Routing = dhtRouting
		n.AddCloserChild(dhtRouting)

		// setup exchange service
		const alwaysSendToPeer = true // use YesManStrategy
		bitswapNetwork := bsnet.NewFromIpfsNetwork(exchangeService, n.Network)
		n.Exchange = bitswap.New(ctx, n.Identity, bitswapNetwork, n.Routing, n.Datastore, alwaysSendToPeer)

		// dial the configured bootstrap peers in the background.
		go initConnections(ctx, n.Config, n.Peerstore, dhtRouting)
	}

	// TODO(brian): when offline instantiate the BlockService with a bitswap
	// session that simply doesn't return blocks
	// (n.Exchange is nil when offline; NewBlockService must tolerate that)
	n.Blocks, err = bserv.NewBlockService(n.Datastore, n.Exchange)
	if err != nil {
		return nil, debugerror.Wrap(err)
	}

	n.DAG = merkledag.NewDAGService(n.Blocks)
	n.Namesys = namesys.NewNameSystem(n.Routing)

	// LoadPinner failure is treated as "no pin state yet": fall back to a
	// fresh pinner rather than aborting node construction.
	n.Pinning, err = pin.LoadPinner(n.Datastore, n.DAG)
	if err != nil {
		n.Pinning = pin.NewPinner(n.Datastore, n.DAG)
	}
	n.Resolver = &path.Resolver{DAG: n.DAG}

	success = true
	return n, nil
}
func TestDialer(t *testing.T) { // t.Skip("Skipping in favor of another test") p1, err := setupPeer("/ip4/127.0.0.1/tcp/4234") if err != nil { t.Fatal("error setting up peer", err) } p2, err := setupPeer("/ip4/127.0.0.1/tcp/4235") if err != nil { t.Fatal("error setting up peer", err) } ctx, cancel := context.WithCancel(context.Background()) laddr := p1.NetAddress("tcp") if laddr == nil { t.Fatal("Listen address is nil.") } ps1 := peer.NewPeerstore() ps2 := peer.NewPeerstore() ps1.Add(p1) ps2.Add(p2) l, err := Listen(ctx, laddr, p1, ps1) if err != nil { t.Fatal(err) } go echoListen(ctx, l) d := &Dialer{ Peerstore: ps2, LocalPeer: p2, } c, err := d.Dial(ctx, "tcp", p1) if err != nil { t.Fatal("error dialing peer", err) } // fmt.Println("sending") c.Out() <- []byte("beep") c.Out() <- []byte("boop") out := <-c.In() // fmt.Println("recving", string(out)) data := string(out) if data != "beep" { t.Error("unexpected conn output", data) } out = <-c.In() data = string(out) if string(out) != "boop" { t.Error("unexpected conn output", data) } // fmt.Println("closing") c.Close() l.Close() cancel() }
func setupMultiConns(t *testing.T, ctx context.Context) (a, b *MultiConn) { log.Info("Setting up peers") p1, err := setupPeer(tcpAddrString(11000)) if err != nil { t.Fatal("error setting up peer", err) } p2, err := setupPeer(tcpAddrString(12000)) if err != nil { t.Fatal("error setting up peer", err) } // peerstores p1ps := peer.NewPeerstore() p2ps := peer.NewPeerstore() p1ps.Add(p1) p2ps.Add(p2) // listeners listen := func(addr ma.Multiaddr, p peer.Peer, ps peer.Peerstore) Listener { l, err := Listen(ctx, addr, p, ps) if err != nil { t.Fatal(err) } return l } log.Info("Setting up listeners") p1l := listen(p1.Addresses()[0], p1, p1ps) p2l := listen(p2.Addresses()[0], p2, p2ps) // dialers p1d := &Dialer{Peerstore: p1ps, LocalPeer: p1} p2d := &Dialer{Peerstore: p2ps, LocalPeer: p2} dial := func(d *Dialer, dst peer.Peer) <-chan Conn { cc := make(chan Conn) go func() { c, err := d.Dial(ctx, "tcp", dst) if err != nil { t.Fatal("error dialing peer", err) } cc <- c }() return cc } // connect simultaneously log.Info("Connecting...") p1dc := dial(p1d, p2) p2dc := dial(p2d, p1) c12a := <-p1l.Accept() c12b := <-p1dc c21a := <-p2l.Accept() c21b := <-p2dc log.Info("Ok, making multiconns") c1, err := NewMultiConn(ctx, p1, p2, []Conn{c12a, c12b}) if err != nil { t.Fatal(err) } c2, err := NewMultiConn(ctx, p2, p1, []Conn{c21a, c21b}) if err != nil { t.Fatal(err) } p1l.Close() p2l.Close() log.Info("did you make multiconns?") return c1, c2 }