// NewMockNode constructs an IpfsNode for use in tests.
func NewMockNode() (*core.IpfsNode, error) {
	ctx := context.Background()

	// effectively offline, only peer in its network
	return core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   MockHostOption(mocknet.New(ctx)),
	})
}
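// A minimal usage sketch (hypothetical, not part of the original suite):
// NewMockNode returns a fully built node whose host lives on a fresh private
// mocknet, so it sees no other peers. The test name below is an assumption.
func TestNewMockNodeSketch(t *testing.T) {
	nd, err := NewMockNode()
	if err != nil {
		t.Fatal(err)
	}
	defer nd.Close()

	// despite being isolated, the node has a usable identity and peer host
	if nd.PeerHost.ID() != nd.Identity {
		t.Fatal("host identity should match node identity")
	}
}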
func InitializeSupernodeNetwork(
	ctx context.Context,
	numServers, numClients int,
	conf testutil.LatencyConfig) ([]*core.IpfsNode, []*core.IpfsNode, error) {

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency:   conf.NetworkLatency,
		Bandwidth: math.MaxInt32,
	})

	routingDatastore := ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore()))
	var servers []*core.IpfsNode
	for i := 0; i < numServers; i++ {
		bootstrap, err := core.NewNode(ctx, &core.BuildCfg{
			Online:  true,
			Host:    mock.MockHostOption(mn),
			Routing: corerouting.SupernodeServer(routingDatastore),
		})
		if err != nil {
			return nil, nil, err
		}
		servers = append(servers, bootstrap)
	}

	var bootstrapInfos []peer.PeerInfo
	for _, n := range servers {
		info := n.Peerstore.PeerInfo(n.PeerHost.ID())
		bootstrapInfos = append(bootstrapInfos, info)
	}

	var clients []*core.IpfsNode
	for i := 0; i < numClients; i++ {
		n, err := core.NewNode(ctx, &core.BuildCfg{
			Online:  true,
			Host:    mock.MockHostOption(mn),
			Routing: corerouting.SupernodeClient(bootstrapInfos...),
		})
		if err != nil {
			return nil, nil, err
		}
		clients = append(clients, n)
	}

	if err := mn.LinkAll(); err != nil {
		return nil, nil, err
	}

	bcfg := core.BootstrapConfigWithPeers(bootstrapInfos)
	for _, n := range clients {
		if err := n.Bootstrap(bcfg); err != nil {
			return nil, nil, err
		}
	}
	return servers, clients, nil
}
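// A hypothetical caller, sketched for illustration: bring up a small
// supernode network and sanity-check the counts. The test name and the
// 2-server/5-client split are assumptions, not fixtures from this package.
func TestSupernodeNetworkSketch(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	servers, clients, err := InitializeSupernodeNetwork(ctx, 2, 5, testutil.LatencyConfig{})
	if err != nil {
		t.Fatal(err)
	}
	for _, n := range append(servers, clients...) {
		defer n.Close()
	}
	if len(servers) != 2 || len(clients) != 5 {
		t.Fatalf("expected 2 servers and 5 clients, got %d and %d", len(servers), len(clients))
	}
}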
func TestBitswapWithoutRouting(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	const numPeers = 4

	// create network
	mn := mocknet.New(ctx)

	var nodes []*core.IpfsNode
	for i := 0; i < numPeers; i++ {
		n, err := core.NewNode(ctx, &core.BuildCfg{
			Online:  true,
			Host:    coremock.MockHostOption(mn),
			Routing: core.NilRouterOption, // no routing
		})
		if err != nil {
			t.Fatal(err)
		}
		defer n.Close()
		nodes = append(nodes, n)
	}

	if err := mn.LinkAll(); err != nil {
		t.Fatal(err)
	}

	// connect them
	for _, n1 := range nodes {
		for _, n2 := range nodes {
			if n1 == n2 {
				continue
			}

			log.Debug("connecting to other hosts")
			p2 := n2.PeerHost.Peerstore().PeerInfo(n2.PeerHost.ID())
			if err := n1.PeerHost.Connect(ctx, p2); err != nil {
				t.Fatal(err)
			}
		}
	}

	// add blocks to each before
	log.Debug("adding block.")
	block0 := blocks.NewBlock([]byte("block0"))
	block1 := blocks.NewBlock([]byte("block1"))

	// put 1 before
	if err := nodes[0].Blockstore.Put(block0); err != nil {
		t.Fatal(err)
	}

	// get it out.
	for i, n := range nodes {
		// skip the first node: it already holds the block locally, so its
		// exchange would never be asked and a self-get would hang.
		if i == 0 {
			continue
		}

		log.Debugf("%d %s get block.", i, n.Identity)
		b, err := n.Blocks.GetBlock(ctx, block0.Key())
		if err != nil {
			t.Error(err)
		} else if !bytes.Equal(b.Data, block0.Data) {
			t.Error("byte comparison fail")
		} else {
			log.Debugf("got block: %s", b.Key())
		}
	}

	// put 1 after
	if err := nodes[1].Blockstore.Put(block1); err != nil {
		t.Fatal(err)
	}

	// get it out.
	for _, n := range nodes {
		b, err := n.Blocks.GetBlock(ctx, block1.Key())
		if err != nil {
			t.Error(err)
		} else if !bytes.Equal(b.Data, block1.Data) {
			t.Error("byte comparison fail")
		} else {
			log.Debugf("got block: %s", b.Key())
		}
	}
}
func TestRepublish(t *testing.T) {
	// set cache life to zero for testing low-period repubs

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// create network
	mn := mocknet.New(ctx)
	var nodes []*core.IpfsNode
	for i := 0; i < 10; i++ {
		nd, err := core.NewNode(ctx, &core.BuildCfg{
			Online: true,
			Host:   mock.MockHostOption(mn),
		})
		if err != nil {
			t.Fatal(err)
		}

		nd.Namesys = namesys.NewNameSystem(nd.Routing, nd.Repo.Datastore(), 0)
		nodes = append(nodes, nd)
	}

	if err := mn.LinkAll(); err != nil {
		t.Fatal(err)
	}

	bsinf := core.BootstrapConfigWithPeers(
		[]peer.PeerInfo{
			nodes[0].Peerstore.PeerInfo(nodes[0].Identity),
		},
	)

	for _, n := range nodes[1:] {
		if err := n.Bootstrap(bsinf); err != nil {
			t.Fatal(err)
		}
	}

	// have one node publish a record that is valid for 1 second
	publisher := nodes[3]
	p := path.FromString("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") // does not need to be valid
	rp := namesys.NewRoutingPublisher(publisher.Routing, publisher.Repo.Datastore())
	err := rp.PublishWithEOL(ctx, publisher.PrivateKey, p, time.Now().Add(time.Second))
	if err != nil {
		t.Fatal(err)
	}

	name := "/ipns/" + publisher.Identity.Pretty()
	if err := verifyResolution(nodes, name, p); err != nil {
		t.Fatal(err)
	}

	// now wait a second; the record will have expired and resolution should fail
	time.Sleep(time.Second)
	if err := verifyResolutionFails(nodes, name); err != nil {
		t.Fatal(err)
	}

	// The republishers contained within the nodes have their interval set to
	// 12 hours. Instead of trying to tweak those, we pretend they don't exist
	// and make our own.
	repub := NewRepublisher(publisher.Routing, publisher.Repo.Datastore(), publisher.Peerstore)
	repub.Interval = time.Second
	repub.RecordLifetime = time.Second * 5
	repub.AddName(publisher.Identity)

	proc := goprocess.Go(repub.Run)
	defer proc.Close()

	// now wait a couple seconds for it to fire
	time.Sleep(time.Second * 2)

	// we should be able to resolve them now
	if err := verifyResolution(nodes, name, p); err != nil {
		t.Fatal(err)
	}
}
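// TestRepublish relies on two helpers not shown above. A plausible sketch,
// assuming Namesys.Resolve(ctx, name) returns (path.Path, error); the exact
// bodies in the real suite may differ.
func verifyResolution(nodes []*core.IpfsNode, name string, exp path.Path) error {
	ctx := context.Background()
	for _, n := range nodes {
		val, err := n.Namesys.Resolve(ctx, name)
		if err != nil {
			return err
		}
		if val != exp {
			return errors.New("resolved wrong record")
		}
	}
	return nil
}

func verifyResolutionFails(nodes []*core.IpfsNode, name string) error {
	// expect resolution against the empty path to error out once the record expires
	if err := verifyResolution(nodes, name, ""); err == nil {
		return errors.New("expected resolution to fail")
	}
	return nil
}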
func benchCat(b *testing.B, data []byte, conf testutil.LatencyConfig) error {
	b.StopTimer()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency: conf.NetworkLatency,
		// TODO add to conf. This is tricky because we want 0 values to be functional.
		Bandwidth: math.MaxInt32,
	})

	adder, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer adder.Close()

	catter, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer catter.Close()

	if err := mn.LinkAll(); err != nil {
		return err
	}

	bs1 := []peer.PeerInfo{adder.Peerstore.PeerInfo(adder.Identity)}
	bs2 := []peer.PeerInfo{catter.Peerstore.PeerInfo(catter.Identity)}

	if err := catter.Bootstrap(core.BootstrapConfigWithPeers(bs1)); err != nil {
		return err
	}
	if err := adder.Bootstrap(core.BootstrapConfigWithPeers(bs2)); err != nil {
		return err
	}

	added, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}

	b.StartTimer()
	readerCatted, err := coreunix.Cat(ctx, catter, added)
	if err != nil {
		return err
	}

	// verify
	bufout := new(bytes.Buffer)
	if _, err := io.Copy(bufout, readerCatted); err != nil {
		return err
	}
	if !bytes.Equal(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	return nil
}
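// A hypothetical driver for benchCat, shown for illustration: the benchmark
// name, payload size, and zero-valued latency config are assumptions.
func BenchmarkCat1MBSketch(b *testing.B) {
	data := make([]byte, 1024*1024) // 1 MiB of zeros; real runs may prefer random bytes
	b.SetBytes(int64(len(data)))
	for n := 0; n < b.N; n++ {
		if err := benchCat(b, data, testutil.LatencyConfig{}); err != nil {
			b.Fatal(err)
		}
	}
}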
func RunThreeLeggedCat(data []byte, conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	const numPeers = 3

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency: conf.NetworkLatency,
		// TODO add to conf. This is tricky because we want 0 values to be functional.
		Bandwidth: math.MaxInt32,
	})

	bootstrap, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer bootstrap.Close()

	adder, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer adder.Close()

	catter, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer catter.Close()

	if err := mn.LinkAll(); err != nil {
		return err
	}

	bis := bootstrap.Peerstore.PeerInfo(bootstrap.PeerHost.ID())
	bcfg := core.BootstrapConfigWithPeers([]peer.PeerInfo{bis})
	if err := adder.Bootstrap(bcfg); err != nil {
		return err
	}
	if err := catter.Bootstrap(bcfg); err != nil {
		return err
	}

	added, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}

	readerCatted, err := coreunix.Cat(ctx, catter, added)
	if err != nil {
		return err
	}

	// verify
	bufout := new(bytes.Buffer)
	if _, err := io.Copy(bufout, readerCatted); err != nil {
		return err
	}
	if !bytes.Equal(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	return nil
}
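// A hypothetical caller, for illustration only: exercise the three-node
// add/cat round trip with a small payload and a zero-valued latency config.
// The test name and payload are assumptions, not part of the original suite.
func TestThreeLeggedCatSketch(t *testing.T) {
	data := []byte("the quick brown fox jumps over the lazy dog")
	if err := RunThreeLeggedCat(data, testutil.LatencyConfig{}); err != nil {
		t.Fatal(err)
	}
}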