// NewMockNode constructs an IpfsNode for use in tests.
func NewMockNode() (*core.IpfsNode, error) {
	ctx := context.Background()

	// effectively offline, only peer in its network
	return core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   MockHostOption(mocknet.New(ctx)),
	})
}
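// Illustrative sketch (not part of the original sources): a test might exercise
// NewMockNode roughly like this. The test name and the assertion on PeerHost are
// assumptions made for illustration only.
func TestNewMockNodeSketch(t *testing.T) {
	n, err := NewMockNode()
	if err != nil {
		t.Fatal(err)
	}
	defer n.Close()

	// the mock node should come up with a (mock) peer host attached
	if n.PeerHost == nil {
		t.Fatal("expected mock node to have a peer host")
	}
}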
func InitializeSupernodeNetwork(
	ctx context.Context,
	numServers, numClients int,
	conf testutil.LatencyConfig) ([]*core.IpfsNode, []*core.IpfsNode, error) {

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency:   conf.NetworkLatency,
		Bandwidth: math.MaxInt32,
	})

	routingDatastore := ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore()))

	var servers []*core.IpfsNode
	for i := 0; i < numServers; i++ {
		bootstrap, err := core.NewNode(ctx, &core.BuildCfg{
			Online:  true,
			Host:    mock.MockHostOption(mn),
			Routing: corerouting.SupernodeServer(routingDatastore),
		})
		if err != nil {
			return nil, nil, err
		}
		servers = append(servers, bootstrap)
	}

	var bootstrapInfos []peer.PeerInfo
	for _, n := range servers {
		info := n.Peerstore.PeerInfo(n.PeerHost.ID())
		bootstrapInfos = append(bootstrapInfos, info)
	}

	var clients []*core.IpfsNode
	for i := 0; i < numClients; i++ {
		n, err := core.NewNode(ctx, &core.BuildCfg{
			Online:  true,
			Host:    mock.MockHostOption(mn),
			Routing: corerouting.SupernodeClient(bootstrapInfos...),
		})
		if err != nil {
			return nil, nil, err
		}
		clients = append(clients, n)
	}
	mn.LinkAll()

	bcfg := core.BootstrapConfigWithPeers(bootstrapInfos)
	for _, n := range clients {
		if err := n.Bootstrap(bcfg); err != nil {
			return nil, nil, err
		}
	}
	return servers, clients, nil
}
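// Illustrative sketch (assumed usage, not from the original sources): wiring up
// a small supernode network and tearing it down afterwards. The 2-server /
// 2-client split and the zero-value testutil.LatencyConfig are assumptions.
func exampleSupernodeSetup(ctx context.Context) error {
	servers, clients, err := InitializeSupernodeNetwork(ctx, 2, 2, testutil.LatencyConfig{})
	if err != nil {
		return err
	}
	// close every node when done; order does not matter here
	for _, n := range append(servers, clients...) {
		defer n.Close()
	}
	// ... exercise the clients against the supernode servers here ...
	return nil
}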
func RunThreeLeggedCat(data []byte, conf testutil.LatencyConfig) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	const numPeers = 3

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency: conf.NetworkLatency,
		// TODO add to conf. This is tricky because we want 0 values to be functional.
		Bandwidth: math.MaxInt32,
	})

	bootstrap, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer bootstrap.Close()

	adder, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer adder.Close()

	catter, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer catter.Close()
	mn.LinkAll()

	bis := bootstrap.Peerstore.PeerInfo(bootstrap.PeerHost.ID())
	bcfg := core.BootstrapConfigWithPeers([]peer.PeerInfo{bis})
	if err := adder.Bootstrap(bcfg); err != nil {
		return err
	}
	if err := catter.Bootstrap(bcfg); err != nil {
		return err
	}

	added, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}

	readerCatted, err := coreunix.Cat(catter, added)
	if err != nil {
		return err
	}

	// verify that the catted bytes match the added bytes
	bufout := new(bytes.Buffer)
	if _, err := io.Copy(bufout, readerCatted); err != nil {
		return err
	}
	if !bytes.Equal(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	return nil
}
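// Illustrative sketch (assumed, not from the original sources): a test driving
// RunThreeLeggedCat with a fixed payload and a zero-latency config. The test
// name and payload are hypothetical.
func TestThreeLeggedCatSketch(t *testing.T) {
	data := []byte("hello three-legged cat")
	if err := RunThreeLeggedCat(data, testutil.LatencyConfig{}); err != nil {
		t.Fatal(err)
	}
}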
func benchCat(b *testing.B, data []byte, conf testutil.LatencyConfig) error {
	b.StopTimer()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// create network
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency: conf.NetworkLatency,
		// TODO add to conf. This is tricky because we want 0 values to be functional.
		Bandwidth: math.MaxInt32,
	})

	adder, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer adder.Close()

	catter, err := core.NewNode(ctx, &core.BuildCfg{
		Online: true,
		Host:   mock.MockHostOption(mn),
	})
	if err != nil {
		return err
	}
	defer catter.Close()

	err = mn.LinkAll()
	if err != nil {
		return err
	}

	bs1 := []peer.PeerInfo{adder.Peerstore.PeerInfo(adder.Identity)}
	bs2 := []peer.PeerInfo{catter.Peerstore.PeerInfo(catter.Identity)}

	if err := catter.Bootstrap(core.BootstrapConfigWithPeers(bs1)); err != nil {
		return err
	}
	if err := adder.Bootstrap(core.BootstrapConfigWithPeers(bs2)); err != nil {
		return err
	}

	added, err := coreunix.Add(adder, bytes.NewReader(data))
	if err != nil {
		return err
	}

	b.StartTimer()
	readerCatted, err := coreunix.Cat(catter, added)
	if err != nil {
		return err
	}

	// verify that the catted bytes match the added bytes
	bufout := new(bytes.Buffer)
	if _, err := io.Copy(bufout, readerCatted); err != nil {
		return err
	}
	if !bytes.Equal(bufout.Bytes(), data) {
		return errors.New("catted data does not match added data")
	}
	return nil
}
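// Illustrative sketch (assumed, not from the original sources): a benchmark
// wrapper around benchCat. benchCat stops the timer itself during setup, so the
// caller only needs the usual b.N loop; the benchmark name and the 1 MiB payload
// size are arbitrary choices for illustration.
func BenchmarkCat1MBSketch(b *testing.B) {
	data := bytes.Repeat([]byte("x"), 1024*1024)
	b.SetBytes(int64(len(data)))
	for i := 0; i < b.N; i++ {
		if err := benchCat(b, data, testutil.LatencyConfig{}); err != nil {
			b.Fatal(err)
		}
	}
}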
func TestBitswapWithoutRouting(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	const numPeers = 4

	// create network
	mn := mocknet.New(ctx)

	var nodes []*core.IpfsNode
	for i := 0; i < numPeers; i++ {
		n, err := core.NewNode(ctx, &core.BuildCfg{
			Online:  true,
			Host:    coremock.MockHostOption(mn),
			Routing: core.NilRouterOption, // no routing
		})
		if err != nil {
			t.Fatal(err)
		}
		defer n.Close()
		nodes = append(nodes, n)
	}

	mn.LinkAll()

	// connect them
	for _, n1 := range nodes {
		for _, n2 := range nodes {
			if n1 == n2 {
				continue
			}

			log.Debug("connecting to other hosts")
			p2 := n2.PeerHost.Peerstore().PeerInfo(n2.PeerHost.ID())
			if err := n1.PeerHost.Connect(ctx, p2); err != nil {
				t.Fatal(err)
			}
		}
	}

	// add blocks to each before
	log.Debug("adding block.")
	block0 := blocks.NewBlock([]byte("block0"))
	block1 := blocks.NewBlock([]byte("block1"))

	// put 1 before
	if err := nodes[0].Blockstore.Put(block0); err != nil {
		t.Fatal(err)
	}

	// get it out.
	for i, n := range nodes {
		// skip first because block not in its exchange. will hang.
		if i == 0 {
			continue
		}

		log.Debugf("%d %s get block.", i, n.Identity)
		b, err := n.Blocks.GetBlock(ctx, block0.Key())
		if err != nil {
			t.Error(err)
		} else if !bytes.Equal(b.Data, block0.Data) {
			t.Error("byte comparison fail")
		} else {
			log.Debugf("got block: %s", b.Key())
		}
	}

	// put 1 after
	if err := nodes[1].Blockstore.Put(block1); err != nil {
		t.Fatal(err)
	}

	// get it out.
	for _, n := range nodes {
		b, err := n.Blocks.GetBlock(ctx, block1.Key())
		if err != nil {
			t.Error(err)
		} else if !bytes.Equal(b.Data, block1.Data) {
			t.Error("byte comparison fail")
		} else {
			log.Debugf("got block: %s", b.Key())
		}
	}
}