func runFileCattingWorker(ctx context.Context, n *core.IpfsNode) error {
	conf, err := config.Init(ioutil.Discard, *nBitsForKeypair)
	if err != nil {
		return err
	}

	// The dummy node is only used to add files locally so we can learn their
	// keys; the node n is the one that cats them over the network.
	r := &repo.Mock{
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
		C: *conf,
	}
	dummy, err := core.NewNode(ctx, &core.BuildCfg{
		Repo: r,
	})
	if err != nil {
		return err
	}

	errs := make(chan error)
	go func() {
		defer dummy.Close()
		var i int64 = 1
		for {
			buf := new(bytes.Buffer)
			if err := random.WritePseudoRandomBytes(sizeOfIthFile(i), buf, *seed); err != nil {
				errs <- err
				continue
			}

			// add to the dummy node to discover the key
			k, err := coreunix.Add(dummy, bytes.NewReader(buf.Bytes()))
			if err != nil {
				errs <- err
				continue
			}

			e := elog.EventBegin(ctx, "cat", logging.LoggableF(func() map[string]interface{} {
				return map[string]interface{}{
					"key":       k,
					"localPeer": n.Identity,
				}
			}))
			if r, err := coreunix.Cat(ctx, n, k); err != nil {
				e.Done()
				log.Printf("failed to cat file. seed: %d #%d key: %s err: %s", *seed, i, k, err)
			} else {
				log.Println("found file", "seed", *seed, "#", i, "key", k, "size", unit.Information(sizeOfIthFile(i)))
				io.Copy(ioutil.Discard, r)
				e.Done()
				log.Println("catted file", "seed", *seed, "#", i, "key", k, "size", unit.Information(sizeOfIthFile(i)))
				i++
			}
			time.Sleep(time.Second)
		}
	}()

	// Block until the worker goroutine reports an error.
	err = <-errs
	if err != nil {
		log.Fatal(err)
	}
	return nil
}
func InitializeSupernodeNetwork(
	ctx context.Context,
	numServers, numClients int,
	conf testutil.LatencyConfig) ([]*core.IpfsNode, []*core.IpfsNode, error) {

	// create the mock network with the configured latency and effectively
	// unlimited bandwidth
	mn := mocknet.New(ctx)
	mn.SetLinkDefaults(mocknet.LinkOptions{
		Latency:   conf.NetworkLatency,
		Bandwidth: math.MaxInt32,
	})

	// all supernode servers share a single in-memory routing datastore
	routingDatastore := ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore()))
	var servers []*core.IpfsNode
	for i := 0; i < numServers; i++ {
		bootstrap, err := core.NewNode(ctx, &core.BuildCfg{
			Online:  true,
			Host:    mock.MockHostOption(mn),
			Routing: corerouting.SupernodeServer(routingDatastore),
		})
		if err != nil {
			return nil, nil, err
		}
		servers = append(servers, bootstrap)
	}

	var bootstrapInfos []pstore.PeerInfo
	for _, n := range servers {
		info := n.Peerstore.PeerInfo(n.PeerHost.ID())
		bootstrapInfos = append(bootstrapInfos, info)
	}

	var clients []*core.IpfsNode
	for i := 0; i < numClients; i++ {
		n, err := core.NewNode(ctx, &core.BuildCfg{
			Online:  true,
			Host:    mock.MockHostOption(mn),
			Routing: corerouting.SupernodeClient(bootstrapInfos...),
		})
		if err != nil {
			return nil, nil, err
		}
		clients = append(clients, n)
	}
	if err := mn.LinkAll(); err != nil {
		return nil, nil, err
	}

	// bootstrap every client against the supernode servers
	bcfg := core.BootstrapConfigWithPeers(bootstrapInfos)
	for _, n := range clients {
		if err := n.Bootstrap(bcfg); err != nil {
			return nil, nil, err
		}
	}
	return servers, clients, nil
}
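// A hypothetical usage sketch, not part of the original file: build a small
// supernode network and close every node when done. The helper name
// exampleSupernodeSetup and the zero-value testutil.LatencyConfig are
// assumptions for illustration only.
func exampleSupernodeSetup(ctx context.Context) error {
	servers, clients, err := InitializeSupernodeNetwork(ctx, 2, 4, testutil.LatencyConfig{})
	if err != nil {
		return err
	}
	defer func() {
		for _, n := range append(servers, clients...) {
			n.Close()
		}
	}()
	// Each client is now bootstrapped against the supernode servers and can
	// resolve records through them.
	return nil
}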
func MockCmdsCtx() (commands.Context, error) {
	// Generate Identity
	ident, err := testutil.RandIdentity()
	if err != nil {
		return commands.Context{}, err
	}
	p := ident.ID()

	conf := config.Config{
		Identity: config.Identity{
			PeerID: p.String(),
		},
	}

	r := &repo.Mock{
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
		C: conf,
	}

	node, err := core.NewNode(context.Background(), &core.BuildCfg{
		Repo: r,
	})
	if err != nil {
		return commands.Context{}, err
	}

	return commands.Context{
		Online:     true,
		ConfigRoot: "/tmp/.mockipfsconfig",
		LoadConfig: func(path string) (*config.Config, error) {
			return &conf, nil
		},
		ConstructNode: func() (*core.IpfsNode, error) {
			return node, nil
		},
	}, nil
}
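// A hypothetical usage sketch, not part of the original file: obtain a mocked
// commands.Context and retrieve the preconstructed node through its
// ConstructNode field. The helper name exampleUseMockCmdsCtx is an assumption
// for illustration only.
func exampleUseMockCmdsCtx() error {
	cmdCtx, err := MockCmdsCtx()
	if err != nil {
		return err
	}
	node, err := cmdCtx.ConstructNode()
	if err != nil {
		return err
	}
	defer node.Close()
	// node is an in-memory IpfsNode backed by repo.Mock, suitable for
	// exercising command handlers without touching a real repo on disk.
	return nil
}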
// ThreadSafeCloserMapDatastore returns an in-memory map datastore wrapped
// behind a mutex for concurrent use and with a Close method.
func ThreadSafeCloserMapDatastore() ds2.ThreadSafeDatastoreCloser {
	return ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore()))
}
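// A hypothetical usage sketch, not part of the original file: the wrapped
// datastore is safe to share across goroutines and should be closed when no
// longer needed. The helper name exampleUseThreadSafeDatastore is an
// assumption for illustration only.
func exampleUseThreadSafeDatastore() error {
	d := ThreadSafeCloserMapDatastore()
	defer d.Close()
	// d is the same kind of datastore handed to repo.Mock and
	// corerouting.SupernodeServer in the functions above.
	return nil
}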