// TestRecursivePathResolution builds a three-level DAG (a -> b -> c) and
// verifies that resolving /ipfs/<a>/child/grandchild yields node c.
func TestRecursivePathResolution(t *testing.T) {
	ctx := context.Background()
	dagService := dagmock.Mock()

	a := randNode()
	b := randNode()
	c := randNode()

	// Link the nodes: b -> c under "grandchild", a -> b under "child".
	err := b.AddNodeLink("grandchild", c)
	if err != nil {
		t.Fatal(err)
	}

	err = a.AddNodeLink("child", b)
	if err != nil {
		t.Fatal(err)
	}

	for _, n := range []node.Node{a, b, c} {
		_, err = dagService.Add(n)
		if err != nil {
			t.Fatal(err)
		}
	}

	aKey := a.Cid()

	segments := []string{aKey.String(), "child", "grandchild"}
	p, err := path.FromSegments("/ipfs/", segments...)
	if err != nil {
		t.Fatal(err)
	}

	resolver := path.NewBasicResolver(dagService)
	node, err := resolver.ResolvePath(ctx, p)
	if err != nil {
		t.Fatal(err)
	}

	// The resolved node's CID must match c's CID.
	cKey := c.Cid()
	key := node.Cid()
	if key.String() != cKey.String() {
		t.Fatal(fmt.Errorf(
			"recursive path resolution failed for %s: %s != %s",
			p.String(), key.String(), cKey.String()))
	}
}
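// randNode is used in the test above but not defined in this snippet. The
// sketch below is a hypothetical stand-in, assuming the merkledag ProtoNode
// API (dag.ProtoNode with SetData): it returns a node carrying 32 random
// bytes so each call produces a distinct CID. It additionally needs the
// "crypto/rand" import.
func randNode() *dag.ProtoNode {
	nd := new(dag.ProtoNode)
	data := make([]byte, 32)
	rand.Read(data) // crypto/rand; error ignored for brevity in a test helper
	nd.SetData(data)
	return nd
}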
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	// Wrap the repo datastore so transient "too many open files" errors
	// are retried with a delay instead of failing the operation outright.
	rds := &retry.Datastore{
		Batching:    n.Repo.Datastore(),
		Delay:       time.Millisecond * 200,
		Retries:     6,
		TempErrFunc: isTooManyFDError,
	}

	var err error
	bs := bstore.NewBlockstore(rds)
	opts := bstore.DefaultCacheOpts()
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

	// Only keep the bloom filter for permanent nodes; ephemeral nodes
	// skip it to avoid the warm-up cost.
	// (NB: "Permament" is the BuildCfg field's actual, misspelled name.)
	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
	if !cfg.Permament {
		opts.HasBloomFilterSize = 0
	}

	cbs, err := bstore.CachedBlockstore(bs, ctx, opts)
	if err != nil {
		return err
	}

	n.Blockstore = bstore.NewGCBlockstore(cbs, bstore.NewGCLocker())

	rcfg, err := n.Repo.Config()
	if err != nil {
		return err
	}

	if rcfg.Datastore.HashOnRead {
		bs.HashOnRead(true)
	}

	if cfg.Online {
		do := setupDiscoveryOption(rcfg.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do, cfg.getOpt("pubsub"), cfg.getOpt("mplex")); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

	// The pinner uses a local-only DAG service so pin state never depends
	// on the network exchange.
	internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore)))
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init instead of implicitly here as a result of the pinner keys
		// not being found in the datastore.
		// this is kinda sketchy and could cause data loss
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag)
	}
	n.Resolver = path.NewBasicResolver(n.DAG)

	err = n.loadFilesRoot()
	if err != nil {
		return err
	}

	return nil
}
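// isTooManyFDError, passed to retry.Datastore above as TempErrFunc, is not
// defined in this snippet. A minimal sketch, assuming a string match on the
// wrapped syscall error is acceptable: it reports whether err looks like
// EMFILE ("too many open files"), which the retry datastore then treats as
// temporary and retries. It needs the "strings" import.
func isTooManyFDError(err error) bool {
	if err == nil {
		return false
	}
	return strings.Contains(err.Error(), "too many open files")
}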