Code example #1
File: testutils.go  Project: yanghongkjxy/go-ipfs
// Session creates a test bitswap session.
//
// NB: It's easy to make mistakes by providing the same peer ID to two
// different sessions. To safeguard against that, use the SessionGenerator to
// generate sessions instead.
func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance {
	bsdelay := delay.Fixed(0)

	adapter := net.Adapter(p)
	dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay))

	// dstore is already mutex-wrapped above, so it can be passed to
	// NewBlockstore directly without a second wrap.
	bstore, err := blockstore.CachedBlockstore(
		blockstore.NewBlockstore(dstore), ctx, blockstore.DefaultCacheOpts())
	if err != nil {
		panic(err.Error()) // FIXME perhaps change signature and return error.
	}

	const alwaysSendToPeer = true

	bs := New(ctx, p.ID(), adapter, bstore, alwaysSendToPeer).(*Bitswap)

	return Instance{
		Peer:            p.ID(),
		Exchange:        bs,
		blockstore:      bstore,
		blockstoreDelay: bsdelay,
	}
}
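The safeguard the comment recommends looks roughly like this in practice. A minimal sketch, assuming this package's NewTestSessionGenerator constructor and the Instances/Close methods on SessionGenerator, plus a virtual network built from tn.VirtualNetwork and mockrouting.NewServer (exact signatures may differ between go-ipfs versions):

func TestTwoSessions(t *testing.T) {
	// The generator owns the context and hands out a fresh random
	// identity per session, so no two instances share a peer ID.
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()

	inst := sg.Instances(2)
	if inst[0].Peer == inst[1].Peer {
		t.Fatal("expected distinct peer IDs")
	}
}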
Code example #2
File: builder.go  Project: tswindell/go-ipfs
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	rds := &retry.Datastore{
		Batching:    n.Repo.Datastore(),
		Delay:       time.Millisecond * 200,
		Retries:     6,
		TempErrFunc: isTooManyFDError,
	}

	var err error
	bs := bstore.NewBlockstore(rds)
	opts := bstore.DefaultCacheOpts()
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
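	// NB: "Permament" (sic) is the field's actual spelling in BuildCfg.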
	if !cfg.Permament {
		opts.HasBloomFilterSize = 0
	}

	cbs, err := bstore.CachedBlockstore(bs, ctx, opts)
	if err != nil {
		return err
	}

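	// Wrap the cached blockstore with a GC locker so garbage collection
	// can coordinate with concurrent block writes.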
	n.Blockstore = bstore.NewGCBlockstore(cbs, bstore.NewGCLocker())

	// The config was already loaded above; reuse it instead of fetching
	// it from the repo a second time.
	if conf.Datastore.HashOnRead {
		bs.HashOnRead(true)
	}

	if cfg.Online {
		do := setupDiscoveryOption(conf.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do, cfg.getOpt("pubsub"), cfg.getOpt("mplex")); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)

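	// The pinner gets a DAG service backed by an offline exchange, so
	// loading pins never triggers network fetches.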
	internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore)))
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init, instead of implicitly here when the pinner keys are not
		// found in the datastore. This is sketchy and could cause data loss.
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag)
	}
	n.Resolver = path.NewBasicResolver(n.DAG)

	err = n.loadFilesRoot()
	if err != nil {
		return err
	}

	return nil
}
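The retry.Datastore wrapper above masks transient failures (here, hitting the file-descriptor limit) by retrying each operation up to 6 times with a 200 ms pause between attempts. A self-contained sketch of that retry pattern, with hypothetical names; the real wrapper applies the same loop around every datastore method:

package main

import (
	"errors"
	"fmt"
	"time"
)

// errTransient stands in for the class of failures that
// isTooManyFDError screens for above (hypothetical).
var errTransient = errors.New("transient failure")

// getWithRetry retries get up to `retries` extra times, sleeping
// `delay` between attempts, but only while isTemp reports the error
// as transient. Permanent errors are returned immediately.
func getWithRetry(get func() ([]byte, error), retries int,
	delay time.Duration, isTemp func(error) bool) ([]byte, error) {

	v, err := get()
	for i := 0; i < retries && err != nil && isTemp(err); i++ {
		time.Sleep(delay)
		v, err = get()
	}
	return v, err
}

func main() {
	attempts := 0
	get := func() ([]byte, error) {
		attempts++
		if attempts < 3 {
			return nil, errTransient // fail twice, then succeed
		}
		return []byte("ok"), nil
	}
	v, err := getWithRetry(get, 6, 200*time.Millisecond,
		func(e error) bool { return errors.Is(e, errTransient) })
	fmt.Printf("%s (err=%v) after %d attempts\n", v, err, attempts)
}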
Code example #3
File: builder.go  Project: yuanwr/go-ipfs
func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
	// setup local peer ID (private key is loaded in online setup)
	if err := n.loadID(); err != nil {
		return err
	}

	var err error
	bs := bstore.NewBlockstore(n.Repo.Datastore())
	opts := bstore.DefaultCacheOpts()
	conf, err := n.Repo.Config()
	if err != nil {
		return err
	}

	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
	if !cfg.Permament {
		opts.HasBloomFilterSize = 0
	}

	n.Blockstore, err = bstore.CachedBlockstore(bs, ctx, opts)
	if err != nil {
		return err
	}

	// Reuse the config loaded above rather than fetching it again.
	if conf.Datastore.HashOnRead {
		bs.RuntimeHashing(true)
	}

	if cfg.Online {
		do := setupDiscoveryOption(conf.Discovery)
		if err := n.startOnlineServices(ctx, cfg.Routing, cfg.Host, do); err != nil {
			return err
		}
	} else {
		n.Exchange = offline.Exchange(n.Blockstore)
	}

	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
	n.DAG = dag.NewDAGService(n.Blocks)
	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG)
	if err != nil {
		// TODO: we should move towards only running 'NewPinner' explicitly on
		// node init, instead of implicitly here when the pinner keys are not
		// found in the datastore. This is sketchy and could cause data loss.
		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG)
	}
	n.Resolver = &path.Resolver{DAG: n.DAG}

	err = n.loadFilesRoot()
	if err != nil {
		return err
	}

	return nil
}