Example 1
// newPeernet constructs a new peernet
func newPeernet(ctx context.Context, m *mocknet, k ic.PrivKey,
	a ma.Multiaddr) (*peernet, error) {

	p, err := peer.IDFromPublicKey(k.GetPublic())
	if err != nil {
		return nil, err
	}

	// create our own peerstore entirely, so that peers' knowledge doesn't get shared
	ps := peer.NewPeerstore()
	ps.AddAddr(p, a, peer.PermanentAddrTTL)
	ps.AddPrivKey(p, k)
	ps.AddPubKey(p, k.GetPublic())

	n := &peernet{
		mocknet: m,
		peer:    p,
		ps:      ps,
		cg:      ctxgroup.WithContext(ctx),

		connsByPeer: map[peer.ID]map[*conn]struct{}{},
		connsByLink: map[*link]map[*conn]struct{}{},

		notifs: make(map[inet.Notifiee]struct{}),
	}

	n.cg.SetTeardown(n.teardown)
	return n, nil
}
Example 2
// NewNode constructs and returns an IpfsNode using the given cfg.
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	if cfg == nil {
		cfg = new(BuildCfg)
	}

	err := cfg.fillDefaults()
	if err != nil {
		return nil, err
	}

	n := &IpfsNode{
		mode:      offlineMode,
		Repo:      cfg.Repo,
		ctx:       ctx,
		Peerstore: peer.NewPeerstore(),
	}
	if cfg.Online {
		n.mode = onlineMode
	}

	// TODO: this is a weird circular-ish dependency, rework it
	n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

	if err := setupNode(ctx, n, cfg); err != nil {
		n.Close()
		return nil, err
	}

	return n, nil
}
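
A possible usage sketch for NewNode follows; it is illustrative only and assumes an already-open repo value r (of type repo.Repo) and the BuildCfg fields shown above.

// buildOfflineNode is an illustrative helper, not part of the original code.
// It constructs an offline IpfsNode from an already-open repo r.
func buildOfflineNode(ctx context.Context, r repo.Repo) (*IpfsNode, error) {
	return NewNode(ctx, &BuildCfg{
		Online: false, // offlineMode is the default
		Repo:   r,
	})
}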
Example 3
// TODO refactor so maybeRouter isn't special-cased in this way
func standardWithRouting(r repo.Repo, online bool, routingOption RoutingOption, hostOption HostOption) ConfigOption {
	return func(ctx context.Context) (n *IpfsNode, err error) {
		// FIXME perform node construction in the main constructor so it isn't
		// necessary to perform this teardown in this scope.
		success := false
		defer func() {
			if !success && n != nil {
				n.teardown()
			}
		}()

		// TODO move as much of node initialization as possible into
		// NewIPFSNode. The larger these config options are, the harder it is
		// to test all node construction code paths.

		if r == nil {
			return nil, fmt.Errorf("repo required")
		}
		n = &IpfsNode{
			mode: func() mode {
				if online {
					return onlineMode
				}
				return offlineMode
			}(),
			Repo: r,
		}

		n.ctx = ctx
		n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

		// setup Peerstore
		n.Peerstore = peer.NewPeerstore()

		// setup local peer ID (private key is loaded in online setup)
		if err := n.loadID(); err != nil {
			return nil, err
		}

		n.Blockstore, err = bstore.WriteCached(bstore.NewBlockstore(n.Repo.Datastore()), kSizeBlockstoreWriteCache)
		if err != nil {
			return nil, err
		}

		if online {
			do := setupDiscoveryOption(n.Repo.Config().Discovery)
			if err := n.startOnlineServices(ctx, routingOption, hostOption, do); err != nil {
				return nil, err
			}
		} else {
			n.Exchange = offline.Exchange(n.Blockstore)
		}

		success = true
		return n, nil
	}
}
Example 4
// NewMockNode constructs an IpfsNode for use in tests.
func NewMockNode() (*core.IpfsNode, error) {
	ctx := context.Background()

	// Generate Identity
	ident, err := testutil.RandIdentity()
	if err != nil {
		return nil, err
	}
	p := ident.ID()

	c := config.Config{
		Identity: config.Identity{
			PeerID: p.String(),
		},
	}

	nd, err := core.Offline(&repo.Mock{
		C: c,
		D: ds2.CloserWrap(syncds.MutexWrap(datastore.NewMapDatastore())),
	})(ctx)
	if err != nil {
		return nil, err
	}

	nd.PrivateKey = ident.PrivateKey()
	nd.Peerstore = peer.NewPeerstore()
	nd.Peerstore.AddPrivKey(p, ident.PrivateKey())
	nd.Peerstore.AddPubKey(p, ident.PublicKey())
	nd.Identity = p

	nd.PeerHost, err = mocknet.New(nd.Context()).AddPeer(ident.PrivateKey(), ident.Address()) // effectively offline
	if err != nil {
		return nil, err
	}

	// Routing
	nd.Routing = offrt.NewOfflineRouter(nd.Repo.Datastore(), nd.PrivateKey)

	// Bitswap
	bstore := blockstore.NewBlockstore(nd.Repo.Datastore())
	bserv, err := blockservice.New(bstore, offline.Exchange(bstore))
	if err != nil {
		return nil, err
	}

	nd.DAG = mdag.NewDAGService(bserv)

	nd.Pinning = pin.NewPinner(nd.Repo.Datastore(), nd.DAG)

	// Namespace resolver
	nd.Namesys = nsys.NewNameSystem(nd.Routing)

	// Path resolver
	nd.Resolver = &path.Resolver{DAG: nd.DAG}

	return nd, nil
}
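
A hedged test skeleton using NewMockNode; the test name is illustrative, and the deferred Close assumes the IpfsNode Close method shown in Example 2.

// TestWithMockNode is an illustrative skeleton, not part of the original code.
func TestWithMockNode(t *testing.T) {
	nd, err := NewMockNode()
	if err != nil {
		t.Fatal(err)
	}
	defer nd.Close() // assumption: Close as in Example 2

	// nd.DAG, nd.Pinning, nd.Namesys and nd.Resolver are wired for offline use.
}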
Example 5
// GenSwarmNetwork generates a test swarm.Network backed by a random peer
// identity and a fresh peerstore, and records its listen addresses.
func GenSwarmNetwork(t *testing.T, ctx context.Context) *swarm.Network {
	p := tu.RandPeerNetParamsOrFatal(t)
	ps := peer.NewPeerstore()
	ps.AddPubKey(p.ID, p.PubKey)
	ps.AddPrivKey(p.ID, p.PrivKey)
	n, err := swarm.NewNetwork(ctx, []ma.Multiaddr{p.Addr}, p.ID, ps, metrics.NewBandwidthCounter())
	if err != nil {
		t.Fatal(err)
	}
	ps.AddAddrs(p.ID, n.ListenAddresses(), peer.PermanentAddrTTL)
	return n
}
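
A hedged sketch of a test using GenSwarmNetwork; it assumes swarm.Network exposes a Close method, as inet.Network implementations generally do.

// TestTwoNetworks is an illustrative skeleton, not part of the original code.
func TestTwoNetworks(t *testing.T) {
	ctx := context.Background()
	n1 := GenSwarmNetwork(t, ctx)
	n2 := GenSwarmNetwork(t, ctx)
	defer n1.Close() // assumption: Close is available on swarm.Network
	defer n2.Close()

	// Each network listens on its own address, recorded in its peerstore
	// under peer.PermanentAddrTTL by GenSwarmNetwork above.
}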
Example 6
// AddPeer adds a peer identified by the given private key, listening on the
// given address, to the mock network.
func (mn *mocknet) AddPeer(k ic.PrivKey, a ma.Multiaddr) (host.Host, error) {
	p, err := peer.IDFromPublicKey(k.GetPublic())
	if err != nil {
		return nil, err
	}

	ps := peer.NewPeerstore()
	ps.AddAddr(p, a, peer.PermanentAddrTTL)
	ps.AddPrivKey(p, k)
	ps.AddPubKey(p, k.GetPublic())

	return mn.AddPeerWithPeerstore(p, ps)
}
Example 7
// NewIPFSNode constructs an IpfsNode from the node produced by the given
// ConfigOption, filling in the block service, DAG service, pinner, and path
// resolver that the option left unset.
func NewIPFSNode(ctx context.Context, option ConfigOption) (*IpfsNode, error) {
	node, err := option(ctx)
	if err != nil {
		return nil, err
	}

	if node.ctx == nil {
		node.ctx = ctx
	}
	if node.proc == nil {
		node.proc = goprocessctx.WithContextAndTeardown(node.ctx, node.teardown)
	}

	success := false // flip to true after all sub-system inits succeed
	defer func() {
		if !success {
			node.proc.Close()
		}
	}()

	// Need to make sure it's perfectly clear 1) which variables are expected
	// to be initialized at this point, and 2) which variables will be
	// initialized after this point.

	node.Blocks, err = bserv.New(node.Blockstore, node.Exchange)
	if err != nil {
		return nil, err
	}
	if node.Peerstore == nil {
		node.Peerstore = peer.NewPeerstore()
	}
	node.DAG = merkledag.NewDAGService(node.Blocks)
	node.Pinning, err = pin.LoadPinner(node.Repo.Datastore(), node.DAG)
	if err != nil {
		node.Pinning = pin.NewPinner(node.Repo.Datastore(), node.DAG)
	}
	node.Resolver = &path.Resolver{DAG: node.DAG}

	// Setup the mutable ipns filesystem structure
	if node.OnlineMode() {
		fs, err := ipnsfs.NewFilesystem(ctx, node.DAG, node.Namesys, node.Pinning, node.PrivateKey)
		if err != nil && err != kb.ErrLookupFailure {
			return nil, err
		}
		node.IpnsFs = fs
	}

	success = true
	return node, nil
}
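
A hedged usage sketch for NewIPFSNode; Offline is the ConfigOption constructor already used in Example 4, and r is a hypothetical repo.Repo.

// newOfflineIPFSNode is an illustrative helper, not part of the original code.
func newOfflineIPFSNode(ctx context.Context, r repo.Repo) (*IpfsNode, error) {
	// Offline(r) yields a ConfigOption; NewIPFSNode finishes wiring the
	// block service, DAG, pinner and resolver.
	return NewIPFSNode(ctx, Offline(r))
}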
Example 8
// makeSwarms constructs num test Swarms, each with its own random identity,
// peerstore, and listen address, and installs the echo stream handler.
func makeSwarms(ctx context.Context, t *testing.T, num int) []*Swarm {
	swarms := make([]*Swarm, 0, num)

	for i := 0; i < num; i++ {
		localnp := testutil.RandPeerNetParamsOrFatal(t)

		peerstore := peer.NewPeerstore()
		peerstore.AddPubKey(localnp.ID, localnp.PubKey)
		peerstore.AddPrivKey(localnp.ID, localnp.PrivKey)

		addrs := []ma.Multiaddr{localnp.Addr}
		swarm, err := NewSwarm(ctx, addrs, localnp.ID, peerstore, metrics.NewBandwidthCounter())
		if err != nil {
			t.Fatal(err)
		}

		swarm.SetStreamHandler(EchoStreamHandler)
		swarms = append(swarms, swarm)
	}

	return swarms
}
Example 9
// setupPeer generates an RSA key pair of a.keybits bits, derives the local
// peer ID from the public key, and returns the ID together with a peerstore
// seeded with both keys.
func setupPeer(a args) (peer.ID, peer.Peerstore, error) {
	if a.keybits < 1024 {
		return "", nil, errors.New("Bitsize less than 1024 is considered unsafe.")
	}

	out("generating key pair...")
	sk, pk, err := ci.GenerateKeyPair(ci.RSA, a.keybits)
	if err != nil {
		return "", nil, err
	}

	p, err := peer.IDFromPublicKey(pk)
	if err != nil {
		return "", nil, err
	}

	ps := peer.NewPeerstore()
	ps.AddPrivKey(p, sk)
	ps.AddPubKey(p, pk)

	out("local peer id: %s", p)
	return p, ps, nil
}
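
A hedged call sketch for setupPeer; args is the package-local struct referenced above, and only its keybits field is assumed here.

// localIdentity is an illustrative wrapper, not part of the original code.
func localIdentity() (peer.ID, peer.Peerstore, error) {
	// 2048 bits clears the 1024-bit safety check in setupPeer.
	return setupPeer(args{keybits: 2048})
}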
Example 10
func TestFilterAddrs(t *testing.T) {

	m := func(s string) ma.Multiaddr {
		maddr, err := ma.NewMultiaddr(s)
		if err != nil {
			t.Fatal(err)
		}
		return maddr
	}

	bad := []ma.Multiaddr{
		m("/ip4/1.2.3.4/udp/1234"),           // unreliable
		m("/ip4/1.2.3.4/udp/1234/sctp/1234"), // not in manet
		m("/ip4/1.2.3.4/udp/1234/utp"),       // utp is broken
		m("/ip4/1.2.3.4/udp/1234/udt"),       // udt is broken on arm
		m("/ip6/fe80::1/tcp/0"),              // link local
		m("/ip6/fe80::100/tcp/1234"),         // link local
	}

	good := []ma.Multiaddr{
		m("/ip4/127.0.0.1/tcp/0"),
		m("/ip6/::1/tcp/0"),
	}

	goodAndBad := append(good, bad...)

	// test filters

	for _, a := range bad {
		if addrutil.AddrUsable(a, true) {
			t.Errorf("addr %s should be unusable", a)
		}
	}

	for _, a := range good {
		if !addrutil.AddrUsable(a, true) {
			t.Errorf("addr %s should be usable", a)
		}
	}

	subtestAddrsEqual(t, addrutil.FilterUsableAddrs(bad), []ma.Multiaddr{})
	subtestAddrsEqual(t, addrutil.FilterUsableAddrs(good), good)
	subtestAddrsEqual(t, addrutil.FilterUsableAddrs(goodAndBad), good)

	// now test it with swarm

	id, err := testutil.RandPeerID()
	if err != nil {
		t.Fatal(err)
	}

	ps := peer.NewPeerstore()
	ctx := context.Background()

	if _, err := NewNetwork(ctx, bad, id, ps, metrics.NewBandwidthCounter()); err == nil {
		t.Fatal("should have failed to create swarm")
	}

	if _, err := NewNetwork(ctx, goodAndBad, id, ps, metrics.NewBandwidthCounter()); err != nil {
		t.Fatal("should have succeeded in creating swarm", err)
	}
}