Example 1
// NewNode constructs and returns an IpfsNode using the given cfg.
func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
	if cfg == nil {
		cfg = new(BuildCfg)
	}

	err := cfg.fillDefaults()
	if err != nil {
		return nil, err
	}
	ctx = metrics.CtxScope(ctx, "ipfs")

	n := &IpfsNode{
		mode:      offlineMode,
		Repo:      cfg.Repo,
		ctx:       ctx,
		Peerstore: pstore.NewPeerstore(),
	}
	if cfg.Online {
		n.mode = onlineMode
	}

	// TODO: this is a weird circular-ish dependency, rework it
	n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)

	if err := setupNode(ctx, n, cfg); err != nil {
		n.Close()
		return nil, err
	}

	return n, nil
}
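
For orientation, a minimal sketch of how NewNode might be called. Only the Online and Repo fields of BuildCfg are visible above; the repo.Repo type for r is an assumption about the surrounding program.

// Sketch only: builds an online node from an already-open repo r.
// The repo.Repo type for r is assumed; BuildCfg.Online and
// BuildCfg.Repo are the fields shown in the example above.
func buildOnlineNode(ctx context.Context, r repo.Repo) (*IpfsNode, error) {
	n, err := NewNode(ctx, &BuildCfg{
		Online: true, // switches the node from offlineMode to onlineMode
		Repo:   r,
	})
	if err != nil {
		return nil, err
	}
	return n, nil
}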
Example 2
// setupPeer generates an RSA key pair, derives the peer ID from the
// public key, and returns a peerstore seeded with both keys.
func setupPeer(a args) (peer.ID, pstore.Peerstore, error) {
	if a.keybits < 1024 {
		return "", nil, errors.New("bitsize less than 1024 is considered unsafe")
	}
	}

	out("generating key pair...")
	sk, pk, err := ci.GenerateKeyPair(ci.RSA, a.keybits)
	if err != nil {
		return "", nil, err
	}

	p, err := peer.IDFromPublicKey(pk)
	if err != nil {
		return "", nil, err
	}

	ps := pstore.NewPeerstore()
	if err := ps.AddPrivKey(p, sk); err != nil {
		return "", nil, err
	}
	if err := ps.AddPubKey(p, pk); err != nil {
		return "", nil, err
	}

	out("local peer id: %s", p)
	return p, ps, nil
}
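
A small sketch of a caller, assuming the args struct carries at least the keybits field used above (any other fields are unknown here):

// Sketch only: 2048 bits clears the 1024-bit floor enforced above.
// The args literal assumes keybits is the only field that matters here.
func newLocalPeer() (peer.ID, pstore.Peerstore, error) {
	return setupPeer(args{keybits: 2048})
}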
Example 3
// spawnDHT builds a libp2p host listening on an ephemeral TCP port and
// returns a DHT wired up with the IPNS validator and the given
// bootstrap peers.
func spawnDHT(pk ci.PrivKey, dstore repo.Datastore, bootstraps []ma.Multiaddr) (*dht.IpfsDHT, error) {
	pub := pk.GetPublic()

	local, err := peer.IDFromPublicKey(pub)
	if err != nil {
		return nil, err
	}

	fmt.Println("Local peer ID:", local.Pretty())

	ps := pstore.NewPeerstore()
	if err := ps.AddPrivKey(local, pk); err != nil {
		return nil, err
	}
	if err := ps.AddPubKey(local, pub); err != nil {
		return nil, err
	}

	listenaddr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
	if err != nil {
		return nil, err
	}

	s, err := swarm.NewNetwork(context.Background(), []ma.Multiaddr{listenaddr}, local, ps, metrics.NewBandwidthCounter())
	if err != nil {
		return nil, err
	}

	host := basichost.New(s)

	idht := dht.NewDHT(context.Background(), host, dstore)
	idht.Validator[IpnsValidatorTag] = namesys.IpnsRecordValidator
	idht.Selector[IpnsValidatorTag] = namesys.IpnsSelectorFunc

	err = doBootstrap(host, idht, bootstraps)
	if err != nil {
		return nil, err
	}

	return idht, nil
}
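
Finally, a sketch of wiring spawnDHT to caller-supplied bootstrap addresses. The key sk and datastore dstore are assumed to be initialized elsewhere, and the address strings must name real bootstrap peers:

// Sketch only: parses caller-supplied multiaddr strings and hands them
// to spawnDHT. sk and dstore are assumed to exist in the caller.
func startDHT(sk ci.PrivKey, dstore repo.Datastore, addrs []string) (*dht.IpfsDHT, error) {
	var maddrs []ma.Multiaddr
	for _, s := range addrs {
		a, err := ma.NewMultiaddr(s)
		if err != nil {
			return nil, err
		}
		maddrs = append(maddrs, a)
	}
	return spawnDHT(sk, dstore, maddrs)
}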