// NewSwarm constructs a Swarm, with a Chan.
func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr, local peer.ID,
	peers peer.Peerstore, bwc metrics.Reporter) (*Swarm, error) {

	listenAddrs, err := filterAddrs(listenAddrs)
	if err != nil {
		return nil, err
	}

	s := &Swarm{
		swarm:   ps.NewSwarm(PSTransport),
		local:   local,
		peers:   peers,
		cg:      ctxgroup.WithContext(ctx),
		dialT:   DialTimeout,
		notifs:  make(map[inet.Notifiee]ps.Notifiee),
		bwc:     bwc,
		Filters: filter.NewFilters(),
	}

	// configure Swarm
	s.cg.SetTeardown(s.teardown)
	s.SetConnHandler(nil) // make sure to set up our own conn handler.

	// set up swarm metrics
	prom.MustRegisterOrGet(peersTotal)
	s.Notify((*metricsNotifiee)(s))

	return s, s.listen(listenAddrs)
}
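// exampleSwarmUsage is a hedged usage sketch, not part of the original
// source. It assumes the caller already has a peer identity, a peerstore,
// and a bandwidth reporter, and it assumes Swarm exposes a Close method that
// tears it down via the ctxgroup wired up above.
func exampleSwarmUsage(ctx context.Context, local peer.ID, pstore peer.Peerstore, bwc metrics.Reporter) error {
	laddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") // any free TCP port
	if err != nil {
		return err
	}
	s, err := NewSwarm(ctx, []ma.Multiaddr{laddr}, local, pstore, bwc)
	if err != nil {
		return err
	}
	defer s.Close() // assumed to run s.teardown via the ctxgroup
	// ... dial peers, register notifiees, etc.
	return nil
}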
// newPeernet constructs a new peernet.
func newPeernet(ctx context.Context, m *mocknet, k ic.PrivKey, a ma.Multiaddr) (*peernet, error) {
	p, err := peer.IDFromPublicKey(k.GetPublic())
	if err != nil {
		return nil, err
	}

	// create our own peerstore entirely, so that peers' knowledge doesn't get shared
	ps := peer.NewPeerstore()
	ps.AddAddr(p, a, peer.PermanentAddrTTL)
	ps.AddPrivKey(p, k)
	ps.AddPubKey(p, k.GetPublic())

	n := &peernet{
		mocknet: m,
		peer:    p,
		ps:      ps,
		cg:      ctxgroup.WithContext(ctx),

		connsByPeer: map[peer.ID]map[*conn]struct{}{},
		connsByLink: map[*link]map[*conn]struct{}{},

		notifs: make(map[inet.Notifiee]struct{}),
	}

	n.cg.SetTeardown(n.teardown)
	return n, nil
}
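// exampleIsolatedPeerstore is a hedged sketch, not part of the original
// source, illustrating why newPeernet builds a fresh peerstore per peer:
// address knowledge added for one mock peer never leaks into another peer's
// view. It assumes peer.Peerstore exposes an Addrs lookup.
func exampleIsolatedPeerstore(n1, n2 *peernet) {
	// n1 only sees whatever it was explicitly told about n2's ID;
	// there is no global table shared between peernets.
	_ = n1.ps.Addrs(n2.peer)
}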
// New constructs a new Mocknet.
func New(ctx context.Context) Mocknet {
	return &mocknet{
		nets:  map[peer.ID]*peernet{},
		hosts: map[peer.ID]*bhost.BasicHost{},
		links: map[peer.ID]map[peer.ID]map[*link]struct{}{},
		cg:    ctxgroup.WithContext(ctx),
	}
}
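// exampleMocknet is a hedged usage sketch, not part of the original source.
// GenPeer and LinkAll are assumptions about the Mocknet interface of this
// era; the real interface may differ.
func exampleMocknet(ctx context.Context) error {
	mn := New(ctx)
	h1, err := mn.GenPeer() // assumed: host with a fresh key and peernet
	if err != nil {
		return err
	}
	h2, err := mn.GenPeer()
	if err != nil {
		return err
	}
	if err := mn.LinkAll(); err != nil { // assumed: allow all peers to connect
		return err
	}
	_, _ = h1, h2
	return nil
}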
// NewProviderManager constructs a ProviderManager and starts its run loop.
func NewProviderManager(ctx context.Context, local peer.ID) *ProviderManager {
	pm := new(ProviderManager)
	pm.getprovs = make(chan *getProv)
	pm.newprovs = make(chan *addProv)
	pm.providers = make(map[key.Key]*providerSet)
	pm.getlocal = make(chan chan []key.Key)
	pm.local = make(map[key.Key]struct{})
	pm.ContextGroup = ctxgroup.WithContext(ctx)
	pm.Children().Add(1)
	go pm.run()

	return pm
}
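// runSketch is a hedged illustration, not the original pm.run, of the
// lifecycle contract implied above: Children().Add(1) before `go pm.run()`
// means the loop must call Children().Done() when it exits, and it should
// exit when the group's context is cancelled. The real loop also serves
// pm.getlocal and does actual bookkeeping.
func (pm *ProviderManager) runSketch() {
	defer pm.Children().Done() // balances the Add(1) in NewProviderManager
	for {
		select {
		case ap := <-pm.newprovs:
			_ = ap // the real loop records ap under its key in pm.providers
		case gp := <-pm.getprovs:
			_ = gp // the real loop replies with the providers for gp's key
		case <-pm.Context().Done():
			return
		}
	}
}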
// NewIPFSNode constructs a new IpfsNode, configured by the given ConfigOption.
func NewIPFSNode(parent context.Context, option ConfigOption) (*IpfsNode, error) {
	ctxg := ctxgroup.WithContext(parent)
	ctx := ctxg.Context()
	success := false // flip to true after all sub-system inits succeed
	defer func() {
		if !success {
			ctxg.Close()
		}
	}()

	node, err := option(ctx)
	if err != nil {
		return nil, err
	}
	node.ContextGroup = ctxg
	ctxg.SetTeardown(node.teardown)

	// Need to make sure it's perfectly clear 1) which variables are expected
	// to be initialized at this point, and 2) which variables will be
	// initialized after this point.

	node.Blocks, err = bserv.New(node.Blockstore, node.Exchange)
	if err != nil {
		return nil, err
	}

	if node.Peerstore == nil {
		node.Peerstore = peer.NewPeerstore()
	}
	node.DAG = merkledag.NewDAGService(node.Blocks)
	node.Pinning, err = pin.LoadPinner(node.Repo.Datastore(), node.DAG)
	if err != nil {
		node.Pinning = pin.NewPinner(node.Repo.Datastore(), node.DAG)
	}
	node.Resolver = &path.Resolver{DAG: node.DAG}

	// Set up the mutable IPNS filesystem structure.
	if node.OnlineMode() {
		fs, err := ipnsfs.NewFilesystem(ctx, node.DAG, node.Namesys, node.Pinning, node.PrivateKey)
		if err != nil && err != kb.ErrLookupFailure {
			return nil, err
		}
		node.IpnsFs = fs
	}

	success = true
	return node, nil
}
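// exampleNewNode is a hedged usage sketch, not part of the original source.
// opt stands for whatever ConfigOption the caller has (an online or offline
// constructor defined elsewhere in the package); Close is assumed to run
// node.teardown via the ContextGroup wired up above.
func exampleNewNode(opt ConfigOption) error {
	node, err := NewIPFSNode(context.Background(), opt)
	if err != nil {
		return err
	}
	defer node.Close() // assumed: tears down all sub-systems via the ContextGroup
	// ... use node.DAG, node.Resolver, node.Pinning, etc.
	return nil
}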
// Listen listens on the given multiaddr as the given peer, securing
// connections with its private key.
func Listen(ctx context.Context, addr ma.Multiaddr, local peer.ID, sk ic.PrivKey) (Listener, error) {
	ml, err := manetListen(addr)
	if err != nil {
		return nil, err
	}

	l := &listener{
		Listener: ml,
		local:    local,
		privk:    sk,
		cg:       ctxgroup.WithContext(ctx),
	}
	l.cg.SetTeardown(l.teardown)

	log.Debugf("Conn Listener on %s", l.Multiaddr())
	log.Event(ctx, "swarmListen", l)
	return l, nil
}
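// exampleAcceptLoop is a hedged sketch, not part of the original source, of
// serving the returned Listener. It assumes the Listener interface exposes
// Accept and Close, and that Accept yields this package's Conn type; handle
// is a hypothetical per-connection handler supplied by the caller.
func exampleAcceptLoop(ctx context.Context, addr ma.Multiaddr, local peer.ID, sk ic.PrivKey, handle func(Conn)) error {
	l, err := Listen(ctx, addr, local, sk)
	if err != nil {
		return err
	}
	defer l.Close()
	for {
		c, err := l.Accept()
		if err != nil {
			return err // listener closed or failed
		}
		go handle(c) // serve each secured connection concurrently
	}
}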