Example #1
// Listen listens on the given multiaddr, with the given local peer and peerstore.
func Listen(ctx context.Context, addr ma.Multiaddr, local peer.Peer, peers peer.Peerstore) (Listener, error) {

	ml, err := manet.Listen(addr)
	if err != nil {
		return nil, fmt.Errorf("Failed to listen on %s: %s", addr, err)
	}

	// TODO: make this a variable
	chansize := 10

	l := &listener{
		Listener: ml,
		maddr:    addr,
		peers:    peers,
		local:    local,
		conns:    make(chan Conn, chansize),
		chansize: chansize,
		ctx:      ctx,
	}

	// need a separate context to use for the context closer.
	// This is because the parent context will be given to all connections too,
	// and if we close the listener, the connections shouldn't share its fate.
	ctx2, _ := context.WithCancel(ctx)
	l.ContextCloser = ctxc.NewContextCloser(ctx2, l.close)

	l.Children().Add(1)
	go l.listen()

	return l, nil
}
Example #2
// newSingleConn constructs a new connection between the given local and
// remote peers over maconn.
func newSingleConn(ctx context.Context, local, remote peer.Peer,
	maconn manet.Conn) (Conn, error) {

	conn := &singleConn{
		local:  local,
		remote: remote,
		maconn: maconn,
		msgio:  newMsgioPipe(10, BufferPool),
	}

	conn.ContextCloser = ctxc.NewContextCloser(ctx, conn.close)

	log.Infof("newSingleConn: %v to %v", local, remote)

	// setup the various io goroutines
	conn.Children().Add(1)
	go func() {
		conn.msgio.outgoing.WriteTo(maconn)
		conn.Children().Done()
	}()
	conn.Children().Add(1)
	go func() {
		conn.msgio.incoming.ReadFrom(maconn, MaxMessageSize)
		conn.Children().Done()
	}()

	// version handshake
	ctxT, _ := context.WithTimeout(ctx, HandshakeTimeout)
	if err := Handshake1(ctxT, conn); err != nil {
		conn.Close()
		return nil, fmt.Errorf("Handshake1 failed: %s", err)
	}

	return conn, nil
}
Example #3
// NewDHT creates a new DHT object with the given peer as the 'local' host
func NewDHT(ctx context.Context, p peer.Peer, ps peer.Peerstore, dialer inet.Dialer, sender inet.Sender, dstore ds.Datastore) *IpfsDHT {
	dht := new(IpfsDHT)
	dht.dialer = dialer
	dht.sender = sender
	dht.datastore = dstore
	dht.self = p
	dht.peerstore = ps
	dht.ContextCloser = ctxc.NewContextCloser(ctx, nil)

	dht.providers = NewProviderManager(dht.Context(), p.ID())
	dht.AddCloserChild(dht.providers)

	dht.routingTables = make([]*kb.RoutingTable, 3)
	dht.routingTables[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Second)
	dht.routingTables[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Second)
	dht.routingTables[2] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID()), time.Hour)
	dht.birth = time.Now()

	dht.Validators = make(map[string]ValidatorFunc)
	dht.Validators["pk"] = ValidatePublicKeyRecord

	if doPinging {
		dht.Children().Add(1)
		go dht.PingRoutine(time.Second * 10)
	}
	return dht
}
Example #4
// NewService creates a service object with the given Handler.
func NewService(ctx context.Context, h Handler) Service {
	s := &service{
		Handler:       h,
		Requests:      RequestMap{},
		Pipe:          msg.NewPipe(10),
		ContextCloser: ctxc.NewContextCloser(ctx, nil),
	}

	s.Children().Add(1)
	go s.handleIncomingMessages()
	return s
}
Example #5
// NewProviderManager constructs a ProviderManager for the given local peer
// and starts its run loop.
func NewProviderManager(ctx context.Context, local peer.ID) *ProviderManager {
	pm := new(ProviderManager)
	pm.getprovs = make(chan *getProv)
	pm.newprovs = make(chan *addProv)
	pm.providers = make(map[u.Key][]*providerInfo)
	pm.getlocal = make(chan chan []u.Key)
	pm.local = make(map[u.Key]struct{})
	pm.ContextCloser = ctxc.NewContextCloser(ctx, nil)

	pm.Children().Add(1)
	go pm.run()

	return pm
}
Example #6
// NewSwarm constructs a Swarm with a message Pipe, and begins listening
// on the given addresses.
func NewSwarm(ctx context.Context, listenAddrs []ma.Multiaddr, local peer.Peer, ps peer.Peerstore) (*Swarm, error) {
	s := &Swarm{
		Pipe:    msg.NewPipe(10),
		conns:   conn.MultiConnMap{},
		local:   local,
		peers:   ps,
		errChan: make(chan error, 100),
	}

	// ContextCloser for proper child management.
	s.ContextCloser = ctxc.NewContextCloser(ctx, s.close)

	s.Children().Add(1)
	go s.fanOut()
	return s, s.listen(listenAddrs)
}
Example #7
// NewMuxer constructs a muxer given a protocol map.
func NewMuxer(ctx context.Context, mp ProtocolMap) *Muxer {
	m := &Muxer{
		Protocols:     mp,
		Pipe:          msg.NewPipe(10),
		ContextCloser: ctxc.NewContextCloser(ctx, nil),
	}

	m.Children().Add(1)
	go m.handleIncomingMessages()
	for pid, proto := range m.Protocols {
		m.Children().Add(1)
		go m.handleOutgoingMessages(pid, proto)
	}

	return m
}
Example #8
// newSecureConn constructs a secure connection over the given insecure
// one, performing the secure handshake before returning it.
func newSecureConn(ctx context.Context, insecure Conn, peers peer.Peerstore) (Conn, error) {

	conn := &secureConn{
		insecure: insecure,
	}
	conn.ContextCloser = ctxc.NewContextCloser(ctx, conn.close)

	log.Debugf("newSecureConn: %v to %v", insecure.LocalPeer(), insecure.RemotePeer())
	// perform secure handshake before returning this connection.
	if err := conn.secureHandshake(peers); err != nil {
		conn.Close()
		return nil, err
	}
	log.Debugf("newSecureConn: %v to %v handshake success!", insecure.LocalPeer(), insecure.RemotePeer())

	return conn, nil
}
Example #9
// NewMultiConn constructs a new MultiConn from the given underlying
// connections between local and remote.
func NewMultiConn(ctx context.Context, local, remote peer.Peer, conns []Conn) (*MultiConn, error) {

	c := &MultiConn{
		local:  local,
		remote: remote,
		conns:  map[string]Conn{},
		duplex: Duplex{
			In:  make(chan []byte, 10),
			Out: make(chan []byte, 10),
		},
	}

	// must happen before Adds / fanOut
	c.ContextCloser = ctxc.NewContextCloser(ctx, c.close)

	if len(conns) > 0 {
		c.Add(conns...)
	}

	c.Children().Add(1)
	go c.fanOut()
	return c, nil
}
Example #10
// NewIpfsNetwork constructs a new IpfsNetwork, which implements the
// network interface.
func NewIpfsNetwork(ctx context.Context, listen []ma.Multiaddr, local peer.Peer,
	peers peer.Peerstore, pmap *mux.ProtocolMap) (*IpfsNetwork, error) {

	in := &IpfsNetwork{
		local:         local,
		muxer:         mux.NewMuxer(ctx, *pmap),
		ContextCloser: ctxc.NewContextCloser(ctx, nil),
	}

	var err error
	in.swarm, err = swarm.NewSwarm(ctx, listen, local, peers)
	if err != nil {
		in.Close()
		return nil, err
	}

	in.AddCloserChild(in.swarm)
	in.AddCloserChild(in.muxer)

	// remember to wire components together.
	in.muxer.Pipe.ConnectTo(in.swarm.Pipe)

	return in, nil
}
Example #11
// NewIpfsNode constructs a new IpfsNode based on the given config.
func NewIpfsNode(cfg *config.Config, online bool) (n *IpfsNode, err error) {
	success := false // flip to true after all sub-system inits succeed
	defer func() {
		if !success && n != nil {
			n.Close()
		}
	}()

	if cfg == nil {
		return nil, debugerror.Errorf("configuration required")
	}

	// TODO: derive this from a higher context.
	ctx := context.TODO()
	n = &IpfsNode{
		onlineMode: online,
		Config:     cfg,
	}
	n.ContextCloser = ctxc.NewContextCloser(ctx, n.teardown)

	// setup datastore.
	if n.Datastore, err = makeDatastore(cfg.Datastore); err != nil {
		return nil, debugerror.Wrap(err)
	}

	// setup peerstore + local peer identity
	n.Peerstore = peer.NewPeerstore()
	n.Identity, err = initIdentity(&n.Config.Identity, n.Peerstore, online)
	if err != nil {
		return nil, debugerror.Wrap(err)
	}

	// setup online services
	if online {

		dhtService := netservice.NewService(ctx, nil)      // nil handler for now, need to patch it
		exchangeService := netservice.NewService(ctx, nil) // nil handler for now, need to patch it
		diagService := netservice.NewService(ctx, nil)     // nil handler for now, need to patch it

		muxMap := &mux.ProtocolMap{
			mux.ProtocolID_Routing:    dhtService,
			mux.ProtocolID_Exchange:   exchangeService,
			mux.ProtocolID_Diagnostic: diagService,
			// add protocol services here.
		}

		// setup the network
		listenAddrs, err := listenAddresses(cfg)
		if err != nil {
			return nil, debugerror.Wrap(err)
		}

		n.Network, err = inet.NewIpfsNetwork(ctx, listenAddrs, n.Identity, n.Peerstore, muxMap)
		if err != nil {
			return nil, debugerror.Wrap(err)
		}
		n.AddCloserChild(n.Network)

		// setup diagnostics service
		n.Diagnostics = diag.NewDiagnostics(n.Identity, n.Network, diagService)
		diagService.SetHandler(n.Diagnostics)

		// setup routing service
		dhtRouting := dht.NewDHT(ctx, n.Identity, n.Peerstore, n.Network, dhtService, n.Datastore)
		dhtRouting.Validators[IpnsValidatorTag] = namesys.ValidateIpnsRecord

		// TODO(brian): perform this inside NewDHT factory method
		dhtService.SetHandler(dhtRouting) // wire the handler to the service.
		n.Routing = dhtRouting
		n.AddCloserChild(dhtRouting)

		// setup exchange service
		const alwaysSendToPeer = true // use YesManStrategy
		bitswapNetwork := bsnet.NewFromIpfsNetwork(exchangeService, n.Network)
		n.Exchange = bitswap.New(ctx, n.Identity, bitswapNetwork, n.Routing, n.Datastore, alwaysSendToPeer)

		go initConnections(ctx, n.Config, n.Peerstore, dhtRouting)
	}

	// TODO(brian): when offline instantiate the BlockService with a bitswap
	// session that simply doesn't return blocks
	n.Blocks, err = bserv.NewBlockService(n.Datastore, n.Exchange)
	if err != nil {
		return nil, debugerror.Wrap(err)
	}

	n.DAG = merkledag.NewDAGService(n.Blocks)
	n.Namesys = namesys.NewNameSystem(n.Routing)
	n.Pinning, err = pin.LoadPinner(n.Datastore, n.DAG)
	if err != nil {
		n.Pinning = pin.NewPinner(n.Datastore, n.DAG)
	}
	n.Resolver = &path.Resolver{DAG: n.DAG}

	success = true
	return n, nil
}
Example #12
// New constructs a new Mount instance. ctx is a context to wait upon, and
// mountpoint is the directory at which the mount is mounted.
func New(ctx context.Context, mountpoint string) Mount {
	m := &mount{mpoint: mountpoint}
	m.ContextCloser = ctxc.NewContextCloser(ctx, m.persistentUnmount)
	return m
}
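
All of the examples above follow the same lifecycle pattern: the constructor embeds a ctxc.ContextCloser in the new object, registers each long-running goroutine with Children().Add(1) before launching it, and each goroutine calls Children().Done() on exit so that Close() can wait for all children before running the close function. Below is a minimal sketch of that pattern for reference; the worker type, its fields, the import paths, and the func() error close-function signature are illustrative assumptions, not taken from the examples.

// Sketch only: a minimal type using the ContextCloser pattern shared by
// the examples above. Names and import paths are hypothetical.
package main

import (
	"context"

	ctxc "github.com/jbenet/go-ipfs/util/ctxcloser" // import path assumed
)

type worker struct {
	ctxc.ContextCloser
	jobs chan int
}

// NewWorker constructs a worker and starts its run loop, mirroring the
// constructors above: embed the ContextCloser, register the goroutine
// with Children().Add(1), then launch it.
func NewWorker(ctx context.Context) *worker {
	w := &worker{jobs: make(chan int, 10)}

	// teardown runs as part of Close(), like l.close / conn.close above.
	w.ContextCloser = ctxc.NewContextCloser(ctx, w.teardown)

	w.Children().Add(1) // register before starting, so Close() can wait
	go w.run()
	return w
}

func (w *worker) run() {
	defer w.Children().Done() // signal exit so Close() can unblock
	for {
		select {
		case j := <-w.jobs:
			_ = j // process the job here
		case <-w.Context().Done(): // assumed: Close() cancels this context
			return
		}
	}
}

// teardown releases worker-owned resources; signature func() error assumed.
func (w *worker) teardown() error {
	return nil
}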