Example #1
// NewDHT creates a new DHT object with the given peer as the 'local' host
func NewDHT(ctx context.Context, h host.Host, dstore ds.Datastore) *IpfsDHT {
	dht := new(IpfsDHT)
	dht.datastore = dstore
	dht.self = h.ID()
	dht.peerstore = h.Peerstore()
	dht.host = h

	// register for network notifs.
	dht.host.Network().Notify((*netNotifiee)(dht))

	dht.proc = goprocess.WithTeardown(func() error {
		// remove ourselves from network notifs.
		dht.host.Network().StopNotify((*netNotifiee)(dht))
		return nil
	})

	dht.ctx = ctx

	h.SetStreamHandler(ProtocolDHT, dht.handleNewStream)
	dht.providers = NewProviderManager(dht.ctx, dht.self)
	dht.proc.AddChild(dht.providers.proc)
	goprocessctx.CloseAfterContext(dht.proc, ctx)

	// routing table with bucket size 20 (Kademlia's k) and a one-minute latency tolerance
	dht.routingTable = kb.NewRoutingTable(20, kb.ConvertPeerID(dht.self), time.Minute, dht.peerstore)
	dht.birth = time.Now()

	dht.Validator = make(record.Validator)
	dht.Validator["pk"] = record.PublicKeyValidator

	dht.Selector = make(record.Selector)
	dht.Selector["pk"] = record.PublicKeySelector

	return dht
}
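The lifecycle pattern here is WithTeardown plus CloseAfterContext: cleanup is registered once in the teardown, and the process (with any children added via AddChild) is closed automatically when the context ends. A minimal, self-contained sketch of just that pattern, assuming the goprocess API as imported in the example above (the printed message is illustrative, not DHT code):

package main

import (
	"context"
	"fmt"
	"time"

	goprocess "github.com/jbenet/goprocess"
	goprocessctx "github.com/jbenet/goprocess/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// the teardown runs exactly once, when the process closes
	proc := goprocess.WithTeardown(func() error {
		fmt.Println("teardown: unregister notifiees, release resources")
		return nil
	})

	// bind the process lifetime to the context: when ctx is done,
	// the process is closed and the teardown fires
	goprocessctx.CloseAfterContext(proc, ctx)

	<-proc.Closed() // blocks until the context deadline triggers the close
}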
Example #2
func (r *dhtQueryRunner) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) {
	r.log = log
	r.runCtx = ctx

	if len(peers) == 0 {
		log.Warning("Running query with no peers!")
		return nil, nil
	}

	// setup concurrency rate limiting
	for i := 0; i < r.query.concurrency; i++ {
		r.rateLimit <- struct{}{}
	}

	// add all the peers we got first.
	for _, p := range peers {
		r.addPeerToQuery(p)
	}

	// spawn the workers as a child proc, so that Run exits
	// only after spawnWorkers has exited.
	r.proc.Go(r.spawnWorkers)

	// the workers are now running; wait until they're done.
	err := routing.ErrNotFound

	// close the proc once the context is done. we do this here, after
	// setup, so that the setup above cannot be cut short by an early close.
	ctxproc.CloseAfterContext(r.proc, ctx)

	select {
	case <-r.peersRemaining.Done():
		r.proc.Close()
		r.RLock()
		defer r.RUnlock()

		err = routing.ErrNotFound

		// if every query to every peer failed, something must be very wrong.
		if len(r.errs) > 0 && len(r.errs) == r.peersSeen.Size() {
			log.Debugf("query errs: %s", r.errs)
			err = r.errs[0]
		}

	case <-r.proc.Closed():
		r.RLock()
		defer r.RUnlock()
		err = context.DeadlineExceeded
	}

	if r.result != nil && r.result.success {
		return r.result, nil
	}

	return nil, err
}
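The "concurrency rate limiting" loop at the top of Run fills a buffered channel with tokens; workers later take a token before querying a peer and return it when finished, capping how many queries run at once. A stdlib-only sketch of the same semaphore idea (the names and the simulated work are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// fill a buffered channel with tokens, exactly as Run does with
	// r.rateLimit: concurrency is capped at cap(tokens)
	tokens := make(chan struct{}, 3)
	for i := 0; i < cap(tokens); i++ {
		tokens <- struct{}{}
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			<-tokens                                // acquire a token
			defer func() { tokens <- struct{}{} }() // release it when done
			fmt.Println("querying peer", n)
			time.Sleep(10 * time.Millisecond) // stand-in for the actual query
		}(i)
	}
	wg.Wait()
}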
Example #3
// New initializes a BitSwap instance that communicates over the provided
// BitSwapNetwork. This function registers the returned instance as the network
// delegate.
// Runs until context is cancelled.
func New(parent context.Context, p peer.ID, network bsnet.BitSwapNetwork,
	bstore blockstore.Blockstore, nice bool) exchange.Interface {

	// important to use provided parent context (since it may include important
	// loggable data). It's probably not a good idea to allow bitswap to be
	// coupled to the concerns of the IPFS daemon in this way.
	//
	// FIXME(btc) Now that bitswap manages itself using a process, it probably
	// shouldn't accept a context anymore. Clients should probably use Close()
	// exclusively. We should probably find another way to share logging data
	ctx, cancelFunc := context.WithCancel(parent)

	notif := notifications.New()
	px := process.WithTeardown(func() error {
		notif.Shutdown()
		return nil
	})

	bs := &Bitswap{
		self:          p,
		blockstore:    bstore,
		notifications: notif,
		engine:        decision.NewEngine(ctx, bstore), // TODO close the engine with Close() method
		network:       network,
		findKeys:      make(chan *blockRequest, sizeBatchRequestChan),
		process:       px,
		newBlocks:     make(chan *blocks.Block, HasBlockBufferSize),
		provideKeys:   make(chan key.Key, provideKeysBufferSize),
		wm:            NewWantManager(ctx, network),
	}
	go bs.wm.Run()
	network.SetDelegate(bs)

	// Start up bitswap's async worker routines
	bs.startWorkers(px, ctx)

	// bind the context and process.
	// do it over here to avoid closing before all setup is done.
	go func() {
		<-px.Closing() // process closes first
		cancelFunc()
	}()
	procctx.CloseAfterContext(px, ctx) // parent cancelled first

	return bs
}
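Note the two-way binding at the end of New: closing the process cancels the context, and cancelling the parent context closes the process, so either side can initiate shutdown. A stdlib-only sketch of the same idea with illustrative names (Component is not part of bitswap):

package main

import (
	"context"
	"fmt"
	"sync"
)

// Component mimics the two-way binding: Close cancels the internal
// context, and cancelling the parent context triggers Close.
type Component struct {
	cancel  context.CancelFunc
	closing chan struct{}
	once    sync.Once
}

func NewComponent(parent context.Context) *Component {
	ctx, cancel := context.WithCancel(parent)
	c := &Component{cancel: cancel, closing: make(chan struct{})}
	go func() {
		select {
		case <-ctx.Done(): // parent cancelled first -> close the component
			c.Close()
		case <-c.closing: // Close() called first; nothing left to do
		}
	}()
	return c
}

func (c *Component) Close() {
	c.once.Do(func() {
		close(c.closing) // component closes first...
		c.cancel()       // ...then its context is cancelled
	})
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	c := NewComponent(ctx)
	cancel() // cancelling the parent closes the component too
	<-c.closing
	fmt.Println("component closed")
}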
Example #4
func (s *Swarm) dialAddrs(ctx context.Context, d *conn.Dialer, p peer.ID, remoteAddrs []ma.Multiaddr) (conn.Conn, error) {

	// try to connect to one of the peer's known addresses.
	// we dial concurrently to each of the addresses, which:
	// * makes the process faster overall
	// * attempts to get the fastest connection available.
	// * mitigates the waste of trying bad addresses
	log.Debugf("%s swarm dialing %s %s", s.local, p, remoteAddrs)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // cancel work when we exit func

	foundConn := make(chan struct{})
	conns := make(chan conn.Conn, len(remoteAddrs))
	errs := make(chan error, len(remoteAddrs))

	// dialSingleAddr is used in the rate-limited dial loop below.
	dialSingleAddr := func(addr ma.Multiaddr) {
		connC, err := s.dialAddr(ctx, d, p, addr)

		// check parent still wants our results
		select {
		case <-foundConn:
			if connC != nil {
				connC.Close()
			}
			return
		default:
		}

		if err != nil {
			errs <- err
		} else if connC == nil {
			errs <- fmt.Errorf("failed to dial %s %s", p, addr)
		} else {
			conns <- connC
		}
	}

	// this whole thing is in a goroutine so we can use foundConn
	// to end early.
	go func() {
		// rate limiting just in case. at most 10 addrs at once.
		limiter := ratelimit.NewRateLimiter(process.Background(), 10)
		limiter.Go(func(worker process.Process) {
			// permute addrs so we try different sets first each time.
			// note: a bare break inside a select only exits the select,
			// so a labeled break is needed to actually stop the loop.
		dialLoop:
			for _, i := range rand.Perm(len(remoteAddrs)) {
				select {
				case <-foundConn: // if one of them succeeded already
					break dialLoop
				case <-worker.Closing(): // our context was cancelled
					break dialLoop
				default:
				}

				workerAddr := remoteAddrs[i] // shadow variable to avoid race
				limiter.LimitedGo(func(worker process.Process) {
					dialSingleAddr(workerAddr)
				})
			}
		})

		processctx.CloseAfterContext(limiter, ctx)
	}()

	// wait for the results.
	exitErr := fmt.Errorf("failed to dial %s", p)
	for i := 0; i < len(remoteAddrs); i++ {
		select {
		case exitErr = <-errs:
			log.Debug("dial error: ", exitErr)
		case connC := <-conns:
			// take the first connection and return as soon as possible
			close(foundConn)
			return connC, nil
		}
	}
	return nil, exitErr
}
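The shape of dialAddrs is the classic "first success wins" fan-out: dial every address concurrently, keep the first connection that arrives, and make the losers clean up after themselves. A stdlib-only sketch of that pattern under the same assumptions (dialFirst and the TCP addresses are illustrative, not swarm API):

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// dialFirst dials every address concurrently and returns the first
// connection that succeeds; the remaining dials are cancelled and any
// late connections are closed.
func dialFirst(ctx context.Context, addrs []string) (net.Conn, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // tells the losing dials to stop when we return

	conns := make(chan net.Conn)         // unbuffered: only the winner's send is received
	errs := make(chan error, len(addrs)) // buffered: failures never block
	var d net.Dialer

	for _, a := range addrs {
		go func(addr string) {
			c, err := d.DialContext(ctx, "tcp", addr)
			if err != nil {
				errs <- err
				return
			}
			select {
			case conns <- c: // we won
			case <-ctx.Done(): // someone else won; clean up our conn
				c.Close()
			}
		}(a)
	}

	err := fmt.Errorf("failed to dial %v", addrs)
	for range addrs {
		select {
		case err = <-errs:
		case c := <-conns:
			return c, nil // first success wins
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	return nil, err
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if c, err := dialFirst(ctx, []string{"example.com:80", "example.org:80"}); err == nil {
		fmt.Println("connected to", c.RemoteAddr())
		c.Close()
	}
}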