// NewDHT creates a new DHT object with the given peer as the 'local' host
func NewDHT(ctx context.Context, h host.Host, dstore ds.Datastore) *IpfsDHT {
	dht := new(IpfsDHT)
	dht.datastore = dstore
	dht.self = h.ID()
	dht.peerstore = h.Peerstore()
	dht.host = h

	// register for network notifs.
	dht.host.Network().Notify((*netNotifiee)(dht))

	dht.proc = goprocess.WithTeardown(func() error {
		// remove ourselves from network notifs.
		dht.host.Network().StopNotify((*netNotifiee)(dht))
		return nil
	})

	dht.ctx = ctx
	h.SetStreamHandler(ProtocolDHT, dht.handleNewStream)
	dht.providers = NewProviderManager(dht.ctx, dht.self)
	dht.proc.AddChild(dht.providers.proc)
	goprocessctx.CloseAfterContext(dht.proc, ctx)

	dht.routingTable = kb.NewRoutingTable(20, kb.ConvertPeerID(dht.self), time.Minute, dht.peerstore)
	dht.birth = time.Now()

	dht.Validator = make(record.Validator)
	dht.Validator["pk"] = record.PublicKeyValidator

	dht.Selector = make(record.Selector)
	dht.Selector["pk"] = record.PublicKeySelector

	return dht
}
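NewDHT only needs a running libp2p host and any ds.Datastore. The helper below is a minimal usage sketch, not part of the source: newMemoryDHT is a hypothetical name, it assumes it sits in the same package as NewDHT (so host.Host and IpfsDHT are already in scope), and it assumes the go-datastore map store and its sync wrapper under their usual import paths, which vary by vintage.

import (
	"context"

	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
)

// newMemoryDHT is a hypothetical convenience wrapper: it backs the DHT with
// an in-memory map datastore, wrapped in a mutex so it is safe for the
// concurrent access the DHT performs.
func newMemoryDHT(ctx context.Context, h host.Host) *IpfsDHT {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	return NewDHT(ctx, h, dstore)
}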
func bootstrapConnect(ctx context.Context, ph host.Host, peers []peer.PeerInfo) error {
	if len(peers) < 1 {
		return ErrNotEnoughBootstrapPeers
	}

	errs := make(chan error, len(peers))
	var wg sync.WaitGroup
	for _, p := range peers {

		// performed asynchronously because when performed synchronously, if
		// one `Connect` call hangs, subsequent calls are more likely to
		// fail/abort due to an expiring context.
		// Also, performed asynchronously for dial speed.

		wg.Add(1)
		go func(p peer.PeerInfo) {
			defer wg.Done()
			defer log.EventBegin(ctx, "bootstrapDial", ph.ID(), p.ID).Done()
			log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID)

			ph.Peerstore().AddAddrs(p.ID, p.Addrs, peer.PermanentAddrTTL)
			if err := ph.Connect(ctx, p); err != nil {
				log.Event(ctx, "bootstrapDialFailed", p.ID)
				log.Debugf("failed to bootstrap with %v: %s", p.ID, err)
				errs <- err
				return
			}
			log.Event(ctx, "bootstrapDialSuccess", p.ID)
			log.Infof("bootstrapped with %v", p.ID)
		}(p)
	}
	wg.Wait()

	// our failure condition is when no connection attempt succeeded.
	// So drain the errs channel, counting the results.
	close(errs)
	count := 0
	var err error
	for err = range errs {
		if err != nil {
			count++
		}
	}
	if count == len(peers) {
		return fmt.Errorf("failed to bootstrap. %s", err)
	}
	return nil
}
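bootstrapConnect only needs a slice of peer.PeerInfo values; how that slice is produced is up to the caller. The small helper below is a hypothetical sketch (makeBootstrapList is not from the source, and the multiaddr import path varies by vintage) showing one way to assemble it when the peer IDs and multiaddrs are already known.

import ma "github.com/multiformats/go-multiaddr" // assumed import path

// makeBootstrapList is a hypothetical helper: it pairs each peer ID with its
// multiaddr, producing the []peer.PeerInfo slice that bootstrapConnect
// expects. ids and addrs are assumed to have the same length.
func makeBootstrapList(ids []peer.ID, addrs []ma.Multiaddr) []peer.PeerInfo {
	out := make([]peer.PeerInfo, 0, len(ids))
	for i, id := range ids {
		out = append(out, peer.PeerInfo{ID: id, Addrs: []ma.Multiaddr{addrs[i]}})
	}
	return out
}

Note the failure rule: errors are collected in a channel sized to len(peers), and bootstrapConnect only reports failure when every dial failed, so a single reachable bootstrap peer is enough for the round to count as a success.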
func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {

	ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
	defer cancel()
	id := host.ID()

	// get bootstrap peers from config. retrieving them here makes
	// sure we remain observant of changes to client configuration.
	peers := cfg.BootstrapPeers()

	// determine how many bootstrap connections to open
	connected := host.Network().Peers()
	if len(connected) >= cfg.MinPeerThreshold {
		log.Event(ctx, "bootstrapSkip", id)
		log.Debugf("%s core bootstrap skipped -- connected to %d (> %d) nodes",
			id, len(connected), cfg.MinPeerThreshold)
		return nil
	}
	numToDial := cfg.MinPeerThreshold - len(connected)

	// filter out bootstrap nodes we are already connected to
	var notConnected []peer.PeerInfo
	for _, p := range peers {
		if host.Network().Connectedness(p.ID) != inet.Connected {
			notConnected = append(notConnected, p)
		}
	}

	// if connected to all bootstrap peer candidates, exit
	if len(notConnected) < 1 {
		log.Debugf("%s no more bootstrap peers to create %d connections", id, numToDial)
		return ErrNotEnoughBootstrapPeers
	}

	// connect to a random subset of bootstrap candidates
	randSubset := randomSubsetOfPeers(notConnected, numToDial)

	defer log.EventBegin(ctx, "bootstrapStart", id).Done()
	log.Debugf("%s bootstrapping to %d nodes: %s", id, numToDial, randSubset)
	if err := bootstrapConnect(ctx, host, randSubset); err != nil {
		return err
	}
	return nil
}
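bootstrapRound performs a single pass; keeping the node topped up with connections means repeating it. The loop below is a minimal scheduling sketch under assumptions: runBootstrap is a hypothetical name, it uses a plain time.Ticker rather than whatever process machinery the surrounding code uses, and the period is passed in explicitly instead of being read from BootstrapConfig.

// runBootstrap is a hypothetical driver: it runs one bootstrap round
// immediately, then repeats every `period` until the context is cancelled.
// Errors are only logged; a failed round is simply retried on the next tick.
func runBootstrap(ctx context.Context, h host.Host, cfg BootstrapConfig, period time.Duration) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		if err := bootstrapRound(ctx, h, cfg); err != nil {
			log.Debugf("bootstrap round error: %s", err)
		}
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
	}
}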