// NewDHT creates a new DHT object with the given peer as the 'local' host
func NewDHT(ctx context.Context, h host.Host, dstore ds.ThreadSafeDatastore) *IpfsDHT {
	dht := new(IpfsDHT)
	dht.datastore = dstore
	dht.self = h.ID()
	dht.peerstore = h.Peerstore()
	dht.host = h

	// register for network notifs.
	dht.host.Network().Notify((*netNotifiee)(dht))

	dht.proc = goprocess.WithTeardown(func() error {
		// remove ourselves from network notifs.
		dht.host.Network().StopNotify((*netNotifiee)(dht))
		return nil
	})

	dht.ctx = ctx

	h.SetStreamHandler(ProtocolDHT, dht.handleNewStream)
	dht.providers = NewProviderManager(dht.ctx, dht.self)
	dht.proc.AddChild(dht.providers.proc)
	goprocessctx.CloseAfterContext(dht.proc, ctx)

	dht.routingTable = kb.NewRoutingTable(20, kb.ConvertPeerID(dht.self), time.Minute, dht.peerstore)
	dht.birth = time.Now()

	dht.Validator = make(record.Validator)
	dht.Validator["pk"] = record.PublicKeyValidator

	return dht
}
// NewDHT creates a new DHT object with the given peer as the 'local' host
func NewDHT(ctx context.Context, h host.Host, dstore ds.ThreadSafeDatastore) *IpfsDHT {
	dht := new(IpfsDHT)
	dht.datastore = dstore
	dht.self = h.ID()
	dht.peerstore = h.Peerstore()
	dht.host = h

	// register for network notifs.
	dht.host.Network().Notify((*netNotifiee)(dht))

	dht.ContextGroup = ctxgroup.WithContextAndTeardown(ctx, func() error {
		// remove ourselves from network notifs.
		dht.host.Network().StopNotify((*netNotifiee)(dht))
		return nil
	})

	h.SetStreamHandler(ProtocolDHT, dht.handleNewStream)
	dht.providers = NewProviderManager(dht.Context(), dht.self)
	dht.AddChild(dht.providers)

	dht.routingTable = kb.NewRoutingTable(20, kb.ConvertPeerID(dht.self), time.Minute, dht.peerstore)
	dht.birth = time.Now()

	dht.Validator = make(record.Validator)
	dht.Validator["pk"] = record.PublicKeyValidator

	if doPinging {
		dht.Children().Add(1)
		go dht.PingRoutine(time.Second * 10)
	}
	return dht
}
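// Example (a minimal sketch, not part of the original source): constructing
// and tearing down a DHT. makeBasicHost is a hypothetical helper standing in
// for whatever host construction the application uses, and wrapping an
// in-memory map datastore with dssync.MutexWrap is assumed to satisfy
// ds.ThreadSafeDatastore in this version of go-datastore.
func exampleNewDHT() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	h, err := makeBasicHost(ctx) // hypothetical host constructor
	if err != nil {
		panic(err)
	}

	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	dht := NewDHT(ctx, h, dstore)

	// closing the process runs the teardown registered above, which
	// unregisters the DHT's network notifiee (goprocess variant).
	defer dht.proc.Close()
}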
// bootstrapConnect dials every peer in the given list concurrently and
// fails only if no connection attempt succeeds.
func bootstrapConnect(ctx context.Context, ph host.Host, peers []peer.PeerInfo) error {
	if len(peers) < 1 {
		return ErrNotEnoughBootstrapPeers
	}

	errs := make(chan error, len(peers))
	var wg sync.WaitGroup
	for _, p := range peers {

		// performed asynchronously because when performed synchronously, if
		// one `Connect` call hangs, subsequent calls are more likely to
		// fail/abort due to an expiring context.
		// Also, performed asynchronously for dial speed.

		wg.Add(1)
		go func(p peer.PeerInfo) {
			defer wg.Done()
			defer log.EventBegin(ctx, "bootstrapDial", ph.ID(), p.ID).Done()
			log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID)

			ph.Peerstore().AddAddrs(p.ID, p.Addrs, peer.PermanentAddrTTL)
			if err := ph.Connect(ctx, p); err != nil {
				log.Event(ctx, "bootstrapDialFailed", p.ID)
				log.Debugf("failed to bootstrap with %v: %s", p.ID, err)
				errs <- err
				return
			}
			log.Event(ctx, "bootstrapDialSuccess", p.ID)
			log.Infof("bootstrapped with %v", p.ID)
		}(p)
	}
	wg.Wait()

	// our failure condition is when no connection attempt succeeded.
	// So drain the errs channel, counting the results.
	close(errs)
	count := 0
	var err error
	for err = range errs {
		if err != nil {
			count++
		}
	}
	if count == len(peers) {
		return fmt.Errorf("failed to bootstrap. %s", err)
	}
	return nil
}
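// Usage sketch (illustrative; the 15-second deadline is an assumption, not
// taken from the original). Since bootstrapConnect fails only when every
// dial fails, a single reachable bootstrap peer is enough for this to
// succeed.
func bootstrapOnce(ctx context.Context, ph host.Host, infos []peer.PeerInfo) error {
	ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
	defer cancel()
	return bootstrapConnect(ctx, ph, infos)
}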
// testKnowsAddrs verifies that h's peerstore holds an address entry for
// every expected multiaddr of peer p.
func testKnowsAddrs(t *testing.T, h host.Host, p peer.ID, expected []ma.Multiaddr) {
	actual := h.Peerstore().Addrs(p)

	if len(actual) != len(expected) {
		t.Errorf("expected %d addresses, have %d", len(expected), len(actual))
	}

	have := map[string]struct{}{}
	for _, addr := range actual {
		have[addr.String()] = struct{}{}
	}

	for _, addr := range expected {
		if _, found := have[addr.String()]; !found {
			t.Errorf("%s did not have addr for %s: %s", h.ID(), p, addr)
		}
	}
}
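// Sketch of how the helper above is typically driven in a test. The
// newTestHost constructor is hypothetical; real tests would use whatever
// harness the package provides.
func TestPeerstoreLearnsAddrs(t *testing.T) {
	ctx := context.Background()
	h1 := newTestHost(t, ctx) // hypothetical test-harness constructor
	h2 := newTestHost(t, ctx)

	pi := peer.PeerInfo{ID: h2.ID(), Addrs: h2.Addrs()}
	if err := h1.Connect(ctx, pi); err != nil {
		t.Fatal(err)
	}

	// after connecting, h1's peerstore should hold h2's listen addrs.
	testKnowsAddrs(t, h1, h2.ID(), h2.Addrs())
}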
// NewMdnsService starts an mDNS discovery service on the given host,
// advertising its dialable listen addresses and polling for other peers
// every interval.
func NewMdnsService(peerhost host.Host, interval time.Duration) (Service, error) {

	// TODO: don't let mdns use logging...
	golog.SetOutput(ioutil.Discard)

	var ipaddrs []net.IP
	port := 4001

	addrs, err := getDialableListenAddrs(peerhost)
	if err != nil {
		log.Warning(err)
	} else {
		port = addrs[0].Port
		for _, a := range addrs {
			ipaddrs = append(ipaddrs, a.IP)
		}
	}

	myid := peerhost.ID().Pretty()

	info := []string{myid}
	service, err := mdns.NewMDNSService(myid, ServiceTag, "", "", port, ipaddrs, info)
	if err != nil {
		return nil, err
	}

	// Create the mDNS server, defer shutdown
	server, err := mdns.NewServer(&mdns.Config{Zone: service})
	if err != nil {
		return nil, err
	}

	s := &mdnsService{
		server:   server,
		service:  service,
		host:     peerhost,
		interval: interval,
	}

	go s.pollForEntries()

	return s, nil
}
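// Usage sketch (a minimal sketch; the shutdown wiring is an assumption, not
// the original's): start mDNS discovery alongside a host and stop it when
// the host shuts down.
func startDiscovery(h host.Host) (Service, error) {
	svc, err := NewMdnsService(h, 10*time.Second)
	if err != nil {
		return nil, err
	}
	// assuming the Service interface exposes a Close method, callers would
	// Close() it on shutdown to stop the underlying mDNS server.
	return svc, nil
}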
// bootstrapRound performs a single bootstrap pass: if the host is below
// cfg.MinPeerThreshold connections, it dials a random subset of the
// configured bootstrap peers it is not yet connected to.
func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {

	ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
	defer cancel()
	id := host.ID()

	// get bootstrap peers from config. retrieving them here makes
	// sure we remain observant of changes to client configuration.
	peers := cfg.BootstrapPeers()

	// determine how many bootstrap connections to open
	connected := host.Network().Peers()
	if len(connected) >= cfg.MinPeerThreshold {
		log.Event(ctx, "bootstrapSkip", id)
		log.Debugf("%s core bootstrap skipped -- connected to %d (>= %d) nodes",
			id, len(connected), cfg.MinPeerThreshold)
		return nil
	}
	numToDial := cfg.MinPeerThreshold - len(connected)

	// filter out bootstrap nodes we are already connected to
	var notConnected []peer.PeerInfo
	for _, p := range peers {
		if host.Network().Connectedness(p.ID) != inet.Connected {
			notConnected = append(notConnected, p)
		}
	}

	// if connected to all bootstrap peer candidates, exit
	if len(notConnected) < 1 {
		log.Debugf("%s no more bootstrap peers to create %d connections", id, numToDial)
		return ErrNotEnoughBootstrapPeers
	}

	// connect to a random subset of bootstrap candidates
	randSubset := randomSubsetOfPeers(notConnected, numToDial)

	defer log.EventBegin(ctx, "bootstrapStart", id).Done()
	log.Debugf("%s bootstrapping to %d nodes: %s", id, numToDial, randSubset)
	return bootstrapConnect(ctx, host, randSubset)
}
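// Sketch (the ticker-driven loop is an assumption about how the surrounding
// codebase schedules rounds; the original may wire this through goprocess
// instead):
func periodicBootstrap(ctx context.Context, h host.Host, cfg BootstrapConfig, period time.Duration) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// each round re-checks the peer count and dials only the
			// shortfall, so running it on a timer is cheap when healthy.
			if err := bootstrapRound(ctx, h, cfg); err != nil {
				log.Debugf("bootstrap round error: %s", err)
			}
		case <-ctx.Done():
			return
		}
	}
}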