// startListening on the network addresses
func startListening(ctx context.Context, host p2phost.Host, cfg *config.Config) error {
	listenAddrs, err := listenAddresses(cfg)
	if err != nil {
		return err
	}

	// make sure we error out if our config does not have addresses we can use
	log.Debugf("Config.Addresses.Swarm:%s", listenAddrs)
	filteredAddrs := addrutil.FilterUsableAddrs(listenAddrs)
	log.Debugf("Config.Addresses.Swarm:%s (filtered)", filteredAddrs)
	if len(filteredAddrs) < 1 {
		return fmt.Errorf("addresses in config not usable: %s", listenAddrs)
	}

	// Actually start listening:
	if err := host.Network().Listen(filteredAddrs...); err != nil {
		return err
	}

	// list out our addresses
	addrs, err := host.Network().InterfaceListenAddresses()
	if err != nil {
		return err
	}
	log.Infof("Swarm listening at: %s", addrs)
	return nil
}
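// The listenAddresses helper called above is not shown in this listing. A
// minimal sketch of what it could look like, assuming cfg.Addresses.Swarm is
// a slice of multiaddr strings (an assumption for illustration, not taken
// from the original source):
func listenAddresses(cfg *config.Config) ([]ma.Multiaddr, error) {
	var listen []ma.Multiaddr
	for _, addr := range cfg.Addresses.Swarm {
		maddr, err := ma.NewMultiaddr(addr)
		if err != nil {
			return nil, fmt.Errorf("failed to parse config.Addresses.Swarm entry %q: %s", addr, err)
		}
		listen = append(listen, maddr)
	}
	return listen, nil
}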
func NewIDService(h host.Host) *IDService {
	s := &IDService{
		Host:   h,
		currid: make(map[inet.Conn]chan struct{}),
	}
	h.SetStreamHandler(ID, s.RequestHandler)
	return s
}
func NewRelayService(h host.Host, sh inet.StreamHandler) *RelayService {
	s := &RelayService{
		host:    h,
		handler: sh,
	}
	h.SetStreamHandler(ID, s.requestHandler)
	return s
}
// NewDiagnostics instantiates a new diagnostics service running on the given network
func NewDiagnostics(self peer.ID, h host.Host) *Diagnostics {
	d := &Diagnostics{
		host:    h,
		self:    self,
		birth:   time.Now(),
		diagMap: make(map[string]time.Time),
	}
	h.SetStreamHandler(ProtocolDiag, d.handleNewStream)
	return d
}
// NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host
func NewFromIpfsHost(host host.Host, r routing.IpfsRouting) BitSwapNetwork {
	bitswapNetwork := impl{
		host:    host,
		routing: r,
	}
	host.SetStreamHandler(ProtocolBitswap, bitswapNetwork.handleNewStream)
	host.Network().Notify((*netNotifiee)(&bitswapNetwork))
	// TODO: StopNotify.

	return &bitswapNetwork
}
func testHasProtocolVersions(t *testing.T, h host.Host, p peer.ID) {
	v, err := h.Peerstore().Get(p, "ProtocolVersion")
	if err != nil || v == nil {
		t.Error("no protocol version", err)
		return
	}
	if v.(string) != identify.IpfsVersion {
		t.Error("protocol version mismatch")
	}

	v, err = h.Peerstore().Get(p, "AgentVersion")
	if err != nil || v == nil {
		t.Error("no agent version", err)
		return
	}
	if v.(string) != identify.ClientVersion {
		t.Error("agent version mismatch")
	}
}
func getDialableListenAddrs(ph host.Host) ([]*net.TCPAddr, error) {
	var out []*net.TCPAddr
	for _, addr := range ph.Addrs() {
		na, err := manet.ToNetAddr(addr)
		if err != nil {
			continue
		}
		if tcp, ok := na.(*net.TCPAddr); ok {
			out = append(out, tcp)
		}
	}
	if len(out) == 0 {
		return nil, errors.New("failed to find good external addr from peerhost")
	}
	return out, nil
}
func testKnowsAddrs(t *testing.T, h host.Host, p peer.ID, expected []ma.Multiaddr) {
	actual := h.Peerstore().Addrs(p)

	if len(actual) != len(expected) {
		t.Error("don't have the same addresses")
	}

	have := map[string]struct{}{}
	for _, addr := range actual {
		have[addr.String()] = struct{}{}
	}
	for _, addr := range expected {
		if _, found := have[addr.String()]; !found {
			t.Errorf("%s did not have addr for %s: %s", h.ID(), p, addr)
		}
	}
}
func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {
	ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
	defer cancel()
	id := host.ID()

	// get bootstrap peers from config. retrieving them here makes
	// sure we remain observant of changes to client configuration.
	peers := cfg.BootstrapPeers()

	// determine how many bootstrap connections to open
	connected := host.Network().Peers()
	if len(connected) >= cfg.MinPeerThreshold {
		log.Event(ctx, "bootstrapSkip", id)
		log.Debugf("%s core bootstrap skipped -- connected to %d (>= %d) nodes",
			id, len(connected), cfg.MinPeerThreshold)
		return nil
	}
	numToDial := cfg.MinPeerThreshold - len(connected)

	// filter out bootstrap nodes we are already connected to
	var notConnected []peer.PeerInfo
	for _, p := range peers {
		if host.Network().Connectedness(p.ID) != inet.Connected {
			notConnected = append(notConnected, p)
		}
	}

	// if connected to all bootstrap peer candidates, exit
	if len(notConnected) < 1 {
		log.Debugf("%s no more bootstrap peers to create %d connections", id, numToDial)
		return ErrNotEnoughBootstrapPeers
	}

	// connect to a random subset of bootstrap candidates
	randSubset := randomSubsetOfPeers(notConnected, numToDial)

	defer log.EventBegin(ctx, "bootstrapStart", id).Done()
	log.Debugf("%s bootstrapping to %d nodes: %s", id, numToDial, randSubset)
	return bootstrapConnect(ctx, host, randSubset)
}
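// A minimal sketch of how bootstrapRound could be driven periodically. The
// ticker-based loop and its period are assumptions for illustration, not the
// original scheduling code.
func periodicBootstrap(ctx context.Context, host host.Host, cfg BootstrapConfig, period time.Duration) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		// run one round immediately, then once per tick
		if err := bootstrapRound(ctx, host, cfg); err != nil {
			log.Debugf("bootstrap round failed: %s", err)
		}
		select {
		case <-ticker.C:
		case <-ctx.Done():
			return
		}
	}
}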
func NewMdnsService(peerhost host.Host, interval time.Duration) (Service, error) {
	// TODO: don't let mdns use logging...
	golog.SetOutput(ioutil.Discard)

	var ipaddrs []net.IP
	port := 4001

	addrs, err := getDialableListenAddrs(peerhost)
	if err != nil {
		log.Warning(err)
	} else {
		port = addrs[0].Port
		for _, a := range addrs {
			ipaddrs = append(ipaddrs, a.IP)
		}
	}

	myid := peerhost.ID().Pretty()
	info := []string{myid}

	service, err := mdns.NewMDNSService(myid, ServiceTag, "", "", port, ipaddrs, info)
	if err != nil {
		return nil, err
	}

	// Create the mDNS server, defer shutdown
	server, err := mdns.NewServer(&mdns.Config{Zone: service})
	if err != nil {
		return nil, err
	}

	s := &mdnsService{
		server:   server,
		service:  service,
		host:     peerhost,
		interval: interval,
	}

	go s.pollForEntries()

	return s, nil
}
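// A minimal sketch of consuming mDNS discoveries, assuming the Service
// interface exposes RegisterNotifee and that notifees receive a
// HandlePeerFound(peer.PeerInfo) callback; both are assumptions, since only
// the constructor is shown above.
type connectNotifee struct {
	host host.Host
}

func (n *connectNotifee) HandlePeerFound(p peer.PeerInfo) {
	// try to connect to every peer discovered on the local network
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := n.host.Connect(ctx, p); err != nil {
		log.Debugf("failed to connect to discovered peer %s: %s", p.ID, err)
	}
}

func setupDiscovery(h host.Host) error {
	svc, err := NewMdnsService(h, 10*time.Second)
	if err != nil {
		return err
	}
	svc.RegisterNotifee(&connectNotifee{host: h})
	return nil
}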
// NewDHT creates a new DHT object with the given peer as the 'local' host
func NewDHT(ctx context.Context, h host.Host, dstore ds.ThreadSafeDatastore) *IpfsDHT {
	dht := new(IpfsDHT)
	dht.datastore = dstore
	dht.self = h.ID()
	dht.peerstore = h.Peerstore()
	dht.host = h

	// register for network notifs.
	dht.host.Network().Notify((*netNotifiee)(dht))

	dht.proc = goprocess.WithTeardown(func() error {
		// remove ourselves from network notifs.
		dht.host.Network().StopNotify((*netNotifiee)(dht))
		return nil
	})

	dht.ctx = ctx

	h.SetStreamHandler(ProtocolDHT, dht.handleNewStream)
	dht.providers = NewProviderManager(dht.ctx, dht.self)
	dht.proc.AddChild(dht.providers.proc)
	goprocessctx.CloseAfterContext(dht.proc, ctx)

	dht.routingTable = kb.NewRoutingTable(20, kb.ConvertPeerID(dht.self), time.Minute, dht.peerstore)
	dht.birth = time.Now()

	dht.Validator = make(record.Validator)
	dht.Validator["pk"] = record.PublicKeyValidator
	return dht
}
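// A minimal sketch showing how the pieces above compose: the DHT returned by
// NewDHT serves as the routing.IpfsRouting handed to NewFromIpfsHost. The
// in-memory datastore wrapper (dssync.MutexWrap over ds.NewMapDatastore, as in
// the go-datastore sync package) is an assumption for illustration, as is
// ignoring the original package boundaries.
func wireHost(ctx context.Context, h host.Host) (BitSwapNetwork, *IpfsDHT) {
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	dht := NewDHT(ctx, h, dstore)
	bsnet := NewFromIpfsHost(h, dht)
	return bsnet, dht
}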
func bootstrapConnect(ctx context.Context, ph host.Host, peers []peer.PeerInfo) error {
	if len(peers) < 1 {
		return ErrNotEnoughBootstrapPeers
	}

	errs := make(chan error, len(peers))
	var wg sync.WaitGroup
	for _, p := range peers {

		// performed asynchronously because when performed synchronously, if
		// one `Connect` call hangs, subsequent calls are more likely to
		// fail/abort due to an expiring context.
		// Also, performed asynchronously for dial speed.

		wg.Add(1)
		go func(p peer.PeerInfo) {
			defer wg.Done()
			defer log.EventBegin(ctx, "bootstrapDial", ph.ID(), p.ID).Done()
			log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID)

			ph.Peerstore().AddAddrs(p.ID, p.Addrs, peer.PermanentAddrTTL)
			if err := ph.Connect(ctx, p); err != nil {
				log.Event(ctx, "bootstrapDialFailed", p.ID)
				log.Debugf("failed to bootstrap with %v: %s", p.ID, err)
				errs <- err
				return
			}
			log.Event(ctx, "bootstrapDialSuccess", p.ID)
			log.Infof("bootstrapped with %v", p.ID)
		}(p)
	}
	wg.Wait()

	// our failure condition is when no connection attempt succeeded.
	// So drain the errs channel, counting the results.
	close(errs)
	count := 0
	var err error
	for err = range errs {
		if err != nil {
			count++
		}
	}
	if count == len(peers) {
		return fmt.Errorf("failed to bootstrap. %s", err)
	}
	return nil
}
func NewPingService(h host.Host) *PingService {
	ps := &PingService{h}
	h.SetStreamHandler(ID, ps.PingHandler)
	return ps
}
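// A minimal usage sketch, assuming PingService exposes a Ping(ctx, peer.ID)
// method returning a channel of round-trip times; that method is an
// assumption, since only the constructor is shown above.
func pingOnce(ctx context.Context, h host.Host, p peer.ID) (time.Duration, error) {
	ps := NewPingService(h)

	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	rtts, err := ps.Ping(ctx, p)
	if err != nil {
		return 0, err
	}
	select {
	case rtt := <-rtts:
		return rtt, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}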