func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *PBDHTMessage) {
	resp := Message{
		Type:     pmes.GetType(),
		ID:       pmes.GetId(),
		Response: true,
	}
	defer func() {
		mes := swarm.NewMessage(p, resp.ToProtobuf())
		dht.netChan.Outgoing <- mes
	}()
	level := pmes.GetValue()[0]
	u.DOut("handleFindPeer: searching for '%s'\n", peer.ID(pmes.GetKey()).Pretty())
	closest := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
	if closest == nil {
		u.PErr("handleFindPeer: could not find anything.\n")
		return
	}

	if len(closest.Addresses) == 0 {
		u.PErr("handleFindPeer: no addresses for connected peer...\n")
		return
	}

	// If the found peer is further away from the key than we are, don't return it
	if kb.Closer(dht.self.ID, closest.ID, u.Key(pmes.GetKey())) {
		return
	}

	u.DOut("handleFindPeer: sending back '%s'\n", closest.ID.Pretty())
	resp.Peers = []*peer.Peer{closest}
	resp.Success = true
}
// Handle getting the ID from this peer and adding it into the map
func (s *Swarm) handleNewConn(nconn net.Conn) {
	p := new(peer.Peer)

	conn := &Conn{
		Peer: p,
		Addr: nil,
		Conn: nconn,
	}
	newConnChans(conn)

	sin, sout, err := ident.Handshake(s.local, p, conn.Incoming.MsgChan, conn.Outgoing.MsgChan)
	if err != nil {
		u.PErr("%v\n", err.Error())
		conn.Close()
		return
	}

	// Get address to contact remote peer from
	addr := <-sin
	maddr, err := ma.NewMultiaddr(string(addr))
	if err != nil {
		u.PErr("Got invalid address from peer.\n")
		s.Error(err)
		return
	}
	p.AddAddress(maddr)

	conn.secIn = sin
	conn.secOut = sout

	err = s.StartConn(conn)
	if err != nil {
		s.Error(err)
	}
}
func (s *Swarm) routeMessages() {
	for {
		select {
		case mes, ok := <-s.toFilter:
			if !ok {
				return
			}
			wrapper, err := Unwrap(mes.Data)
			if err != nil {
				u.PErr("error in route messages: %s\n", err)
				// Don't try to route a message we failed to unwrap
				continue
			}

			ch, ok := s.filterChans[PBWrapper_MessageType(wrapper.GetType())]
			if !ok {
				u.PErr("Received message with invalid type: %d\n", wrapper.GetType())
				continue
			}

			mes.Data = wrapper.GetMessage()
			ch.Incoming <- mes
		case gchan := <-s.newFilters:
			nch, ok := s.filterChans[gchan.Type]
			if !ok {
				nch = NewChan(16)
				s.filterChans[gchan.Type] = nch
				go s.muxChan(nch, gchan.Type)
			}
			gchan.resp <- nch
		case <-s.haltroute:
			return
		}
	}
}
// TODO: I'm not certain about this implementation. We get a list of peers/providers
// from someone; what do we do with it? Connect to each of them? Randomly pick
// one to get the value from? Or just connect to one at a time until we get a
// successful connection and request the value from it?
func (dht *IpfsDHT) getFromPeerList(key u.Key, timeout time.Duration,
	peerlist []*PBDHTMessage_PBPeer, level int) ([]byte, error) {
	for _, pinfo := range peerlist {
		p, _ := dht.Find(peer.ID(pinfo.GetId()))
		if p == nil {
			maddr, err := ma.NewMultiaddr(pinfo.GetAddr())
			if err != nil {
				u.PErr("getValue error: %s\n", err)
				continue
			}

			p, err = dht.network.GetConnection(peer.ID(pinfo.GetId()), maddr)
			if err != nil {
				u.PErr("getValue error: %s\n", err)
				continue
			}
		}
		pmes, err := dht.getValueSingle(p, key, timeout, level)
		if err != nil {
			u.DErr("getFromPeers error: %s\n", err)
			continue
		}
		dht.providers.AddProvider(key, p)

		// Make sure it was a successful get
		if pmes.GetSuccess() && pmes.Value != nil {
			return pmes.GetValue(), nil
		}
	}
	return nil, u.ErrNotFound
}
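// The sketch below explores one answer to the TODO above and is not wired in
// anywhere: query every provider concurrently and take the first successful
// value. It reuses getValueSingle and assumes the callers pass already
// connected peers; treat it as a hypothetical alternative, not the chosen
// strategy.
func (dht *IpfsDHT) getFromPeerListRace(key u.Key, timeout time.Duration,
	peers []*peer.Peer, level int) ([]byte, error) {
	valChan := make(chan []byte, len(peers)) // buffered so losing goroutines never block
	for _, p := range peers {
		go func(p *peer.Peer) {
			pmes, err := dht.getValueSingle(p, key, timeout, level)
			if err != nil || !pmes.GetSuccess() || pmes.Value == nil {
				return
			}
			dht.providers.AddProvider(key, p)
			valChan <- pmes.GetValue()
		}(p)
	}
	select {
	case val := <-valChan:
		return val, nil
	case <-time.After(timeout):
		return nil, u.ErrTimeout
	}
}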
func (bs *BitSwap) handleMessages() {
	for {
		select {
		case mes := <-bs.meschan.Incoming:
			pmes := new(PBMessage)
			err := proto.Unmarshal(mes.Data, pmes)
			if err != nil {
				u.PErr("%v\n", err)
				continue
			}
			if pmes.Blocks != nil {
				for _, blkData := range pmes.Blocks {
					blk, err := blocks.NewBlock(blkData)
					if err != nil {
						u.PErr("%v\n", err)
						continue
					}
					go bs.blockReceive(mes.Peer, blk)
				}
			}

			if pmes.Wantlist != nil {
				for _, want := range pmes.Wantlist {
					go bs.peerWantsBlock(mes.Peer, want)
				}
			}
		case <-bs.haltChan:
			return
		}
	}
}
// peerWantsBlock will check if we have the block in question,
// and then if we do, check the ledger for whether or not we should send it.
func (bs *BitSwap) peerWantsBlock(p *peer.Peer, want string) {
	u.DOut("peer [%s] wants block [%s]\n", p.ID.Pretty(), u.Key(want).Pretty())
	ledger := bs.getLedger(p)
	dsk := ds.NewKey(want)
	val, err := bs.datastore.Get(dsk)
	if err != nil {
		if err == ds.ErrNotFound {
			ledger.Wants(u.Key(want))
		}
		u.PErr("datastore get error: %v\n", err)
		return
	}

	blk, ok := val.([]byte)
	if !ok {
		u.PErr("data conversion error.\n")
		return
	}

	if ledger.ShouldSend() {
		u.DOut("Sending block to peer.\n")
		bblk, err := blocks.NewBlock(blk)
		if err != nil {
			u.PErr("newBlock error: %v\n", err)
			return
		}
		bs.SendBlock(p, bblk)
		ledger.SentBytes(len(blk))
	} else {
		u.DOut("Decided not to send block.\n")
	}
}
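// shouldSendSketch illustrates one plausible policy behind Ledger.ShouldSend
// (purely hypothetical; the real ledger may decide differently): keep sending
// while the ratio of bytes sent to bytes received stays below a threshold,
// with a small bootstrap allowance for peers we have no history with.
func shouldSendSketch(sentBytes, recvBytes uint64) bool {
	const maxDebtRatio = 2.0          // assumption: tolerate sending up to 2x what we receive
	const bootstrap = uint64(1 << 20) // assumption: 1MiB of free credit for new peers
	if recvBytes == 0 {
		return sentBytes < bootstrap
	}
	return float64(sentBytes)/float64(recvBytes) < maxDebtRatio
}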
// Read in all messages from swarm and handle them appropriately
// NOTE: this function is just a quick sketch
func (dht *IpfsDHT) handleMessages() {
	u.DOut("Begin message handling routine\n")

	errs := dht.network.GetErrChan()
	for {
		select {
		case mes, ok := <-dht.netChan.Incoming:
			if !ok {
				u.DOut("handleMessages closing, bad recv on incoming\n")
				return
			}
			pmes := new(PBDHTMessage)
			err := proto.Unmarshal(mes.Data, pmes)
			if err != nil {
				u.PErr("Failed to decode protobuf message: %s\n", err)
				continue
			}

			dht.Update(mes.Peer)

			// Note: not sure if this is the correct place for this
			if pmes.GetResponse() {
				dht.listener.Respond(pmes.GetId(), mes)
				continue
			}

			// u.DOut("[peer: %s]\nGot message type: '%s' [id = %x, from = %s]\n", dht.self.ID.Pretty(), PBDHTMessage_MessageType_name[int32(pmes.GetType())], pmes.GetId(), mes.Peer.ID.Pretty())
			switch pmes.GetType() {
			case PBDHTMessage_GET_VALUE:
				go dht.handleGetValue(mes.Peer, pmes)
			case PBDHTMessage_PUT_VALUE:
				go dht.handlePutValue(mes.Peer, pmes)
			case PBDHTMessage_FIND_NODE:
				go dht.handleFindPeer(mes.Peer, pmes)
			case PBDHTMessage_ADD_PROVIDER:
				go dht.handleAddProvider(mes.Peer, pmes)
			case PBDHTMessage_GET_PROVIDERS:
				go dht.handleGetProviders(mes.Peer, pmes)
			case PBDHTMessage_PING:
				go dht.handlePing(mes.Peer, pmes)
			case PBDHTMessage_DIAGNOSTIC:
				go dht.handleDiagnostic(mes.Peer, pmes)
			default:
				u.PErr("Received invalid message type\n")
			}
		case err := <-errs:
			u.PErr("dht err: %s\n", err)
		case <-dht.shutdown:
			return
		}
	}
}
func (dht *IpfsDHT) FindProvidersAsync(key u.Key, count int, timeout time.Duration) chan *peer.Peer {
	peerOut := make(chan *peer.Peer, count)
	go func() {
		ps := newPeerSet()
		provs := dht.providers.GetProviders(key)
		for _, p := range provs {
			count--
			// NOTE: assuming that this list of peers is unique
			ps.Add(p)
			peerOut <- p
			if count <= 0 {
				return
			}
		}

		peers := dht.routingTables[0].NearestPeers(kb.ConvertKey(key), AlphaValue)
		for _, pp := range peers {
			// Pass the loop variable into the goroutine explicitly so each
			// query uses its own peer rather than the final loop value.
			go func(p *peer.Peer) {
				pmes, err := dht.findProvidersSingle(p, key, 0, timeout)
				if err != nil {
					u.PErr("%v\n", err)
					return
				}
				dht.addPeerListAsync(key, pmes.GetPeers(), ps, count, peerOut)
			}(pp)
		}
	}()
	return peerOut
}
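// exampleCollectProviders (a hypothetical helper, not part of the API) shows
// the intended consumption pattern for FindProvidersAsync: since the returned
// channel is never closed above, a caller should bound its reads with its own
// deadline rather than ranging until the channel closes.
func exampleCollectProviders(dht *IpfsDHT, key u.Key) []*peer.Peer {
	var found []*peer.Peer
	provs := dht.FindProvidersAsync(key, 10, time.Second*5)
	deadline := time.After(time.Second * 5)
	for {
		select {
		case p := <-provs:
			found = append(found, p)
			if len(found) >= 10 {
				return found
			}
		case <-deadline:
			return found
		}
	}
}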
func initConnections(cfg *config.Config, peers *peer.Map, route *dht.IpfsDHT) {
	for _, sp := range cfg.Peers {
		maddr, err := ma.NewMultiaddr(sp.Address)
		if err != nil {
			u.PErr("error: %v\n", err)
			continue
		}

		p, err := route.Connect(maddr)
		if err != nil {
			u.PErr("Bootstrapping error: %v\n", err)
			// Don't index a nil peer after a failed connect
			continue
		}
		(*peers)[p.Key()] = p
	}
}
// getValueSingle simply performs the get value RPC with the given parameters
func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duration, level int) (*PBDHTMessage, error) {
	pmes := Message{
		Type:  PBDHTMessage_GET_VALUE,
		Key:   string(key),
		Value: []byte{byte(level)},
		ID:    swarm.GenerateMessageID(),
	}
	responseChan := dht.listener.Listen(pmes.ID, 1, time.Minute)

	mes := swarm.NewMessage(p, pmes.ToProtobuf())
	t := time.Now()
	dht.netChan.Outgoing <- mes

	// Wait for either the response or a timeout
	timeup := time.After(timeout)
	select {
	case <-timeup:
		dht.listener.Unlisten(pmes.ID)
		return nil, u.ErrTimeout
	case resp, ok := <-responseChan:
		if !ok {
			u.PErr("response channel closed before timeout, please investigate.\n")
			return nil, u.ErrTimeout
		}
		roundtrip := time.Since(t)
		resp.Peer.SetLatency(roundtrip)
		pmesOut := new(PBDHTMessage)
		err := proto.Unmarshal(resp.Data, pmesOut)
		if err != nil {
			return nil, err
		}
		return pmesOut, nil
	}
}
// GetBlock attempts to retrieve a particular block from peers, within timeout.
func (bs *BitSwap) GetBlock(k u.Key, timeout time.Duration) (*blocks.Block, error) {
	u.DOut("Bitswap GetBlock: '%s'\n", k.Pretty())
	begin := time.Now()
	tleft := timeout - time.Now().Sub(begin)
	provsCh := bs.routing.FindProvidersAsync(k, 20, timeout)

	valchan := make(chan []byte)
	after := time.After(tleft)

	// TODO: when the data is received, shut down this for loop ASAP
	go func() {
		for p := range provsCh {
			go func(pr *peer.Peer) {
				blk, err := bs.getBlock(k, pr, tleft)
				if err != nil {
					u.PErr("getBlock returned: %v\n", err)
					return
				}
				select {
				case valchan <- blk:
				default:
				}
			}(p)
		}
	}()

	select {
	case blkdata := <-valchan:
		// Note: don't close valchan here; other fetch goroutines may still
		// attempt a send, and sending on a closed channel panics. The
		// non-blocking send above lets them exit cleanly instead.
		return blocks.NewBlock(blkdata)
	case <-after:
		return nil, u.ErrTimeout
	}
}
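// exampleGetBlock (a hypothetical helper, not part of the API) shows the
// intended call pattern for GetBlock: a single blocking call whose timeout
// budget covers both provider discovery and the block transfer.
func exampleGetBlock(bs *BitSwap, k u.Key) {
	blk, err := bs.GetBlock(k, time.Second*5)
	if err != nil {
		u.PErr("GetBlock failed: %v\n", err)
		return
	}
	u.DOut("got block: %s\n", blk.Key().Pretty())
}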
func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *PBDHTMessage) {
	u.DOut("handleGetValue for key: %s\n", pmes.GetKey())
	dskey := ds.NewKey(pmes.GetKey())
	resp := &Message{
		Response: true,
		ID:       pmes.GetId(),
		Key:      pmes.GetKey(),
	}
	iVal, err := dht.datastore.Get(dskey)
	if err == nil {
		u.DOut("handleGetValue success!\n")
		resp.Success = true
		resp.Value = iVal.([]byte)
	} else if err == ds.ErrNotFound {
		// Check if we know any providers for the requested value
		provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
		if len(provs) > 0 {
			u.DOut("handleGetValue returning %d provider[s]\n", len(provs))
			resp.Peers = provs
			resp.Success = true
		} else {
			// No providers?
			// Find the closest peer on the given cluster to the desired key and reply with that info
			level := 0
			if len(pmes.GetValue()) < 1 {
				// TODO: maybe return an error? Defaulting isn't a good idea IMO
				u.PErr("handleGetValue: no routing level specified, assuming 0\n")
			} else {
				level = int(pmes.GetValue()[0]) // Using value field to specify cluster level
			}

			u.DOut("handleGetValue searching level %d clusters\n", level)

			closer := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))

			if closer.ID.Equal(dht.self.ID) {
				u.DOut("Attempted to return self! this shouldn't happen...\n")
				resp.Peers = nil
				goto out
			}
			// If this peer is closer than the one from the table, return nil
			if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
				resp.Peers = nil
				u.DOut("handleGetValue could not find a closer node than myself.\n")
			} else {
				u.DOut("handleGetValue returning a closer peer: '%s'\n", closer.ID.Pretty())
				resp.Peers = []*peer.Peer{closer}
			}
		}
	} else {
		// temp: what other errors can a datastore return?
		panic(err)
	}

out:
	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.netChan.Outgoing <- mes
}
// NewBlockService creates a BlockService with given datastore instance.
func NewBlockService(d ds.Datastore, rem *bitswap.BitSwap) (*BlockService, error) {
	if d == nil {
		return nil, fmt.Errorf("BlockService requires valid datastore")
	}
	if rem == nil {
		u.PErr("Caution: blockservice running in local (offline) mode.\n")
	}
	return &BlockService{Datastore: d, Remote: rem}, nil
}
func refCmd(c *commander.Command, inp []string) error {
	if len(inp) < 1 {
		u.POut(c.Long)
		return nil
	}

	n, err := localNode(false)
	if err != nil {
		return err
	}

	recursive := c.Flag.Lookup("r").Value.Get().(bool)
	unique := c.Flag.Lookup("u").Value.Get().(bool)
	refsSeen := map[u.Key]bool{}

	printRef := func(h mh.Multihash) {
		if unique {
			_, found := refsSeen[u.Key(h)]
			if found {
				return
			}
			refsSeen[u.Key(h)] = true
		}

		u.POut("%s\n", h.B58String())
	}

	var printRefs func(nd *mdag.Node, recursive bool)
	printRefs = func(nd *mdag.Node, recursive bool) {
		for _, link := range nd.Links {
			printRef(link.Hash)
			if recursive {
				nd, err := n.DAG.Get(u.Key(link.Hash))
				if err != nil {
					u.PErr("error: cannot retrieve %s (%s)\n", link.Hash.B58String(), err)
					return
				}

				printRefs(nd, recursive)
			}
		}
	}

	for _, fn := range inp {
		nd, err := n.Resolver.ResolvePath(fn)
		if err != nil {
			return err
		}

		printRefs(nd, recursive)
	}
	return nil
}
// NewMessage is a helper for cleanly constructing a Message from a peer and a
// protobuf payload.
func NewMessage(p *peer.Peer, data proto.Message) *Message {
	bytes, err := proto.Marshal(data)
	if err != nil {
		u.PErr("%v\n", err.Error())
		return nil
	}
	return &Message{
		Peer: p,
		Data: bytes,
	}
}
func (dht *IpfsDHT) getValueOrPeers(p *peer.Peer, key u.Key, timeout time.Duration, level int) ([]byte, []*peer.Peer, error) {
	pmes, err := dht.getValueSingle(p, key, timeout, level)
	if err != nil {
		return nil, nil, err
	}

	if pmes.GetSuccess() {
		if pmes.Value == nil { // We were given provider[s]
			val, err := dht.getFromPeerList(key, timeout, pmes.GetPeers(), level)
			if err != nil {
				return nil, nil, err
			}
			return val, nil, nil
		}

		// Success! We were given the value
		return pmes.GetValue(), nil, nil
	}

	// We were given a closer node
	var peers []*peer.Peer
	for _, pb := range pmes.GetPeers() {
		if peer.ID(pb.GetId()).Equal(dht.self.ID) {
			continue
		}
		addr, err := ma.NewMultiaddr(pb.GetAddr())
		if err != nil {
			u.PErr("%v\n", err.Error())
			continue
		}

		np, err := dht.network.GetConnection(peer.ID(pb.GetId()), addr)
		if err != nil {
			u.PErr("%v\n", err.Error())
			continue
		}

		peers = append(peers, np)
	}
	return nil, peers, nil
}
// FindProviders searches for peers who can provide the value for given key.
func (dht *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer, error) {
	ll := startNewRPC("FindProviders")
	defer func() {
		ll.EndLog()
		ll.Print()
	}()
	u.DOut("Find providers for: '%s'\n", key)
	p := dht.routingTables[0].NearestPeer(kb.ConvertKey(key))
	if p == nil {
		return nil, kb.ErrLookupFailure
	}

	for level := 0; level < len(dht.routingTables); {
		pmes, err := dht.findProvidersSingle(p, key, level, timeout)
		if err != nil {
			return nil, err
		}
		if pmes.GetSuccess() {
			u.DOut("Got providers back from findProviders call!\n")
			provs := dht.addPeerList(key, pmes.GetPeers())
			ll.Success = true
			return provs, nil
		}

		u.DOut("Didn't get providers, just closer peers.\n")
		closer := pmes.GetPeers()
		if len(closer) == 0 {
			level++
			continue
		}
		if peer.ID(closer[0].GetId()).Equal(dht.self.ID) {
			u.DOut("Got myself back as a closer peer.\n")
			return nil, u.ErrNotFound
		}
		maddr, err := ma.NewMultiaddr(closer[0].GetAddr())
		if err != nil {
			// ??? Move up a routing level???
			panic("not yet implemented")
		}

		np, err := dht.network.GetConnection(peer.ID(closer[0].GetId()), maddr)
		if err != nil {
			u.PErr("[%s] Failed to connect to: %s\n", dht.self.ID.Pretty(), closer[0].GetAddr())
			level++
			continue
		}
		p = np
	}
	return nil, u.ErrNotFound
}
func (dl *DaemonListener) Listen() {
	fmt.Println("listen.")
	for {
		conn, err := dl.list.Accept()
		fmt.Println("Loop!")
		if err != nil {
			if !dl.closed {
				u.PErr("DaemonListener Accept: %v\n", err)
			}
			return
		}
		go dl.handleConnection(conn)
	}
}
// addPeerListAsync is the asynchronous counterpart to addPeerList: it connects
// to the given peers and streams each newly found provider to out, stopping
// once the peer set reaches count.
func (dht *IpfsDHT) addPeerListAsync(k u.Key, peers []*PBDHTMessage_PBPeer, ps *peerSet, count int, out chan *peer.Peer) {
	for _, pbp := range peers {
		if peer.ID(pbp.GetId()).Equal(dht.self.ID) {
			continue
		}
		maddr, err := ma.NewMultiaddr(pbp.GetAddr())
		if err != nil {
			u.PErr("%v\n", err)
			continue
		}
		p, err := dht.network.GetConnection(peer.ID(pbp.GetId()), maddr)
		if err != nil {
			u.PErr("%v\n", err)
			continue
		}
		dht.providers.AddProvider(k, p)
		if ps.AddIfSmallerThan(p, count) {
			out <- p
		} else if ps.Size() >= count {
			return
		}
	}
}
func (s *Swarm) muxChan(ch *Chan, typ PBWrapper_MessageType) {
	for {
		select {
		case <-ch.Close:
			return
		case mes := <-ch.Outgoing:
			data, err := Wrap(mes.Data, typ)
			if err != nil {
				u.PErr("muxChan error: %s\n", err)
				continue
			}
			mes.Data = data
			s.Chan.Outgoing <- mes
		}
	}
}
func (bs *BitSwap) blockReceive(p *peer.Peer, blk *blocks.Block) {
	u.DOut("blockReceive: %s\n", blk.Key().Pretty())
	err := bs.datastore.Put(ds.NewKey(string(blk.Key())), blk.Data)
	if err != nil {
		u.PErr("blockReceive error: %v\n", err)
		return
	}

	mes := &swarm.Message{
		Peer: p,
		Data: blk.Data,
	}
	bs.listener.Respond(string(blk.Key()), mes)

	ledger := bs.getLedger(p)
	ledger.ReceivedBytes(len(blk.Data))
}
// Open listeners for each network the swarm should listen on
func (s *Swarm) Listen() error {
	var retErr *SwarmListenErr

	for i, addr := range s.local.Addresses {
		err := s.connListen(addr)
		if err != nil {
			if retErr == nil {
				retErr = new(SwarmListenErr)
				retErr.Errors = make([]error, len(s.local.Addresses))
			}
			retErr.Errors[i] = err
			u.PErr("Failed to listen on: %s [%s]\n", addr, err)
		}
	}
	if retErr == nil {
		return nil
	}
	return retErr
}
func (bs *BitSwap) getBlock(k u.Key, p *peer.Peer, timeout time.Duration) ([]byte, error) {
	u.DOut("[%s] getBlock '%s' from [%s]\n", bs.peer.ID.Pretty(), k.Pretty(), p.ID.Pretty())

	message := newMessage()
	message.AppendWanted(k)

	after := time.After(timeout)
	resp := bs.listener.Listen(string(k), 1, timeout)
	bs.meschan.Outgoing <- message.ToSwarm(p)

	select {
	case respMes := <-resp:
		return respMes.Data, nil
	case <-after:
		u.PErr("getBlock for '%s' timed out.\n", k.Pretty())
		return nil, u.ErrTimeout
	}
}
func (ss *SizeSplitter) Split(r io.Reader) chan []byte {
	out := make(chan []byte)
	go func() {
		defer close(out)
		for {
			chunk := make([]byte, ss.Size)
			nread, err := r.Read(chunk)
			if err != nil {
				if err == io.EOF {
					// Read may return data along with io.EOF; don't drop it
					if nread > 0 {
						out <- chunk[:nread]
					}
					return
				}
				u.PErr("block split error: %v\n", err)
				return
			}
			if nread < ss.Size {
				chunk = chunk[:nread]
			}
			out <- chunk
		}
	}()
	return out
}
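// exampleSplit (a hypothetical helper) shows how SizeSplitter is meant to be
// consumed: range over the returned channel until it is closed, which happens
// on EOF or on a read error.
func exampleSplit(r io.Reader) int {
	ss := &SizeSplitter{Size: 1024}
	total := 0
	for chunk := range ss.Split(r) {
		total += len(chunk)
	}
	return total
}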
// TODO: Could be done async
func (dht *IpfsDHT) addPeerList(key u.Key, peers []*PBDHTMessage_PBPeer) []*peer.Peer {
	var provArr []*peer.Peer
	for _, prov := range peers {
		// Don't add ourselves to the list
		if peer.ID(prov.GetId()).Equal(dht.self.ID) {
			continue
		}
		// Don't add someone who is already on the list
		p := dht.network.Find(u.Key(prov.GetId()))
		if p == nil {
			u.DOut("given provider %s was not in our network already.\n", peer.ID(prov.GetId()).Pretty())
			var err error
			p, err = dht.peerFromInfo(prov)
			if err != nil {
				u.PErr("error connecting to new peer: %s\n", err)
				continue
			}
		}
		dht.providers.AddProvider(key, p)
		provArr = append(provArr, p)
	}
	return provArr
}