func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *PBDHTMessage) {
	resp := Message{
		Type:     pmes.GetType(),
		ID:       pmes.GetId(),
		Response: true,
	}
	defer func() {
		mes := swarm.NewMessage(p, resp.ToProtobuf())
		dht.netChan.Outgoing <- mes
	}()

	level := pmes.GetValue()[0]
	u.DOut("handleFindPeer: searching for '%s'\n", peer.ID(pmes.GetKey()).Pretty())
	closest := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
	if closest == nil {
		u.PErr("handleFindPeer: could not find anything.\n")
		return
	}

	if len(closest.Addresses) == 0 {
		u.PErr("handleFindPeer: no addresses for connected peer...\n")
		return
	}

	// If the found peer is further away than this peer...
	if kb.Closer(dht.self.ID, closest.ID, u.Key(pmes.GetKey())) {
		return
	}

	u.DOut("handleFindPeer: sending back '%s'\n", closest.ID.Pretty())
	resp.Peers = []*peer.Peer{closest}
	resp.Success = true
}
// peerWantsBlock checks if we have the block in question,
// and if we do, checks the ledger for whether or not we should send it.
func (bs *BitSwap) peerWantsBlock(p *peer.Peer, want string) {
	u.DOut("peer [%s] wants block [%s]\n", p.ID.Pretty(), u.Key(want).Pretty())
	ledger := bs.getLedger(p)

	dsk := ds.NewKey(want)
	blkI, err := bs.datastore.Get(dsk)
	if err != nil {
		if err == ds.ErrNotFound {
			// Remember the want so the block can be sent if it arrives later.
			ledger.Wants(u.Key(want))
		}
		u.PErr("datastore get error: %v\n", err)
		return
	}

	blk, ok := blkI.([]byte)
	if !ok {
		u.PErr("data conversion error.\n")
		return
	}

	if ledger.ShouldSend() {
		u.DOut("Sending block to peer.\n")
		bblk, err := blocks.NewBlock(blk)
		if err != nil {
			u.PErr("newBlock error: %v\n", err)
			return
		}
		bs.SendBlock(p, bblk)
		ledger.SentBytes(len(blk))
	} else {
		u.DOut("Decided not to send block.\n")
	}
}
// GetBlock retrieves a particular block from the service,
// getting it from the datastore using the key (hash).
func (s *BlockService) GetBlock(k u.Key) (*blocks.Block, error) {
	u.DOut("BlockService GetBlock: '%s'\n", k.Pretty())
	dsk := ds.NewKey(string(k))
	datai, err := s.Datastore.Get(dsk)
	if err == nil {
		u.DOut("Blockservice: Got data in datastore.\n")
		bdata, ok := datai.([]byte)
		if !ok {
			return nil, fmt.Errorf("data associated with %s is not a []byte", k)
		}
		return &blocks.Block{
			Multihash: mh.Multihash(k),
			Data:      bdata,
		}, nil
	} else if err == ds.ErrNotFound && s.Remote != nil {
		u.DOut("Blockservice: Searching bitswap.\n")
		blk, err := s.Remote.GetBlock(k, time.Second*5)
		if err != nil {
			return nil, err
		}
		return blk, nil
	} else {
		// Note: any other datastore error is also reported as not found.
		u.DOut("Blockservice GetBlock: Not found.\n")
		return nil, u.ErrNotFound
	}
}
// ResolvePath fetches the node for the given path. It uses the first
// path component as a hash (key) of the first node, then resolves
// all other components walking the links, with ResolveLinks.
func (s *Resolver) ResolvePath(fpath string) (*merkledag.Node, error) {
	u.DOut("Resolve: '%s'\n", fpath)
	fpath = path.Clean(fpath)

	parts := strings.Split(fpath, "/")

	// skip over empty first elem
	if len(parts[0]) == 0 {
		parts = parts[1:]
	}

	// if nothing, bail.
	if len(parts) == 0 {
		return nil, fmt.Errorf("ipfs path must contain at least one component")
	}

	// first element in the path is a b58 hash (for now)
	h, err := mh.FromB58String(parts[0])
	if err != nil {
		return nil, err
	}

	u.DOut("Resolve dag get.\n")
	nd, err := s.DAG.Get(u.Key(h))
	if err != nil {
		return nil, err
	}

	return s.ResolveLinks(nd, parts[1:])
}
// Ping a peer, log the time it took
func (dht *IpfsDHT) Ping(p *peer.Peer, timeout time.Duration) error {
	// Thoughts: maybe this should accept an ID and do a peer lookup?
	u.DOut("Enter Ping.\n")

	pmes := Message{ID: swarm.GenerateMessageID(), Type: PBDHTMessage_PING}
	mes := swarm.NewMessage(p, pmes.ToProtobuf())

	before := time.Now()
	responseChan := dht.listener.Listen(pmes.ID, 1, time.Minute)
	dht.netChan.Outgoing <- mes

	tout := time.After(timeout)
	select {
	case <-responseChan:
		roundtrip := time.Since(before)
		p.SetLatency(roundtrip)
		u.DOut("Ping took %s.\n", roundtrip.String())
		return nil
	case <-tout:
		// Timed out, think about removing peer from network
		u.DOut("[%s] Ping peer [%s] timed out.\n", dht.self.ID.Pretty(), p.ID.Pretty())
		dht.listener.Unlisten(pmes.ID)
		return u.ErrTimeout
	}
}
func (l *logDhtRPC) Print() {
	b, err := json.Marshal(l)
	if err != nil {
		u.DOut(err.Error())
	} else {
		u.DOut(string(b))
	}
}
// Read in all messages from the swarm and handle them appropriately.
// NOTE: this function is just a quick sketch
func (dht *IpfsDHT) handleMessages() {
	u.DOut("Begin message handling routine\n")

	errs := dht.network.GetErrChan()
	for {
		select {
		case mes, ok := <-dht.netChan.Incoming:
			if !ok {
				u.DOut("handleMessages closing, bad recv on incoming\n")
				return
			}
			pmes := new(PBDHTMessage)
			err := proto.Unmarshal(mes.Data, pmes)
			if err != nil {
				u.PErr("Failed to decode protobuf message: %s\n", err)
				continue
			}

			dht.Update(mes.Peer)

			// Note: not sure if this is the correct place for this
			if pmes.GetResponse() {
				dht.listener.Respond(pmes.GetId(), mes)
				continue
			}
			// u.DOut("[peer: %s]\nGot message type: '%s' [id = %x, from = %s]\n",
			// 	dht.self.ID.Pretty(),
			// 	PBDHTMessage_MessageType_name[int32(pmes.GetType())],
			// 	pmes.GetId(), mes.Peer.ID.Pretty())

			switch pmes.GetType() {
			case PBDHTMessage_GET_VALUE:
				go dht.handleGetValue(mes.Peer, pmes)
			case PBDHTMessage_PUT_VALUE:
				go dht.handlePutValue(mes.Peer, pmes)
			case PBDHTMessage_FIND_NODE:
				go dht.handleFindPeer(mes.Peer, pmes)
			case PBDHTMessage_ADD_PROVIDER:
				go dht.handleAddProvider(mes.Peer, pmes)
			case PBDHTMessage_GET_PROVIDERS:
				go dht.handleGetProviders(mes.Peer, pmes)
			case PBDHTMessage_PING:
				go dht.handlePing(mes.Peer, pmes)
			case PBDHTMessage_DIAGNOSTIC:
				go dht.handleDiagnostic(mes.Peer, pmes)
			default:
				u.PErr("Received invalid message type\n")
			}
		case err := <-errs:
			u.PErr("dht err: %s\n", err)
		case <-dht.shutdown:
			return
		}
	}
}
// FindProviders searches for peers who can provide the value for the given key.
func (dht *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer, error) {
	ll := startNewRPC("FindProviders")
	defer func() {
		ll.EndLog()
		ll.Print()
	}()
	u.DOut("Find providers for: '%s'\n", key)
	p := dht.routingTables[0].NearestPeer(kb.ConvertKey(key))
	if p == nil {
		return nil, kb.ErrLookupFailure
	}

	for level := 0; level < len(dht.routingTables); {
		pmes, err := dht.findProvidersSingle(p, key, level, timeout)
		if err != nil {
			return nil, err
		}
		if pmes.GetSuccess() {
			u.DOut("Got providers back from findProviders call!\n")
			provs := dht.addPeerList(key, pmes.GetPeers())
			ll.Success = true
			return provs, nil
		}

		u.DOut("Didn't get providers, just closer peers.\n")
		closer := pmes.GetPeers()
		if len(closer) == 0 {
			level++
			continue
		}
		if peer.ID(closer[0].GetId()).Equal(dht.self.ID) {
			u.DOut("Got myself back as a closer peer.\n")
			return nil, u.ErrNotFound
		}
		maddr, err := ma.NewMultiaddr(closer[0].GetAddr())
		if err != nil {
			// TODO: ??? Move up a routing level ???
			panic("not yet implemented")
		}

		np, err := dht.network.GetConnection(peer.ID(closer[0].GetId()), maddr)
		if err != nil {
			u.PErr("[%s] Failed to connect to: %s\n", dht.self.ID.Pretty(), closer[0].GetAddr())
			level++
			continue
		}
		p = np
	}
	return nil, u.ErrNotFound
}
func (dht *IpfsDHT) getDiagnostic(timeout time.Duration) ([]*diagInfo, error) {
	u.DOut("Begin Diagnostic\n")
	// Send to N closest peers
	targets := dht.routingTables[0].NearestPeers(kb.ConvertPeerID(dht.self.ID), 10)

	// TODO: Add timeout to this struct so nodes know when to return
	pmes := Message{
		Type: PBDHTMessage_DIAGNOSTIC,
		ID:   swarm.GenerateMessageID(),
	}

	listenChan := dht.listener.Listen(pmes.ID, len(targets), time.Minute*2)

	pbmes := pmes.ToProtobuf()
	for _, p := range targets {
		mes := swarm.NewMessage(p, pbmes)
		dht.netChan.Outgoing <- mes
	}

	var out []*diagInfo
	after := time.After(timeout)
	for count := len(targets); count > 0; {
		select {
		case <-after:
			u.DOut("Diagnostic request timed out.\n")
			return out, u.ErrTimeout
		case resp := <-listenChan:
			pmesOut := new(PBDHTMessage)
			err := proto.Unmarshal(resp.Data, pmesOut)
			if err != nil {
				// NOTE: here and elsewhere, need to audit error handling,
				// some errors should be continued on from
				return out, err
			}

			dec := json.NewDecoder(bytes.NewBuffer(pmesOut.GetValue()))
			for {
				di := new(diagInfo)
				err := dec.Decode(di)
				if err != nil {
					break
				}
				out = append(out, di)
			}
			// One target accounted for; without this decrement the loop
			// could only ever exit via the timeout.
			count--
		}
	}

	return out, nil
}
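// The decode loop above pulls several concatenated diagInfo values out of a
// single response body. That works because json.Decoder reads a *stream* of
// JSON values, not just one document. A self-contained illustration of the
// same pattern on a fixed input (the values and struct here are made up for
// the example, not taken from the codebase):
func decodeStream() {
	dec := json.NewDecoder(bytes.NewBufferString(`{"n":1}{"n":2}{"n":3}`))
	for {
		var v struct{ N int }
		if err := dec.Decode(&v); err != nil {
			break // io.EOF once the stream is drained
		}
		fmt.Println(v.N) // prints 1, then 2, then 3
	}
}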
func (dl *DaemonListener) handleConnection(conn net.Conn) {
	defer conn.Close()

	dec := json.NewDecoder(conn)

	var command Command
	err := dec.Decode(&command)
	if err != nil {
		fmt.Fprintln(conn, err)
		return
	}

	u.DOut("Got command: %v\n", command)
	switch command.Command {
	case "add":
		err = commands.Add(dl.node, command.Args, command.Opts, conn)
	case "cat":
		err = commands.Cat(dl.node, command.Args, command.Opts, conn)
	case "ls":
		err = commands.Ls(dl.node, command.Args, command.Opts, conn)
	case "pin":
		err = commands.Pin(dl.node, command.Args, command.Opts, conn)
	case "peers":
		err = commands.Peers(dl.node, command.Args, command.Opts, conn)
	default:
		err = fmt.Errorf("Invalid Command: '%s'", command.Command)
	}
	if err != nil {
		fmt.Fprintln(conn, err)
	}
}
// GetBlock attempts to retrieve a particular block from peers, within timeout.
func (bs *BitSwap) GetBlock(k u.Key, timeout time.Duration) (*blocks.Block, error) {
	u.DOut("Bitswap GetBlock: '%s'\n", k.Pretty())
	begin := time.Now()
	tleft := timeout - time.Since(begin)
	provsCh := bs.routing.FindProvidersAsync(k, 20, timeout)

	valchan := make(chan []byte)
	after := time.After(tleft)

	// TODO: when the data is received, shut down this for loop ASAP
	go func() {
		for p := range provsCh {
			go func(pr *peer.Peer) {
				blk, err := bs.getBlock(k, pr, tleft)
				if err != nil {
					u.PErr("getBlock returned: %v\n", err)
					return
				}
				select {
				case valchan <- blk:
				default:
				}
			}(p)
		}
	}()

	select {
	case blkdata := <-valchan:
		// Note: valchan is deliberately left open here; closing it could
		// panic a straggling sender in the goroutines above.
		return blocks.NewBlock(blkdata)
	case <-after:
		return nil, u.ErrTimeout
	}
}
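// The select-with-default send above is a "first response wins" fan-out:
// the first fetch to succeed delivers its block, and later finishers drop
// theirs instead of blocking forever. A minimal self-contained sketch of
// that pattern (generic Go, not code from this codebase; the sleeps stand
// in for network fetches):
func firstResponse() int {
	results := make(chan int)
	for i := 1; i <= 3; i++ {
		go func(n int) {
			time.Sleep(time.Duration(n*10) * time.Millisecond) // simulated fetch
			select {
			case results <- n: // first finisher delivers its result
			default: // later finishers drop theirs; the receiver is gone
			}
		}(i)
	}
	// Note: this relies on the receiver already waiting when the first
	// send fires, just as GetBlock blocks on its select.
	return <-results
}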
func (dht *IpfsDHT) findProvidersSingle(p *peer.Peer, key u.Key, level int, timeout time.Duration) (*PBDHTMessage, error) {
	pmes := Message{
		Type:  PBDHTMessage_GET_PROVIDERS,
		Key:   string(key),
		ID:    swarm.GenerateMessageID(),
		Value: []byte{byte(level)},
	}

	mes := swarm.NewMessage(p, pmes.ToProtobuf())

	listenChan := dht.listener.Listen(pmes.ID, 1, time.Minute)
	dht.netChan.Outgoing <- mes
	after := time.After(timeout)
	select {
	case <-after:
		dht.listener.Unlisten(pmes.ID)
		return nil, u.ErrTimeout
	case resp := <-listenChan:
		u.DOut("FindProviders: got response.\n")
		pmesOut := new(PBDHTMessage)
		err := proto.Unmarshal(resp.Data, pmesOut)
		if err != nil {
			return nil, err
		}
		return pmesOut, nil
	}
}
// Attr returns the attributes of a given node.
func (s *Node) Attr() fuse.Attr {
	u.DOut("Node attr.\n")
	if len(s.Nd.Links) > 0 {
		return fuse.Attr{Mode: os.ModeDir | 0555}
	}

	size, _ := s.Nd.Size()
	return fuse.Attr{Mode: 0444, Size: uint64(size)}
}
func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *PBDHTMessage) {
	u.DOut("handleGetValue for key: %s\n", pmes.GetKey())
	dskey := ds.NewKey(pmes.GetKey())
	resp := &Message{
		Response: true,
		ID:       pmes.GetId(),
		Key:      pmes.GetKey(),
	}
	iVal, err := dht.datastore.Get(dskey)
	if err == nil {
		u.DOut("handleGetValue success!\n")
		resp.Success = true
		resp.Value = iVal.([]byte)
	} else if err == ds.ErrNotFound {
		// Check if we know any providers for the requested value
		provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
		if len(provs) > 0 {
			u.DOut("handleGetValue returning %d provider[s]\n", len(provs))
			resp.Peers = provs
			resp.Success = true
		} else {
			// No providers?
			// Find the closest peer on the given cluster to the desired key
			// and reply with that info.
			level := 0
			if len(pmes.GetValue()) < 1 {
				// TODO: maybe return an error? Defaulting isn't a good idea IMO
				u.PErr("handleGetValue: no routing level specified, assuming 0\n")
			} else {
				level = int(pmes.GetValue()[0]) // Using value field to specify cluster level
			}

			u.DOut("handleGetValue searching level %d clusters\n", level)
			closer := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
			// Guard against an empty routing table; NearestPeer can return nil.
			if closer == nil {
				u.PErr("handleGetValue: no closer peer found in routing table\n")
				resp.Peers = nil
				goto out
			}
			if closer.ID.Equal(dht.self.ID) {
				u.DOut("Attempted to return self! this shouldn't happen...\n")
				resp.Peers = nil
				goto out
			}

			// If this peer is closer than the one from the table, return nil
			if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
				resp.Peers = nil
				u.DOut("handleGetValue could not find a closer node than myself.\n")
			} else {
				u.DOut("handleGetValue returning a closer peer: '%s'\n", closer.ID.Pretty())
				resp.Peers = []*peer.Peer{closer}
			}
		}
	} else {
		// temp: what other errors can a datastore return?
		panic(err)
	}

out:
	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.netChan.Outgoing <- mes
}
// Lookup performs a lookup under this node.
func (s *Node) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
	u.DOut("Lookup '%s'\n", name)
	nd, err := s.Ipfs.Resolver.ResolveLinks(s.Nd, []string{name})
	if err != nil {
		// todo: make this error more versatile.
		return nil, fuse.ENOENT
	}

	return &Node{Ipfs: s.Ipfs, Nd: nd}, nil
}
func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *PBDHTMessage) {
	u.DOut("[%s] Responding to ping from [%s]!\n", dht.self.ID.Pretty(), p.ID.Pretty())
	resp := Message{
		Type:     pmes.GetType(),
		Response: true,
		ID:       pmes.GetId(),
	}

	dht.netChan.Outgoing <- swarm.NewMessage(p, resp.ToProtobuf())
}
// ReadAll reads the object data as file data
func (s *Node) ReadAll(intr fs.Intr) ([]byte, fuse.Error) {
	u.DOut("Read node.\n")
	r, err := mdag.NewDagReader(s.Nd, s.Ipfs.DAG)
	if err != nil {
		return nil, err
	}
	// this is a terrible function... 'ReadAll'?
	// what if I have a 6TB file? GG RAM.
	return ioutil.ReadAll(r)
}
// AddBlock adds a particular block to the service, putting it into the datastore.
func (s *BlockService) AddBlock(b *blocks.Block) (u.Key, error) {
	k := b.Key()
	dsk := ds.NewKey(string(k))
	u.DOut("storing [%s] in datastore\n", k.Pretty())
	err := s.Datastore.Put(dsk, b.Data)
	if err != nil {
		return k, err
	}

	if s.Remote != nil {
		err = s.Remote.HaveBlock(b)
	}
	return k, err
}
// Close closes the connection, and associated channels.
func (s *Conn) Close() error {
	u.DOut("Closing Conn.\n")
	if s.Conn == nil {
		return fmt.Errorf("Already closed") // already closed
	}

	// closing net connection
	err := s.Conn.Close()
	s.Conn = nil

	// closing channels
	s.Incoming.Close()
	s.Outgoing.Close()
	s.Closed <- true
	return err
}
// Drop removes a given peer from the swarm and closes connections to it.
func (s *Swarm) Drop(p *peer.Peer) error {
	u.DOut("Dropping peer: [%s]\n", p.ID.Pretty())
	s.connsLock.RLock()
	conn, found := s.conns[u.Key(p.ID)]
	s.connsLock.RUnlock()

	if !found {
		return u.ErrNotFound
	}

	s.connsLock.Lock()
	delete(s.conns, u.Key(p.ID))
	s.connsLock.Unlock()

	return conn.Close()
}
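// Note: the lookup above happens under the read lock, but the delete happens
// under a separate write lock, so two concurrent Drop calls for the same peer
// could both find the connection and both call Close. A minimal sketch of a
// single-critical-section variant (illustrative, not part of the original
// code; it assumes the same Swarm fields as Drop above):
func (s *Swarm) dropAtomic(p *peer.Peer) error {
	s.connsLock.Lock()
	conn, found := s.conns[u.Key(p.ID)]
	if found {
		// Remove while still holding the write lock, so only one caller
		// can ever observe (and close) this connection.
		delete(s.conns, u.Key(p.ID))
	}
	s.connsLock.Unlock()

	if !found {
		return u.ErrNotFound
	}
	return conn.Close()
}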
// Lookup performs a lookup under this node.
func (s *Root) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {
	u.DOut("Root Lookup: '%s'\n", name)
	switch name {
	case "mach_kernel", ".hidden", "._.":
		// Just quiet some log noise on OS X.
		return nil, fuse.ENOENT
	}

	nd, err := s.Ipfs.Resolver.ResolvePath(name)
	if err != nil {
		// todo: make this error more versatile.
		return nil, fuse.ENOENT
	}

	return &Node{Ipfs: s.Ipfs, Nd: nd}, nil
}
// ReadDir reads the link structure as directory entries
func (s *Node) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
	u.DOut("Node ReadDir\n")
	entries := make([]fuse.Dirent, len(s.Nd.Links))
	for i, link := range s.Nd.Links {
		n := link.Name
		if len(n) == 0 {
			n = link.Hash.B58String()
		}
		entries[i] = fuse.Dirent{Name: n, Type: fuse.DT_File}
	}

	if len(entries) > 0 {
		return entries, nil
	}
	return nil, fuse.ENOENT
}
func (bs *BitSwap) blockReceive(p *peer.Peer, blk *blocks.Block) {
	u.DOut("blockReceive: %s\n", blk.Key().Pretty())
	err := bs.datastore.Put(ds.NewKey(string(blk.Key())), blk.Data)
	if err != nil {
		u.PErr("blockReceive error: %v\n", err)
		return
	}

	mes := &swarm.Message{
		Peer: p,
		Data: blk.Data,
	}
	bs.listener.Respond(string(blk.Key()), mes)

	ledger := bs.getLedger(p)
	ledger.ReceivedBytes(len(blk.Data))
}
func (bs *BitSwap) getBlock(k u.Key, p *peer.Peer, timeout time.Duration) ([]byte, error) {
	u.DOut("[%s] getBlock '%s' from [%s]\n", bs.peer.ID.Pretty(), k.Pretty(), p.ID.Pretty())

	message := newMessage()
	message.AppendWanted(k)

	after := time.After(timeout)
	resp := bs.listener.Listen(string(k), 1, timeout)
	bs.meschan.Outgoing <- message.ToSwarm(p)

	select {
	case respMes := <-resp:
		return respMes.Data, nil
	case <-after:
		u.PErr("getBlock for '%s' timed out.\n", k.Pretty())
		return nil, u.ErrTimeout
	}
}
// StartConn adds the passed in connection to its peerMap and starts
// the fanIn routine for that connection
func (s *Swarm) StartConn(conn *Conn) error {
	if conn == nil {
		return errors.New("Tried to start nil connection.")
	}

	u.DOut("Starting connection: %s\n", conn.Peer.Key().Pretty())
	// add to conns
	s.connsLock.Lock()
	if _, ok := s.conns[conn.Peer.Key()]; ok {
		s.connsLock.Unlock()
		return ErrAlreadyOpen
	}
	s.conns[conn.Peer.Key()] = conn
	s.connsLock.Unlock()

	// kick off reader goroutine
	go s.fanIn(conn)
	return nil
}
// Add adds a node to the DAGService, storing the block in the BlockService
func (n *DAGService) Add(nd *Node) (u.Key, error) {
	if n == nil {
		return "", fmt.Errorf("DAGService is nil")
	}

	k, _ := nd.Key()
	u.DOut("DagService Add [%s]\n", k.Pretty())

	d, err := nd.Encoded(false)
	if err != nil {
		return "", err
	}

	b, err := blocks.NewBlock(d)
	if err != nil {
		return "", err
	}

	return n.Blocks.AddBlock(b)
}
// Connect to a new peer at the given address, ping and add to the routing table
func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) {
	maddrstr, _ := addr.String()
	u.DOut("Connect to new peer: %s\n", maddrstr)
	npeer, err := dht.network.ConnectNew(addr)
	if err != nil {
		return nil, err
	}

	// Ping new peer to register in their routing table
	// NOTE: this should be done better...
	err = dht.Ping(npeer, time.Second*2)
	if err != nil {
		return nil, fmt.Errorf("failed to ping newly connected peer: %s", err)
	}

	dht.Update(npeer)

	return npeer, nil
}
// TODO: Could be done async
func (dht *IpfsDHT) addPeerList(key u.Key, peers []*PBDHTMessage_PBPeer) []*peer.Peer {
	var provArr []*peer.Peer
	for _, prov := range peers {
		// Don't add ourselves to the list
		if peer.ID(prov.GetId()).Equal(dht.self.ID) {
			continue
		}
		// Don't add someone who is already on the list
		p := dht.network.Find(u.Key(prov.GetId()))
		if p == nil {
			u.DOut("given provider %s was not in our network already.\n", peer.ID(prov.GetId()).Pretty())
			var err error
			p, err = dht.peerFromInfo(prov)
			if err != nil {
				u.PErr("error connecting to new peer: %s\n", err)
				continue
			}
		}
		dht.providers.AddProvider(key, p)
		provArr = append(provArr, p)
	}
	return provArr
}
func (ml *MessageListener) run() {
	for {
		select {
		case <-ml.haltchan:
			return
		case id := <-ml.unlist:
			trg, ok := ml.listeners[id]
			if !ok {
				continue
			}
			close(trg.resp)
			delete(ml.listeners, id)
		case li := <-ml.nlist:
			ml.listeners[li.id] = li
		case s := <-ml.send:
			trg, ok := ml.listeners[s.id]
			if !ok {
				u.DOut("Send with no listener.\n")
				continue
			}

			if time.Now().After(trg.eol) {
				close(trg.resp)
				delete(ml.listeners, s.id)
				continue
			}

			trg.resp <- s.mes
			trg.count--

			if trg.count == 0 {
				close(trg.resp)
				delete(ml.listeners, s.id)
			}
		}
	}
}
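// For readability, a sketch of the state run() above manipulates,
// reconstructed from its call sites: Listen registers an entry via ml.nlist,
// Respond routes a message via ml.send, and Unlisten cancels via ml.unlist.
// Field names (resp, id, count, eol; listeners, haltchan, unlist, nlist,
// send) come from run() itself; the struct names and concrete types here
// are assumptions, not taken from the source.
type listenInfo struct {
	resp  chan *swarm.Message // where matched responses are delivered
	id    string              // message ID this entry listens for
	count int                 // responses still expected before auto-close
	eol   time.Time           // end-of-life; entries past this are dropped
}

type respMes struct {
	id  string         // target listener ID
	mes *swarm.Message // the response to deliver
}

type MessageListener struct {
	listeners map[string]*listenInfo // active listeners, keyed by message ID
	nlist     chan *listenInfo       // register a new listener
	send      chan *respMes          // route an incoming response
	unlist    chan string            // cancel a listener by ID
	haltchan  chan struct{}          // stop the run loop
}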
// ReadDir reads a particular directory. Disallowed for root.
func (*Root) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {
	u.DOut("Read Root.\n")
	return nil, fuse.EPERM
}