// betterPeersToQuery returns nearestPeersToQuery, but only those peers closer to the key than we are ourselves.
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) []peer.ID {
	closer := dht.nearestPeersToQuery(pmes, count)

	// no nodes? return nil
	if closer == nil {
		return nil
	}

	// the routing table should never hand back ourselves; that's bad.
	for _, clp := range closer {
		if clp == dht.self {
			log.Debug("Attempted to return self! this shouldn't happen...")
			return nil
		}
	}

	key := key.Key(pmes.GetKey())
	var filtered []peer.ID
	for _, clp := range closer {
		// don't send the requesting peer back to itself
		if p == clp {
			continue
		}

		// keep only peers that are closer to the key than we are
		// (i.e. we are not closer than them)
		if !kb.Closer(dht.self, clp, key) {
			filtered = append(filtered, clp)
		}
	}

	// ok, these are the better peers to query
	return filtered
}
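// The Closer check above relies on Kademlia XOR distance. The sketch below is
// illustrative only and is NOT the kb package's implementation: it hashes both
// peer IDs and the key with SHA-256 and compares the XOR distances byte-wise.
// The function name and the hash choice are assumptions made for the example.
// Imports needed: "bytes", "crypto/sha256".
func xorCloser(a, b peer.ID, k key.Key) bool {
	ah := sha256.Sum256([]byte(a))
	bh := sha256.Sum256([]byte(b))
	kh := sha256.Sum256([]byte(k))

	var da, db [sha256.Size]byte
	for i := range kh {
		da[i] = ah[i] ^ kh[i] // distance(a, key)
		db[i] = bh[i] ^ kh[i] // distance(b, key)
	}

	// a smaller XOR distance means "closer" to the key
	return bytes.Compare(da[:], db[:]) < 0
}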
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()

	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the addresses given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())

	for _, pi := range pinfos {
		if pi.ID != p {
			// we should ignore this provider record! it's not from the originator.
			// (we could sign records and check the signature later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self { // don't add our own addresses.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
		}
		dht.providers.AddProvider(ctx, key, p)
	}

	return nil, nil
}
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleFindPeer", p).Done()
	resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.ID

	// if looking for self... special case where we send it on CloserPeers.
	if peer.ID(pmes.GetKey()) == dht.self {
		closest = []peer.ID{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	}

	if closest == nil {
		log.Infof("%s handleFindPeer %s: could not find anything.", dht.self, p)
		return resp, nil
	}

	var withAddresses []peer.PeerInfo
	closestinfos := peer.PeerInfos(dht.peerstore, closest)
	for _, pi := range closestinfos {
		if len(pi.Addrs) > 0 {
			withAddresses = append(withAddresses, pi)
			log.Debugf("handleFindPeer: sending back '%s'", pi.ID)
		}
	}

	resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
	return resp, nil
}
// SendRequest sends the request to each remote sequentially (ordered by
// distance to the message key), stopping after the first successful response.
// If all sends fail, the last error is returned.
func (px *standard) SendRequest(ctx context.Context, m *dhtpb.Message) (*dhtpb.Message, error) {
	var err error
	for _, remote := range sortedByKey(px.remoteIDs, m.GetKey()) {
		var reply *dhtpb.Message
		reply, err = px.sendRequest(ctx, m, remote) // careful, don't re-declare err!
		if err != nil {
			continue
		}
		return reply, nil // success
	}
	return nil, err // NB: returns the last error
}
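// Hypothetical caller sketch (not part of the source): fetching a record
// through the proxy with a GET_VALUE request. The Proxy interface name and
// the dhtpb.NewMessage(type, key, clusterLevel) constructor are assumed from
// the surrounding codebase; adjust to the real API if it differs.
func getValueViaProxy(ctx context.Context, px Proxy, k key.Key) (*dhtpb.Record, error) {
	req := dhtpb.NewMessage(dhtpb.Message_GET_VALUE, string(k), 0)
	resp, err := px.SendRequest(ctx, req)
	if err != nil {
		return nil, err // every remote failed; this is the last error seen
	}
	return resp.GetRecord(), nil
}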
// handlePutValue stores a value in this peer's local storage.
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handlePutValue", p).Done()
	dskey := key.Key(pmes.GetKey()).DsKey()

	if err := dht.verifyRecordLocally(pmes.GetRecord()); err != nil {
		log.Debugf("Bad dht record in PUT from: %s. %s", key.Key(pmes.GetRecord().GetAuthor()), err)
		return nil, err
	}

	data, err := proto.Marshal(pmes.GetRecord())
	if err != nil {
		return nil, err
	}

	err = dht.datastore.Put(dskey, data)
	log.Debugf("%s handlePutValue %v", dht.self, dskey)
	return pmes, err
}
// SendMessage sends the message to each remote sequentially (ordered by
// distance to the message key), stopping after the first success. For
// ADD_PROVIDER and PUT_VALUE messages it keeps going until replicationFactor
// remotes have accepted the message. If all sends fail, the last error is
// returned.
func (px *standard) SendMessage(ctx context.Context, m *dhtpb.Message) error {
	var err error
	var numSuccesses int
	for _, remote := range sortedByKey(px.remoteIDs, m.GetKey()) {
		if err = px.sendMessage(ctx, m, remote); err != nil { // careful, don't re-declare err!
			continue
		}
		numSuccesses++
		switch m.GetType() {
		case dhtpb.Message_ADD_PROVIDER, dhtpb.Message_PUT_VALUE:
			if numSuccesses < replicationFactor {
				continue
			}
		}
		return nil // success
	}
	return err // NB: returns the last error, even if some earlier sends succeeded
}
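// sortedByKey is referenced above but its body is not shown here. The sketch
// below is a minimal illustration of the idea under stated assumptions (it is
// not the original implementation): order the remotes by XOR distance between
// a SHA-256 hash of the peer ID and a SHA-256 hash of the key, so the remotes
// most likely responsible for the key are tried first. The function name and
// hashing scheme are assumptions. Imports needed: "crypto/sha256", "math/big", "sort".
func sortedByKeySketch(peers []peer.ID, skey string) []peer.ID {
	kh := sha256.Sum256([]byte(skey))
	target := new(big.Int).SetBytes(kh[:])

	// distance(p) = hash(p) XOR hash(key), interpreted as a big integer
	dist := func(p peer.ID) *big.Int {
		ph := sha256.Sum256([]byte(p))
		return new(big.Int).Xor(new(big.Int).SetBytes(ph[:]), target)
	}

	out := append([]peer.ID(nil), peers...) // don't mutate the caller's slice
	sort.Slice(out, func(i, j int) bool {
		return dist(out[i]).Cmp(dist(out[j])) < 0
	})
	return out
}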
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleGetProviders", lm).Done()

	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	// debug logging niceness.
	reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, key)
	log.Debugf("%s begin", reqDesc)
	defer log.Debugf("%s end", reqDesc)

	// check if we have this value, to add ourselves as provider.
	has, err := dht.datastore.Has(key.DsKey())
	if err != nil && err != ds.ErrNotFound {
		log.Debugf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(ctx, key)
	if has {
		providers = append(providers, dht.self)
		log.Debugf("%s have the value. added self as provider", reqDesc)
	}

	if len(providers) > 0 {
		infos := peer.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if closer != nil {
		infos := peer.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
	}

	return resp, nil
}
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleGetValue", p).Done()
	log.Debugf("%s handleGetValue for key: %s", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is there even a key?
	k := pmes.GetKey()
	if k == "" {
		return nil, errors.New("handleGetValue but no key was provided")
		// TODO: send back an error response? could be bad, but the other node's hanging.
	}

	// let's first check if we have the value locally.
	log.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := key.Key(k).DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

	// if we got an unexpected error, bail.
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	// if we have the value, send it back
	if err == nil {
		log.Debugf("%s handleGetValue success!", dht.self)

		byts, ok := iVal.([]byte)
		if !ok {
			return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
		}

		rec := new(pb.Record)
		if err := proto.Unmarshal(byts, rec); err != nil {
			log.Debug("Failed to unmarshal dht record from datastore")
			return nil, err
		}
		resp.Record = rec
	}

	// Find the closest peers on the given cluster to the desired key and reply with them.
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if len(closer) > 0 {
		closerinfos := peer.PeerInfos(dht.peerstore, closer)
		for _, pi := range closerinfos {
			log.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
			if len(pi.Addrs) < 1 {
				log.Errorf(`no addresses on peer being sent! [local:%s] [sending:%s] [remote:%s]`, dht.self, pi.ID, p)
			}
		}
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
	}

	return resp, nil
}
func (s *Server) handleMessage(
	ctx context.Context, p peer.ID, req *dhtpb.Message) (peer.ID, *dhtpb.Message) {

	defer log.EventBegin(ctx, "routingMessageReceived", req, p).Done()

	var response = dhtpb.NewMessage(req.GetType(), req.GetKey(), req.GetClusterLevel())
	switch req.GetType() {

	case dhtpb.Message_GET_VALUE:
		rawRecord, err := getRoutingRecord(s.routingBackend, key.Key(req.GetKey()))
		if err != nil {
			return "", nil
		}
		response.Record = rawRecord
		return p, response

	case dhtpb.Message_PUT_VALUE:
		// FIXME: verify complains that the peer's ID is not present in the
		// peerstore. Mocknet problem?
		// if err := verify(s.peerstore, req.GetRecord()); err != nil {
		// 	log.Event(ctx, "validationFailed", req, p)
		// 	return "", nil
		// }
		putRoutingRecord(s.routingBackend, key.Key(req.GetKey()), req.GetRecord())
		return p, req

	case dhtpb.Message_FIND_NODE:
		pinfo := s.peerstore.PeerInfo(peer.ID(req.GetKey()))
		pri := []dhtpb.PeerRoutingInfo{
			{
				PeerInfo: pinfo,
				// Connectedness: TODO
			},
		}
		response.CloserPeers = dhtpb.PeerRoutingInfosToPBPeers(pri)
		return pinfo.ID, response

	case dhtpb.Message_ADD_PROVIDER:
		for _, provider := range req.GetProviderPeers() {
			providerID := peer.ID(provider.GetId())
			if providerID == p {
				store := []*dhtpb.Message_Peer{provider}
				storeProvidersToPeerstore(s.peerstore, p, store)
				if err := putRoutingProviders(s.routingBackend, key.Key(req.GetKey()), store); err != nil {
					return "", nil
				}
			} else {
				log.Event(ctx, "addProviderBadRequest", p, req)
			}
		}
		return "", nil

	case dhtpb.Message_GET_PROVIDERS:
		providers, err := getRoutingProviders(s.routingBackend, key.Key(req.GetKey()))
		if err != nil {
			return "", nil
		}
		response.ProviderPeers = providers
		return p, response

	case dhtpb.Message_PING:
		return p, req

	default:
	}
	return "", nil
}
// nearestPeersToQuery returns the routing table's closest peers to the message key.
func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID {
	key := key.Key(pmes.GetKey())
	closer := dht.routingTable.NearestPeers(kb.ConvertKey(key), count)
	return closer
}