func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleFindPeer", p).Done()
	resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.ID

	// if looking for self... special case where we send it on CloserPeers.
	if peer.ID(pmes.GetKey()) == dht.self {
		closest = []peer.ID{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	}

	if closest == nil {
		log.Infof("%s handleFindPeer %s: could not find anything.", dht.self, p)
		return resp, nil
	}

	var withAddresses []peer.PeerInfo
	closestinfos := peer.PeerInfos(dht.peerstore, closest)
	for _, pi := range closestinfos {
		if len(pi.Addrs) > 0 {
			withAddresses = append(withAddresses, pi)
			log.Debugf("handleFindPeer: sending back '%s'", pi.ID)
		}
	}

	resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
	return resp, nil
}
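// findPeerExample is an illustrative, hypothetical client-side counterpart to
// handleFindPeer above: dht.FindPeer issues the query that this handler
// answers with CloserPeers. The FindPeer signature is assumed from the routing
// interface this DHT implements; the function itself is a sketch, not part of
// the original code.
func findPeerExample(ctx context.Context, dht *IpfsDHT, target peer.ID) {
	pi, err := dht.FindPeer(ctx, target)
	if err != nil {
		log.Debugf("FindPeer %s failed: %s", target, err)
		return
	}
	log.Debugf("FindPeer %s succeeded with %d known addrs", pi.ID, len(pi.Addrs))
}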
func (dht *IpfsDHT) handleNewMessage(s inet.Stream) {
	defer s.Close()

	ctx := dht.Context()
	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(cw)
	mPeer := s.Conn().RemotePeer()

	// receive msg
	pmes := new(pb.Message)
	if err := r.ReadMsg(pmes); err != nil {
		log.Debugf("Error unmarshaling data: %s", err)
		return
	}

	// update the peer (on valid msgs only)
	dht.updateFromMessage(ctx, mPeer, pmes)

	// get handler for this msg type.
	handler := dht.handlerForMsgType(pmes.GetType())
	if handler == nil {
		log.Debug("got back nil handler from handlerForMsgType")
		return
	}

	// dispatch handler.
	rpmes, err := handler(ctx, mPeer, pmes)
	if err != nil {
		log.Debugf("handle message error: %s", err)
		return
	}

	// if nil response, return it before serializing
	if rpmes == nil {
		log.Debug("Got back nil response from request.")
		return
	}

	// send out response msg
	if err := w.WriteMsg(rpmes); err != nil {
		log.Debugf("send response error: %s", err)
		return
	}
}
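// handlerForMsgType is referenced above but not included in this excerpt. The
// sketch below is an assumption about its shape, not the verbatim code: a
// plain switch from the protobuf message type to the matching handler. The
// handler signature mirrors the handlers shown here; the enum type name
// pb.Message_MessageType and the "Sketch" identifiers are assumptions.
type dhtHandlerSketch func(context.Context, peer.ID, *pb.Message) (*pb.Message, error)

func (dht *IpfsDHT) handlerForMsgTypeSketch(t pb.Message_MessageType) dhtHandlerSketch {
	switch t {
	case pb.Message_GET_VALUE:
		return dht.handleGetValue
	case pb.Message_FIND_NODE:
		return dht.handleFindPeer
	case pb.Message_GET_PROVIDERS:
		return dht.handleGetProviders
	default:
		// handleNewMessage treats a nil handler as an unknown message type
		// and simply drops the stream.
		return nil
	}
}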
// SendMessage sends the message to each remote sequentially (sorted by
// proximity to the message key), stopping after the first success. For
// ADD_PROVIDER and PUT_VALUE messages it keeps sending until replicationFactor
// sends have succeeded. If all sends fail, the last error is returned.
func (px *standard) SendMessage(ctx context.Context, m *dhtpb.Message) error {
	var err error
	var numSuccesses int
	for _, remote := range sortedByKey(px.remoteIDs, m.GetKey()) {
		if err = px.sendMessage(ctx, m, remote); err != nil { // careful don't re-declare err!
			continue
		}
		numSuccesses++
		switch m.GetType() {
		case dhtpb.Message_ADD_PROVIDER, dhtpb.Message_PUT_VALUE:
			if numSuccesses < replicationFactor {
				continue
			}
		}
		return nil // success
	}
	return err // NB: returns the last error
}
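// sortedByKey is used above but not shown in this excerpt. The sketch below is
// a hypothetical stand-in (note the different name) illustrating the usual
// Kademlia ordering: remotes sorted by XOR distance between SHA-256(peer ID)
// and SHA-256(key). The real helper may instead lean on the routing table's
// kbucket utilities. Assumes the crypto/sha256, math/big, and sort imports.
func sortedByKeySketch(peers []peer.ID, skey string) []peer.ID {
	target := sha256.Sum256([]byte(skey))

	// work on a copy so the caller's slice order is left untouched
	out := make([]peer.ID, len(peers))
	copy(out, peers)

	// distance returns the XOR metric between a peer's hashed ID and the key.
	distance := func(p peer.ID) *big.Int {
		h := sha256.Sum256([]byte(p))
		d := make([]byte, len(h))
		for i := range h {
			d[i] = h[i] ^ target[i]
		}
		return new(big.Int).SetBytes(d)
	}

	sort.Slice(out, func(i, j int) bool {
		return distance(out[i]).Cmp(distance(out[j])) < 0
	})
	return out
}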
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleGetProviders", lm).Done()

	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	// debug logging niceness.
	reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, key)
	log.Debugf("%s begin", reqDesc)
	defer log.Debugf("%s end", reqDesc)

	// check if we have this value, to add ourselves as provider.
	has, err := dht.datastore.Has(key.DsKey())
	if err != nil && err != ds.ErrNotFound {
		log.Debugf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(ctx, key)
	if has {
		providers = append(providers, dht.self)
		log.Debugf("%s have the value. added self as provider", reqDesc)
	}

	if len(providers) > 0 {
		infos := peer.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if closer != nil {
		infos := peer.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
	}

	return resp, nil
}
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleGetValue", p).Done()
	log.Debugf("%s handleGetValue for key: %s", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is there even a key?
	k := key.Key(pmes.GetKey())
	if k == "" {
		return nil, errors.New("handleGetValue but no key was provided")
		// TODO: send back an error response? could be bad, but the other node's hanging.
	}

	rec, err := dht.checkLocalDatastore(k)
	if err != nil {
		return nil, err
	}
	resp.Record = rec

	// Find closest peer on given cluster to desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if len(closer) > 0 {
		closerinfos := peer.PeerInfos(dht.peerstore, closer)
		for _, pi := range closerinfos {
			log.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
			if len(pi.Addrs) < 1 {
				log.Errorf(`no addresses on peer being sent! [local:%s] [sending:%s] [remote:%s]`, dht.self, pi.ID, p)
			}
		}

		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
	}

	return resp, nil
}
// If fewer than K nodes are in the entire network, it should fail when we make
// a GET rpc and nobody has the value
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test to debug another")
	// t.Skip("skipping test because it makes a lot of output")

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 6)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()
	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)

	for i := 1; i < 5; i++ {
		d.Update(ctx, hosts[i].ID())
	}

	// Reply to every GET_VALUE with the same single peer (hosts[1]),
	// so the query can never widen beyond it.
	for _, host := range hosts {
		host := host // shadow loop var
		host.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				pi := host.Peerstore().PeerInfo(hosts[1].ID())
				resp := &pb.Message{
					Type:        pmes.Type,
					CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.PeerInfo{pi}),
				}

				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
	defer cancel()
	if _, err := d.GetValue(ctx, key.Key("hello")); err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}
func TestNotFound(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 16)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()
	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)

	for _, p := range hosts {
		d.Update(ctx, p.ID())
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		host.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				resp := &pb.Message{Type: pmes.Type}

				ps := []peer.PeerInfo{}
				for i := 0; i < 7; i++ {
					p := hosts[rand.Intn(len(hosts))].ID()
					pi := host.Peerstore().PeerInfo(p)
					ps = append(ps, pi)
				}

				resp.CloserPeers = pb.PeerInfosToPBPeers(d.host.Network(), ps)
				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	// long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
	v, err := d.GetValue(ctx, key.Key("hello"))
	log.Debugf("get value got %v", v)
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleGetValue", p).Done()
	log.Debugf("%s handleGetValue for key: %s", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is there even a key?
	k := pmes.GetKey()
	if k == "" {
		return nil, errors.New("handleGetValue but no key was provided")
		// TODO: send back an error response? could be bad, but the other node's hanging.
	}

	// let's first check if we have the value locally.
	log.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := key.Key(k).DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

	// if we got an unexpected error, bail.
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	// if we have the value, send it back
	if err == nil {
		log.Debugf("%s handleGetValue success!", dht.self)

		byts, ok := iVal.([]byte)
		if !ok {
			return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
		}

		rec := new(pb.Record)
		err := proto.Unmarshal(byts, rec)
		if err != nil {
			log.Debug("Failed to unmarshal dht record from datastore")
			return nil, err
		}

		resp.Record = rec
	}

	// Find closest peer on given cluster to desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if len(closer) > 0 {
		closerinfos := peer.PeerInfos(dht.peerstore, closer)
		for _, pi := range closerinfos {
			log.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
			if len(pi.Addrs) < 1 {
				log.Errorf(`no addresses on peer being sent! [local:%s] [sending:%s] [remote:%s]`, dht.self, pi.ID, p)
			}
		}

		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
	}

	return resp, nil
}
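// For context only: a minimal sketch of the write path that the datastore read
// above pairs with. It assumes (as the read path implies) that records are
// stored as proto-marshaled pb.Record bytes under the key's datastore key, and
// that this datastore's Put accepts an interface{} value. This is not the
// original put handler, just an illustration of the expected layout.
func storeRecordSketch(dht *IpfsDHT, k key.Key, rec *pb.Record) error {
	data, err := proto.Marshal(rec) // same wire format the read path unmarshals
	if err != nil {
		return err
	}
	return dht.datastore.Put(k.DsKey(), data)
}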
func (s *Server) handleMessage(
	ctx context.Context, p peer.ID, req *dhtpb.Message) (peer.ID, *dhtpb.Message) {

	defer log.EventBegin(ctx, "routingMessageReceived", req, p).Done()

	var response = dhtpb.NewMessage(req.GetType(), req.GetKey(), req.GetClusterLevel())
	switch req.GetType() {

	case dhtpb.Message_GET_VALUE:
		rawRecord, err := getRoutingRecord(s.routingBackend, key.Key(req.GetKey()))
		if err != nil {
			return "", nil
		}
		response.Record = rawRecord
		return p, response

	case dhtpb.Message_PUT_VALUE:
		// FIXME: verify complains that the peer's ID is not present in the
		// peerstore. Mocknet problem?
		// if err := verify(s.peerstore, req.GetRecord()); err != nil {
		// 	log.Event(ctx, "validationFailed", req, p)
		// 	return "", nil
		// }
		putRoutingRecord(s.routingBackend, key.Key(req.GetKey()), req.GetRecord())
		return p, req

	case dhtpb.Message_FIND_NODE:
		p := s.peerstore.PeerInfo(peer.ID(req.GetKey()))
		pri := []dhtpb.PeerRoutingInfo{
			{
				PeerInfo: p,
				// Connectedness: TODO
			},
		}
		response.CloserPeers = dhtpb.PeerRoutingInfosToPBPeers(pri)
		return p.ID, response

	case dhtpb.Message_ADD_PROVIDER:
		for _, provider := range req.GetProviderPeers() {
			providerID := peer.ID(provider.GetId())
			if providerID == p {
				store := []*dhtpb.Message_Peer{provider}
				storeProvidersToPeerstore(s.peerstore, p, store)
				if err := putRoutingProviders(s.routingBackend, key.Key(req.GetKey()), store); err != nil {
					return "", nil
				}
			} else {
				log.Event(ctx, "addProviderBadRequest", p, req)
			}
		}
		return "", nil

	case dhtpb.Message_GET_PROVIDERS:
		providers, err := getRoutingProviders(s.routingBackend, key.Key(req.GetKey()))
		if err != nil {
			return "", nil
		}
		response.ProviderPeers = providers
		return p, response

	case dhtpb.Message_PING:
		return p, req

	default:
	}
	return "", nil
}