func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()

	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the address given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
		if pi.ID != p {
			// we should ignore this provider record! not from originator.
			// (we could sign them and check the signature later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self { // don't add own addrs.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
		}
		dht.providers.AddProvider(ctx, key, p)
	}

	return nil, nil
}
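// The filtering rules above are easy to miss inside the loop. A minimal
// sketch that isolates them as a pure predicate; the helper name
// shouldAcceptProvider is hypothetical and not part of the package.
func shouldAcceptProvider(sender peer.ID, pi peer.PeerInfo) bool {
	// Provider records are not signed yet, so a record is only trusted
	// when the provider itself is the sender of the message.
	if pi.ID != sender {
		return false
	}
	// A provider without addresses cannot be dialed, so the record is
	// useless to us.
	if len(pi.Addrs) < 1 {
		return false
	}
	return true
}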
func getRoutingProviders(ds datastore.Datastore, k key.Key) ([]*dhtpb.Message_Peer, error) {
	e := log.EventBegin(context.Background(), "getProviders", &k)
	defer e.Done()

	var providers []*dhtpb.Message_Peer
	if v, err := ds.Get(providerKey(k)); err == nil {
		if data, ok := v.([]byte); ok {
			var msg dhtpb.Message
			if err := proto.Unmarshal(data, &msg); err != nil {
				return nil, err
			}
			providers = append(providers, msg.GetProviderPeers()...)
		}
	}
	return providers, nil
}
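// getRoutingProviders only covers the read side; handleMessage below
// calls putRoutingProviders for the write side. A plausible sketch of
// that complement, assuming the same providerKey helper and proto
// types; the merge-by-peer-ID semantics here are an assumption, not a
// confirmed detail of the real implementation.
func putRoutingProviders(ds datastore.Datastore, k key.Key, newRecords []*dhtpb.Message_Peer) error {
	oldRecords, err := getRoutingProviders(ds, k)
	if err != nil {
		return err
	}
	// Merge old and new records, letting a new record for a peer
	// overwrite any older one with the same ID.
	merged := make(map[string]*dhtpb.Message_Peer)
	for _, provider := range oldRecords {
		merged[provider.GetId()] = provider
	}
	for _, provider := range newRecords {
		merged[provider.GetId()] = provider
	}
	var msg dhtpb.Message
	for _, provider := range merged {
		msg.ProviderPeers = append(msg.ProviderPeers, provider)
	}
	data, err := proto.Marshal(&msg)
	if err != nil {
		return err
	}
	return ds.Put(providerKey(k), data)
}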
func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)
	d.Update(ctx, hosts[1].ID())

	// Fail every message: swallow the request and never respond.
	hosts[1].SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		defer s.Close()
		io.Copy(ioutil.Discard, s)
	})

	// This one should time out.
	ctx1, cancel1 := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel1()
	if _, err := d.GetValue(ctx1, key.Key("test")); err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		if err.Error() != "process closing" {
			t.Fatal("Got different error than we expected", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	t.Log("Timeout test passed.")

	// Now reply to every message with an empty response of the same type.
	hosts[1].SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			panic(err)
		}

		resp := &pb.Message{
			Type: pmes.Type,
		}
		if err := pbw.WriteMsg(resp); err != nil {
			panic(err)
		}
	})

	// This one should fail with NotFound.
	// Long context timeout to ensure we don't end too early: the dht
	// should be exhausting its query and returning not found.
	// (was 3 seconds before, which should be _plenty_ of time, but maybe
	// travis machines really have a hard time...)
	ctx2, cancel2 := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel2()
	_, err = d.GetValue(ctx2, key.Key("test"))
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	t.Log("ErrNotFound check passed!")

	// Now we test this DHT's handleGetValue failure.
	{
		typ := pb.Message_GET_VALUE
		str := "hello"

		sk, err := d.getOwnPrivateKey()
		if err != nil {
			t.Fatal(err)
		}

		rec, err := record.MakePutRecord(sk, key.Key(str), []byte("blah"), true)
		if err != nil {
			t.Fatal(err)
		}
		req := pb.Message{
			Type:   &typ,
			Key:    &str,
			Record: rec,
		}

		s, err := hosts[1].NewStream(ProtocolDHT, hosts[0].ID())
		if err != nil {
			t.Fatal(err)
		}
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		if err := pbw.WriteMsg(&req); err != nil {
			t.Fatal(err)
		}

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			t.Fatal(err)
		}
		if pmes.GetRecord() != nil {
			t.Fatal("shouldn't have value")
		}
		if pmes.GetProviderPeers() != nil {
			t.Fatal("shouldn't have provider peers")
		}
	}
}
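// For contrast with the failure handlers above: a hedged sketch of a
// success-path responder that answers GET_VALUE with a record, so
// GetValue can succeed instead of timing out or exhausting its query.
// The helper name and the rec parameter are hypothetical, not part of
// the test suite; a real record could be built with record.MakePutRecord
// as in the test body above.
func setSuccessHandler(h host.Host, rec *pb.Record) {
	h.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			return
		}

		// Echo the request's type and key, and attach the record.
		resp := &pb.Message{
			Type:   pmes.Type,
			Key:    pmes.Key,
			Record: rec,
		}
		pbw.WriteMsg(resp)
	})
}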
func (s *Server) handleMessage(
	ctx context.Context, p peer.ID, req *dhtpb.Message) (peer.ID, *dhtpb.Message) {

	defer log.EventBegin(ctx, "routingMessageReceived", req, p).Done()

	var response = dhtpb.NewMessage(req.GetType(), req.GetKey(), req.GetClusterLevel())
	switch req.GetType() {

	case dhtpb.Message_GET_VALUE:
		rawRecord, err := getRoutingRecord(s.routingBackend, key.Key(req.GetKey()))
		if err != nil {
			return "", nil
		}
		response.Record = rawRecord
		return p, response

	case dhtpb.Message_PUT_VALUE:
		// FIXME: verify complains that the peer's ID is not present in the
		// peerstore. Mocknet problem?
		// if err := verify(s.peerstore, req.GetRecord()); err != nil {
		// 	log.Event(ctx, "validationFailed", req, p)
		// 	return "", nil
		// }
		putRoutingRecord(s.routingBackend, key.Key(req.GetKey()), req.GetRecord())
		return p, req

	case dhtpb.Message_FIND_NODE:
		pinfo := s.peerstore.PeerInfo(peer.ID(req.GetKey()))
		pri := []dhtpb.PeerRoutingInfo{
			{
				PeerInfo: pinfo,
				// Connectedness: TODO
			},
		}
		response.CloserPeers = dhtpb.PeerRoutingInfosToPBPeers(pri)
		return pinfo.ID, response

	case dhtpb.Message_ADD_PROVIDER:
		for _, provider := range req.GetProviderPeers() {
			providerID := peer.ID(provider.GetId())
			if providerID == p {
				store := []*dhtpb.Message_Peer{provider}
				storeProvidersToPeerstore(s.peerstore, p, store)
				if err := putRoutingProviders(s.routingBackend, key.Key(req.GetKey()), store); err != nil {
					return "", nil
				}
			} else {
				log.Event(ctx, "addProviderBadRequest", p, req)
			}
		}
		return "", nil

	case dhtpb.Message_GET_PROVIDERS:
		providers, err := getRoutingProviders(s.routingBackend, key.Key(req.GetKey()))
		if err != nil {
			return "", nil
		}
		response.ProviderPeers = providers
		return p, response

	case dhtpb.Message_PING:
		return p, req

	default:
	}
	return "", nil
}
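// The FIXME in the PUT_VALUE case above leaves record verification
// disabled. One plausible shape for that verify helper, assuming a
// public-key lookup in the peerstore and the record package's signature
// check; the exact function names here are assumptions, not confirmed
// API.
func verify(ps peer.Peerstore, r *dhtpb.Record) error {
	// Look up the record author's public key; without it the signature
	// cannot be checked (this is what the mocknet FIXME trips over).
	p := peer.ID(r.GetAuthor())
	pk := ps.PubKey(p)
	if pk == nil {
		return fmt.Errorf("do not have public key for %s", p)
	}
	// Assumed API: record.CheckRecordSig validates the record's
	// signature against the author's public key.
	if err := record.CheckRecordSig(r, pk); err != nil {
		return err
	}
	return nil
}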