// betterPeersToQuery returns nearestPeersToQuery, but only those peers closer to the key than self.
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, count int) []peer.Peer {
	closer := dht.nearestPeersToQuery(pmes, count)

	// no node? nil
	if closer == nil {
		return nil
	}

	// == to self? that's bad
	for _, p := range closer {
		if p.ID().Equal(dht.self.ID()) {
			log.Error("Attempted to return self! this shouldn't happen...")
			return nil
		}
	}

	var filtered []peer.Peer
	for _, p := range closer {
		// must all be closer than self
		key := u.Key(pmes.GetKey())
		if !kb.Closer(dht.self.ID(), p.ID(), key) {
			filtered = append(filtered, p)
		}
	}

	// ok, seems like these are closer nodes
	return filtered
}
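// handleAddProvider records the sender as a provider for the given key,
// using the address supplied in the message.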
func (dht *IpfsDHT) handleAddProvider(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	key := u.Key(pmes.GetKey())

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, peer.ID(key))

	// add provider should use the address given in the message
	for _, pb := range pmes.GetProviderPeers() {
		pid := peer.ID(pb.GetId())
		if pid.Equal(p.ID()) {

			addr, err := pb.Address()
			if err != nil {
				log.Errorf("provider %s error with address %s", p, *pb.Addr)
				continue
			}

			log.Infof("received provider %s %s for %s", p, addr, key)
			p.AddAddress(addr)
			dht.providers.AddProvider(key, p)

		} else {
			log.Errorf("handleAddProvider received provider %s from %s", pid, p)
		}
	}

	return pmes, nil // send back same msg as confirmation.
}
// nearestPeersToQuery returns the routing table's closest peers.
func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.Peer {
	level := pmes.GetClusterLevel()
	cluster := dht.routingTables[level]

	key := u.Key(pmes.GetKey())
	closer := cluster.NearestPeers(kb.ConvertKey(key), count)
	return closer
}
// HandleMessage implements the inet.Handler interface.
func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.NetMessage {

	mData := mes.Data()
	if mData == nil {
		log.Error("Message contained nil data.")
		return nil
	}

	mPeer := mes.Peer()
	if mPeer == nil {
		log.Error("Message contained nil peer.")
		return nil
	}

	// deserialize msg
	pmes := new(pb.Message)
	err := proto.Unmarshal(mData, pmes)
	if err != nil {
		log.Error("Error unmarshaling data")
		return nil
	}

	// update the peer (on valid msgs only)
	dht.Update(ctx, mPeer)

	log.Event(ctx, "foo", dht.self, mPeer, pmes)

	// get handler for this msg type.
	handler := dht.handlerForMsgType(pmes.GetType())
	if handler == nil {
		log.Error("got back nil handler from handlerForMsgType")
		return nil
	}

	// dispatch handler.
	rpmes, err := handler(mPeer, pmes)
	if err != nil {
		log.Errorf("handle message error: %s", err)
		return nil
	}

	// if nil response, return it before serializing
	if rpmes == nil {
		log.Warning("Got back nil response from request.")
		return nil
	}

	// serialize response msg
	rmes, err := msg.FromObject(mPeer, rpmes)
	if err != nil {
		log.Errorf("serialize response error: %s", err)
		return nil
	}

	return rmes
}
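// handleFindPeer replies with the closest known peers (that have addresses)
// to the requested key, or with self when the key is our own ID.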
func (dht *IpfsDHT) handleFindPeer(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.Peer

	// if looking for self... special case where we send it on CloserPeers.
	if peer.ID(pmes.GetKey()).Equal(dht.self.ID()) {
		closest = []peer.Peer{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, CloserPeerCount)
	}

	if closest == nil {
		log.Errorf("handleFindPeer: could not find anything.")
		return resp, nil
	}

	var withAddresses []peer.Peer
	for _, p := range closest {
		if len(p.Addresses()) > 0 {
			withAddresses = append(withAddresses, p)
		}
	}

	for _, p := range withAddresses {
		log.Debugf("handleFindPeer: sending back '%s'", p)
	}

	resp.CloserPeers = pb.PeersToPBPeers(withAddresses)
	return resp, nil
}
// Store a value in this peer's local storage
func (dht *IpfsDHT) handlePutValue(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	dht.dslock.Lock()
	defer dht.dslock.Unlock()
	dskey := u.Key(pmes.GetKey()).DsKey()

	err := dht.verifyRecord(pmes.GetRecord())
	if err != nil {
		fmt.Println(u.Key(pmes.GetRecord().GetAuthor()))
		log.Error("Bad dht record in put request")
		return nil, err
	}

	data, err := proto.Marshal(pmes.GetRecord())
	if err != nil {
		return nil, err
	}

	err = dht.datastore.Put(dskey, data)
	log.Debugf("%s handlePutValue %v\n", dht.self, dskey)
	return pmes, err
}
// If fewer than K nodes are in the entire network, a GET rpc should fail
// when nobody has the value.
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test because it makes a lot of output")
	ctx := context.Background()
	u.Debug = false
	fn := &fauxNet{}
	fs := &fauxSender{}
	local := makePeer(nil)
	peerstore := peer.NewPeerstore()
	peerstore.Add(local)

	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())

	var ps []peer.Peer
	for i := 0; i < 5; i++ {
		ps = append(ps, _randPeer())
		d.Update(ctx, ps[i])
	}
	other := _randPeer()

	// Reply with the same random peer to every message
	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
		pmes := new(pb.Message)
		err := proto.Unmarshal(mes.Data(), pmes)
		if err != nil {
			t.Fatal(err)
		}

		switch pmes.GetType() {
		case pb.Message_GET_VALUE:
			resp := &pb.Message{
				Type:        pmes.Type,
				CloserPeers: pb.PeersToPBPeers([]peer.Peer{other}),
			}

			mes, err := msg.FromObject(mes.Peer(), resp)
			if err != nil {
				t.Error(err)
			}
			return mes
		default:
			panic("Shouldn't receive this.")
		}
	})

	ctx, _ = context.WithTimeout(ctx, time.Second*30)
	_, err := d.GetValue(ctx, u.Key("hello"))
	if err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}
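// TestNotFound expects GetValue to return routing.ErrNotFound when every peer
// replies only with more random "closer" peers and nobody has the value.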
func TestNotFound(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	fn := &fauxNet{}
	fs := &fauxSender{}

	local := makePeer(nil)
	peerstore := peer.NewPeerstore()
	peerstore.Add(local)

	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())

	var ps []peer.Peer
	for i := 0; i < 5; i++ {
		ps = append(ps, _randPeer())
		d.Update(ctx, ps[i])
	}

	// Reply with random peers to every message
	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
		pmes := new(pb.Message)
		err := proto.Unmarshal(mes.Data(), pmes)
		if err != nil {
			t.Fatal(err)
		}

		switch pmes.GetType() {
		case pb.Message_GET_VALUE:
			resp := &pb.Message{Type: pmes.Type}

			peers := []peer.Peer{}
			for i := 0; i < 7; i++ {
				peers = append(peers, _randPeer())
			}
			resp.CloserPeers = pb.PeersToPBPeers(peers)
			mes, err := msg.FromObject(mes.Peer(), resp)
			if err != nil {
				t.Error(err)
			}
			return mes
		default:
			panic("Shouldn't receive this.")
		}
	})

	ctx, _ = context.WithTimeout(ctx, time.Second*5)
	v, err := d.GetValue(ctx, u.Key("hello"))
	log.Debugf("get value got %v", v)
	if err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}
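// TestGetFailures exercises GetValue failure paths: a request that times out,
// peers that reply with empty messages, and a handleGetValue request for a
// key we do not have.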
func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	ctx := context.Background()
	fn := &fauxNet{}
	fs := &fauxSender{}

	peerstore := peer.NewPeerstore()
	local := makePeer(nil)

	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())
	other := makePeer(nil)
	d.Update(ctx, other)

	// This one should time out
	// u.POut("Timeout Test\n")
	ctx1, _ := context.WithTimeout(context.Background(), time.Second)
	_, err := d.GetValue(ctx1, u.Key("test"))
	if err != nil {
		if err != context.DeadlineExceeded {
			t.Fatal("Got different error than we expected", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	// u.POut("NotFound Test\n")
	// Reply with failures to every message
	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
		pmes := new(pb.Message)
		err := proto.Unmarshal(mes.Data(), pmes)
		if err != nil {
			t.Fatal(err)
		}

		resp := &pb.Message{
			Type: pmes.Type,
		}
		m, err := msg.FromObject(mes.Peer(), resp)
		if err != nil {
			t.Error(err)
		}
		return m
	})

	// This one should fail with NotFound
	ctx2, _ := context.WithTimeout(context.Background(), time.Second)
	_, err = d.GetValue(ctx2, u.Key("test"))
	if err != nil {
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	fs.handlers = nil
	// Now we test this DHT's handleGetValue failure
	typ := pb.Message_GET_VALUE
	str := "hello"
	rec, err := d.makePutRecord(u.Key(str), []byte("blah"))
	if err != nil {
		t.Fatal(err)
	}
	req := pb.Message{
		Type:   &typ,
		Key:    &str,
		Record: rec,
	}

	// u.POut("handleGetValue Test\n")
	mes, err := msg.FromObject(other, &req)
	if err != nil {
		t.Error(err)
	}

	mes = d.HandleMessage(ctx, mes)

	pmes := new(pb.Message)
	err = proto.Unmarshal(mes.Data(), pmes)
	if err != nil {
		t.Fatal(err)
	}
	if pmes.GetRecord() != nil {
		t.Fatal("shouldn't have value")
	}
	if pmes.GetProviderPeers() != nil {
		t.Fatal("shouldn't have provider peers")
	}
}
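// handleGetValue looks up the requested key locally and replies with as much
// information as possible: the record if we have it, any known providers,
// and closer peers.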
func (dht *IpfsDHT) handleGetValue(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("%s handleGetValue for key: %s\n", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is the key even a key?
	key := pmes.GetKey()
	if key == "" {
		return nil, errors.New("handleGetValue but no key was provided")
	}

	// let's first check if we have the value locally.
	log.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := u.Key(pmes.GetKey()).DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

	// if we got an unexpected error, bail.
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	// Note: changed the behavior here to return _as much_ info as possible
	// (potentially all of {value, closer peers, provider})

	// if we have the value, send it back
	if err == nil {
		log.Debugf("%s handleGetValue success!", dht.self)

		byts, ok := iVal.([]byte)
		if !ok {
			return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
		}

		rec := new(pb.Record)
		err := proto.Unmarshal(byts, rec)
		if err != nil {
			log.Error("Failed to unmarshal dht record from datastore")
			return nil, err
		}

		resp.Record = rec
	}

	// if we know any providers for the requested value, return those.
	provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
	if len(provs) > 0 {
		log.Debugf("handleGetValue returning %d provider[s]", len(provs))
		resp.ProviderPeers = pb.PeersToPBPeers(provs)
	}

	// Find closest peer on given cluster to desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
	if closer != nil {
		for _, p := range closer {
			log.Debugf("handleGetValue returning closer peer: '%s'", p)
			if len(p.Addresses()) < 1 {
				log.Critical("no addresses on peer being sent!")
			}
		}
		resp.CloserPeers = pb.PeersToPBPeers(closer)
	}

	return resp, nil
}
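// handleGetProviders replies with the known providers for the requested key
// (including ourselves if we hold the value locally), plus closer peers.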
func (dht *IpfsDHT) handleGetProviders(p peer.Peer, pmes *pb.Message) (*pb.Message, error) {
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// check if we have this value, to add ourselves as provider.
	log.Debugf("handling GetProviders: '%s'", u.Key(pmes.GetKey()))
	dsk := u.Key(pmes.GetKey()).DsKey()
	has, err := dht.datastore.Has(dsk)
	if err != nil && err != ds.ErrNotFound {
		log.Errorf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(u.Key(pmes.GetKey()))
	if has {
		providers = append(providers, dht.self)
	}

	// if we've got providers, send those.
	if len(providers) > 0 {
		resp.ProviderPeers = pb.PeersToPBPeers(providers)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, CloserPeerCount)
	if closer != nil {
		resp.CloserPeers = pb.PeersToPBPeers(closer)
	}

	return resp, nil
}