// sendRequest connects to the remote peer, sends a routing request over a
// new stream, and waits for the length-delimited response.
func (px *standard) sendRequest(ctx context.Context, m *dhtpb.Message, remote peer.ID) (*dhtpb.Message, error) {
	e := log.EventBegin(ctx, "sendRoutingRequest", px.Host.ID(), remote, eventlog.Pair("request", m))
	defer e.Done()
	if err := px.Host.Connect(ctx, peer.PeerInfo{ID: remote}); err != nil {
		e.SetError(err)
		return nil, err
	}
	s, err := px.Host.NewStream(ProtocolSNR, remote)
	if err != nil {
		e.SetError(err)
		return nil, err
	}
	defer s.Close()

	r := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(s)
	if err = w.WriteMsg(m); err != nil {
		e.SetError(err)
		return nil, err
	}

	response := &dhtpb.Message{}
	if err = r.ReadMsg(response); err != nil {
		e.SetError(err)
		return nil, err
	}
	// need ctx expiration?
	if response == nil {
		err := errors.New("no response to request")
		e.SetError(err)
		return nil, err
	}
	e.Append(eventlog.Pair("response", response))
	e.Append(eventlog.Pair("uuid", eventlog.Uuid("foo")))
	return response, nil
}
// FromNet reads a single length-delimited protobuf message from r and
// converts it into a BitSwapMessage.
func FromNet(r io.Reader) (BitSwapMessage, error) {
	pbr := ggio.NewDelimitedReader(r, inet.MessageSizeMax)

	pb := new(pb.Message)
	if err := pbr.ReadMsg(pb); err != nil {
		return nil, err
	}

	m := newMessageFromProto(*pb)
	return m, nil
}
func (dht *IpfsDHT) handleNewMessage(s inet.Stream) {
	defer s.Close()
	ctx := dht.Context()
	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(cw)
	mPeer := s.Conn().RemotePeer()

	// receive msg
	pmes := new(pb.Message)
	if err := r.ReadMsg(pmes); err != nil {
		log.Debugf("Error unmarshaling data: %s", err)
		return
	}

	// update the peer (on valid msgs only)
	dht.updateFromMessage(ctx, mPeer, pmes)

	// get handler for this msg type.
	handler := dht.handlerForMsgType(pmes.GetType())
	if handler == nil {
		log.Debug("got back nil handler from handlerForMsgType")
		return
	}

	// dispatch handler.
	rpmes, err := handler(ctx, mPeer, pmes)
	if err != nil {
		log.Debugf("handle message error: %s", err)
		return
	}

	// if nil response, return it before serializing
	if rpmes == nil {
		log.Debug("Got back nil response from request.")
		return
	}

	// send out response msg
	if err := w.WriteMsg(rpmes); err != nil {
		log.Debugf("send response error: %s", err)
		return
	}
}
// HandleStream reads a single request from the stream, dispatches it to the
// wrapped handler, and writes the response back on the same stream.
func (lb *Loopback) HandleStream(s inet.Stream) {
	defer s.Close()
	pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
	var incoming dhtpb.Message
	if err := pbr.ReadMsg(&incoming); err != nil {
		log.Debug(err)
		return
	}
	ctx := context.TODO()
	outgoing := lb.Handler.HandleRequest(ctx, s.Conn().RemotePeer(), &incoming)

	pbw := ggio.NewDelimitedWriter(s)
	if err := pbw.WriteMsg(outgoing); err != nil {
		return // TODO logerr
	}
}
// sendRequest sends out a request, but also makes sure to
// measure the RTT for latency measurements.
func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("%s dht starting stream", dht.self)
	s, err := dht.host.NewStream(ProtocolDHT, p)
	if err != nil {
		return nil, err
	}
	defer s.Close()

	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(cw)

	start := time.Now()

	if err := w.WriteMsg(pmes); err != nil {
		return nil, err
	}
	log.Event(ctx, "dhtSentMessage", dht.self, p, pmes)

	rpmes := new(pb.Message)
	if err := r.ReadMsg(rpmes); err != nil {
		return nil, err
	}
	if rpmes == nil {
		return nil, errors.New("no response to request")
	}

	// update the peer (on valid msgs only)
	dht.updateFromMessage(ctx, p, rpmes)

	dht.peerstore.RecordLatency(p, time.Since(start))
	log.Event(ctx, "dhtReceivedMessage", dht.self, p, rpmes)
	return rpmes, nil
}
func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)
	d.Update(ctx, hosts[1].ID())

	// Reply with failures to every message
	hosts[1].SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		defer s.Close()
		io.Copy(ioutil.Discard, s)
	})

	// This one should time out
	ctx1, cancel1 := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel1()
	if _, err := d.GetValue(ctx1, key.Key("test")); err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}

		if err != context.DeadlineExceeded && err != context.Canceled {
			t.Fatal("Got different error than we expected", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	t.Log("Timeout test passed.")

	// Reply with failures to every message
	hosts[1].SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		defer s.Close()
		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			panic(err)
		}

		resp := &pb.Message{
			Type: pmes.Type,
		}
		if err := pbw.WriteMsg(resp); err != nil {
			panic(err)
		}
	})

	// This one should fail with NotFound.
	// long context timeout to ensure we don't end too early.
	// the dht should be exhausting its query and returning not found.
	// (was 3 seconds before which should be _plenty_ of time, but maybe
	// travis machines really have a hard time...)
	ctx2, cancel2 := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel2()
	_, err = d.GetValue(ctx2, key.Key("test"))
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	t.Log("ErrNotFound check passed!")

	// Now we test this DHT's handleGetValue failure
	{
		typ := pb.Message_GET_VALUE
		str := "hello"

		sk, err := d.getOwnPrivateKey()
		if err != nil {
			t.Fatal(err)
		}

		rec, err := record.MakePutRecord(sk, key.Key(str), []byte("blah"), true)
		if err != nil {
			t.Fatal(err)
		}
		req := pb.Message{
			Type:   &typ,
			Key:    &str,
			Record: rec,
		}

		s, err := hosts[1].NewStream(ProtocolDHT, hosts[0].ID())
		if err != nil {
			t.Fatal(err)
		}
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		if err := pbw.WriteMsg(&req); err != nil {
			t.Fatal(err)
		}

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			t.Fatal(err)
		}
		if pmes.GetRecord() != nil {
			t.Fatal("shouldn't have value")
		}
		if pmes.GetProviderPeers() != nil {
			t.Fatal("shouldn't have provider peers")
		}
	}
}
// If fewer than K nodes are in the entire network, a GET rpc should fail
// when nobody has the value.
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test to debug another")
	// t.Skip("skipping test because it makes a lot of output")

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 6)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)

	for i := 1; i < 5; i++ {
		d.Update(ctx, hosts[i].ID())
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		host.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
			defer s.Close()
			pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				pi := host.Peerstore().PeerInfo(hosts[1].ID())
				resp := &pb.Message{
					Type:        pmes.Type,
					CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.PeerInfo{pi}),
				}

				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
	defer cancel()
	if _, err := d.GetValue(ctx, key.Key("hello")); err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}
func TestNotFound(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 16)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()
	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)

	for _, p := range hosts {
		d.Update(ctx, p.ID())
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		host.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
			defer s.Close()
			pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				resp := &pb.Message{Type: pmes.Type}

				ps := []peer.PeerInfo{}
				for i := 0; i < 7; i++ {
					p := hosts[rand.Intn(len(hosts))].ID()
					pi := host.Peerstore().PeerInfo(p)
					ps = append(ps, pi)
				}

				resp.CloserPeers = pb.PeerInfosToPBPeers(d.host.Network(), ps)
				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	// long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
	v, err := d.GetValue(ctx, key.Key("hello"))
	log.Debugf("get value got %v", v)
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}