func (px *standard) sendRequest(ctx context.Context, m *dhtpb.Message, remote peer.ID) (*dhtpb.Message, error) {
	e := log.EventBegin(ctx, "sendRoutingRequest", px.Host.ID(), remote, logging.Pair("request", m))
	defer e.Done()
	if err := px.Host.Connect(ctx, peer.PeerInfo{ID: remote}); err != nil {
		e.SetError(err)
		return nil, err
	}
	s, err := px.Host.NewStream(ProtocolSNR, remote)
	if err != nil {
		e.SetError(err)
		return nil, err
	}
	defer s.Close()
	r := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(s)
	if err = w.WriteMsg(m); err != nil {
		e.SetError(err)
		return nil, err
	}

	response := &dhtpb.Message{}
	if err = r.ReadMsg(response); err != nil {
		e.SetError(err)
		return nil, err
	}
	// need ctx expiration?
	if response == nil {
		err := errors.New("no response to request")
		e.SetError(err)
		return nil, err
	}
	e.Append(logging.Pair("response", response))
	e.Append(logging.Pair("uuid", logging.Uuid("foo")))
	return response, nil
}
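Every function in this section frames its protobufs the same way: ggio's delimited writer prefixes each marshaled message with its byte length as an unsigned varint, and the delimited reader rejects frames larger than its max-size argument (inet.MessageSizeMax above) with io.ErrShortBuffer. As a rough sketch of that wire format using only the standard library's bufio, encoding/binary, and io packages (the readDelimitedFrame name is illustrative, not part of ggio):

// readDelimitedFrame is an illustrative helper, not part of the original
// source: it reads one uvarint length prefix and then that many bytes,
// which is the framing ggio's delimited reader and writer speak.
func readDelimitedFrame(r *bufio.Reader, maxSize int) ([]byte, error) {
	length, err := binary.ReadUvarint(r) // varint length prefix
	if err != nil {
		return nil, err
	}
	if length > uint64(maxSize) {
		return nil, io.ErrShortBuffer // same error the delimited reader returns on oversized frames
	}
	frame := make([]byte, length)
	if _, err := io.ReadFull(r, frame); err != nil {
		return nil, err
	}
	return frame, nil
}

The returned frame would then be unmarshaled with proto.Unmarshal, which is what ReadMsg does internally.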
func (m *impl) ToNet(w io.Writer) error {
	pbw := ggio.NewDelimitedWriter(w)
	if err := pbw.WriteMsg(m.ToProto()); err != nil {
		return err
	}
	return nil
}
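The deserialization side is not shown here; a minimal sketch of what a counterpart could look like, assuming the same ggio delimited framing (FromNet and its use of pb.Message are hypothetical, not part of the original source):

// FromNet is a hypothetical counterpart to ToNet: it reads a single
// varint-delimited protobuf from r and decodes it. The real code would
// convert the decoded proto back into its internal message type.
func FromNet(r io.Reader) (*pb.Message, error) {
	pbr := ggio.NewDelimitedReader(r, inet.MessageSizeMax)
	msg := new(pb.Message)
	if err := pbr.ReadMsg(msg); err != nil {
		return nil, err
	}
	return msg, nil
}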
func TestVarintNoClose(t *testing.T) {
	buf := bytes.NewBuffer(nil)
	writer := io.NewDelimitedWriter(buf)
	reader := io.NewDelimitedReader(buf, 1024*1024)
	if err := iotest(writer, reader); err != nil {
		t.Error(err)
	}
}
// issue 32
func TestVarintMaxSize(t *testing.T) {
	buf := newBuffer()
	writer := io.NewDelimitedWriter(buf)
	reader := io.NewDelimitedReader(buf, 20)
	if err := iotest(writer, reader); err != goio.ErrShortBuffer {
		t.Error(err)
	} else {
		t.Logf("%s", err)
	}
}
func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (<-chan *DiagInfo, error) {
	s, err := d.host.NewStream(ProtocolDiag, p)
	if err != nil {
		return nil, err
	}

	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(cw)

	start := time.Now()

	if err := w.WriteMsg(pmes); err != nil {
		return nil, err
	}

	out := make(chan *DiagInfo)
	go func() {
		defer func() {
			close(out)
			s.Close()
			rtt := time.Since(start)
			log.Infof("diagnostic request took: %s", rtt.String())
		}()

		for {
			rpmes := new(pb.Message)
			if err := r.ReadMsg(rpmes); err != nil {
				log.Debugf("Error reading diagnostic from stream: %s", err)
				return
			}
			if rpmes == nil {
				log.Debug("Got no response back from diag request.")
				return
			}

			di, err := decodeDiagJson(rpmes.GetData())
			if err != nil {
				log.Debug(err)
				return
			}

			select {
			case out <- di:
			case <-ctx.Done():
				return
			}
		}
	}()

	return out, nil
}
func (d *Diagnostics) HandleMessage(ctx context.Context, s inet.Stream) error {
	cr := ctxio.NewReader(ctx, s)
	cw := ctxio.NewWriter(ctx, s)
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax) // maxsize
	w := ggio.NewDelimitedWriter(cw)

	// deserialize msg
	pmes := new(pb.Message)
	if err := r.ReadMsg(pmes); err != nil {
		log.Debugf("Failed to decode protobuf message: %v", err)
		return nil
	}

	// Print out diagnostic
	log.Infof("[peer: %s] Got message from [%s]\n", d.self.Pretty(), s.Conn().RemotePeer())

	// Make sure we haven't already handled this request to prevent loops
	if err := d.startDiag(pmes.GetDiagID()); err != nil {
		return nil
	}

	resp := newMessage(pmes.GetDiagID())
	resp.Data = d.getDiagInfo().Marshal()
	if err := w.WriteMsg(resp); err != nil {
		log.Debugf("Failed to write protobuf message over stream: %s", err)
		return err
	}

	timeout := pmes.GetTimeoutDuration()
	if timeout < HopTimeoutDecrement {
		return fmt.Errorf("timeout too short: %s", timeout)
	}
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement)

	dpeers, err := d.getDiagnosticFromPeers(ctx, d.getPeers(), pmes)
	if err != nil {
		log.Debugf("diagnostic from peers err: %s", err)
		return err
	}
	for b := range dpeers {
		resp := newMessage(pmes.GetDiagID())
		resp.Data = b.Marshal()
		if err := w.WriteMsg(resp); err != nil {
			log.Debugf("Failed to write protobuf message over stream: %s", err)
			return err
		}
	}
	return nil
}
func TestVarintNormal(t *testing.T) {
	buf := newBuffer()
	writer := io.NewDelimitedWriter(buf)
	reader := io.NewDelimitedReader(buf, 1024*1024)
	if err := iotest(writer, reader); err != nil {
		t.Error(err)
	}
	if !buf.closed {
		t.Fatalf("did not close buffer")
	}
}
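TestVarintNormal and TestVarintMaxSize depend on newBuffer and iotest helpers not shown in this section; iotest presumably round-trips a batch of messages through the writer/reader pair and closes the writer. A minimal sketch of the buffer side alone, under the assumption that it only needs to satisfy Read/Write plus a Close that sets the closed flag the tests inspect (the closableBuffer name is illustrative, not the actual harness):

// closableBuffer is a sketch (assumption, not the actual test harness):
// an in-memory stream that records whether Close was called, so
// TestVarintNormal can assert the buffer was closed. bytes.Buffer has no
// Close method, which is why TestVarintNoClose uses it directly instead.
type closableBuffer struct {
	bytes.Buffer
	closed bool
}

func newBuffer() *closableBuffer { return &closableBuffer{} }

// Close records that it was called; closing the delimited writer in turn
// closes the underlying buffer when it implements io.Closer.
func (b *closableBuffer) Close() error {
	b.closed = true
	return nil
}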
func (ids *IDService) RequestHandler(s inet.Stream) {
	defer s.Close()
	c := s.Conn()

	bwc := ids.Host.GetBandwidthReporter()
	s = mstream.WrapStream(s, ID, bwc)

	w := ggio.NewDelimitedWriter(s)
	mes := pb.Identify{}
	ids.populateMessage(&mes, s.Conn())
	w.WriteMsg(&mes)

	log.Debugf("%s sent message to %s %s", ID, c.RemotePeer(), c.RemoteMultiaddr())
}
func (dht *IpfsDHT) handleNewMessage(s inet.Stream) {
	defer s.Close()
	ctx := dht.Context()
	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(cw)
	mPeer := s.Conn().RemotePeer()

	// receive msg
	pmes := new(pb.Message)
	if err := r.ReadMsg(pmes); err != nil {
		log.Debugf("Error unmarshaling data: %s", err)
		return
	}

	// update the peer (on valid msgs only)
	dht.updateFromMessage(ctx, mPeer, pmes)

	// get handler for this msg type.
	handler := dht.handlerForMsgType(pmes.GetType())
	if handler == nil {
		log.Debug("got back nil handler from handlerForMsgType")
		return
	}

	// dispatch handler.
	rpmes, err := handler(ctx, mPeer, pmes)
	if err != nil {
		log.Debugf("handle message error: %s", err)
		return
	}

	// if nil response, return it before serializing
	if rpmes == nil {
		log.Debug("Got back nil response from request.")
		return
	}

	// send out response msg
	if err := w.WriteMsg(rpmes); err != nil {
		log.Debugf("send response error: %s", err)
		return
	}
}
func (lb *Loopback) HandleStream(s inet.Stream) {
	defer s.Close()
	pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
	var incoming dhtpb.Message
	if err := pbr.ReadMsg(&incoming); err != nil {
		log.Debug(err)
		return
	}
	ctx := context.TODO()
	outgoing := lb.Handler.HandleRequest(ctx, s.Conn().RemotePeer(), &incoming)

	pbw := ggio.NewDelimitedWriter(s)
	if err := pbw.WriteMsg(outgoing); err != nil {
		return // TODO logerr
	}
}
// sendMessage sends out a message
func (dht *IpfsDHT) sendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error {
	log.Debugf("%s dht starting stream", dht.self)
	s, err := dht.host.NewStream(ProtocolDHT, p)
	if err != nil {
		return err
	}
	defer s.Close()

	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	w := ggio.NewDelimitedWriter(cw)

	if err := w.WriteMsg(pmes); err != nil {
		return err
	}
	log.Event(ctx, "dhtSentMessage", dht.self, p, pmes)
	return nil
}
func (px *standard) sendMessage(ctx context.Context, m *dhtpb.Message, remote peer.ID) (err error) {
	e := log.EventBegin(ctx, "sendRoutingMessage", px.Host.ID(), remote, m)
	defer func() {
		if err != nil {
			e.SetError(err)
		}
		e.Done()
	}()
	if err = px.Host.Connect(ctx, peer.PeerInfo{ID: remote}); err != nil {
		return err
	}
	s, err := px.Host.NewStream(ProtocolSNR, remote)
	if err != nil {
		return err
	}
	defer s.Close()
	pbw := ggio.NewDelimitedWriter(s)
	if err := pbw.WriteMsg(m); err != nil {
		return err
	}
	return nil
}
// sendRequest sends out a request, but also makes sure to
// measure the RTT for latency measurements.
func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("%s dht starting stream", dht.self)
	s, err := dht.host.NewStream(ProtocolDHT, p)
	if err != nil {
		return nil, err
	}
	defer s.Close()

	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(cw)

	start := time.Now()

	if err := w.WriteMsg(pmes); err != nil {
		return nil, err
	}
	log.Event(ctx, "dhtSentMessage", dht.self, p, pmes)

	rpmes := new(pb.Message)
	if err := r.ReadMsg(rpmes); err != nil {
		return nil, err
	}
	if rpmes == nil {
		return nil, errors.New("no response to request")
	}

	// update the peer (on valid msgs only)
	dht.updateFromMessage(ctx, p, rpmes)

	dht.peerstore.RecordLatency(p, time.Since(start))
	log.Event(ctx, "dhtReceivedMessage", dht.self, p, rpmes)
	return rpmes, nil
}
func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	ctx := context.Background()

	mn, err := mocknet.FullMeshConnected(ctx, 2)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()
	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)
	d.Update(ctx, hosts[1].ID())

	// Reply with failures to every message
	hosts[1].SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		defer s.Close()
		io.Copy(ioutil.Discard, s)
	})

	// This one should time out
	ctx1, _ := context.WithTimeout(context.Background(), 200*time.Millisecond)
	if _, err := d.GetValue(ctx1, key.Key("test")); err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		if err.Error() != "process closing" {
			t.Fatal("Got different error than we expected", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	t.Log("Timeout test passed.")

	// Reply with failures to every message
	hosts[1].SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
		defer s.Close()
		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			panic(err)
		}

		resp := &pb.Message{
			Type: pmes.Type,
		}
		if err := pbw.WriteMsg(resp); err != nil {
			panic(err)
		}
	})

	// This one should fail with NotFound.
	// long context timeout to ensure we don't end too early.
	// the dht should be exhausting its query and returning not found.
	// (was 3 seconds before which should be _plenty_ of time, but maybe
	// travis machines really have a hard time...)
	ctx2, _ := context.WithTimeout(context.Background(), 20*time.Second)
	_, err = d.GetValue(ctx2, key.Key("test"))
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	t.Log("ErrNotFound check passed!")

	// Now we test this DHT's handleGetValue failure
	{
		typ := pb.Message_GET_VALUE
		str := "hello"

		sk, err := d.getOwnPrivateKey()
		if err != nil {
			t.Fatal(err)
		}

		rec, err := record.MakePutRecord(sk, key.Key(str), []byte("blah"), true)
		if err != nil {
			t.Fatal(err)
		}
		req := pb.Message{
			Type:   &typ,
			Key:    &str,
			Record: rec,
		}

		s, err := hosts[1].NewStream(ProtocolDHT, hosts[0].ID())
		if err != nil {
			t.Fatal(err)
		}
		defer s.Close()

		pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
		pbw := ggio.NewDelimitedWriter(s)

		if err := pbw.WriteMsg(&req); err != nil {
			t.Fatal(err)
		}

		pmes := new(pb.Message)
		if err := pbr.ReadMsg(pmes); err != nil {
			t.Fatal(err)
		}
		if pmes.GetRecord() != nil {
			t.Fatal("shouldn't have value")
		}
		if pmes.GetProviderPeers() != nil {
			t.Fatal("shouldn't have provider peers")
		}
	}
}
// If less than K nodes are in the entire network, it should fail when we make
// a GET rpc and nobody has the value
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test to debug another")
	// t.Skip("skipping test because it makes a lot of output")

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 6)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()

	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)

	for i := 1; i < 5; i++ {
		d.Update(ctx, hosts[i].ID())
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		host.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				pi := host.Peerstore().PeerInfo(hosts[1].ID())
				resp := &pb.Message{
					Type:        pmes.Type,
					CloserPeers: pb.PeerInfosToPBPeers(d.host.Network(), []peer.PeerInfo{pi}),
				}

				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	ctx, cancel := context.WithTimeout(ctx, time.Second*30)
	defer cancel()
	if _, err := d.GetValue(ctx, key.Key("hello")); err != nil {
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}
func TestNotFound(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	mn, err := mocknet.FullMeshConnected(ctx, 16)
	if err != nil {
		t.Fatal(err)
	}
	hosts := mn.Hosts()
	tsds := dssync.MutexWrap(ds.NewMapDatastore())
	d := NewDHT(ctx, hosts[0], tsds)

	for _, p := range hosts {
		d.Update(ctx, p.ID())
	}

	// Reply with random peers to every message
	for _, host := range hosts {
		host := host // shadow loop var
		host.SetStreamHandler(ProtocolDHT, func(s inet.Stream) {
			defer s.Close()

			pbr := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
			pbw := ggio.NewDelimitedWriter(s)

			pmes := new(pb.Message)
			if err := pbr.ReadMsg(pmes); err != nil {
				panic(err)
			}

			switch pmes.GetType() {
			case pb.Message_GET_VALUE:
				resp := &pb.Message{Type: pmes.Type}

				ps := []peer.PeerInfo{}
				for i := 0; i < 7; i++ {
					p := hosts[rand.Intn(len(hosts))].ID()
					pi := host.Peerstore().PeerInfo(p)
					ps = append(ps, pi)
				}

				resp.CloserPeers = pb.PeerInfosToPBPeers(d.host.Network(), ps)
				if err := pbw.WriteMsg(resp); err != nil {
					panic(err)
				}
			default:
				panic("Shouldn't receive this.")
			}
		})
	}

	// long timeout to ensure timing is not at play.
	ctx, cancel := context.WithTimeout(ctx, time.Second*20)
	defer cancel()
	v, err := d.GetValue(ctx, key.Key("hello"))
	log.Debugf("get value got %v", v)
	if err != nil {
		if merr, ok := err.(u.MultiErr); ok && len(merr) > 0 {
			err = merr[0]
		}
		switch err {
		case routing.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to receive an error.")
}