// RandTestBogusPrivateKey returns a small TestBogusPrivateKey filled with
// time-seeded randomness.
func RandTestBogusPrivateKey() (TestBogusPrivateKey, error) {
	r := u.NewTimeSeededRand()
	k := make([]byte, 5)
	if _, err := io.ReadFull(r, k); err != nil {
		return nil, err
	}
	return TestBogusPrivateKey(k), nil
}
// RandPeerID generates random "valid" peer IDs. It does not NEED to generate
// keys because it is as if we lost the key right away. It is fine to read
// randomness and hash it. To generate proper keys and a corresponding PeerID,
// use:
//	sk, pk, _ := testutil.RandKeyPair()
//	id, _ := peer.IDFromPublicKey(pk)
func RandPeerID() (peer.ID, error) {
	buf := make([]byte, 16)
	if _, err := io.ReadFull(u.NewTimeSeededRand(), buf); err != nil {
		return "", err
	}
	h := u.Hash(buf)
	return peer.ID(h), nil
}
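// A hedged sketch of how RandPeerID might be exercised: since the IDs carry
// no usable keys, the most a test can reasonably assert is that they come out
// non-empty and distinct. This test function is hypothetical (not part of the
// original package) and assumes it lives in a _test.go file that imports
// "testing".
func TestRandPeerIDsAreDistinct(t *testing.T) {
	seen := make(map[peer.ID]struct{})
	for i := 0; i < 100; i++ {
		id, err := RandPeerID()
		if err != nil {
			t.Fatal(err)
		}
		if len(id) == 0 {
			t.Fatal("generated an empty peer ID")
		}
		if _, ok := seen[id]; ok {
			t.Fatalf("duplicate peer ID generated: %s", id)
		}
		seen[id] = struct{}{}
	}
}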
// newSender returns a channel of sendChans and a stream handler. For every
// stream it handles, the handler registers a sendChans on the channel, then
// waits for send signals: on each one it writes a randomly sized chunk of
// random data and reads back the same number of bytes.
func newSender() (chan sendChans, func(s inet.Stream)) {
	scc := make(chan sendChans)
	return scc, func(s inet.Stream) {
		sc := newSendChans()
		scc <- sc

		defer func() {
			s.Close()
			sc.closed <- struct{}{}
		}()

		buf := make([]byte, 65536)
		buf2 := make([]byte, 65536)
		u.NewTimeSeededRand().Read(buf)

		for {
			select {
			case <-sc.close_:
				return
			case <-sc.send:
			}

			// send a randomly sized subchunk
			from := rand.Intn(len(buf) / 2)
			to := rand.Intn(len(buf) / 2)
			sendbuf := buf[from : from+to]

			log.Debugf("sender sending %d bytes", len(sendbuf))
			n, err := s.Write(sendbuf)
			if err != nil {
				log.Debug("sender error. exiting:", err)
				return
			}

			log.Debugf("sender wrote %d bytes", n)
			sc.sent <- struct{}{}

			if n, err = io.ReadFull(s, buf2[:len(sendbuf)]); err != nil {
				log.Debug("sender error. failed to read:", err)
				return
			}

			log.Debugf("sender read %d bytes", n)
			sc.read <- struct{}{}
		}
	}
}
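// A hedged sketch of driving newSender from the test side for a single round.
// The field names (send, sent, read, close_, closed) are taken from the
// handler above; the host wiring mirrors TestStBackpressureStreamWrite below;
// the echo goroutine and this helper itself are illustrative assumptions, not
// part of the original tests.
func driveOneSendRound(t *testing.T, ctx context.Context) {
	scc, handler := newSender()

	h1 := testutil.GenHostSwarm(t, ctx)
	h2 := testutil.GenHostSwarm(t, ctx)
	h1.SetStreamHandler(protocol.TestingID, handler)

	if err := h2.Connect(ctx, h1.Peerstore().PeerInfo(h1.ID())); err != nil {
		t.Fatal(err)
	}
	s, err := h2.NewStream(protocol.TestingID, h1.ID())
	if err != nil {
		t.Fatal(err)
	}

	// echo whatever the sender writes so its ReadFull can complete
	go io.Copy(s, s)

	sc := <-scc             // the handler registered its channels
	sc.send <- struct{}{}   // ask for one randomly sized write
	<-sc.sent               // the sender wrote its chunk
	<-sc.read               // the sender read the echoed chunk back
	sc.close_ <- struct{}{} // tell the sender to exit
	<-sc.closed
	s.Close()
}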
// ping writes PingSize random bytes to the stream, reads the same number of
// bytes back, checks that they match, and returns the round-trip time.
func ping(s inet.Stream) (time.Duration, error) {
	buf := make([]byte, PingSize)
	u.NewTimeSeededRand().Read(buf)

	before := time.Now()
	if _, err := s.Write(buf); err != nil {
		return 0, err
	}

	rbuf := make([]byte, PingSize)
	if _, err := io.ReadFull(s, rbuf); err != nil {
		return 0, err
	}

	if !bytes.Equal(buf, rbuf) {
		return 0, errors.New("ping packet was incorrect")
	}

	return time.Since(before), nil
}
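// A hedged sketch of measuring a round trip with ping. The host helpers,
// protocol ID, and stream calls mirror TestStBackpressureStreamWrite below;
// the echo handler and this test function are illustrative assumptions (the
// real ping service wiring may differ), and it assumes ping is visible from
// the same test package.
func TestPingRoundTrip(t *testing.T) {
	ctx := context.Background()
	h1 := testutil.GenHostSwarm(t, ctx)
	h2 := testutil.GenHostSwarm(t, ctx)

	// the remote simply echoes bytes so ping sees its packet again
	h2.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
		defer s.Close()
		io.Copy(s, s)
	})

	if err := h1.Connect(ctx, h2.Peerstore().PeerInfo(h2.ID())); err != nil {
		t.Fatal(err)
	}

	s, err := h1.NewStream(protocol.TestingID, h2.ID())
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	rtt, err := ping(s)
	if err != nil {
		t.Fatal(err)
	}
	log.Debugf("ping rtt: %s", rtt)
}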
// RandTestKeyPair generates an RSA test key pair of the given bit size, using
// time-seeded (non-cryptographic) randomness.
func RandTestKeyPair(bits int) (ci.PrivKey, ci.PubKey, error) {
	return ci.GenerateKeyPairWithReader(ci.RSA, bits, u.NewTimeSeededRand())
}
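// A hedged sketch of pairing RandTestKeyPair with a derived peer.ID, following
// the peer.IDFromPublicKey path mentioned in the RandPeerID comment above. The
// helper name and the 512-bit size are arbitrary choices for illustration.
func randTestIdentity() (ci.PrivKey, peer.ID, error) {
	sk, pk, err := RandTestKeyPair(512)
	if err != nil {
		return nil, "", err
	}
	id, err := peer.IDFromPublicKey(pk)
	if err != nil {
		return nil, "", err
	}
	return sk, id, nil
}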
// TestStBackpressureStreamWrite tests whether streams see proper
// backpressure when writing data over the network streams.
func TestStBackpressureStreamWrite(t *testing.T) {

	// senderWrote signals that the sender wrote bytes to remote.
	// the value is the count of bytes written.
	senderWrote := make(chan int, 10000)

	// sender signals it's done (errored out)
	senderDone := make(chan struct{})

	// writeStats lets us listen to all the writes and return
	// how many happened and how much was written
	writeStats := func() (int, int) {
		writes := 0
		bytes := 0
		for {
			select {
			case n := <-senderWrote:
				writes++
				bytes += n
			default:
				log.Debugf("stats: sender wrote %d bytes, %d writes", bytes, writes)
				return bytes, writes
			}
		}
	}

	// sender attempts to write as fast as possible, signaling on the
	// completion of every write. This makes it possible to see how
	// fast it's actually writing. We pair this with a receiver
	// that waits for a signal to read.
	sender := func(s inet.Stream) {
		defer func() {
			s.Close()
			senderDone <- struct{}{}
		}()

		// ready a buffer of random data
		buf := make([]byte, 65536)
		u.NewTimeSeededRand().Read(buf)

		for {
			// send a randomly sized subchunk
			from := rand.Intn(len(buf) / 2)
			to := rand.Intn(len(buf) / 2)
			sendbuf := buf[from : from+to]

			n, err := s.Write(sendbuf)
			if err != nil {
				log.Debug("sender error. exiting:", err)
				return
			}

			log.Debugf("sender wrote %d bytes", n)
			senderWrote <- n
		}
	}

	// receive reads a number of bytes from a stream.
	receive := func(s inet.Stream, expect int) {
		log.Debugf("receiver to read %d bytes", expect)
		rbuf := make([]byte, expect)
		n, err := io.ReadFull(s, rbuf)
		if err != nil {
			t.Error("read failed:", err)
		}
		if expect != n {
			t.Errorf("read len differs: %d != %d", expect, n)
		}
	}

	// ok let's do it!

	// setup the networks
	ctx := context.Background()
	h1 := testutil.GenHostSwarm(t, ctx)
	h2 := testutil.GenHostSwarm(t, ctx)

	// setup sender handler on 1
	h1.SetStreamHandler(protocol.TestingID, sender)

	h2pi := h2.Peerstore().PeerInfo(h2.ID())
	log.Debugf("dialing %s", h2pi.Addrs)
	if err := h1.Connect(ctx, h2pi); err != nil {
		t.Fatalf("Failed to connect: %s", err)
	}

	// open a stream, from 2->1, this is our reader
	s, err := h2.NewStream(protocol.TestingID, h1.ID())
	if err != nil {
		t.Fatal(err)
	}

	// let's make sure r/w works.
	testSenderWrote := func(bytesE int) {
		bytesA, writesA := writeStats()
		if bytesA != bytesE {
			t.Errorf("numbers failed: %d =?= %d bytes, via %d writes", bytesA, bytesE, writesA)
		}
	}

	// trigger lazy connection handshaking
	_, err = s.Read(nil)
	if err != nil {
		t.Fatal(err)
	}

	// rounds of lockstep write + drain
	roundsStart := time.Now()
	roundsTotal := 0
	for roundsTotal < (2 << 20) {
		// let the sender fill its buffers, it will stop sending.
		<-time.After(300 * time.Millisecond)
		b, _ := writeStats()
		testSenderWrote(0)
		testSenderWrote(0)

		// drain it all, wait again
		receive(s, b)
		roundsTotal += b
	}
	roundsTime := time.Since(roundsStart)

	// now read continuously, while we measure stats.
	stop := make(chan struct{})
	contStart := time.Now()

	go func() {
		for {
			select {
			case <-stop:
				return
			default:
				receive(s, 2<<15)
			}
		}
	}()

	contTotal := 0
	for contTotal < (2 << 20) {
		n := <-senderWrote
		contTotal += n
	}
	stop <- struct{}{}
	contTime := time.Since(contStart)

	// now compare! continuous should've been faster AND larger
	if roundsTime < contTime {
		t.Error("continuous should have been faster")
	}

	if roundsTotal < contTotal {
		t.Error("continuous should have been larger, too!")
	}

	// and a couple rounds more for good measure ;)
	for i := 0; i < 3; i++ {
		// let the sender fill its buffers, it will stop sending.
		<-time.After(300 * time.Millisecond)
		b, _ := writeStats()
		testSenderWrote(0)
		testSenderWrote(0)

		// drain it all, wait again
		receive(s, b)
	}

	// this doesn't work :(:
	// // now for the sugar on top: let's tear down the receiver. it should
	// // exit the sender.
	// n1.Close()
	// testSenderWrote(0)
	// testSenderWrote(0)
	// select {
	// case <-time.After(2 * time.Second):
	// 	t.Error("receiver shutdown failed to exit sender")
	// case <-senderDone:
	// 	log.Info("handler backpressure works!")
	// }
}