func TestCallbacksWork(t *testing.T) { fake := new(FakeStream) var sent int64 var recv int64 sentCB := func(n int64, proto protocol.ID, p peer.ID) { sent += n } recvCB := func(n int64, proto protocol.ID, p peer.ID) { recv += n } ms := newMeteredStream(fake, protocol.ID("TEST"), peer.ID("PEER"), recvCB, sentCB) toWrite := int64(100000) toRead := int64(100000) fake.ReadBuf = io.LimitReader(randbo.New(), toRead) writeData := io.LimitReader(randbo.New(), toWrite) n, err := io.Copy(ms, writeData) if err != nil { t.Fatal(err) } if n != toWrite { t.Fatal("incorrect write amount") } if toWrite != sent { t.Fatal("incorrectly reported writes", toWrite, sent) } n, err = io.Copy(ioutil.Discard, ms) if err != nil { t.Fatal(err) } if n != toRead { t.Fatal("incorrect read amount") } if toRead != recv { t.Fatal("incorrectly reported reads") } }
func main() { log.SetPrefix("prandom: ") _, err := io.Copy(os.Stdout, randbo.New()) if err != nil { log.Fatal(err) } }
func BenchmarkBlockRewrites(n *core.IpfsNode, cfg *BenchCfg) error { buf := make([]byte, cfg.Blocksize) randbo.New().Read(buf) blk := blocks.NewBlock(buf) // write the block first, before starting the benchmark. // we're just looking at the time it takes to write a block thats already // been written k, err := n.Blocks.AddBlock(blk) if err != nil { return err } f := func(b *testing.B) { for i := 0; i < b.N; i++ { _, err := n.Blocks.AddBlock(blk) if err != nil { b.Fatal(err) } } } br := testing.Benchmark(f) fmt.Printf("BlockRewrites:\t\t%s\n", br) // clean up err = n.Blocks.DeleteBlock(k) if err != nil { return err } return nil }
func BenchmarkConsecutivePut(b *testing.B) { r := rand.New() var blocks [][]byte var keys []datastore.Key for i := 0; i < b.N; i++ { blk := make([]byte, 256*1024) r.Read(blk) blocks = append(blocks, blk) key := base32.StdEncoding.EncodeToString(blk[:8]) keys = append(keys, datastore.NewKey(key)) } temp, cleanup := tempdir(b) defer cleanup() fs, err := flatfs.New(temp, 2) if err != nil { b.Fatalf("New fail: %v\n", err) } b.ResetTimer() for i := 0; i < b.N; i++ { err := fs.Put(keys[i], blocks[i]) if err != nil { b.Fatal(err) } } }
func benchAddSize(n *core.IpfsNode, cfg *BenchCfg, size int64) error { f := func(b *testing.B) { b.SetBytes(size) for i := 0; i < b.N; i++ { r := io.LimitReader(randbo.New(), size) spl := chunk.NewSizeSplitter(r, cfg.Blocksize) _, err := importer.BuildDagFromReader(n.DAG, spl, nil) if err != nil { fmt.Printf("ERRROR: ", err) b.Fatal(err) } } } br := testing.Benchmark(f) bs := humanize.IBytes(uint64(size)) fmt.Printf("Add File (%s):\t%s\n", bs, br) err := cr.GarbageCollect(n, context.Background()) if err != nil { return err } return nil }
func BenchmarkRandomBlockWrites(n *core.IpfsNode, cfg *BenchCfg) error { buf := make([]byte, cfg.Blocksize) read := randbo.New() var keys []key.Key f := func(b *testing.B) { b.SetBytes(cfg.Blocksize) for i := 0; i < b.N; i++ { read.Read(buf) blk := blocks.NewBlock(buf) k, err := n.Blocks.AddBlock(blk) if err != nil { b.Fatal(err) } keys = append(keys, k) } } br := testing.Benchmark(f) fmt.Printf("RandomBlockWrites:\t%s\n", br) // clean up for _, k := range keys { err := n.Blocks.DeleteBlock(k) if err != nil { return err } } return nil }
func main() { list := flag.Bool("l", false, "listen on the given address") spew := flag.Bool("spew", false, "spew random data on the connection") flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage: %s [-l] <host> <port>\n", os.Args[0]) } flag.Parse() c := make(chan os.Signal) signal.Notify(c, os.Interrupt) if len(flag.Args()) < 2 { flag.Usage() os.Exit(1) } addr := fmt.Sprintf("%s:%s", flag.Arg(0), flag.Arg(1)) var con io.ReadWriteCloser if *list { sock, err := utp.NewSocket("udp", addr) if err != nil { fmt.Fprintf(os.Stderr, "create socket failed: %s\n", err) os.Exit(1) } defer sock.Close() utpcon, err := sock.Accept() if err != nil { fmt.Fprintf(os.Stderr, "accept failed: %s\n", err) os.Exit(1) } con = utpcon } else { utpcon, err := utp.Dial(addr) if err != nil { fmt.Fprintf(os.Stderr, "dial failed: %s\n", err) os.Exit(1) } con = utpcon } var in io.Reader = os.Stdin if *spew { in = rand.New() } go func() { <-c con.Close() }() beNetcat(con, in) }
func createRandomDataFile(b *testing.B, length int) string { f, err := os.Create("/tmp/test") if err != nil { b.Fatalf("couldn't create file: %v\n", err) } rand := randbo.New() io.CopyN(f, rand, int64(length)) f.Close() return "/tmp/test" }
// newID returns a pseudo-random, URL-encoded, base64 // string used for connection identifiers. func newID() string { buf := make([]byte, 15) n, err := randbo.New().Read(buf) if err != nil { glog.Fatal(err) } if n != len(buf) { glog.Fatal("short read") } return base64.URLEncoding.EncodeToString(buf) }
// newsparkyServer creates a sparkyServer object and pre-fills some random data func newsparkyServer() sparkyServer { ss := sparkyServer{} // Make a 10MB byte slice ss.randomData = make([]byte, 1024*1024*10) // Fill our 10MB byte slice with random data _, err := randbo.New().Read(ss.randomData) if err != nil { log.Fatalln("error generating random data:", err) } return ss }
func main() { if len(os.Args) < 2 { fmt.Println("need to specify size!") os.Exit(1) } n, err := strconv.Atoi(os.Args[1]) if err != nil { fmt.Println(err) os.Exit(1) } r := randbo.New() io.CopyN(os.Stdout, r, int64(n)) }
func RunBatchTest(t *testing.T, ds dstore.Batching) { batch, err := ds.Batch() if err != nil { t.Fatal(err) } r := rand.New() var blocks [][]byte var keys []dstore.Key for i := 0; i < 20; i++ { blk := make([]byte, 256*1024) r.Read(blk) blocks = append(blocks, blk) key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8])) keys = append(keys, key) err := batch.Put(key, blk) if err != nil { t.Fatal(err) } } // Ensure they are not in the datastore before comitting for _, k := range keys { _, err := ds.Get(k) if err == nil { t.Fatal("should not have found this block") } } // commit, write them to the datastore err = batch.Commit() if err != nil { t.Fatal(err) } for i, k := range keys { blk, err := ds.Get(k) if err != nil { t.Fatal(err) } if !bytes.Equal(blk.([]byte), blocks[i]) { t.Fatal("blocks not correct!") } } }
// NewsparkyClient creates a new sparkyClient object func newsparkyClient() *sparkyClient { m := sparkyClient{} // Make a 10MB byte slice to hold our random data blob m.randomData = make([]byte, 1024*1024*10) // Use a randbo Reader to fill our big slice with random data _, err := randbo.New().Read(m.randomData) if err != nil { log.Fatalln("error generating random data:", err) } // Create a bytes.Reader over this byte slice m.randReader = bytes.NewReader(m.randomData) return &m }
// MeteredCopy copies to or from a net.Conn, keeping count of the data it passes
func (ss *sparkyServer) MeteredCopy() {
	var err error
	var timer *time.Timer

	// Set a timer that we'll use to stop the test. If we're running an inbound test,
	// we extend the timer by two seconds to allow the client to finish its sending.
	// NOTE(review): if ss.testType is neither inbound nor outbound, timer stays
	// nil and the `<-timer.C` below dereferences a nil pointer — confirm callers
	// only invoke this with one of the two test types.
	if ss.testType == inbound {
		timer = time.NewTimer(time.Second * time.Duration(testLength+2))
	} else if ss.testType == outbound {
		timer = time.NewTimer(time.Second * time.Duration(testLength))
	}

	// Create a new randbo Reader
	rnd := randbo.New()

	// Pump data until the timer fires or the connection errors out.
	for {
		select {
		case <-timer.C:
			if *debug {
				log.Println(testLength, "seconds have elapsed.")
			}
			return
		default:
			// Copy our random data from randbo to our ResponseWriter, 100KB at a time
			switch ss.testType {
			case outbound:
				_, err = io.CopyN(ss.client, rnd, 1024*blockSize)
			case inbound:
				_, err = io.CopyN(ioutil.Discard, ss.client, 1024*blockSize)
			}
			// io.EOF is normal when a client drops off after the test
			if err != nil {
				if err != io.EOF {
					log.Println("Error copying:", err)
				}
				return
			}
			// // With each 100K copied, we send a message on our blockTicker channel
			ss.blockTicker <- true
		}
	}
}
func TestEcho(t *testing.T) { a, b := net.Pipe() mpa := NewMultiplex(a, false) mpb := NewMultiplex(b, true) mes := make([]byte, 40960) rand.New().Read(mes) go func() { s, err := mpb.Accept() if err != nil { t.Fatal(err) } defer s.Close() io.Copy(s, s) }() s := mpa.NewStream() _, err := s.Write(mes) if err != nil { t.Fatal(err) } buf := make([]byte, len(mes)) n, err := io.ReadFull(s, buf) if err != nil { t.Fatal(err) } if n != len(mes) { t.Fatal("read wrong amount") } if err := arrComp(buf, mes); err != nil { t.Fatal(err) } s.Close() mpa.Close() mpb.Close() }
func createInternalName(suffix string) string { buf := make([]byte, 30) n, err := randbo.New().Read(buf) if err != nil { log.Fatalf("createInternalName failed: %s", err) } if n != len(buf) { log.Fatalf("createInternalName failed: (n = %d) != (len = %d)", n, len(buf)) } hex := fmt.Sprintf("%x.%s", buf, suffix) return strings.Join( []string{hex[0:1], hex[1:2], hex[2:3], hex[3:4], hex}, "/", ) }
func BenchmarkBatchedPut(b *testing.B) { r := rand.New() var blocks [][]byte var keys []datastore.Key for i := 0; i < b.N; i++ { blk := make([]byte, 256*1024) r.Read(blk) blocks = append(blocks, blk) key := base32.StdEncoding.EncodeToString(blk[:8]) keys = append(keys, datastore.NewKey(key)) } temp, cleanup := tempdir(b) defer cleanup() fs, err := flatfs.New(temp, 2, false) if err != nil { b.Fatalf("New fail: %v\n", err) } b.ResetTimer() for i := 0; i < b.N; { batch, err := fs.Batch() if err != nil { b.Fatal(err) } for n := i; i-n < 512 && i < b.N; i++ { err := batch.Put(keys[i], blocks[i]) if err != nil { b.Fatal(err) } } err = batch.Commit() if err != nil { b.Fatal(err) } } }
func worker(done chan struct{}, wg *sync.WaitGroup) { defer wg.Done() memc, err := gomemcache.Connect(*queueHost, *queuePort) defer memc.Close() if err != nil { log.Println(err) } setsRemaning := *numSets getsRemaning := *numGets getSetRatio := float32(*numGets) / float32(*numSets) dataSource := &dataSource{make([]byte, *itemSize), randbo.New()} for { if getsRemaning > 0 { if setsRemaning < 1 { err = get(memc) getsRemaning-- } else if float32(getsRemaning)/float32(setsRemaning) > getSetRatio { err = get(memc) getsRemaning-- } else { err = set(memc, dataSource) setsRemaning-- } } else if setsRemaning > 0 { err = set(memc, dataSource) setsRemaning-- } else { return } if err != nil && err.Error() != "memcache: not found" { log.Println(err) memc, err = gomemcache.Connect(*queueHost, *queuePort) if err != nil { log.Println(err) } } } }
func benchDiskWriteSize(dir string, size int64) error { benchdir := path.Join(dir, fmt.Sprintf("benchfiles-%d", size)) err := os.Mkdir(benchdir, 0777) if err != nil { return err } n := 0 f := func(b *testing.B) { b.SetBytes(size) r := randbo.New() for i := 0; i < b.N; i++ { n++ fi, err := os.Create(path.Join(dir, fmt.Sprint(n))) if err != nil { fmt.Println(err) b.Fatal(err) } _, err = io.CopyN(fi, r, size) if err != nil { fi.Close() fmt.Println(err) b.Fatal(err) } fi.Close() } } br := testing.Benchmark(f) bs := humanize.IBytes(uint64(size)) fmt.Printf("DiskWrite (%s):\t%s\n", bs, br) err = os.RemoveAll(benchdir) if err != nil { return err } return nil }
func RunBatchDeleteTest(t *testing.T, ds dstore.Batching) { r := rand.New() var keys []dstore.Key for i := 0; i < 20; i++ { blk := make([]byte, 16) r.Read(blk) key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8])) keys = append(keys, key) err := ds.Put(key, blk) if err != nil { t.Fatal(err) } } batch, err := ds.Batch() if err != nil { t.Fatal(err) } for _, k := range keys { err := batch.Delete(k) if err != nil { t.Fatal(err) } } err = batch.Commit() if err != nil { t.Fatal(err) } for _, k := range keys { _, err := ds.Get(k) if err == nil { t.Fatal("shouldnt have found block") } } }
package main

import (
	"github.com/dustin/randbo"
)

const (
	// ReadableText is the alphabet tokens are drawn from.
	ReadableText = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
)

var (
	// UsedTokens records every string handed out so far, to guarantee
	// uniqueness within this process.
	// NOTE(review): this map grows without bound — confirm callers expect
	// process-lifetime uniqueness rather than a bounded cache.
	UsedTokens = make(map[string]struct{})
	// CryptoReader is the shared random source for token generation.
	// NOTE(review): randbo is a fast PRNG, and the name suggests a
	// cryptographically secure reader — confirm this is acceptable for
	// security-sensitive callers.
	CryptoReader = randbo.New()
)

// GenerateRandomString returns a random string of length n drawn from
// ReadableText that has not been returned before in this process. It
// loops until it produces a token not already in UsedTokens.
func GenerateRandomString(n int) string {
	for {
		bytes := make([]byte, n)
		CryptoReader.Read(bytes)
		for k, v := range bytes {
			// Map each random byte onto the readable alphabet.
			bytes[k] = ReadableText[v%byte(len(ReadableText))]
		}
		str := string(bytes)
		if _, ok := UsedTokens[str]; !ok {
			UsedTokens[str] = struct{}{}
			return str
		}
	}
}
// Kicks off a metered copy (throughput test) by sending a command to the server
// and then performing the appropriate I/O copy, sending "ticks" by channel as
// each block of data passes through.
// NOTE(review): if testType is neither inbound nor outbound, tl stays zero
// (the timer fires immediately) and the second switch matches nothing, so
// measurerDone is never closed — confirm callers only pass the two known types.
func (sc *sparkyClient) MeteredCopy(testType command, measurerDone chan<- struct{}) {
	var rnd io.Reader
	var tl time.Duration

	// Connect to the remote sparkyfish server
	sc.beginSession()
	defer sc.conn.Close()

	// Send the appropriate command to the sparkyfish server to initiate our
	// throughput test
	switch testType {
	case inbound:
		// For inbound tests, we bump our timer by 2 seconds to account for
		// the remote server's test startup time
		tl = time.Second * time.Duration(throughputTestLength+2)

		// Send the SND command to the remote server, requesting a download test
		// (remote sends).
		err := sc.writeCommand("SND")
		if err != nil {
			termui.Close()
			log.Fatalln(err)
		}
	case outbound:
		tl = time.Second * time.Duration(throughputTestLength)

		// Send the RCV command to the remote server, requesting an upload test
		// (remote receives).
		err := sc.writeCommand("RCV")
		if err != nil {
			termui.Close()
			log.Fatalln(err)
		}

		// Create a new randbo Reader, used to generate our random data that we'll upload
		rnd = randbo.New()
	}

	// Set a timer for running the tests
	timer := time.NewTimer(tl)

	switch testType {
	case inbound:
		// Receive, tally, and discard incoming data as fast as we can until the sender stops sending or the timer expires
		for {
			select {
			case <-timer.C:
				// Timer has elapsed and test is finished
				close(measurerDone)
				return
			default:
				// Copy data from our net.Conn to the rubbish bin in (blockSize) KB chunks
				_, err := io.CopyN(ioutil.Discard, sc.conn, 1024*blockSize)
				if err != nil {
					// Handle the EOF when the test timer has expired at the remote end.
					if err == io.EOF || err == io.ErrClosedPipe || err == syscall.EPIPE {
						close(measurerDone)
						return
					}
					log.Println("Error copying:", err)
					return
				}
				// With each chunk copied, we send a message on our blockTicker channel
				sc.blockTicker <- true
			}
		}
	case outbound:
		// Send and tally outgoing data as fast as we can until the receiver stops receiving or the timer expires
		for {
			select {
			case <-timer.C:
				// Timer has elapsed and test is finished
				close(measurerDone)
				return
			default:
				// Copy data from our RNG to the net.Conn in (blockSize) KB chunks
				_, err := io.CopyN(sc.conn, rnd, 1024*blockSize)
				if err != nil {
					// Treat remote hang-up the same as the inbound case.
					if err == io.EOF || err == io.ErrClosedPipe || err == syscall.EPIPE {
						close(measurerDone)
						return
					}
					log.Println("Error copying:", err)
					return
				}
				// With each chunk copied, we send a message on our blockTicker channel
				sc.blockTicker <- true
			}
		}
	}
}
func TestStress(t *testing.T) { mw := NewMirrorWriter() nreaders := 20 var readers []io.Reader for i := 0; i < nreaders; i++ { pr, pw := io.Pipe() mw.AddWriter(pw) readers = append(readers, pr) } hashout := make(chan []byte) numwriters := 20 writesize := 1024 writecount := 300 f := func(r io.Reader) { h := fnv.New64a() sum, err := io.Copy(h, r) if err != nil { t.Fatal(err) } if sum != int64(numwriters*writesize*writecount) { t.Fatal("read wrong number of bytes") } hashout <- h.Sum(nil) } for _, r := range readers { go f(r) } work := sync.WaitGroup{} for i := 0; i < numwriters; i++ { work.Add(1) go func() { defer work.Done() r := randbo.New() buf := make([]byte, writesize) for j := 0; j < writecount; j++ { r.Read(buf) mw.Write(buf) time.Sleep(time.Millisecond * 5) } }() } work.Wait() mw.Close() check := make(map[string]bool) for i := 0; i < nreaders; i++ { h := <-hashout check[string(h)] = true } if len(check) > 1 { t.Fatal("writers received different data!") } }
func StartSender(s *client, signal chan error, opts *Options, wg *sync.WaitGroup) { ch, err := s.openChannel() if err != nil { signal <- err return } group, err := s.flake.Next() if err != nil { signal <- err return } h := murmur3.New32() if len(opts.Args.MessageBody) > 0 { m := make(map[string]string) for _, kv := range opts.Args.MessageBody { s := strings.SplitN(kv, "=", 2) if len(s) == 1 { m[s[0]] = "" } else { m[s[0]] = s[1] } } encoded, err := json.Marshal(m) if err != nil { signal <- err return } sum, err := s.send(ch, group, opts, encoded) if err != nil { signal <- err return } h.Write(sum) } else { r := rand.New(rand.NewSource(time.Now().UnixNano())) for i := 0; i < opts.Count; i++ { sizeInKb := opts.Size * 1024 size := int(sizeInKb) if opts.StdDev > 0 { dev := float64(opts.StdDev) s := r.NormFloat64()*dev*100 + sizeInKb size = int(s) } if size == 0 { size++ } buf := make([]byte, size) _, err = randbo.New().Read(buf) if err != nil { signal <- err return } sum, err := s.send(ch, group, opts, buf) if err != nil { signal <- err return } h.Write(sum) time.Sleep(time.Duration(opts.Interval) * time.Millisecond) } } if opts.Entropy { log.Infof("[%d] sender entropy (%x)", group, h.Sum(nil)) } wg.Done() signal <- nil }
/* {{{ func EnvInit(c *web.C, h http.Handler) http.Handler
 * EnvInit initializes the per-request environment: it builds the access-log
 * record, assigns a random request/session ID, parses the URL path into
 * endpoint/rowkey/selector entries, resolves the real client IP behind
 * proxies, and sets up the RESTContext before delegating to the wrapped
 * handler.
 */
func EnvInit(c *web.C, h http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		ac := new(Access) // access-log record for this request
		ac.Time = time.Now()
		ac.Method = r.Method
		ac.URI = r.RequestURI
		ac.Proto = r.Proto
		ac.Host = r.Host
		ac.InHeader = &r.Header

		// env
		if c.Env == nil {
			//c.Env = make(map[string]interface{})
			c.Env = make(map[interface{}]interface{})
		}

		// make rand string(for debug, session...)
		buf := make([]byte, 16)
		randbo.New().Read(buf) // reputedly the fastest random-string source
		ac.Session = fmt.Sprintf("%x", buf)
		c.Env[RequestIDKey] = ac.Session
		c.Env[LogPrefixKey] = "[" + ac.Session[:10] + "]" // only show the first ten characters

		Trace("[%s] [%s %s] started", ac.Session[:10], r.Method, r.RequestURI)

		lw := utils.WrapWriter(w)

		// Split the URL path into endpoint / rowkey / selector pieces.
		pathPieces := strings.Split(r.URL.Path, "/")
		for off, piece := range pathPieces {
			if piece != "" {
				if off == 1 {
					c.Env[EndpointKey] = piece
				}
				if off == 2 && piece[0] != '@' { // pieces starting with '@' are selectors
					c.Env[RowkeyKey] = piece
				}
				if off > 1 && piece[0] == '@' {
					c.Env[SelectorKey] = piece
				}
			}
		}

		// real IP (handles the case of running behind a proxy)
		if rip := realIP(r); rip != "" {
			c.Env[OriginalRemoteAddrKey] = r.RemoteAddr
			r.RemoteAddr = rip
		}
		ac.IP = r.RemoteAddr

		// init RESTContext
		// NOTE(review): rcHolder looks like a package-level variable that is
		// reassigned on every request here — confirm this is safe under
		// concurrent requests.
		var rcErr error
		var rc *RESTContext
		rc, rcHolder, rcErr = RCHolder(*c, w, r)
		rc.Access = ac
		rc.Access.ReqLength = len(rc.RequestBody)
		if rcErr != nil {
			rc.RESTBadRequest(rcErr)
			return
		}

		h.ServeHTTP(lw, r)
	}
	return http.HandlerFunc(fn)
}
"github.com/stretchr/testify/assert" // "github.com/stuphlabs/pullcord" "io/ioutil" "net/http" "regexp" "strconv" "strings" "testing" ) var exampleCookieValueRegex = regexp.MustCompile( "^[0-9A-Fa-f]{" + strconv.Itoa(minSessionCookieValueRandSize*2) + "}$", ) var randgen = randbo.New() // gostring is a testing helper function that serializes any object. func gostring(i interface{}) string { return fmt.Sprintf("%#v", i) } var cookieMaskTestPage = falcore.NewRequestFilter( func(req *falcore.Request) *http.Response { var content = "<html><body><h1>cookies</h1><ul>" for _, cke := range req.HttpRequest.Cookies() { content += "<li class=\"cke\">" + cke.String() + "</li>" }