// actorWriteFile writes a random payload at a random offset of a randomly
// chosen file under d.
func actorWriteFile(d *Directory) error {
	fi, err := randomFile(d)
	if err != nil {
		return err
	}
	if fi == nil {
		return nil
	}

	size := rand.Intn(1024)
	buf := make([]byte, size)
	randbo.New().Read(buf)

	s, err := fi.Size()
	if err != nil {
		return err
	}

	wfd, err := fi.Open(OpenWriteOnly, true)
	if err != nil {
		return err
	}

	// rand.Int63n panics when its argument is zero, so only pick a random
	// offset for non-empty files.
	var offset int64
	if s > 0 {
		offset = rand.Int63n(s)
	}

	n, err := wfd.WriteAt(buf, offset)
	if err != nil {
		return err
	}
	if n != size {
		return fmt.Errorf("didn't write enough")
	}

	return wfd.Close()
}
func actorMakeFile(d *Directory) error {
	d, err := randomWalk(d, rand.Intn(7))
	if err != nil {
		return err
	}

	name := randomName()
	f, err := NewFile(name, &dag.Node{Data: ft.FilePBData(nil, 0)}, d, d.dserv)
	if err != nil {
		return err
	}

	wfd, err := f.Open(OpenWriteOnly, true)
	if err != nil {
		return err
	}

	r := io.LimitReader(randbo.New(), int64(77*rand.Intn(123)))
	_, err = io.Copy(wfd, r)
	if err != nil {
		return err
	}

	return wfd.Close()
}
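// The following is a minimal illustrative sketch (not part of the original
// suite) of how the two actor functions above might be driven together in a
// randomized stress test. It assumes the same setupRoot helper used by
// TestConcurrentReads below; everything else uses only identifiers already
// defined in this file.
func TestActorsSketch(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// setupRoot and the *Directory root value come from the surrounding test
	// helpers (see TestConcurrentReads).
	_, rt := setupRoot(ctx, t)
	rootdir := rt.GetValue().(*Directory)

	actors := []func(*Directory) error{actorMakeFile, actorWriteFile}
	for i := 0; i < 500; i++ {
		// Pick an actor at random and let it mutate the tree.
		if err := actors[rand.Intn(len(actors))](rootdir); err != nil {
			t.Fatal(err)
		}
	}
}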
func BenchmarkConsecutivePut(b *testing.B) {
	// rand here is assumed to be a randbo-style random Reader imported under
	// the name "rand" (math/rand's New requires a Source argument).
	r := rand.New()
	var blocks [][]byte
	var keys []datastore.Key
	for i := 0; i < b.N; i++ {
		blk := make([]byte, 256*1024)
		r.Read(blk)
		blocks = append(blocks, blk)

		key := base32.StdEncoding.EncodeToString(blk[:8])
		keys = append(keys, datastore.NewKey(key))
	}
	temp, cleanup := tempdir(b)
	defer cleanup()

	fs, err := flatfs.New(temp, 2)
	if err != nil {
		b.Fatalf("New fail: %v\n", err)
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		err := fs.Put(keys[i], blocks[i])
		if err != nil {
			b.Fatal(err)
		}
	}
}
func RunBatchTest(t *testing.T, ds dstore.Batching) {
	batch, err := ds.Batch()
	if err != nil {
		t.Fatal(err)
	}

	// rand here is assumed to be a randbo-style random Reader imported under
	// the name "rand".
	r := rand.New()
	var blocks [][]byte
	var keys []dstore.Key
	for i := 0; i < 20; i++ {
		blk := make([]byte, 256*1024)
		r.Read(blk)
		blocks = append(blocks, blk)

		key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
		keys = append(keys, key)

		err := batch.Put(key, blk)
		if err != nil {
			t.Fatal(err)
		}
	}

	// Ensure they are not in the datastore before committing
	for _, k := range keys {
		_, err := ds.Get(k)
		if err == nil {
			t.Fatal("should not have found this block")
		}
	}

	// Commit, writing them to the datastore
	err = batch.Commit()
	if err != nil {
		t.Fatal(err)
	}

	for i, k := range keys {
		blk, err := ds.Get(k)
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(blk.([]byte), blocks[i]) {
			t.Fatal("blocks not correct!")
		}
	}
}
func TestConcurrentReads(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ds, rt := setupRoot(ctx, t)
	rootdir := rt.GetValue().(*Directory)

	path := "a/b/c"
	d := mkdirP(t, rootdir, path)

	buf := make([]byte, 2048)
	randbo.New().Read(buf)
	fi := fileNodeFromReader(t, ds, bytes.NewReader(buf))
	err := d.AddChild("afile", fi)
	if err != nil {
		t.Fatal(err)
	}

	var wg sync.WaitGroup
	nloops := 100
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(me int) {
			defer wg.Done()
			mybuf := make([]byte, len(buf))
			for j := 0; j < nloops; j++ {
				offset := rand.Intn(len(buf))
				length := rand.Intn(len(buf) - offset)

				err := readFile(rt, "/a/b/c/afile", int64(offset), mybuf[:length])
				if err != nil {
					t.Error("readfile failed: ", err)
					return
				}

				if !bytes.Equal(mybuf[:length], buf[offset:offset+length]) {
					t.Error("incorrect read!")
				}
			}
		}(i)
	}
	wg.Wait()
}
func TestEcho(t *testing.T) {
	a, b := net.Pipe()
	mpa := NewMultiplex(a, false)
	mpb := NewMultiplex(b, true)

	mes := make([]byte, 40960)
	// rand here is assumed to be a randbo-style random Reader imported under
	// the name "rand".
	rand.New().Read(mes)
	go func() {
		s, err := mpb.Accept()
		if err != nil {
			// t.Fatal must only be called from the goroutine running the
			// test, so report the error and bail out instead.
			t.Error(err)
			return
		}
		defer s.Close()
		io.Copy(s, s)
	}()

	s := mpa.NewStream()

	_, err := s.Write(mes)
	if err != nil {
		t.Fatal(err)
	}

	buf := make([]byte, len(mes))
	n, err := io.ReadFull(s, buf)
	if err != nil {
		t.Fatal(err)
	}

	if n != len(mes) {
		t.Fatal("read wrong amount")
	}

	if err := arrComp(buf, mes); err != nil {
		t.Fatal(err)
	}
	s.Close()

	mpa.Close()
	mpb.Close()
}
func RunBatchDeleteTest(t *testing.T, ds dstore.Batching) {
	// rand here is assumed to be a randbo-style random Reader imported under
	// the name "rand".
	r := rand.New()
	var keys []dstore.Key
	for i := 0; i < 20; i++ {
		blk := make([]byte, 16)
		r.Read(blk)

		key := dstore.NewKey(base32.StdEncoding.EncodeToString(blk[:8]))
		keys = append(keys, key)

		err := ds.Put(key, blk)
		if err != nil {
			t.Fatal(err)
		}
	}

	batch, err := ds.Batch()
	if err != nil {
		t.Fatal(err)
	}

	for _, k := range keys {
		err := batch.Delete(k)
		if err != nil {
			t.Fatal(err)
		}
	}

	err = batch.Commit()
	if err != nil {
		t.Fatal(err)
	}

	for _, k := range keys {
		_, err := ds.Get(k)
		if err == nil {
			t.Fatal("shouldn't have found block")
		}
	}
}
func TestStress(t *testing.T) {
	mw := NewMirrorWriter()

	nreaders := 20

	var readers []io.Reader
	for i := 0; i < nreaders; i++ {
		pr, pw := io.Pipe()
		mw.AddWriter(pw)
		readers = append(readers, pr)
	}

	hashout := make(chan []byte)

	numwriters := 20
	writesize := 1024
	writecount := 300

	f := func(r io.Reader) {
		h := fnv.New64a()
		sum, err := io.Copy(h, r)
		if err != nil {
			// t.Fatal must only be called from the goroutine running the
			// test; report errors from reader goroutines with t.Error.
			t.Error(err)
			return
		}

		if sum != int64(numwriters*writesize*writecount) {
			t.Error("read wrong number of bytes")
			return
		}

		hashout <- h.Sum(nil)
	}

	for _, r := range readers {
		go f(r)
	}

	work := sync.WaitGroup{}
	for i := 0; i < numwriters; i++ {
		work.Add(1)
		go func() {
			defer work.Done()
			r := randbo.New()
			buf := make([]byte, writesize)
			for j := 0; j < writecount; j++ {
				r.Read(buf)
				mw.Write(buf)
				time.Sleep(time.Millisecond * 5)
			}
		}()
	}

	work.Wait()
	mw.Close()

	check := make(map[string]bool)
	for i := 0; i < nreaders; i++ {
		h := <-hashout
		check[string(h)] = true
	}

	if len(check) > 1 {
		t.Fatal("writers received different data!")
	}
}