func Test_Transport_Download_Many(t *testing.T) { // t.Skip() root := "Download-many" halt, tree, ml, _, tr := seed(t, root) defer halt() assert.NoError(t, tree.PopulateN(10, 1000)) mx, err := ml.FeedManifests(true, true, true, lists.NewFileList().ListDir(tree.CWD)...) assert.NoError(t, err) err = tr.Upload(mx) assert.NoError(t, err) // Kill some blobs tree.KillBLOB("file-two.bin") tree.KillBLOB("one/file-two.bin") tree.KillBLOB("one/file-three.bin") req := lists.BlobMap{ "file-two.bin": mx["file-two.bin"], "one/file-two.bin": mx["one/file-two.bin"], "one/file-three.bin": mx["one/file-three.bin"], } for i := 0; i < 256; i++ { nm := fmt.Sprintf("big/file-big-%d.bin", i) tree.KillBLOB(nm) req[nm] = mx[nm] } err = tr.Download(req) assert.NoError(t, err) }
func Test_Transport_FetchSpec(t *testing.T) { // t.Skip() root := "Spec" halt, tree, ml, _, tr := seed(t, root) defer halt() mx, err := ml.FeedManifests(true, true, true, lists.NewFileList().ListDir(tree.CWD)...) assert.NoError(t, err) err = tr.Upload(mx) assert.NoError(t, err) // make spec nameMap := map[string]proto.ID{} for name, m := range mx { nameMap[name] = m.ID } spec1, err := proto.NewSpec(time.Now().UnixNano(), nameMap, []string{}) assert.NoError(t, err) err = tr.UploadSpec(spec1) assert.NoError(t, err) spec2, err := tr.GetSpec(spec1.ID) assert.NoError(t, err) assert.Equal(t, spec1.ID, spec2.ID) }
func Test_Assembler_Assemble(t *testing.T) { tree := fixtures.NewTree("Assembler", "") assert.NoError(t, tree.Populate()) defer tree.Squash() ml, err := model.New(tree.CWD, false, proto.CHUNK_SIZE, 16) assert.NoError(t, err) names := []string{ "file-two.bin", "one/file-two.bin", "one/file-three.bin", } // get manifests mx, err := ml.FeedManifests(true, true, true, lists.NewFileList(names...).ListDir(tree.CWD)...) assert.NoError(t, err) a, err := model.NewAssembler(ml) assert.NoError(t, err) for name, man := range mx { f, err := os.Open(filepath.Join(tree.CWD, name)) assert.NoError(t, err) for _, chunk := range man.Chunks { buf := make([]byte, chunk.Size) _, err = f.Read(buf) assert.NoError(t, err) err = a.StoreChunk(bytes.NewReader(buf), chunk.ID) assert.NoError(t, err) } } // Kill some blobs tree.KillBLOB("file-two.bin") tree.KillBLOB("one/file-two.bin") err = a.Done(mx) assert.NoError(t, err) mx1, err := ml.FeedManifests(true, true, true, lists.NewFileList(names...).ListDir(tree.CWD)...) assert.NoError(t, err) assert.Equal(t, mx, mx1) }
func Test_Filelist1(t *testing.T) { lister := lists.NewFileList() assert.Equal(t, []string{"a", "b"}, lister.List([]string{ "a", "b", ".hidden", "deep/.hidden", })) }
func Test_Transport_Upload(t *testing.T) { root := "Upload" halt, tree, ml, _, tr := seed(t, root) defer halt() mx, err := ml.FeedManifests(true, true, true, lists.NewFileList().ListDir(tree.CWD)...) assert.NoError(t, err) err = tr.Upload(mx) assert.NoError(t, err) }
func Test_Model_IsBlobs(t *testing.T) { tree := fixtures.NewTree("is-blob", "") assert.NoError(t, tree.Populate()) defer tree.Squash() names := lists.NewFileList().ListDir(tree.CWD) m, err := model.New(tree.CWD, false, 1024*1024, 16) assert.NoError(t, err) _, err = m.IsBlobs(names...) assert.NoError(t, err) }
func Test_Model_FeedManifests_Nil(t *testing.T) { tree := fixtures.NewTree("feed-manifests", "") assert.NoError(t, tree.Populate()) defer tree.Squash() names := lists.NewFileList().ListDir(tree.CWD) tree.KillBLOB("file-one.bin") m, err := model.New(tree.CWD, false, 1024*1024, 16) assert.NoError(t, err) lx, err := m.FeedManifests(true, true, false, names...) assert.Error(t, err) assert.Len(t, lx.Names(), 15) }
func Test_Transport_CreateUpload(t *testing.T) { root := "DeclareUpload" halt, tree, ml, _, tr := seed(t, root) defer halt() mx, err := ml.FeedManifests(true, true, true, lists.NewFileList().ListDir(tree.CWD)...) assert.NoError(t, err) upload := transport.NewUpload(tr, time.Hour) toUp, err := upload.SendCreateUpload(mx) assert.NoError(t, err) assert.Len(t, toUp, 4) }
func (c *UpCmd) Run(args ...string) (err error) { var mod *model.Model if mod, err = model.New(c.WD, c.UseGit, c.ChunkSize, c.PoolSize); err != nil { return } feed := lists.NewFileList(args...).ListDir(c.WD) isDirty, dirty, err := mod.Check(feed...) if err != nil { return } if isDirty { err = fmt.Errorf("dirty files in working tree %s", dirty) return } if c.UseGit { // filter by attrs feed, err = mod.Git.FilterByAttr("bar", feed...) } blobs, err := mod.FeedManifests(true, false, true, feed...) if err != nil { return } logx.Debugf("collected blobs %s", blobs.IDMap()) trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize) err = trans.Upload(blobs) if err != nil { return } if c.Squash { if err = mod.SquashBlobs(blobs); err != nil { return } if c.UseGit { err = mod.Git.UpdateIndex(blobs.Names()...) } } return }
func Test_Model_FeedManifests_Many(t *testing.T) { if testing.Short() { t.Skip() } tree := fixtures.NewTree("collect-manifests-large", "") defer tree.Squash() assert.NoError(t, tree.Populate()) assert.NoError(t, tree.PopulateN(10, 300)) names := lists.NewFileList().ListDir(tree.CWD) m, err := model.New(tree.CWD, false, 1024*1024, 16) assert.NoError(t, err) lx, err := m.FeedManifests(true, true, true, names...) assert.NoError(t, err) assert.Len(t, lx.Names(), 316) }
func Test_Transport_UploadChunk(t *testing.T) { root := "UploadChunk" halt, tree, ml, _, tr := seed(t, root) defer halt() mx, err := ml.FeedManifests(true, true, true, lists.NewFileList().ListDir(tree.CWD)...) assert.NoError(t, err) upload := transport.NewUpload(tr, time.Hour) missing, err := upload.SendCreateUpload(mx) assert.NoError(t, err) toUp := mx.GetChunkLinkSlice(missing) for _, tu := range toUp { err = upload.UploadChunk(tu.Name, tu.Chunk) assert.NoError(t, err) } }
func Test_Transport_Check(t *testing.T) { root := "Check" halt, tree, ml, _, tr := seed(t, root) defer halt() mx, err := ml.FeedManifests(true, true, true, lists.NewFileList().ListDir(tree.CWD)...) assert.NoError(t, err) err = tr.Upload(mx) assert.NoError(t, err) res, err := tr.Check([]proto.ID{ "eebd7b0c388d7f4d4ede4681b472969d5f09228c0473010d670a6918a3c05e79", "eebd7b0c388d7f4d4ede4681b472969d5f09228c0473010d670a6918a3c05e7a", }) assert.NoError(t, err) assert.Equal(t, []proto.ID{ "eebd7b0c388d7f4d4ede4681b472969d5f09228c0473010d670a6918a3c05e7a", }, res) }
func Benchmark_Model_FeedManifests_Large(b *testing.B) { tree := fixtures.NewTree("collect-manifests-large-B", "") defer tree.Squash() assert.NoError(b, tree.Populate()) assert.NoError(b, tree.PopulateN(1024*1024*500, 5)) names := lists.NewFileList().ListDir(tree.CWD) b.ResetTimer() for i := 0; i < b.N; i++ { b.StartTimer() m, err := model.New(tree.CWD, false, 1024*1024, 16) assert.NoError(b, err) lx, err := m.FeedManifests(true, true, true, names...) b.StopTimer() assert.NoError(b, err) for _, man := range lx { b.SetBytes(man.Size) } } }
func Test_Storage_Upload_FinishUpload(t *testing.T) { logx.SetLevel(logx.DEBUG) tree := fixtures.NewTree("finish-upload", "") defer tree.Squash() assert.NoError(t, tree.Populate()) m, err := model.New(tree.CWD, false, proto.CHUNK_SIZE, 16) assert.NoError(t, err) os.RemoveAll("testdata/finish-upload-storage") stor := storage.NewBlockStorage(&storage.BlockStorageOptions{ "testdata/finish-upload-storage", 2, 16, 32, }) defer os.RemoveAll("testdata/finish-upload-storage") names := lists.NewFileList().ListDir(tree.CWD) mans, err := m.FeedManifests(true, false, true, names...) uID, _ := uuid.NewV4() missing, err := stor.CreateUploadSession(*uID, mans.GetManifestSlice(), time.Hour) assert.NoError(t, err) toUpload := mans.GetChunkLinkSlice(missing) for _, v := range toUpload { r, err := os.Open(tree.BlobFilename(v.Name)) assert.NoError(t, err) defer r.Close() buf := make([]byte, v.Size) _, err = r.ReadAt(buf, v.Offset) err = stor.UploadChunk(*uID, v.Chunk.ID, bytes.NewReader(buf)) assert.NoError(t, err) } err = stor.FinishUploadSession(*uID) assert.NoError(t, err) }
func Test_Transport_Download(t *testing.T) { root := "Download" halt, tree, ml, _, tr := seed(t, root) defer halt() mx, err := ml.FeedManifests(true, true, true, lists.NewFileList().ListDir(tree.CWD)...) assert.NoError(t, err) err = tr.Upload(mx) assert.NoError(t, err) // Kill some blobs tree.KillBLOB("file-two.bin") tree.KillBLOB("one/file-two.bin") tree.KillBLOB("one/file-three.bin") err = tr.Download(lists.BlobMap{ "file-two.bin": mx["file-two.bin"], "one/file-two.bin": mx["one/file-two.bin"], "one/file-three.bin": mx["one/file-three.bin"], }) assert.NoError(t, err) }
func Test_Storage_Upload_CreateUpload(t *testing.T) { logx.SetLevel(logx.DEBUG) tree := fixtures.NewTree("create-upload", "") defer tree.Squash() assert.NoError(t, tree.Populate()) m, err := model.New(tree.CWD, false, proto.CHUNK_SIZE, 16) assert.NoError(t, err) os.RemoveAll("testdata/create-upload-storage") stor := storage.NewBlockStorage(&storage.BlockStorageOptions{ "testdata/create-upload-storage", 2, 16, 32, }) defer os.RemoveAll("testdata/create-upload-storage") names := lists.NewFileList().ListDir(tree.CWD) mans, err := m.FeedManifests(true, false, true, names...) uID, _ := uuid.NewV4() missing, err := stor.CreateUploadSession(*uID, mans.GetManifestSlice(), time.Hour) assert.NoError(t, err) assert.Len(t, missing, 4) }
func (c *LsCmd) Run(args ...string) (err error) { if c.NoBlobs && c.NoManifests { err = fmt.Errorf("both -no-blobs and -no-manifests are on") return } mod, err := model.New(c.WD, c.UseGit, proto.CHUNK_SIZE, c.PoolSize) if err != nil { return } feed := lists.NewFileList(args...).ListDir(c.WD) var dirty map[string]struct{} if c.UseGit { if feed, err = mod.Git.FilterByAttr("bar", feed...); err != nil { return } var dirtyst []string if dirtyst, err = mod.Git.DiffFiles(feed...); err != nil { return } dirty = map[string]struct{}{} for _, n := range dirtyst { dirty[n] = struct{}{} } } blobs, err := mod.FeedManifests(!c.NoBlobs, !c.NoManifests, true, feed...) if err != nil { return } missingOnRemote := map[proto.ID]struct{}{} if !c.NoRemote { var exists []proto.ID trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize) if exists, err = trans.Check(blobs.IDMap().IDs()); err != nil { return } for _, id := range exists { missingOnRemote[id] = struct{}{} } } // print this stuff w := new(tabwriter.Writer) w.Init(c.Stdout, 0, 8, 2, '\t', 0) var line []string toLine := func(term string) { line = append(line, term) } flushLine := func() { fmt.Fprintln(w, strings.Join(line, "\t")) line = []string{} } if !c.NoHeader { if !c.NoName { toLine("NAME") } if !c.NoBlobs && !c.NoManifests { toLine("BLOB") } if !c.NoRemote { toLine("SYNC") } if c.UseGit { toLine("GIT") } if !c.NoID { toLine("ID") } if !c.NoSize { toLine("SIZE") } flushLine() } var names sort.StringSlice for n, _ := range blobs { names = append(names, n) } names.Sort() var blobMap map[string]bool if !c.NoBlobs && !c.NoManifests { if blobMap, err = mod.IsBlobs(names...); err != nil { return } } for _, name := range names { if !c.NoName { toLine(name) } if !c.NoBlobs && !c.NoManifests { if blobMap[name] { toLine("yes") } else { toLine("no") } } if !c.NoRemote { if _, missing := missingOnRemote[blobs[name].ID]; missing { toLine("no") } else { toLine("yes") } } if c.UseGit { if _, bad := dirty[name]; !bad { toLine("ok") 
} else { toLine("dirty") } } if !c.NoID { if !c.FullID { toLine(blobs[name].ID.String()[:12]) } else { toLine(blobs[name].ID.String()) } } if !c.NoSize { toLine(fmt.Sprintf("%d", blobs[name].Size)) } flushLine() } w.Flush() return }
// Test_Git_Divert1 exercises the full divert workflow: snapshot the master
// tree's manifests, divert two paths to branch "other", write new blobs
// there, commit/cleanup the diversion, and verify master is unchanged while
// "other" holds the new content.
func Test_Git_Divert1(t *testing.T) {
	tree := fixtures.NewTree("Git_divert", "")
	assert.NoError(t, tree.Populate())
	defer tree.Squash()
	g, err := gitFixture(tree)
	assert.NoError(t, err)
	// logx.SetLevel(logx.TRACE)
	// get blobmap for further checks
	mod, err := model.New(tree.CWD, false, proto.CHUNK_SIZE, 16)
	assert.NoError(t, err)
	names := lists.NewFileList().ListDir(tree.CWD)
	mans1, err := mod.FeedManifests(true, true, true, names...)
	assert.NoError(t, err)
	// Run divert on "in-other" and "one"
	divert := git.NewDivert(g)
	spec, err := divert.PrepareBegin("other", "in-other", "one", "two/file-four with spaces.bin")
	assert.NoError(t, err)
	err = divert.Begin(spec)
	assert.NoError(t, err)
	// Make two blobs and collect their manifests
	bn1 := filepath.Join(tree.CWD, "in-other", "blob.bin")
	bn2 := filepath.Join(tree.CWD, "one", "file-one.bin")
	// NOTE(review): MakeNamedBLOB results are ignored here — if blob
	// creation can fail these should be asserted; confirm fixture contract.
	fixtures.MakeNamedBLOB(bn1, 110)
	fixtures.MakeNamedBLOB(bn2, 200)
	oMan1, err := fixtures.NewShadowFromFile(bn1)
	assert.NoError(t, err)
	oMan2, err := fixtures.NewShadowFromFile(bn2)
	assert.NoError(t, err)
	// commit: re-read the persisted spec, commit the diverted files to
	// "other", then tear down the diversion state.
	spec1, err := divert.ReadSpec()
	assert.NoError(t, err)
	err = divert.Commit(spec1, "from-master")
	assert.NoError(t, err)
	err = divert.Cleanup(spec1)
	assert.NoError(t, err)
	err = divert.CleanSpec()
	assert.NoError(t, err)
	// Final checks: we must be back on master after the diversion.
	branch, _, err := g.GetBranches()
	assert.NoError(t, err)
	assert.Equal(t, "master", branch)
	// check master files: manifests must be identical to the pre-divert
	// snapshot, i.e. master was untouched.
	names = lists.NewFileList().ListDir(tree.CWD)
	mans2, err := mod.FeedManifests(true, true, true, names...)
	assert.NoError(t, err)
	assert.EqualValues(t, mans1, mans2)
	// check stored branch: switching to "other" must expose the two blobs
	// written during the diversion, byte-for-byte.
	err = g.Checkout("other")
	assert.NoError(t, err)
	oMan1p, err := fixtures.NewShadowFromFile(bn1)
	assert.NoError(t, err)
	assert.EqualValues(t, oMan1, oMan1p)
	oMan2p, err := fixtures.NewShadowFromFile(bn2)
	assert.NoError(t, err)
	assert.EqualValues(t, oMan2, oMan2p)
}
func (c *SpecExportCmd) Run(args ...string) (err error) { var mod *model.Model if mod, err = model.New(c.WD, c.UseGit, c.ChunkSize, c.PoolSize); err != nil { return } feed := lists.NewFileList(args...).ListDir(c.WD) isDirty, dirty, err := mod.Check(feed...) if err != nil { return } if isDirty { err = fmt.Errorf("dirty files in working tree %s", dirty) return } if c.UseGit { // filter by attrs feed, err = mod.Git.FilterByAttr("bar", feed...) } blobs, err := mod.FeedManifests(true, true, true, feed...) if err != nil { return } // make specmap nameMap := map[string]proto.ID{} for name, m := range blobs { nameMap[name] = m.ID } spec, err := proto.NewSpec(time.Now().UnixNano(), nameMap, []string{}) if err != nil { return } if c.DoCC { ccName := fmt.Sprintf("bar-spec-%d-%s.json", time.Now().UnixNano(), spec.ID) logx.Infof("storing carbon copy to %s", ccName) ccf, err := os.Create(lists.OSFromSlash(lists.OSJoin(c.WD, ccName))) if err != nil { return err } defer ccf.Close() if err = json.NewEncoder(ccf).Encode(&spec); err != nil { return err } } if !c.Upload { err = json.NewEncoder(c.Stdout).Encode(&spec) return } trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize) if err = trans.UploadSpec(spec); err != nil { return } fmt.Fprint(c.Stdout, spec.ID) return }