func (c *GitDivertFinishCmd) Run(args ...string) (err error) {
	mod, err := model.New(c.WD, true, c.ChunkSize, c.PoolSize)
	if err != nil {
		return
	}
	divert := git.NewDivert(mod.Git)
	isInProgress, err := divert.IsInProgress()
	if err != nil {
		return
	}
	if !isInProgress {
		err = fmt.Errorf("diversion is not in progress")
		return
	}
	spec, err := divert.ReadSpec()
	if err != nil {
		return
	}
	if err = divert.Commit(spec, c.Message); err != nil {
		return
	}
	err = divert.Cleanup(spec)
	return
}
func (c *GitDivertBeginCmd) Run(args ...string) (err error) {
	logx.Debugf("beginning covert op %s", args)
	if len(args) == 0 {
		err = fmt.Errorf("no branch")
		return
	}
	branch := args[0]
	names := args[1:]

	mod, err := model.New(c.WD, true, c.ChunkSize, c.PoolSize)
	if err != nil {
		return
	}
	divert := git.NewDivert(mod.Git)

	var spec git.DivertSpec
	if spec, err = divert.PrepareBegin(branch, names...); err != nil {
		return
	}
	err = divert.Begin(spec)
	return
}
func Test_Assembler_StoreChunk(t *testing.T) {
	wd, _ := os.Getwd()
	wd = filepath.Join(wd, "testdata", "assembler-StoreChunk")
	m, err := model.New(wd, false, proto.CHUNK_SIZE, 128)
	assert.NoError(t, err)

	data := []byte("mama myla ramu")
	hasher := sha3.New256()
	_, err = hasher.Write(data)
	assert.NoError(t, err)
	id := proto.ID(hex.EncodeToString(hasher.Sum(nil)))

	a, err := model.NewAssembler(m)
	assert.NoError(t, err)
	defer a.Close()

	err = a.StoreChunk(bytes.NewReader(data), id)
	assert.NoError(t, err)

	// check stored chunk
	f, err := os.Open(filepath.Join(a.Where, id.String()))
	assert.NoError(t, err)
	defer f.Close()
	defer os.Remove(filepath.Join(a.Where, id.String()))

	r2, err := ioutil.ReadAll(f)
	assert.NoError(t, err)
	assert.Equal(t, data, r2)
}
func (c *PingCmd) Run(args ...string) (err error) {
	var mod *model.Model
	if mod, err = model.New(c.WD, false, c.ChunkSize, c.PoolSize); err != nil {
		return
	}
	trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize)

	res, err := trans.ServerInfo()
	if err != nil {
		return
	}
	logx.Info(res)
	return
}
func Test_Model_IsBlobs(t *testing.T) {
	tree := fixtures.NewTree("is-blob", "")
	assert.NoError(t, tree.Populate())
	defer tree.Squash()

	names := lists.NewFileList().ListDir(tree.CWD)

	m, err := model.New(tree.CWD, false, 1024*1024, 16)
	assert.NoError(t, err)

	_, err = m.IsBlobs(names...)
	assert.NoError(t, err)
}
func Test_Model_FeedManifests_Nil(t *testing.T) {
	tree := fixtures.NewTree("feed-manifests", "")
	assert.NoError(t, tree.Populate())
	defer tree.Squash()

	names := lists.NewFileList().ListDir(tree.CWD)
	tree.KillBLOB("file-one.bin")

	m, err := model.New(tree.CWD, false, 1024*1024, 16)
	assert.NoError(t, err)

	lx, err := m.FeedManifests(true, true, false, names...)
	assert.Error(t, err)
	assert.Len(t, lx.Names(), 15)
}
func Test_Transport_ServerInfo(t *testing.T) {
	root := "testdata-Ping"
	srv, err := fixtures.NewFixtureServer(root)
	assert.NoError(t, err)
	defer srv.Stop()

	mod, err := model.New("", false, proto.CHUNK_SIZE, 16)
	assert.NoError(t, err)

	tr := transport.NewTransport(mod, srv.HTTPEndpoint, srv.RPCEndpoints[0], 16)
	defer tr.Close()

	res, err := tr.ServerInfo()
	assert.NoError(t, err)
	assert.Equal(t, int64(1024*1024*2), res.ChunkSize)
}
func (c *UpCmd) Run(args ...string) (err error) {
	var mod *model.Model
	if mod, err = model.New(c.WD, c.UseGit, c.ChunkSize, c.PoolSize); err != nil {
		return
	}
	feed := lists.NewFileList(args...).ListDir(c.WD)

	isDirty, dirty, err := mod.Check(feed...)
	if err != nil {
		return
	}
	if isDirty {
		err = fmt.Errorf("dirty files in working tree %s", dirty)
		return
	}

	if c.UseGit {
		// filter by attrs
		if feed, err = mod.Git.FilterByAttr("bar", feed...); err != nil {
			return
		}
	}

	blobs, err := mod.FeedManifests(true, false, true, feed...)
	if err != nil {
		return
	}
	logx.Debugf("collected blobs %s", blobs.IDMap())

	trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize)
	if err = trans.Upload(blobs); err != nil {
		return
	}

	if c.Squash {
		if err = mod.SquashBlobs(blobs); err != nil {
			return
		}
		if c.UseGit {
			err = mod.Git.UpdateIndex(blobs.Names()...)
		}
	}
	return
}
func (c *GitDivertPushCmd) Run(args ...string) (err error) {
	mod, err := model.New(c.WD, true, c.ChunkSize, c.PoolSize)
	if err != nil {
		return
	}

	var upstream, branch string
	if len(args) == 0 {
		err = fmt.Errorf("no upstream and/or branch provided")
		return
	}
	if len(args) == 1 {
		upstream = "origin"
		branch = args[0]
	} else {
		upstream = args[0]
		branch = args[1]
	}

	// checks
	current, branches, err := mod.Git.GetBranches()
	if err != nil {
		return
	}
	if branch == current {
		err = fmt.Errorf("cannot push current branch. use `git push ...`")
		return
	}
	var exists bool
	for _, i := range branches {
		if branch == i {
			exists = true
			break
		}
	}
	if !exists {
		err = fmt.Errorf("branch %s does not exist", branch)
		return
	}

	if err = mod.Git.Push(upstream, branch); err != nil {
		return
	}
	logx.Debugf("%s/%s pushed", upstream, branch)
	return
}
func Test_Assembler_Assemble(t *testing.T) {
	tree := fixtures.NewTree("Assembler", "")
	assert.NoError(t, tree.Populate())
	defer tree.Squash()

	ml, err := model.New(tree.CWD, false, proto.CHUNK_SIZE, 16)
	assert.NoError(t, err)

	names := []string{
		"file-two.bin",
		"one/file-two.bin",
		"one/file-three.bin",
	}

	// get manifests
	mx, err := ml.FeedManifests(true, true, true, lists.NewFileList(names...).ListDir(tree.CWD)...)
	assert.NoError(t, err)

	a, err := model.NewAssembler(ml)
	assert.NoError(t, err)

	for name, man := range mx {
		f, err := os.Open(filepath.Join(tree.CWD, name))
		assert.NoError(t, err)
		for _, chunk := range man.Chunks {
			buf := make([]byte, chunk.Size)
			_, err = f.Read(buf)
			assert.NoError(t, err)
			err = a.StoreChunk(bytes.NewReader(buf), chunk.ID)
			assert.NoError(t, err)
		}
		f.Close()
	}

	// Kill some blobs
	tree.KillBLOB("file-two.bin")
	tree.KillBLOB("one/file-two.bin")

	err = a.Done(mx)
	assert.NoError(t, err)

	mx1, err := ml.FeedManifests(true, true, true, lists.NewFileList(names...).ListDir(tree.CWD)...)
	assert.NoError(t, err)
	assert.Equal(t, mx, mx1)
}
func (c *GitCleanCmd) Run(args ...string) (err error) {
	mod, err := model.New(c.WD, true, c.ChunkSize, c.PoolSize)
	if err != nil {
		return
	}

	var name string
	if len(args) > 0 {
		name = args[0]
	}

	// check divert
	divert := git.NewDivert(mod.Git)
	isInProgress, err := divert.IsInProgress()
	if err != nil {
		return
	}
	if isInProgress {
		var spec git.DivertSpec
		if spec, err = divert.ReadSpec(); err != nil {
			return
		}
		var exists bool
		for _, n := range spec.TargetFiles {
			if n == name {
				exists = true
				break
			}
		}
		if !exists {
			err = fmt.Errorf("won't clean non-target file %s while divert is in progress", name)
			return
		}
	}

	s, err := mod.GetManifest(name, c.Stdin)
	if err != nil {
		return
	}
	logx.Debugf("%s %s", name, s.ID)

	if c.Id {
		fmt.Fprintf(c.Stdout, "%s", s.ID)
	} else {
		err = s.Serialize(c.Stdout)
	}
	return
}
func (c *GitPreCommitCmd) Run(args ...string) (err error) {
	var filenames []string
	var mod *model.Model
	if mod, err = model.New(c.WD, true, c.ChunkSize, c.PoolSize); err != nil {
		return
	}

	// While a divert is in progress, restrict the check to its target filenames.
	divert := git.NewDivert(mod.Git)
	isInDivert, err := divert.IsInProgress()
	if err != nil {
		return
	}
	if isInDivert {
		var spec git.DivertSpec
		if spec, err = divert.ReadSpec(); err != nil {
			return
		}
		filenames = spec.TargetFiles
	}

	isDirty, dirty, err := mod.Check(filenames...)
	if err != nil {
		return
	}
	if isDirty {
		err = fmt.Errorf("dirty files in working tree %s", dirty)
		return
	}

	feedR, err := mod.Git.Diff(filenames...)
	if err != nil {
		return
	}
	blobs, err := mod.Git.ManifestsFromDiff(feedR)
	if err != nil {
		return
	}

	trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize)
	err = trans.Upload(blobs)
	return
}
func seed(t *testing.T, root string) (halt func(), tree *fixtures.Tree, ml *model.Model, srv *fixtures.FixtureServer, trans *transport.Transport) {
	tree = fixtures.NewTree(root, "")
	assert.NoError(t, tree.Populate())

	ml, err := model.New(tree.CWD, false, proto.CHUNK_SIZE, 32)
	assert.NoError(t, err)

	srv, err = fixtures.NewFixtureServer(root)
	assert.NoError(t, err)

	trans = transport.NewTransport(ml, srv.HTTPEndpoint, srv.RPCEndpoints[0], 16)

	halt = func() {
		trans.Close()
		srv.Stop()
		tree.Squash()
	}
	return
}
func Test_Model_FeedManifests_Many(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	tree := fixtures.NewTree("collect-manifests-large", "")
	defer tree.Squash()
	assert.NoError(t, tree.Populate())
	assert.NoError(t, tree.PopulateN(10, 300))

	names := lists.NewFileList().ListDir(tree.CWD)

	m, err := model.New(tree.CWD, false, 1024*1024, 16)
	assert.NoError(t, err)

	lx, err := m.FeedManifests(true, true, true, names...)
	assert.NoError(t, err)
	assert.Len(t, lx.Names(), 316)
}
func (c *GitInstallCmd) Run(args ...string) (err error) {
	var mod *model.Model
	if mod, err = model.New(c.WD, true, c.ChunkSize, c.PoolSize); err != nil {
		return
	}
	defer mod.Close()

	trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize)
	defer trans.Close()

	info, err := trans.ServerInfo()
	if err != nil {
		return
	}

	config := git.NewConfig(info, mod.Git)
	err = config.Install(c.Log)
	return
}
func Benchmark_Model_FeedManifests_Large(b *testing.B) {
	tree := fixtures.NewTree("collect-manifests-large-B", "")
	defer tree.Squash()
	assert.NoError(b, tree.Populate())
	assert.NoError(b, tree.PopulateN(1024*1024*500, 5))

	names := lists.NewFileList().ListDir(tree.CWD)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		m, err := model.New(tree.CWD, false, 1024*1024, 16)
		assert.NoError(b, err)
		lx, err := m.FeedManifests(true, true, true, names...)
		b.StopTimer()
		assert.NoError(b, err)
		for _, man := range lx {
			b.SetBytes(man.Size)
		}
	}
}
func Test_Storage_Upload_FinishUpload(t *testing.T) {
	logx.SetLevel(logx.DEBUG)
	tree := fixtures.NewTree("finish-upload", "")
	defer tree.Squash()
	assert.NoError(t, tree.Populate())

	m, err := model.New(tree.CWD, false, proto.CHUNK_SIZE, 16)
	assert.NoError(t, err)

	os.RemoveAll("testdata/finish-upload-storage")
	stor := storage.NewBlockStorage(&storage.BlockStorageOptions{
		"testdata/finish-upload-storage", 2, 16, 32,
	})
	defer os.RemoveAll("testdata/finish-upload-storage")

	names := lists.NewFileList().ListDir(tree.CWD)
	mans, err := m.FeedManifests(true, false, true, names...)
	assert.NoError(t, err)

	uID, _ := uuid.NewV4()
	missing, err := stor.CreateUploadSession(*uID, mans.GetManifestSlice(), time.Hour)
	assert.NoError(t, err)

	toUpload := mans.GetChunkLinkSlice(missing)
	for _, v := range toUpload {
		r, err := os.Open(tree.BlobFilename(v.Name))
		assert.NoError(t, err)
		defer r.Close()

		buf := make([]byte, v.Size)
		_, err = r.ReadAt(buf, v.Offset)
		assert.NoError(t, err)

		err = stor.UploadChunk(*uID, v.Chunk.ID, bytes.NewReader(buf))
		assert.NoError(t, err)
	}

	err = stor.FinishUploadSession(*uID)
	assert.NoError(t, err)
}
func (c *GitDivertStatusCmd) Run(args ...string) (err error) {
	mod, err := model.New(c.WD, true, c.ChunkSize, c.PoolSize)
	if err != nil {
		return
	}
	divert := git.NewDivert(mod.Git)
	isInProgress, err := divert.IsInProgress()
	if err != nil {
		return
	}
	if !isInProgress {
		fmt.Fprintln(c.Stdout, "divert not in progress")
		return
	}
	spec, err := divert.ReadSpec()
	if err != nil {
		return
	}
	fmt.Fprintln(c.Stdout, "DIVERT IN PROGRESS")
	fmt.Fprintln(c.Stdout, spec)
	return
}
func Test_Storage_Upload_CreateUpload(t *testing.T) {
	logx.SetLevel(logx.DEBUG)
	tree := fixtures.NewTree("create-upload", "")
	defer tree.Squash()
	assert.NoError(t, tree.Populate())

	m, err := model.New(tree.CWD, false, proto.CHUNK_SIZE, 16)
	assert.NoError(t, err)

	os.RemoveAll("testdata/create-upload-storage")
	stor := storage.NewBlockStorage(&storage.BlockStorageOptions{
		"testdata/create-upload-storage", 2, 16, 32,
	})
	defer os.RemoveAll("testdata/create-upload-storage")

	names := lists.NewFileList().ListDir(tree.CWD)
	mans, err := m.FeedManifests(true, false, true, names...)
	assert.NoError(t, err)

	uID, _ := uuid.NewV4()
	missing, err := stor.CreateUploadSession(*uID, mans.GetManifestSlice(), time.Hour)
	assert.NoError(t, err)
	assert.Len(t, missing, 4)
}
func (c *SpecImportCmd) Run(args ...string) (err error) {
	var spec proto.Spec
	mod, err := model.New(c.WD, c.UseGit, c.ChunkSize, c.PoolSize)
	if err != nil {
		return
	}
	trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize)

	if c.Raw {
		if err = json.NewDecoder(c.Stdin).Decode(&spec); err != nil {
			return
		}
	} else {
		// tree spec types
		id := proto.ID(args[0])
		if spec, err = trans.GetSpec(id); err != nil {
			logx.Debug(spec, err)
			return
		}
	}

	idm := lists.IDMap{}
	for n, id := range spec.BLOBs {
		idm[id] = append(idm[id], n)
	}

	// request manifests for spec blobs
	mans, err := trans.GetManifests(idm.IDs())
	if err != nil {
		return
	}
	feed := idm.ToBlobMap(mans)
	names := feed.Names()
	if len(names) == 0 {
		logx.Fatalf("no manifests on server %s", names)
	}
	logx.Debugf("importing %s", names)

	if c.UseGit {
		// If git is used - check names for attrs
		byAttr, err := mod.Git.FilterByAttr("bar", names...)
		if err != nil {
			return err
		}
		diff := []string{}
		attrs := map[string]struct{}{}
		for _, x := range byAttr {
			attrs[x] = struct{}{}
		}
		for _, x := range names {
			if _, ok := attrs[x]; !ok {
				diff = append(diff, x)
			}
		}
		if len(diff) > 0 {
			return fmt.Errorf("some spec blobs are not under bar control %s", diff)
		}
	}

	// get stored links, ignore errors
	stored, _ := mod.FeedManifests(true, true, false, names...)
	logx.Debugf("already stored %s", stored.Names())

	// squash only blobs that are absent or changed locally
	toSquash := lists.BlobMap{}
	for n, m := range feed {
		m1, ok := stored[filepath.FromSlash(n)]
		if !ok || m.ID != m1.ID {
			toSquash[n] = feed[n]
		}
	}

	if c.Squash {
		if err = mod.SquashBlobs(toSquash); err != nil {
			return
		}
	}
	for k := range feed {
		fmt.Fprintf(c.Stdout, "%s ", filepath.FromSlash(k))
	}
	return
}
func (c *SpecExportCmd) Run(args ...string) (err error) {
	var mod *model.Model
	if mod, err = model.New(c.WD, c.UseGit, c.ChunkSize, c.PoolSize); err != nil {
		return
	}
	feed := lists.NewFileList(args...).ListDir(c.WD)

	isDirty, dirty, err := mod.Check(feed...)
	if err != nil {
		return
	}
	if isDirty {
		err = fmt.Errorf("dirty files in working tree %s", dirty)
		return
	}

	if c.UseGit {
		// filter by attrs
		if feed, err = mod.Git.FilterByAttr("bar", feed...); err != nil {
			return
		}
	}

	blobs, err := mod.FeedManifests(true, true, true, feed...)
	if err != nil {
		return
	}

	// make specmap
	nameMap := map[string]proto.ID{}
	for name, m := range blobs {
		nameMap[name] = m.ID
	}

	spec, err := proto.NewSpec(time.Now().UnixNano(), nameMap, []string{})
	if err != nil {
		return
	}

	if c.DoCC {
		ccName := fmt.Sprintf("bar-spec-%d-%s.json", time.Now().UnixNano(), spec.ID)
		logx.Infof("storing carbon copy to %s", ccName)
		ccf, err := os.Create(lists.OSFromSlash(lists.OSJoin(c.WD, ccName)))
		if err != nil {
			return err
		}
		defer ccf.Close()
		if err = json.NewEncoder(ccf).Encode(&spec); err != nil {
			return err
		}
	}

	if !c.Upload {
		err = json.NewEncoder(c.Stdout).Encode(&spec)
		return
	}

	trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize)
	if err = trans.UploadSpec(spec); err != nil {
		return
	}
	fmt.Fprint(c.Stdout, spec.ID)
	return
}
func (c *LsCmd) Run(args ...string) (err error) {
	if c.NoBlobs && c.NoManifests {
		err = fmt.Errorf("both -no-blobs and -no-manifests are on")
		return
	}
	mod, err := model.New(c.WD, c.UseGit, proto.CHUNK_SIZE, c.PoolSize)
	if err != nil {
		return
	}
	feed := lists.NewFileList(args...).ListDir(c.WD)

	var dirty map[string]struct{}
	if c.UseGit {
		if feed, err = mod.Git.FilterByAttr("bar", feed...); err != nil {
			return
		}
		var dirtyst []string
		if dirtyst, err = mod.Git.DiffFiles(feed...); err != nil {
			return
		}
		dirty = map[string]struct{}{}
		for _, n := range dirtyst {
			dirty[n] = struct{}{}
		}
	}

	blobs, err := mod.FeedManifests(!c.NoBlobs, !c.NoManifests, true, feed...)
	if err != nil {
		return
	}

	missingOnRemote := map[proto.ID]struct{}{}
	if !c.NoRemote {
		var exists []proto.ID
		trans := transport.NewTransport(mod, "", c.Endpoint, c.PoolSize)
		if exists, err = trans.Check(blobs.IDMap().IDs()); err != nil {
			return
		}
		for _, id := range exists {
			missingOnRemote[id] = struct{}{}
		}
	}

	// print this stuff
	w := new(tabwriter.Writer)
	w.Init(c.Stdout, 0, 8, 2, '\t', 0)

	var line []string
	toLine := func(term string) {
		line = append(line, term)
	}
	flushLine := func() {
		fmt.Fprintln(w, strings.Join(line, "\t"))
		line = []string{}
	}

	if !c.NoHeader {
		if !c.NoName {
			toLine("NAME")
		}
		if !c.NoBlobs && !c.NoManifests {
			toLine("BLOB")
		}
		if !c.NoRemote {
			toLine("SYNC")
		}
		if c.UseGit {
			toLine("GIT")
		}
		if !c.NoID {
			toLine("ID")
		}
		if !c.NoSize {
			toLine("SIZE")
		}
		flushLine()
	}

	var names sort.StringSlice
	for n := range blobs {
		names = append(names, n)
	}
	names.Sort()

	var blobMap map[string]bool
	if !c.NoBlobs && !c.NoManifests {
		if blobMap, err = mod.IsBlobs(names...); err != nil {
			return
		}
	}

	for _, name := range names {
		if !c.NoName {
			toLine(name)
		}
		if !c.NoBlobs && !c.NoManifests {
			if blobMap[name] {
				toLine("yes")
			} else {
				toLine("no")
			}
		}
		if !c.NoRemote {
			if _, missing := missingOnRemote[blobs[name].ID]; missing {
				toLine("no")
			} else {
				toLine("yes")
			}
		}
		if c.UseGit {
			if _, bad := dirty[name]; !bad {
				toLine("ok")
			} else {
				toLine("dirty")
			}
		}
		if !c.NoID {
			if !c.FullID {
				toLine(blobs[name].ID.String()[:12])
			} else {
				toLine(blobs[name].ID.String())
			}
		}
		if !c.NoSize {
			toLine(fmt.Sprintf("%d", blobs[name].Size))
		}
		flushLine()
	}
	w.Flush()
	return
}
func Test_Git_Divert1(t *testing.T) {
	tree := fixtures.NewTree("Git_divert", "")
	assert.NoError(t, tree.Populate())
	defer tree.Squash()

	g, err := gitFixture(tree)
	assert.NoError(t, err)
	// logx.SetLevel(logx.TRACE)

	// get blobmap for further checks
	mod, err := model.New(tree.CWD, false, proto.CHUNK_SIZE, 16)
	assert.NoError(t, err)
	names := lists.NewFileList().ListDir(tree.CWD)
	mans1, err := mod.FeedManifests(true, true, true, names...)
	assert.NoError(t, err)

	// Run divert on "in-other" and "one"
	divert := git.NewDivert(g)
	spec, err := divert.PrepareBegin("other", "in-other", "one", "two/file-four with spaces.bin")
	assert.NoError(t, err)
	err = divert.Begin(spec)
	assert.NoError(t, err)

	// Make two blobs and collect their manifests
	bn1 := filepath.Join(tree.CWD, "in-other", "blob.bin")
	bn2 := filepath.Join(tree.CWD, "one", "file-one.bin")
	fixtures.MakeNamedBLOB(bn1, 110)
	fixtures.MakeNamedBLOB(bn2, 200)
	oMan1, err := fixtures.NewShadowFromFile(bn1)
	assert.NoError(t, err)
	oMan2, err := fixtures.NewShadowFromFile(bn2)
	assert.NoError(t, err)

	// commit
	spec1, err := divert.ReadSpec()
	assert.NoError(t, err)
	err = divert.Commit(spec1, "from-master")
	assert.NoError(t, err)
	err = divert.Cleanup(spec1)
	assert.NoError(t, err)
	err = divert.CleanSpec()
	assert.NoError(t, err)

	// Final checks
	branch, _, err := g.GetBranches()
	assert.NoError(t, err)
	assert.Equal(t, "master", branch)

	// check master files
	names = lists.NewFileList().ListDir(tree.CWD)
	mans2, err := mod.FeedManifests(true, true, true, names...)
	assert.NoError(t, err)
	assert.EqualValues(t, mans1, mans2)

	// check stored branch
	err = g.Checkout("other")
	assert.NoError(t, err)
	oMan1p, err := fixtures.NewShadowFromFile(bn1)
	assert.NoError(t, err)
	assert.EqualValues(t, oMan1, oMan1p)
	oMan2p, err := fixtures.NewShadowFromFile(bn2)
	assert.NoError(t, err)
	assert.EqualValues(t, oMan2, oMan2p)
}