// ScanTree takes a ref and returns a slice of WrappedPointer objects in the tree at that ref // Differs from ScanRefs in that multiple files in the tree with the same content are all reported func ScanTree(ref string) ([]*WrappedPointer, error) { start := time.Now() defer func() { tracerx.PerformanceSince("scan", start) }() // We don't use the nameMap approach here since that's imprecise when >1 file // can be using the same content treeShas, err := lsTreeBlobs(ref) if err != nil { return nil, err } pointerc, err := catFileBatchTree(treeShas) if err != nil { return nil, err } pointers := make([]*WrappedPointer, 0) for p := range pointerc.Results { pointers = append(pointers, p) } err = pointerc.Wait() return pointers, err }
// ScanRefsToChan takes a ref and returns a channel of WrappedPointer objects // for all Git LFS pointers it finds for that ref. // Reports unique oids once only, not multiple times if >1 file uses the same content func ScanRefsToChan(refLeft, refRight string, opt *ScanRefsOptions) (*PointerChannelWrapper, error) { if opt == nil { opt = NewScanRefsOptions() } if refLeft == "" { opt.ScanMode = ScanAllMode } start := time.Now() defer func() { tracerx.PerformanceSince("scan", start) }() revs, err := revListShas(refLeft, refRight, opt) if err != nil { return nil, err } smallShas, err := catFileBatchCheck(revs) if err != nil { return nil, err } pointers, err := catFileBatch(smallShas) if err != nil { return nil, err } retchan := make(chan *WrappedPointer, chanBufSize) errchan := make(chan error, 1) go func() { for p := range pointers.Results { if name, ok := opt.GetName(p.Sha1); ok { p.Name = name } retchan <- p } err := pointers.Wait() if err != nil { errchan <- err } close(retchan) close(errchan) }() return NewPointerChannelWrapper(retchan, errchan), nil }
// ScanPreviousVersions scans changes reachable from ref (commit) back to since. // Returns pointers for *previous* versions that overlap that time. Does not // return pointers which were still in use at ref (use ScanRef for that) func ScanPreviousVersions(ref string, since time.Time) ([]*WrappedPointer, error) { start := time.Now() defer func() { tracerx.PerformanceSince("scan", start) }() pointerchan, err := ScanPreviousVersionsToChan(ref, since) if err != nil { return nil, err } pointers := make([]*WrappedPointer, 0, 10) for p := range pointerchan.Results { pointers = append(pointers, p) } err = pointerchan.Wait() return pointers, err }
// ScanUnpushed scans history for all LFS pointers which have been added but not // pushed to the named remote. remoteName can be left blank to mean 'any remote' func ScanUnpushed(remoteName string) ([]*WrappedPointer, error) { start := time.Now() defer func() { tracerx.PerformanceSince("scan", start) }() pointerchan, err := ScanUnpushedToChan(remoteName) if err != nil { return nil, err } pointers := make([]*WrappedPointer, 0, 10) for p := range pointerchan.Results { pointers = append(pointers, p) } err = pointerchan.Wait() return pointers, err }
// Fetch and report completion of each OID to a channel (optional, pass nil to skip)
// Returns true if all completed with no errors, false if errors were written to stderr/log
func fetchAndReportToChan(pointers []*lfs.WrappedPointer, include, exclude []string, out chan<- *lfs.WrappedPointer) bool {
	// Sum the sizes up front so the download queue can size its progress meter.
	totalSize := int64(0)
	for _, p := range pointers {
		totalSize += p.Size
	}
	q := lfs.NewDownloadQueue(len(pointers), totalSize, false)

	if out != nil {
		dlwatch := q.Watch()

		go func() {
			// fetch only reports single OID, but OID *might* be referenced by multiple
			// WrappedPointers if same content is at multiple paths, so map oid->slice
			oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers))
			for _, pointer := range pointers {
				plist := oidToPointers[pointer.Oid]
				oidToPointers[pointer.Oid] = append(plist, pointer)
			}

			// For every OID the queue reports complete, forward all of the
			// pointers (paths) that share that content.
			for oid := range dlwatch {
				plist, ok := oidToPointers[oid]
				if !ok {
					continue
				}
				for _, p := range plist {
					out <- p
				}
			}
			// Watch channel closed => queue finished; signal consumers.
			close(out)
		}()
	}

	for _, p := range pointers {
		// Only add to download queue if local file is not the right size already
		// This avoids previous case of over-reporting a requirement for files we already have
		// which would only be skipped by PointerSmudgeObject later
		passFilter := lfs.FilenamePassesIncludeExcludeFilter(p.Name, include, exclude)
		// NOTE(review): return value of LinkOrCopyFromReference is discarded —
		// confirm whether it returns an error that should be handled here.
		lfs.LinkOrCopyFromReference(p.Oid, p.Size)
		if !lfs.ObjectExistsOfSize(p.Oid, p.Size) && passFilter {
			tracerx.Printf("fetch %v [%v]", p.Name, p.Oid)
			q.Add(lfs.NewDownloadable(p))
		} else {
			// Ensure progress matches
			q.Skip(p.Size)
			if !passFilter {
				tracerx.Printf("Skipping %v [%v], include/exclude filters applied", p.Name, p.Oid)
			} else {
				tracerx.Printf("Skipping %v [%v], already exists", p.Name, p.Oid)
			}

			// If we already have it, or it won't be fetched
			// report it to chan immediately to support pull/checkout
			if out != nil {
				out <- p
			}
		}
	}

	processQueue := time.Now()
	// Block until all queued downloads (if any) have finished.
	q.Wait()
	tracerx.PerformanceSince("process queue", processQueue)

	ok := true
	for _, err := range q.Errors() {
		ok = false
		// NOTE(review): if ExitWithError terminates the process, only the first
		// queue error is ever reported and `return ok` is unreachable when
		// errors exist — confirm whether a non-fatal logger (cf. the sibling
		// implementation that calls FullError) was intended.
		ExitWithError(err)
	}
	return ok
}
// ScanIndex returns a slice of WrappedPointer objects for all // Git LFS pointers it finds in the index. // Reports unique oids once only, not multiple times if >1 file uses the same content func ScanIndex() ([]*WrappedPointer, error) { indexMap := &indexFileMap{ nameMap: make(map[string]*indexFile, 0), mutex: &sync.Mutex{}, } start := time.Now() defer func() { tracerx.PerformanceSince("scan-staging", start) }() revs, err := revListIndex(false, indexMap) if err != nil { return nil, err } cachedRevs, err := revListIndex(true, indexMap) if err != nil { return nil, err } allRevsErr := make(chan error, 5) // can be multiple errors below allRevsChan := make(chan string, 1) allRevs := NewStringChannelWrapper(allRevsChan, allRevsErr) go func() { seenRevs := make(map[string]bool, 0) for rev := range revs.Results { seenRevs[rev] = true allRevsChan <- rev } err := revs.Wait() if err != nil { allRevsErr <- err } for rev := range cachedRevs.Results { if _, ok := seenRevs[rev]; !ok { allRevsChan <- rev } } err = cachedRevs.Wait() if err != nil { allRevsErr <- err } close(allRevsChan) close(allRevsErr) }() smallShas, err := catFileBatchCheck(allRevs) if err != nil { return nil, err } pointerc, err := catFileBatch(smallShas) if err != nil { return nil, err } pointers := make([]*WrappedPointer, 0) for p := range pointerc.Results { if e, ok := indexMap.Get(p.Sha1); ok { p.Name = e.Name p.Status = e.Status p.SrcName = e.SrcName } pointers = append(pointers, p) } err = pointerc.Wait() return pointers, err }
// Fetch and report completion of each OID to a channel (optional, pass nil to skip)
// Returns true if all completed with no errors, false if errors were written to stderr/log
func fetchAndReportToChan(allpointers []*lfs.WrappedPointer, include, exclude []string, out chan<- *lfs.WrappedPointer) bool {
	// Lazily initialize the current remote.
	if len(cfg.CurrentRemote) == 0 {
		// Actively find the default remote, don't just assume origin
		defaultRemote, err := git.DefaultRemote()
		if err != nil {
			Exit("No default remote")
		}
		cfg.CurrentRemote = defaultRemote
	}

	// Partition input into pointers already present locally ("ready") and
	// those that must be downloaded, plus the total byte size of the latter.
	ready, pointers, totalSize := readyAndMissingPointers(allpointers, include, exclude)
	q := lfs.NewDownloadQueue(len(pointers), totalSize, false)

	if out != nil {
		// If we already have it, or it won't be fetched
		// report it to chan immediately to support pull/checkout
		for _, p := range ready {
			out <- p
		}

		dlwatch := q.Watch()

		go func() {
			// fetch only reports single OID, but OID *might* be referenced by multiple
			// WrappedPointers if same content is at multiple paths, so map oid->slice
			oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers))
			for _, pointer := range pointers {
				plist := oidToPointers[pointer.Oid]
				oidToPointers[pointer.Oid] = append(plist, pointer)
			}

			// Forward every pointer (path) that shares each completed OID.
			for oid := range dlwatch {
				plist, ok := oidToPointers[oid]
				if !ok {
					continue
				}
				for _, p := range plist {
					out <- p
				}
			}
			// Watch channel closed => queue finished; signal consumers.
			close(out)
		}()
	}

	// Queue every missing pointer for download.
	for _, p := range pointers {
		tracerx.Printf("fetch %v [%v]", p.Name, p.Oid)
		q.Add(lfs.NewDownloadable(p))
	}

	processQueue := time.Now()
	// Block until all queued downloads have finished.
	q.Wait()
	tracerx.PerformanceSince("process queue", processQueue)

	ok := true
	for _, err := range q.Errors() {
		ok = false
		// FullError reports the failure without aborting, so all queue
		// errors are surfaced before returning false.
		FullError(err)
	}
	return ok
}