// Fetch and report completion of each OID to a channel (optional, pass nil to skip) func fetchAndReportToChan(pointers []*lfs.WrappedPointer, include, exclude []string, out chan<- *lfs.WrappedPointer) { totalSize := int64(0) for _, p := range pointers { totalSize += p.Size } q := lfs.NewDownloadQueue(len(pointers), totalSize, false) for _, p := range pointers { // Only add to download queue if local file is not the right size already // This avoids previous case of over-reporting a requirement for files we already have // which would only be skipped by PointerSmudgeObject later passFilter := lfs.FilenamePassesIncludeExcludeFilter(p.Name, include, exclude) if !lfs.ObjectExistsOfSize(p.Oid, p.Size) && passFilter { q.Add(lfs.NewDownloadable(p)) } else { // If we already have it, or it won't be fetched // report it to chan immediately to support pull/checkout if out != nil { out <- p } } } if out != nil { dlwatch := q.Watch() go func() { // fetch only reports single OID, but OID *might* be referenced by multiple // WrappedPointers if same content is at multiple paths, so map oid->slice oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers)) for _, pointer := range pointers { plist := oidToPointers[pointer.Oid] oidToPointers[pointer.Oid] = append(plist, pointer) } for oid := range dlwatch { plist, ok := oidToPointers[oid] if !ok { continue } for _, p := range plist { out <- p } } close(out) }() } processQueue := time.Now() q.Wait() tracerx.PerformanceSince("process queue", processQueue) }
func fetchCommand(cmd *cobra.Command, args []string) { var ref string var err error if len(args) == 1 { ref = args[0] } else { ref, err = git.CurrentRef() if err != nil { Panic(err, "Could not fetch") } } pointers, err := lfs.ScanRefs(ref, "") if err != nil { Panic(err, "Could not scan for Git LFS files") } q := lfs.NewDownloadQueue(lfs.Config.ConcurrentTransfers(), len(pointers)) for _, p := range pointers { q.Add(lfs.NewDownloadable(p)) } target, err := git.ResolveRef(ref) if err != nil { Panic(err, "Could not resolve git ref") } current, err := git.CurrentRef() if err != nil { Panic(err, "Could not fetch the current git ref") } if target == current { // We just downloaded the files for the current ref, we can copy them into // the working directory and update the git index. We're doing this in a // goroutine so they can be copied as they come in, for efficiency. watch := q.Watch() go func() { files := make(map[string]*lfs.WrappedPointer, len(pointers)) for _, pointer := range pointers { files[pointer.Oid] = pointer } // Fire up the update-index command cmd := exec.Command("git", "update-index", "-q", "--refresh", "--stdin") stdin, err := cmd.StdinPipe() if err != nil { Panic(err, "Could not update the index") } if err := cmd.Start(); err != nil { Panic(err, "Could not update the index") } // As files come in, write them to the wd and update the index for oid := range watch { pointer, ok := files[oid] if !ok { continue } file, err := os.Create(pointer.Name) if err != nil { Panic(err, "Could not create working directory file") } if err := lfs.PointerSmudge(file, pointer.Pointer, pointer.Name, nil); err != nil { Panic(err, "Could not write working directory file") } file.Close() stdin.Write([]byte(pointer.Name + "\n")) } stdin.Close() if err := cmd.Wait(); err != nil { Panic(err, "Error updating the git index") } }() processQueue := time.Now() q.Process() tracerx.PerformanceSince("process queue", processQueue) } }
// Fetch and report completion of each OID to a channel (optional, pass nil to skip)
// Returns true if all completed with no errors, false if errors were written to stderr/log
//
// Objects that are already present locally (or filtered out) are reported on
// out immediately; the rest are reported as their downloads complete. When out
// is non-nil it is closed by this function's goroutine after all completions
// have been relayed.
func fetchAndReportToChan(allpointers []*lfs.WrappedPointer, include, exclude []string, out chan<- *lfs.WrappedPointer) bool {
	// Lazily initialize the current remote.
	if len(cfg.CurrentRemote) == 0 {
		// Actively find the default remote, don't just assume origin
		defaultRemote, err := git.DefaultRemote()
		if err != nil {
			Exit("No default remote")
		}
		cfg.CurrentRemote = defaultRemote
	}

	// Partition pointers into those already satisfied locally ("ready") and
	// those that still need downloading, with the total byte size of the
	// latter (based on how the values are used below — readyAndMissingPointers
	// is defined elsewhere; confirm against its implementation).
	ready, pointers, totalSize := readyAndMissingPointers(allpointers, include, exclude)
	q := lfs.NewDownloadQueue(len(pointers), totalSize, false)

	if out != nil {
		// If we already have it, or it won't be fetched
		// report it to chan immediately to support pull/checkout
		for _, p := range ready {
			out <- p
		}

		// NOTE(review): Watch() is subscribed before any Add() below,
		// presumably so no completion event can be missed — keep this order.
		dlwatch := q.Watch()

		go func() {
			// fetch only reports single OID, but OID *might* be referenced by multiple
			// WrappedPointers if same content is at multiple paths, so map oid->slice
			oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers))
			for _, pointer := range pointers {
				plist := oidToPointers[pointer.Oid]
				oidToPointers[pointer.Oid] = append(plist, pointer)
			}

			// Relay every pointer that shares each completed OID, then close
			// out so downstream consumers (pull/checkout) know we're done.
			for oid := range dlwatch {
				plist, ok := oidToPointers[oid]
				if !ok {
					continue
				}
				for _, p := range plist {
					out <- p
				}
			}
			close(out)
		}()

	}

	// Queue a download for each missing object.
	for _, p := range pointers {
		tracerx.Printf("fetch %v [%v]", p.Name, p.Oid)
		q.Add(lfs.NewDownloadable(p))
	}

	processQueue := time.Now()
	q.Wait()
	tracerx.PerformanceSince("process queue", processQueue)

	// Surface every queued transfer error; any error flips the result to false.
	ok := true
	for _, err := range q.Errors() {
		ok = false
		FullError(err)
	}
	return ok
}