// Background task, must call retainwait.Done() once at end
func pruneTaskCollectRetained(outRetainedObjects *lfs.StringSet, retainChan chan string,
	progressChan PruneProgressChan, retainwait *sync.WaitGroup) {

	defer retainwait.Done()

	for oid := range retainChan {
		// Add returns true only for newly seen oids, so each retained
		// object is counted once in the progress output
		if outRetainedObjects.Add(oid) {
			progressChan <- PruneProgress{PruneProgressTypeRetain, 1}
		}
	}
}
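// The function above is a single-consumer fan-in: many producer tasks send
// oids on retainChan, and this one goroutine dedupes them into the set, so
// no locking is needed around the StringSet. A minimal standalone sketch of
// the same idiom, with hypothetical names:
//
//	func collect(in <-chan string, out *lfs.StringSet, wg *sync.WaitGroup) {
//		defer wg.Done()
//		for v := range in {
//			if out.Add(v) {
//				// first time we've seen v
//			}
//		}
//	}
//
// The caller must close(in) once every producer has finished, or the range
// loop never terminates.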
// Background task, must call waitg.Done() once at end
func pruneTaskGetReachableObjects(outObjectSet *lfs.StringSet, errorChan chan error, waitg *sync.WaitGroup) {
	defer waitg.Done()

	// converts to `git rev-list --all`
	// We only pick up objects in real commits and not the reflog
	opts := lfs.NewScanRefsOptions()
	opts.ScanMode = lfs.ScanAllMode
	opts.SkipDeletedBlobs = false

	pointerchan, err := lfs.ScanRefsToChan("", "", opts)
	if err != nil {
		errorChan <- fmt.Errorf("Error scanning for reachable objects: %v", err)
		return
	}

	for p := range pointerchan {
		outObjectSet.Add(p.Oid)
	}
}
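// A hedged sketch of how a base task like the one above is driven (prune()
// below does this for several tasks at once; the names here are illustrative
// only):
//
//	var wg sync.WaitGroup
//	errCh := make(chan error, 10)
//	reachable := lfs.NewStringSetWithCapacity(100)
//	wg.Add(1) // Add before starting the goroutine, as prune() does
//	go pruneTaskGetReachableObjects(&reachable, errCh, &wg)
//	wg.Wait()    // the task has filled the set and sent any errors
//	close(errCh) // safe: no more senders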
func pruneCheckVerified(prunableObjects []string, reachableObjects, verifiedObjects lfs.StringSet) {
	// There's no issue if an object is not reachable and missing, only if reachable & missing
	var problems bytes.Buffer
	for _, oid := range prunableObjects {
		// Test verified first as most likely reachable
		if !verifiedObjects.Contains(oid) {
			if reachableObjects.Contains(oid) {
				problems.WriteString(fmt.Sprintf(" * %v\n", oid))
			} else {
				// Just to indicate why it doesn't matter that we didn't verify
				tracerx.Printf("UNREACHABLE: %v", oid)
			}
		}
	}

	// technically we could still prune the other oids, but this indicates a
	// more serious issue because the local state implies that these can be
	// deleted but that's incorrect; bad state has occurred somehow, might need
	// push --all to resolve
	if problems.Len() > 0 {
		Exit("Abort: these objects to be pruned are missing on remote:\n%v", problems.String())
	}
}
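// The outcome matrix pruneCheckVerified applies to each prunable oid:
//
//	verified on remote?   reachable from a commit?   result
//	yes                   any                        prune
//	no                    no                         prune (traced UNREACHABLE)
//	no                    yes                        abort the whole prune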
func prune(verifyRemote, dryRun, verbose bool) {
	localObjects := make([]localstorage.Object, 0, 100)
	retainedObjects := lfs.NewStringSetWithCapacity(100)
	var reachableObjects lfs.StringSet
	var taskwait sync.WaitGroup

	// Add all the base funcs to the waitgroup before starting them, in case
	// one completes really fast & hits 0 unexpectedly
	// each main process can Add() to the wg itself if it subdivides the task
	taskwait.Add(4) // 1..4: localObjects, current & recent refs, unpushed, worktree
	if verifyRemote {
		taskwait.Add(1) // 5
	}

	progressChan := make(PruneProgressChan, 100)

	// Collect errors
	errorChan := make(chan error, 10)
	var errorwait sync.WaitGroup
	errorwait.Add(1)
	var taskErrors []error
	go pruneTaskCollectErrors(&taskErrors, errorChan, &errorwait)

	// Populate the single list of local objects
	go pruneTaskGetLocalObjects(&localObjects, progressChan, &taskwait)

	// Now find files to be retained from many sources
	retainChan := make(chan string, 100)

	go pruneTaskGetRetainedCurrentAndRecentRefs(retainChan, errorChan, &taskwait)
	go pruneTaskGetRetainedUnpushed(retainChan, errorChan, &taskwait)
	go pruneTaskGetRetainedWorktree(retainChan, errorChan, &taskwait)
	if verifyRemote {
		reachableObjects = lfs.NewStringSetWithCapacity(100)
		go pruneTaskGetReachableObjects(&reachableObjects, errorChan, &taskwait)
	}

	// Now collect all the retained objects, on separate wait
	var retainwait sync.WaitGroup
	retainwait.Add(1)
	go pruneTaskCollectRetained(&retainedObjects, retainChan, progressChan, &retainwait)

	// Report progress
	var progresswait sync.WaitGroup
	progresswait.Add(1)
	go pruneTaskDisplayProgress(progressChan, &progresswait)

	taskwait.Wait()   // wait for subtasks
	close(retainChan) // triggers retain collector to end now all tasks have
	retainwait.Wait() // make sure all retained objects added

	close(errorChan) // triggers error collector to end now all tasks have
	errorwait.Wait() // make sure all errors have been processed
	pruneCheckErrors(taskErrors)

	prunableObjects := make([]string, 0, len(localObjects)/2)

	// Build list of prunables (also queue for verify at same time if applicable)
	var verifyQueue *lfs.TransferQueue
	var verifiedObjects lfs.StringSet
	var totalSize int64
	var verboseOutput bytes.Buffer
	if verifyRemote {
		lfs.Config.CurrentRemote = lfs.Config.FetchPruneConfig().PruneRemoteName
		// build queue now, no estimates or progress output
		verifyQueue = lfs.NewDownloadCheckQueue(0, 0, true)
		verifiedObjects = lfs.NewStringSetWithCapacity(len(localObjects) / 2)
	}
	for _, file := range localObjects {
		if !retainedObjects.Contains(file.Oid) {
			prunableObjects = append(prunableObjects, file.Oid)
			totalSize += file.Size
			if verbose {
				// Save up verbose output for the end, spinner still going
				verboseOutput.WriteString(fmt.Sprintf(" * %v (%v)\n", file.Oid, humanizeBytes(file.Size)))
			}
			if verifyRemote {
				tracerx.Printf("VERIFYING: %v", file.Oid)
				pointer := lfs.NewPointer(file.Oid, file.Size, nil)
				verifyQueue.Add(lfs.NewDownloadCheckable(&lfs.WrappedPointer{Pointer: pointer}))
			}
		}
	}

	if verifyRemote {
		// this channel is filled with oids for which Check() succeeded & Transfer() was called
		verifyc := verifyQueue.Watch()
		var verifywait sync.WaitGroup
		verifywait.Add(1)
		go func() {
			for oid := range verifyc {
				verifiedObjects.Add(oid)
				tracerx.Printf("VERIFIED: %v", oid)
				progressChan <- PruneProgress{PruneProgressTypeVerify, 1}
			}
			verifywait.Done()
		}()
		verifyQueue.Wait()
		verifywait.Wait()
		close(progressChan) // after verify (uses spinner) but before check
		progresswait.Wait()
		pruneCheckVerified(prunableObjects, reachableObjects, verifiedObjects)
	} else {
		close(progressChan)
		progresswait.Wait()
	}

	if len(prunableObjects) == 0 {
		Print("Nothing to prune")
		return
	}
	if dryRun {
		Print("%d files would be pruned (%v)", len(prunableObjects), humanizeBytes(totalSize))
		if verbose {
			Print(verboseOutput.String())
		}
	} else {
		Print("Pruning %d files (%v)", len(prunableObjects), humanizeBytes(totalSize))
		if verbose {
			Print(verboseOutput.String())
		}
		pruneDeleteFiles(prunableObjects)
	}
}
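// The shutdown ordering in prune() is the load-bearing part: taskwait.Wait()
// must precede close(retainChan) so no producer sends on a closed channel,
// and retainwait.Wait() must precede reading retainedObjects so the set is
// complete before it's consulted. A minimal, self-contained sketch of that
// close-after-producers idiom (hypothetical names, ordinary runnable Go):
//
//	var producers sync.WaitGroup
//	ch := make(chan string, 100)
//	producers.Add(2)
//	go func() { defer producers.Done(); ch <- "a" }()
//	go func() { defer producers.Done(); ch <- "b" }()
//	done := make(chan struct{})
//	go func() { // single consumer
//		for v := range ch {
//			_ = v
//		}
//		close(done)
//	}()
//	producers.Wait() // all sends have happened
//	close(ch)        // only now is closing safe
//	<-done           // consumer has drained everything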
func prePushRef(left, right string) {
	// Just use scanner here
	scanOpt := lfs.NewScanRefsOptions()
	scanOpt.ScanMode = lfs.ScanLeftToRemoteMode
	scanOpt.RemoteName = lfs.Config.CurrentRemote

	pointers, err := lfs.ScanRefs(left, right, scanOpt)
	if err != nil {
		Panic(err, "Error scanning for Git LFS files")
	}

	totalSize := int64(0)
	for _, p := range pointers {
		totalSize += p.Size
	}

	// Objects to skip because they're missing locally but on server
	var skipObjects lfs.StringSet
	if !prePushDryRun {
		// Do this as a pre-flight check since upload queue starts immediately
		skipObjects = prePushCheckForMissingObjects(pointers)
	}

	uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, prePushDryRun)

	for _, pointer := range pointers {
		if prePushDryRun {
			Print("push %s => %s", pointer.Oid, pointer.Name)
			continue
		}

		if skipObjects.Contains(pointer.Oid) {
			// object missing locally but on server, don't bother
			continue
		}

		u, err := lfs.NewUploadable(pointer.Oid, pointer.Name)
		if err != nil {
			if lfs.IsCleanPointerError(err) {
				Exit(prePushMissingErrMsg, pointer.Name, lfs.ErrorGetContext(err, "pointer").(*lfs.Pointer).Oid)
			} else if Debugging || lfs.IsFatalError(err) {
				Panic(err, err.Error())
			} else {
				Exit(err.Error())
			}
		}
		uploadQueue.Add(u)
	}

	if !prePushDryRun {
		uploadQueue.Wait()

		for _, err := range uploadQueue.Errors() {
			if Debugging || lfs.IsFatalError(err) {
				LoggedError(err, err.Error())
			} else {
				Error(err.Error())
			}
		}

		if len(uploadQueue.Errors()) > 0 {
			os.Exit(2)
		}
	}
}
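// For context (based on Git's documented pre-push hook protocol, not on code
// shown here): Git writes one line per ref to the hook's stdin in the form
// "<local ref> <local sha1> <remote ref> <remote sha1>", and the enclosing
// command is expected to parse each line and call prePushRef with the local
// and remote sha1s so that ScanLeftToRemoteMode only walks commits the remote
// lacks. An illustrative sketch (bufio/strings imports assumed; the field
// handling is hypothetical, not the command's actual parser):
//
//	scanner := bufio.NewScanner(os.Stdin)
//	for scanner.Scan() {
//		fields := strings.Split(scanner.Text(), " ")
//		if len(fields) == 4 {
//			prePushRef(fields[1], fields[3]) // local sha1, remote sha1
//		}
//	}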