Example 1
// uploadPointers builds an upload TransferQueue for the given pointers,
// honoring the dry-run flag and skipping objects that are missing locally
// but already present on the server.
func uploadPointers(pointers []*lfs.WrappedPointer) *lfs.TransferQueue {
	totalSize := int64(0)
	for _, p := range pointers {
		totalSize += p.Size
	}

	skipObjects := prePushCheckForMissingObjects(pointers)

	uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, pushDryRun)
	for i, pointer := range pointers {
		if pushDryRun {
			Print("push %s => %s", pointer.Oid, pointer.Name)
			continue
		}

		if _, skip := skipObjects[pointer.Oid]; skip {
			// object missing locally but on server, don't bother
			continue
		}

		tracerx.Printf("prepare upload: %s %s %d/%d", pointer.Oid, pointer.Name, i+1, len(pointers))

		u, err := lfs.NewUploadable(pointer.Oid, pointer.Name)
		if err != nil {
			if Debugging || lfs.IsFatalError(err) {
				Panic(err, err.Error())
			} else {
				Exit(err.Error())
			}
		}
		uploadQueue.Add(u)
	}

	return uploadQueue
}
Example 2
// uploadsWithObjectIDs builds an upload TransferQueue directly from a list
// of LFS object IDs.
func uploadsWithObjectIDs(oids []string) *lfs.TransferQueue {
	uploads := []*lfs.Uploadable{}
	// Sizes are unknown when only object IDs are given, so totalSize stays 0.
	totalSize := int64(0)

	for i, oid := range oids {
		if pushDryRun {
			Print("push object ID %s", oid)
			continue
		}
		tracerx.Printf("prepare upload: %s %d/%d", oid, i+1, len(oids))

		u, err := lfs.NewUploadable(oid, "")
		if err != nil {
			if Debugging || lfs.IsFatalError(err) {
				Panic(err, err.Error())
			} else {
				Exit(err.Error())
			}
		}
		uploads = append(uploads, u)
	}

	uploadQueue := lfs.NewUploadQueue(len(oids), totalSize, pushDryRun)

	for _, u := range uploads {
		uploadQueue.Add(u)
	}

	return uploadQueue
}
Example 3
// uploadPointers builds an upload TransferQueue for the given pointers,
// honoring the dry-run flag.
func uploadPointers(pointers []*lfs.WrappedPointer) *lfs.TransferQueue {
	totalSize := int64(0)
	for _, p := range pointers {
		totalSize += p.Size
	}

	uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, pushDryRun)
	for i, pointer := range pointers {
		if pushDryRun {
			Print("push %s [%s]", pointer.Name, pointer.Oid)
			continue
		}

		tracerx.Printf("prepare upload: %s %s %d/%d", pointer.Oid, pointer.Name, i+1, len(pointers))

		u, err := lfs.NewUploadable(pointer.Oid, pointer.Name)
		if err != nil {
			if Debugging || lfs.IsFatalError(err) {
				Panic(err, err.Error())
			} else {
				Exit(err.Error())
			}
		}
		uploadQueue.Add(u)
	}

	return uploadQueue
}
Example 4
// ExitWithError reports err and terminates: when debugging or the error is
// fatal it calls Panic, otherwise it prints any inner error and exits with
// the error message.
func ExitWithError(err error) {
	if Debugging || lfs.IsFatalError(err) {
		Panic(err, err.Error())
	} else {
		if inner := lfs.GetInnerError(err); inner != nil {
			Error(inner.Error())
		}
		Exit(err.Error())
	}
}
Example 5
// buildTestData commits and uploads oidCount small LFS test files, returning
// their objects in oidsExist, plus oidCount generated OIDs that do not exist
// on the server in oidsMissing.
func buildTestData() (oidsExist, oidsMissing []TestObject, err error) {
	const oidCount = 50
	oidsExist = make([]TestObject, 0, oidCount)
	oidsMissing = make([]TestObject, 0, oidCount)

	// Build test data for existing files & upload them.
	// Use a test repo for this to simplify making sure the data matches each oid.
	// We're not performing a real test at this point (although an upload failure will break it).
	var callback testDataCallback
	repo := test.NewRepo(&callback)
	repo.Pushd()
	defer repo.Cleanup()
	// just one commit
	commit := test.CommitInput{CommitterName: "A N Other", CommitterEmail: "*****@*****.**"}
	var totalSize int64
	for i := 0; i < oidCount; i++ {
		filename := fmt.Sprintf("file%d.dat", i)
		sz := int64(rand.Intn(200)) + 50
		commit.Files = append(commit.Files, &test.FileInput{Filename: filename, Size: sz})
		totalSize += sz
	}
	outputs := repo.AddCommits([]*test.CommitInput{&commit})

	// now upload
	uploadQueue := lfs.NewUploadQueue(len(oidsExist), totalSize, false)
	for _, f := range outputs[0].Files {
		oidsExist = append(oidsExist, TestObject{Oid: f.Oid, Size: f.Size})

		u, err := lfs.NewUploadable(f.Oid, "Test file")
		if err != nil {
			return nil, nil, err
		}
		uploadQueue.Add(u)
	}
	uploadQueue.Wait()

	for _, err := range uploadQueue.Errors() {
		if lfs.IsFatalError(err) {
			exit("Fatal error setting up test data: %s", err)
		}
	}

	// Generate SHAs for missing files, random but repeatable
	// No actual file content needed for these
	rand.Seed(int64(oidCount))
	runningSha := sha256.New()
	for i := 0; i < oidCount; i++ {
		runningSha.Write([]byte{byte(rand.Intn(256))})
		oid := hex.EncodeToString(runningSha.Sum(nil))
		sz := int64(rand.Intn(200)) + 50
		oidsMissing = append(oidsMissing, TestObject{Oid: oid, Size: sz})
	}
	return oidsExist, oidsMissing, nil
}
Example 6
// Fetch and report completion of each OID to a channel (optional, pass nil to skip)
func fetchAndReportToChan(pointers []*lfs.WrappedPointer, include, exclude []string, out chan<- *lfs.WrappedPointer) {

	totalSize := int64(0)
	for _, p := range pointers {
		totalSize += p.Size
	}
	q := lfs.NewDownloadQueue(len(pointers), totalSize, false)

	for _, p := range pointers {
		// Only add to the download queue if the local file is not already the right size.
		// This avoids the earlier behaviour of over-reporting a requirement for files we
		// already have, which would only be skipped by PointerSmudgeObject later.
		passFilter := lfs.FilenamePassesIncludeExcludeFilter(p.Name, include, exclude)
		if !lfs.ObjectExistsOfSize(p.Oid, p.Size) && passFilter {
			tracerx.Printf("fetch %v [%v]", p.Name, p.Oid)
			q.Add(lfs.NewDownloadable(p))
		} else {
			if !passFilter {
				tracerx.Printf("Skipping %v [%v], include/exclude filters applied", p.Name, p.Oid)
			} else {
				tracerx.Printf("Skipping %v [%v], already exists", p.Name, p.Oid)
			}

			// If we already have it, or it won't be fetched
			// report it to chan immediately to support pull/checkout
			if out != nil {
				out <- p
			}

		}
	}

	if out != nil {
		dlwatch := q.Watch()

		go func() {
			// fetch only reports a single OID, but an OID *might* be referenced by multiple
			// WrappedPointers if the same content is at multiple paths, so map oid -> slice
			oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers))
			for _, pointer := range pointers {
				plist := oidToPointers[pointer.Oid]
				oidToPointers[pointer.Oid] = append(plist, pointer)
			}

			for oid := range dlwatch {
				plist, ok := oidToPointers[oid]
				if !ok {
					continue
				}
				for _, p := range plist {
					out <- p
				}
			}
			close(out)
		}()

	}
	processQueue := time.Now()
	q.Wait()
	tracerx.PerformanceSince("process queue", processQueue)

	for _, err := range q.Errors() {
		if Debugging || lfs.IsFatalError(err) {
			LoggedError(err, err.Error())
		} else {
			Error(err.Error())
		}
	}
}
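Since fetchAndReportToChan closes the out channel itself once the download queue has drained, a caller can run it in a goroutine and simply range over the channel. A minimal caller sketch follows; the fetchAndCheckout name and the checkoutPointer step are hypothetical illustrations, not part of the examples above.

// Hypothetical caller: consume each pointer as soon as it has been
// downloaded or skipped. fetchAndReportToChan closes out when it is done,
// so the range loop terminates on its own.
func fetchAndCheckout(pointers []*lfs.WrappedPointer, include, exclude []string) {
	out := make(chan *lfs.WrappedPointer, len(pointers))

	go fetchAndReportToChan(pointers, include, exclude, out)

	for p := range out {
		checkoutPointer(p) // assumed per-pointer checkout step
	}
}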
Example 7
// pushCommand pushes local objects to a Git LFS server.  It takes two
// arguments:
//
//   `<remote> <remote ref>`
//
// Either a remote name ("origin") or a remote URL is accepted.
//
// pushCommand calculates the git objects to send by comparing the range
// of commits between the local and remote git servers.
func pushCommand(cmd *cobra.Command, args []string) {
	var uploadQueue *lfs.TransferQueue

	if len(args) == 0 {
		Print("Specify a remote and a remote branch name (`git lfs push origin master`)")
		os.Exit(1)
	}

	lfs.Config.CurrentRemote = args[0]

	if useStdin {
		requireStdin("Run this command from the Git pre-push hook, or leave the --stdin flag off.")

		// called from a pre-push hook!  Update the existing pre-push hook if it's
		// one that git-lfs set.
		lfs.InstallHooks(false)

		refsData, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			Panic(err, "Error reading refs on stdin")
		}

		if len(refsData) == 0 {
			return
		}

		left, right := decodeRefs(string(refsData))
		if left == pushDeleteBranch {
			return
		}

		uploadQueue = uploadsBetweenRefs(left, right)
	} else if pushObjectIDs {
		if len(args) < 2 {
			Print("Usage: git lfs push --object-id <remote> <lfs-object-id> [lfs-object-id] ...")
			return
		}

		uploadQueue = uploadsWithObjectIDs(args[1:])
	} else {
		if len(args) < 1 {
			Print("Usage: git lfs push --dry-run <remote> [ref]")
			return
		}

		uploadQueue = uploadsBetweenRefAndRemote(args[0], args[1:])
	}

	if !pushDryRun {
		uploadQueue.Wait()
		for _, err := range uploadQueue.Errors() {
			if Debugging || lfs.IsFatalError(err) {
				LoggedError(err, err.Error())
			} else {
				Error(err.Error())
			}
		}

		if len(uploadQueue.Errors()) > 0 {
			os.Exit(2)
		}
	}
}
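pushDryRun, pushObjectIDs and useStdin are package-level flag variables referenced by pushCommand. A plausible cobra registration is sketched below; the flag names are taken from the usage strings above (--dry-run, --object-id, --stdin), but the init layout and help texts are assumptions rather than the real source.

// Sketch of flag wiring for pushCommand. Flag names come from the usage
// messages above; everything else here is an assumption.
var pushCmd = &cobra.Command{
	Use: "push",
	Run: pushCommand,
}

func init() {
	pushCmd.Flags().BoolVar(&pushDryRun, "dry-run", false,
		"Print what would be pushed without pushing anything.")
	pushCmd.Flags().BoolVar(&pushObjectIDs, "object-id", false,
		"Treat the trailing arguments as LFS object IDs instead of a ref.")
	pushCmd.Flags().BoolVar(&useStdin, "stdin", false,
		"Read refs from stdin (used when called from the pre-push hook).")
}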
Example 8
// prePushCommand is run through Git's pre-push hook. The pre-push hook passes
// two arguments on the command line:
//
//   1. Name of the remote to which the push is being done
//   2. URL to which the push is being done
//
// The hook receives commit information on stdin in the form:
//   <local ref> <local sha1> <remote ref> <remote sha1>
//
// In the typical case, prePushCommand will get a list of git objects being
// pushed by using the following:
//
//    git rev-list --objects <local sha1> ^<remote sha1>
//
// If any of those git objects are associated with Git LFS objects, those
// objects will be pushed to the Git LFS API.
//
// In the case of pushing a new branch, the list of git objects will be all of
// the git objects in this branch.
//
// In the case of deleting a branch, no attempts to push Git LFS objects will be
// made.
func prePushCommand(cmd *cobra.Command, args []string) {
	var left, right string

	if len(args) == 0 {
		Print("This should be run through Git's pre-push hook.  Run `git lfs update` to install it.")
		os.Exit(1)
	}

	lfs.Config.CurrentRemote = args[0]

	refsData, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		Panic(err, "Error reading refs on stdin")
	}

	if len(refsData) == 0 {
		return
	}

	left, right = decodeRefs(string(refsData))
	if left == prePushDeleteBranch {
		return
	}

	// Just use scanner here
	scanOpt := &lfs.ScanRefsOptions{ScanMode: lfs.ScanLeftToRemoteMode, RemoteName: lfs.Config.CurrentRemote}
	pointers, err := lfs.ScanRefs(left, right, scanOpt)
	if err != nil {
		Panic(err, "Error scanning for Git LFS files")
	}

	totalSize := int64(0)
	for _, p := range pointers {
		totalSize += p.Size
	}

	// Objects to skip because they're missing locally but on server
	var skipObjects map[string]struct{}

	if !prePushDryRun {
		// Do this as a pre-flight check since upload queue starts immediately
		skipObjects = prePushCheckForMissingObjects(pointers)
	}

	uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, prePushDryRun)

	for _, pointer := range pointers {
		if prePushDryRun {
			Print("push %s [%s]", pointer.Name, pointer.Oid)
			continue
		}

		if _, skip := skipObjects[pointer.Oid]; skip {
			// object missing locally but on server, don't bother
			continue
		}

		u, err := lfs.NewUploadable(pointer.Oid, pointer.Name)
		if err != nil {
			if lfs.IsCleanPointerError(err) {
				Exit(prePushMissingErrMsg, pointer.Name, lfs.ErrorGetContext(err, "pointer").(*lfs.Pointer).Oid)
			} else if Debugging || lfs.IsFatalError(err) {
				Panic(err, err.Error())
			} else {
				Exit(err.Error())
			}
		}

		uploadQueue.Add(u)
	}

	if !prePushDryRun {
		uploadQueue.Wait()
		for _, err := range uploadQueue.Errors() {
			if Debugging || lfs.IsFatalError(err) {
				LoggedError(err, err.Error())
			} else {
				Error(err.Error())
			}
		}

		if len(uploadQueue.Errors()) > 0 {
			os.Exit(2)
		}
	}
}
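decodeRefs itself is not shown in these examples. A simplified stand-in, parsing the single "<local ref> <local sha1> <remote ref> <remote sha1>" line described in the comment above with the standard strings package, could look like the following; it ignores multiple ref lines and the all-zero SHA used for branch deletions, and is an assumption rather than the real implementation.

// decodeRefsSketch is a hypothetical, simplified stand-in for decodeRefs.
// It splits one "<local ref> <local sha1> <remote ref> <remote sha1>" line
// and returns the local and remote SHA1s. Deleted branches (all-zero SHA)
// and multiple input lines are deliberately not handled here.
func decodeRefsSketch(refsData string) (left, right string) {
	fields := strings.Fields(refsData)
	if len(fields) < 4 {
		return "", ""
	}
	return fields[1], fields[3] // local sha1, remote sha1
}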
Example 9
// prePushRef scans the commits being pushed (left, relative to the remote
// ref right) for Git LFS pointers and uploads the corresponding objects,
// honoring the dry-run flag and skipping objects that are missing locally
// but already on the server.
func prePushRef(left, right string) {
	// Just use scanner here
	scanOpt := lfs.NewScanRefsOptions()
	scanOpt.ScanMode = lfs.ScanLeftToRemoteMode
	scanOpt.RemoteName = lfs.Config.CurrentRemote

	pointers, err := lfs.ScanRefs(left, right, scanOpt)
	if err != nil {
		Panic(err, "Error scanning for Git LFS files")
	}

	totalSize := int64(0)
	for _, p := range pointers {
		totalSize += p.Size
	}

	// Objects to skip because they're missing locally but on server
	var skipObjects lfs.StringSet

	if !prePushDryRun {
		// Do this as a pre-flight check since upload queue starts immediately
		skipObjects = prePushCheckForMissingObjects(pointers)
	}

	uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, prePushDryRun)

	for _, pointer := range pointers {
		if prePushDryRun {
			Print("push %s => %s", pointer.Oid, pointer.Name)
			continue
		}

		if skipObjects.Contains(pointer.Oid) {
			// object missing locally but on server, don't bother
			continue
		}

		u, err := lfs.NewUploadable(pointer.Oid, pointer.Name)
		if err != nil {
			if lfs.IsCleanPointerError(err) {
				Exit(prePushMissingErrMsg, pointer.Name, lfs.ErrorGetContext(err, "pointer").(*lfs.Pointer).Oid)
			} else if Debugging || lfs.IsFatalError(err) {
				Panic(err, err.Error())
			} else {
				Exit(err.Error())
			}
		}

		uploadQueue.Add(u)
	}

	if !prePushDryRun {
		uploadQueue.Wait()
		for _, err := range uploadQueue.Errors() {
			if Debugging || lfs.IsFatalError(err) {
				LoggedError(err, err.Error())
			} else {
				Error(err.Error())
			}
		}

		if len(uploadQueue.Errors()) > 0 {
			os.Exit(2)
		}
	}
}