Example No. 1
func shouldDeleteTempObject(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false
	}

	if info.IsDir() {
		return false
	}

	base := filepath.Base(path)
	parts := strings.SplitN(base, "-", 2)
	oid := parts[0]
	if len(parts) < 2 || len(oid) != 64 {
		tracerx.Printf("Removing invalid tmp object file: %s", path)
		return true
	}

	if FileExists(localMediaPathNoCreate(oid)) {
		tracerx.Printf("Removing existing tmp object file: %s", path)
		return true
	}

	if time.Since(info.ModTime()) > time.Hour {
		tracerx.Printf("Removing old tmp object file: %s", path)
		return true
	}

	return false
}
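Every example on this page reports its decisions through tracerx.Printf, which stays silent unless tracing is enabled through an environment variable (GIT_TRACE=1 in git-lfs's configuration of the library). For reference, here is a minimal self-contained sketch of the same stale-temp-file check, assuming the tracerx package at github.com/rubyist/tracerx; it is an illustration, not git-lfs code.

package main

import (
	"os"
	"path/filepath"
	"time"

	"github.com/rubyist/tracerx"
)

// isStale reports whether path is a regular file last modified over an hour ago.
func isStale(path string) bool {
	info, err := os.Stat(path)
	if err != nil || info.IsDir() {
		return false
	}
	if time.Since(info.ModTime()) > time.Hour {
		tracerx.Printf("stale tmp file: %s", path)
		return true
	}
	return false
}

func main() {
	p := filepath.Join(os.TempDir(), "example.tmp")
	if isStale(p) {
		os.Remove(p)
	}
}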
Example No. 2
func Batch(objects []*ObjectResource, operation string) ([]*ObjectResource, error) {
	if len(objects) == 0 {
		return nil, nil
	}

	o := map[string]interface{}{"objects": objects, "operation": operation}

	by, err := json.Marshal(o)
	if err != nil {
		return nil, Error(err)
	}

	req, err := newBatchApiRequest(operation)
	if err != nil {
		return nil, Error(err)
	}

	req.Header.Set("Content-Type", mediaType)
	req.Header.Set("Content-Length", strconv.Itoa(len(by)))
	req.ContentLength = int64(len(by))
	req.Body = &byteCloser{bytes.NewReader(by)}

	tracerx.Printf("api: batch %d files", len(objects))

	res, objs, err := doApiBatchRequest(req)

	if err != nil {

		if res == nil {
			return nil, newRetriableError(err)
		}

		if res.StatusCode == 0 {
			return nil, newRetriableError(err)
		}

		if IsAuthError(err) {
			setAuthType(req, res)
			return Batch(objects, operation)
		}

		switch res.StatusCode {
		case 404, 410:
			tracerx.Printf("api: batch not implemented: %d", res.StatusCode)
			return nil, newNotImplementedError(nil)
		}

		tracerx.Printf("api error: %s", err)
		return nil, Error(err)
	}
	LogTransfer("lfs.api.batch", res)

	if res.StatusCode != 200 {
		return nil, Error(fmt.Errorf("Invalid status for %s: %d", traceHttpReq(req), res.StatusCode))
	}

	return objs, nil
}
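The request plumbing above (marshal the JSON payload, set Content-Type and Content-Length, attach the body) can be reproduced with nothing but the standard library. A minimal sketch against a placeholder URL, without git-lfs's newBatchApiRequest helper or byteCloser type:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
)

func main() {
	payload := map[string]interface{}{
		"operation": "download",
		"objects":   []map[string]interface{}{{"oid": "abc123", "size": 123}},
	}

	by, err := json.Marshal(payload)
	if err != nil {
		panic(err)
	}

	// Placeholder URL. http.NewRequest already sets ContentLength for a
	// *bytes.Reader body; the explicit header only mirrors the style above.
	req, err := http.NewRequest("POST", "https://example.com/info/lfs/objects/batch", bytes.NewReader(by))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/vnd.git-lfs+json")
	req.Header.Set("Content-Length", strconv.Itoa(len(by)))

	fmt.Println(req.Method, req.URL, req.ContentLength)
}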
Example No. 3
func UploadCheck(oidPath string) (*objectResource, error) {
	oid := filepath.Base(oidPath)

	stat, err := os.Stat(oidPath)
	if err != nil {
		return nil, Error(err)
	}

	reqObj := &objectResource{
		Oid:  oid,
		Size: stat.Size(),
	}

	by, err := json.Marshal(reqObj)
	if err != nil {
		return nil, Error(err)
	}

	req, err := newApiRequest("POST", oid)
	if err != nil {
		return nil, Error(err)
	}

	req.Header.Set("Content-Type", mediaType)
	req.Header.Set("Content-Length", strconv.Itoa(len(by)))
	req.ContentLength = int64(len(by))
	req.Body = &byteCloser{bytes.NewReader(by)}

	tracerx.Printf("api: uploading (%s)", oid)
	res, obj, err := doLegacyApiRequest(req)
	if err != nil {
		if IsAuthError(err) {
			Config.SetAccess("basic")
			tracerx.Printf("api: upload check not authorized, submitting with auth")
			return UploadCheck(oidPath)
		}

		return nil, newRetriableError(err)
	}
	LogTransfer("lfs.api.upload", res)

	if res.StatusCode == 200 {
		return nil, nil
	}

	if obj.Oid == "" {
		obj.Oid = oid
	}
	if obj.Size == 0 {
		obj.Size = reqObj.Size
	}

	return obj, nil
}
Example No. 4
// RecentBranches returns branches with commit dates on or after the given date/time
// Returns the full Ref type for easier detection of duplicate SHAs etc.
// since: refs with commits on or after this date will be included
// includeRemoteBranches: true to include refs on remote branches
// onlyRemote: set to non-blank to only include remote branches on a single remote
func RecentBranches(since time.Time, includeRemoteBranches bool, onlyRemote string) ([]*Ref, error) {
	cmd := subprocess.ExecCommand("git", "for-each-ref",
		`--sort=-committerdate`,
		`--format=%(refname) %(objectname) %(committerdate:iso)`,
		"refs")
	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("Failed to call git for-each-ref: %v", err)
	}
	cmd.Start()
	defer cmd.Wait()

	scanner := bufio.NewScanner(outp)

	// Output is like this:
	// refs/heads/master f03686b324b29ff480591745dbfbbfa5e5ac1bd5 2015-08-19 16:50:37 +0100
	// refs/remotes/origin/master ad3b29b773e46ad6870fdf08796c33d97190fe93 2015-08-13 16:50:37 +0100

	// Output is ordered by latest commit date first, so we can stop at the threshold
	regex := regexp.MustCompile(`^(refs/[^/]+/\S+)\s+([0-9A-Za-z]{40})\s+(\d{4}-\d{2}-\d{2}\s+\d{2}\:\d{2}\:\d{2}\s+[\+\-]\d{4})`)
	tracerx.Printf("RECENT: Getting refs >= %v", since)
	var ret []*Ref
	for scanner.Scan() {
		line := scanner.Text()
		if match := regex.FindStringSubmatch(line); match != nil {
			fullref := match[1]
			sha := match[2]
			reftype, ref := ParseRefToTypeAndName(fullref)
			if reftype == RefTypeRemoteBranch || reftype == RefTypeRemoteTag {
				if !includeRemoteBranches {
					continue
				}
				if onlyRemote != "" && !strings.HasPrefix(ref, onlyRemote+"/") {
					continue
				}
			}
			// This is a ref we might use
			// Check the date
			commitDate, err := ParseGitDate(match[3])
			if err != nil {
				return ret, err
			}
			if commitDate.Before(since) {
				// the end
				break
			}
			tracerx.Printf("RECENT: %v (%v)", ref, commitDate)
			ret = append(ret, &Ref{ref, reftype, sha})
		}
	}

	return ret, nil

}
Example No. 5
func Batch(objects []*objectResource, operation string) ([]*objectResource, error) {
	if len(objects) == 0 {
		return nil, nil
	}

	o := map[string]interface{}{"objects": objects, "operation": operation}

	by, err := json.Marshal(o)
	if err != nil {
		return nil, Error(err)
	}

	req, err := newBatchApiRequest(operation)
	if err != nil {
		return nil, Error(err)
	}

	req.Header.Set("Content-Type", mediaType)
	req.Header.Set("Content-Length", strconv.Itoa(len(by)))
	req.ContentLength = int64(len(by))
	req.Body = &byteCloser{bytes.NewReader(by)}

	tracerx.Printf("api: batch %d files", len(objects))

	res, objs, err := doApiBatchRequest(req)
	if err != nil {
		if res == nil {
			return nil, err
		}

		switch res.StatusCode {
		case 401:
			Config.SetAccess("basic")
			tracerx.Printf("api: batch not authorized, submitting with auth")
			return Batch(objects, operation)
		case 404, 410:
			tracerx.Printf("api: batch not implemented: %d", res.StatusCode)
			return nil, newNotImplementedError(nil)
		}

		tracerx.Printf("api error: %s", err)
		return nil, Error(err)
	}
	LogTransfer("lfs.api.batch", res)

	if res.StatusCode != 200 {
		return nil, Error(fmt.Errorf("Invalid status for %s %s: %d", req.Method, req.URL, res.StatusCode))
	}

	return objs, nil
}
Example No. 6
// Wait waits for the queue to finish processing all transfers. Once Wait is
// called, Add will no longer add transferables to the queue. Any failed
// transfers will be automatically retried once.
func (q *TransferQueue) Wait() {
	if q.batcher != nil {
		q.batcher.Exit()
	}

	q.wait.Wait()

	// Handle any retries
	close(q.retriesc)
	q.retrywait.Wait()
	atomic.StoreUint32(&q.retrying, 1)

	if len(q.retries) > 0 && q.batcher != nil {
		tracerx.Printf("tq: retrying %d failed transfers", len(q.retries))
		for _, t := range q.retries {
			q.Add(t)
		}
		q.batcher.Exit()
		q.wait.Wait()
	}

	atomic.StoreUint32(&q.retrying, 0)

	close(q.apic)
	close(q.transferc)
	close(q.errorc)

	for _, watcher := range q.watchers {
		close(watcher)
	}

	q.meter.Finish()
	q.errorwait.Wait()
}
Example No. 7
func uploadPointers(pointers []*lfs.WrappedPointer) *lfs.TransferQueue {

	totalSize := int64(0)
	for _, p := range pointers {
		totalSize += p.Size
	}

	uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, pushDryRun)
	for i, pointer := range pointers {
		if pushDryRun {
			Print("push %s [%s]", pointer.Name, pointer.Oid)
			continue
		}

		tracerx.Printf("prepare upload: %s %s %d/%d", pointer.Oid, pointer.Name, i+1, len(pointers))

		u, wErr := lfs.NewUploadable(pointer.Oid, pointer.Name)
		if wErr != nil {
			if Debugging || wErr.Panic {
				Panic(wErr.Err, wErr.Error())
			} else {
				Exit(wErr.Error())
			}
		}
		uploadQueue.Add(u)
	}

	return uploadQueue
}
Example No. 8
func uploadsBetweenRefs(left string, right string) *lfs.TransferQueue {
	// Just use scanner here
	pointers, err := lfs.ScanRefs(left, right, nil)
	if err != nil {
		Panic(err, "Error scanning for Git LFS files")
	}

	totalSize := int64(0)
	for _, p := range pointers {
		totalSize += p.Size
	}

	uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, pushDryRun)
	for i, pointer := range pointers {
		if pushDryRun {
			Print("push %s", pointer.Name)
			continue
		}

		tracerx.Printf("prepare upload: %s %s %d/%d", pointer.Oid, pointer.Name, i+1, len(pointers))

		u, wErr := lfs.NewUploadable(pointer.Oid, pointer.Name)
		if wErr != nil {
			if Debugging || wErr.Panic {
				Panic(wErr.Err, wErr.Error())
			} else {
				Exit(wErr.Error())
			}
		}
		uploadQueue.Add(u)
	}

	return uploadQueue
}
Example No. 9
func fillCredentials(req *http.Request, u *url.URL) (Creds, error) {
	path := strings.TrimPrefix(u.Path, "/")
	input := Creds{"protocol": u.Scheme, "host": u.Host, "path": path}
	if u.User != nil && u.User.Username() != "" {
		input["username"] = u.User.Username()
	}

	creds, err := execCreds(input, "fill")
	if creds == nil || len(creds) < 1 {
		errmsg := fmt.Sprintf("Git credentials for %s not found", u)
		if err != nil {
			errmsg = errmsg + ":\n" + err.Error()
		} else {
			errmsg = errmsg + "."
		}
		err = errors.New(errmsg)
	}

	if err != nil {
		return nil, err
	}

	tracerx.Printf("Filled credentials for %s", u)
	setRequestAuth(req, creds["username"], creds["password"])

	return creds, err
}
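execCreds is not shown on this page; in git-lfs it shells out to git's credential helpers. A rough self-contained sketch of that underlying exchange, assuming only the documented `git credential fill` protocol (key=value lines in, key=value lines out); the fill helper here is hypothetical, not the git-lfs implementation:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

// fill asks `git credential fill` to resolve credentials for the given
// attributes (protocol, host, path, ...). Git may prompt interactively.
func fill(input map[string]string) (map[string]string, error) {
	var in bytes.Buffer
	for k, v := range input {
		fmt.Fprintf(&in, "%s=%s\n", k, v)
	}
	in.WriteString("\n") // a blank line terminates the request

	cmd := exec.Command("git", "credential", "fill")
	cmd.Stdin = &in
	out, err := cmd.Output()
	if err != nil {
		return nil, err
	}

	creds := map[string]string{}
	scanner := bufio.NewScanner(bytes.NewReader(out))
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), "=", 2)
		if len(parts) == 2 {
			creds[parts[0]] = parts[1]
		}
	}
	return creds, scanner.Err()
}

func main() {
	creds, err := fill(map[string]string{"protocol": "https", "host": "example.com"})
	if err != nil {
		fmt.Println("credential fill failed:", err)
		return
	}
	fmt.Println("username:", creds["username"])
}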
Example No. 10
func (c *countingReadCloser) Read(b []byte) (int, error) {
	n, err := c.ReadCloser.Read(b)
	if err != nil && err != io.EOF {
		return n, err
	}

	c.Count += n

	if c.isTraceableType {
		chunk := string(b[0:n])
		if c.useGitTrace {
			tracerx.Printf("HTTP: %s", chunk)
		}

		if Config.isTracingHttp {
			fmt.Fprint(os.Stderr, chunk)
		}
	}

	if err == io.EOF && Config.isLoggingStats {
		// This transfer is done, we're checking it this way so we can also
		// catch transfers where the caller forgets to Close() the Body.
		if c.response != nil {
			transfersLock.Lock()
			if transfer, ok := transfers[c.response]; ok {
				transfer.responseStats.BodySize = c.Count
				transfer.responseStats.Stop = time.Now()
			}
			transfersLock.Unlock()
		}
	}
	return n, err
}
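The wrapper pattern above (embed the original ReadCloser, intercept Read, keep a running count) is easy to reproduce in isolation. A minimal sketch that only counts bytes, without the tracing and stats bookkeeping:

package main

import (
	"fmt"
	"io"
	"strings"
)

// countingReader embeds an io.Reader and tallies how many bytes pass through.
type countingReader struct {
	io.Reader
	Count int
}

func (c *countingReader) Read(b []byte) (int, error) {
	n, err := c.Reader.Read(b)
	c.Count += n // count what was read even when err == io.EOF
	return n, err
}

func main() {
	cr := &countingReader{Reader: strings.NewReader("hello, world")}
	if _, err := io.Copy(io.Discard, cr); err != nil {
		panic(err)
	}
	fmt.Println("bytes read:", cr.Count)
}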
Example No. 11
// LocalRefs returns all of the local branches and tags for the current
// repository. Other refs (HEAD, refs/stash, remote refs, git notes) are ignored.
func LocalRefs() ([]*Ref, error) {
	cmd := subprocess.ExecCommand("git", "show-ref", "--heads", "--tags")

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("Failed to call git show-ref: %v", err)
	}
	cmd.Start()

	var refs []*Ref
	scanner := bufio.NewScanner(outp)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		parts := strings.SplitN(line, " ", 2)
		if len(parts) != 2 || len(parts[0]) != 40 || len(parts[1]) < 1 {
			tracerx.Printf("Invalid line from git show-ref: %q", line)
			continue
		}

		rtype, name := ParseRefToTypeAndName(parts[1])
		if rtype != RefTypeLocalBranch && rtype != RefTypeLocalTag {
			continue
		}

		refs = append(refs, &Ref{name, rtype, parts[0]})
	}

	return refs, cmd.Wait()
}
Example No. 12
// startCommand starts up a command and creates a stdin pipe and buffered
// stdout & stderr pipes, wrapped in a wrappedCmd. The stdout and stderr
// buffers are stdoutBufSize bytes each.
func startCommand(command string, args ...string) (*wrappedCmd, error) {
	cmd := exec.Command(command, args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}

	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, err
	}

	tracerx.Printf("run_command: %s %s", command, strings.Join(args, " "))
	if err := cmd.Start(); err != nil {
		return nil, err
	}

	return &wrappedCmd{
		stdin,
		bufio.NewReaderSize(stdout, stdoutBufSize),
		bufio.NewReaderSize(stderr, stdoutBufSize),
		cmd,
	}, nil
}
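A wrapper like this is consumed by writing to stdin and reading back the buffered stdout. A small self-contained sketch of the same pipe wiring with os/exec, run against a `sort` binary assumed to be on PATH rather than git:

package main

import (
	"bufio"
	"fmt"
	"io"
	"os/exec"
)

func main() {
	cmd := exec.Command("sort")

	stdin, err := cmd.StdinPipe()
	if err != nil {
		panic(err)
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		panic(err)
	}

	if err := cmd.Start(); err != nil {
		panic(err)
	}

	// Feed input, then close stdin so the child sees EOF and can finish.
	io.WriteString(stdin, "banana\napple\ncherry\n")
	stdin.Close()

	scanner := bufio.NewScanner(stdout)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}

	if err := cmd.Wait(); err != nil {
		panic(err)
	}
}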
Example No. 13
func scanStorageDir(dir string, c chan *Pointer) {
	// ioutil.ReadDir and filepath.Walk do sorting which is unnecessary & inefficient
	dirf, err := os.Open(dir)
	if err != nil {
		return
	}
	defer dirf.Close()

	direntries, err := dirf.Readdir(0)
	if err != nil {
		tracerx.Printf("Problem with Readdir in %v: %v", dir, err)
		return
	}
	for _, dirfi := range direntries {
		if dirfi.IsDir() {
			subpath := filepath.Join(dir, dirfi.Name())
			scanStorageDir(subpath, c)
		} else {
			// Make sure it's really an object file & not .DS_Store etc
			if oidRE.MatchString(dirfi.Name()) {
				c <- NewPointer(dirfi.Name(), dirfi.Size(), nil)
			}
		}
	}
}
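The scan writes into the channel and leaves closing it to the caller, so a typical consumer starts the scan in a goroutine, closes the channel when it returns, and ranges over it. A minimal self-contained sketch of that producer/consumer shape with plain paths instead of Pointers (os.ReadDir sorts entries, which the comment above deliberately avoids via Readdir; that detail is dropped here for brevity):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// scanDir walks dir recursively and sends every regular file's path to ch.
func scanDir(dir string, ch chan<- string) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return
	}
	for _, e := range entries {
		p := filepath.Join(dir, e.Name())
		if e.IsDir() {
			scanDir(p, ch)
		} else {
			ch <- p
		}
	}
}

func main() {
	ch := make(chan string)
	go func() {
		scanDir(".", ch)
		close(ch) // the producer is done; lets the range below terminate
	}()

	for p := range ch {
		fmt.Println(p)
	}
}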
Example No. 14
func ResolveDirs() {
	var err error
	LocalGitDir, LocalWorkingDir, err = git.GitAndRootDirs()
	if err == nil {
		LocalGitStorageDir = resolveGitStorageDir(LocalGitDir)
		LocalMediaDir = filepath.Join(LocalGitStorageDir, "lfs", "objects")
		LocalLogDir = filepath.Join(LocalMediaDir, "logs")
		TempDir = filepath.Join(LocalGitDir, "lfs", "tmp") // temp files per worktree
		if err := os.MkdirAll(LocalMediaDir, localMediaDirPerms); err != nil {
			panic(fmt.Errorf("Error trying to create objects directory in '%s': %s", LocalMediaDir, err))
		}

		if err := os.MkdirAll(LocalLogDir, localLogDirPerms); err != nil {
			panic(fmt.Errorf("Error trying to create log directory in '%s': %s", LocalLogDir, err))
		}

		LocalObjectTempDir = filepath.Join(TempDir, "objects")
		if err := os.MkdirAll(LocalObjectTempDir, tempDirPerms); err != nil {
			panic(fmt.Errorf("Error trying to create temp directory in '%s': %s", TempDir, err))
		}
	} else {
		errMsg := err.Error()
		tracerx.Printf("Error running 'git rev-parse': %s", errMsg)
		if !strings.Contains(errMsg, "Not a git repository") {
			fmt.Fprintf(os.Stderr, "Error: %s\n", errMsg)
		}
	}
}
Example No. 15
func PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, cb CopyCallback) error {
	mediafile, err := LocalMediaPath(ptr.Oid)
	if err != nil {
		return err
	}

	stat, statErr := os.Stat(mediafile)
	if statErr == nil && stat != nil {
		fileSize := stat.Size()
		if fileSize == 0 || fileSize != ptr.Size {
			tracerx.Printf("Removing %s, size %d is invalid", mediafile, fileSize)
			os.RemoveAll(mediafile)
			stat = nil
		}
	}

	var wErr *WrappedError
	if statErr != nil || stat == nil {
		wErr = downloadFile(writer, ptr, workingfile, mediafile, cb)
	} else {
		wErr = readLocalFile(writer, ptr, mediafile, cb)
	}

	if wErr != nil {
		return &SmudgeError{ptr.Oid, mediafile, wErr}
	}

	return nil
}
Example No. 16
func traceHttpResponse(c *Configuration, res *http.Response) {
	if res == nil {
		return
	}

	tracerx.Printf("HTTP: %d", res.StatusCode)

	if c.isTracingHttp == false {
		return
	}

	fmt.Fprintf(os.Stderr, "\n")
	fmt.Fprintf(os.Stderr, "< %s %s\n", res.Proto, res.Status)
	for key, _ := range res.Header {
		fmt.Fprintf(os.Stderr, "< %s: %s\n", key, res.Header.Get(key))
	}

	traceBody := false
	ctype := strings.ToLower(strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0])
	for _, tracedType := range tracedTypes {
		if strings.Contains(ctype, tracedType) {
			traceBody = true
		}
	}

	res.Body = newCountedResponse(res)
	if traceBody {
		res.Body = newTracedBody(res.Body)
	}

	fmt.Fprintf(os.Stderr, "\n")
}
Example No. 17
func (q *TransferQueue) transferWorker() {
	for transfer := range q.transferc {
		cb := func(total, read int64, current int) error {
			q.meter.TransferBytes(q.transferKind, transfer.Name(), read, total, current)
			return nil
		}

		if err := transfer.Transfer(cb); err != nil {
			if q.canRetry(err) {
				tracerx.Printf("tq: retrying object %s", transfer.Oid())
				q.retry(transfer)
			} else {
				q.errorc <- err
			}
		} else {
			oid := transfer.Oid()
			for _, c := range q.watchers {
				c <- oid
			}
		}

		q.meter.FinishTransfer(transfer.Name())

		q.wait.Done()
	}
}
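transferWorker is one goroutine in a pool draining q.transferc; the run() method shown in a later example starts q.workers of them. The underlying shape is the standard Go worker-pool pattern, sketched here in a self-contained form:

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan int)
	var wg sync.WaitGroup

	// Start a small pool of workers draining the jobs channel, mirroring
	// how run() starts several copies of transferWorker.
	for w := 0; w < 3; w++ {
		go func(id int) {
			for j := range jobs {
				fmt.Printf("worker %d handled job %d\n", id, j)
				wg.Done()
			}
		}(w)
	}

	// Add to the wait group before enqueueing, then wait for all jobs.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		jobs <- i
	}
	close(jobs)
	wg.Wait()
}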
Example No. 18
func newBatchApiRequest(operation string) (*http.Request, error) {
	endpoint := Config.Endpoint()

	res, err := sshAuthenticate(endpoint, operation, "")
	if err != nil {
		tracerx.Printf("ssh: %s attempted with %s.  Error: %s",
			operation, endpoint.SshUserAndHost, err.Error(),
		)
	}

	if len(res.Href) > 0 {
		endpoint.Url = res.Href
	}

	u, err := ObjectUrl(endpoint, "batch")
	if err != nil {
		return nil, err
	}

	req, err := newBatchClientRequest("POST", u.String())
	if err != nil {
		return nil, err
	}

	req.Header.Set("Accept", mediaType)
	if res.Header != nil {
		for key, value := range res.Header {
			req.Header.Set(key, value)
		}
	}

	return req, nil
}
Example No. 19
func scanObjects(dir string, ch chan<- Object) {
	dirf, err := os.Open(dir)
	if err != nil {
		return
	}
	defer dirf.Close()

	direntries, err := dirf.Readdir(0)
	if err != nil {
		tracerx.Printf("Problem with Readdir in %q: %s", dir, err)
		return
	}

	for _, dirfi := range direntries {
		if dirfi.IsDir() {
			subpath := filepath.Join(dir, dirfi.Name())
			scanObjects(subpath, ch)
		} else {
			// Make sure it's really an object file & not .DS_Store etc
			if oidRE.MatchString(dirfi.Name()) {
				ch <- Object{dirfi.Name(), dirfi.Size()}
			}
		}
	}
}
Example No. 20
func newApiRequest(method, oid string) (*http.Request, error) {
	endpoint := Config.Endpoint()
	objectOid := oid
	operation := "download"
	if method == "POST" {
		if oid != "batch" {
			objectOid = ""
			operation = "upload"
		}
	}

	res, err := sshAuthenticate(endpoint, operation, oid)
	if err != nil {
		tracerx.Printf("ssh: attempted with %s.  Error: %s",
			endpoint.SshUserAndHost, err.Error(),
		)
	}

	if len(res.Href) > 0 {
		endpoint.Url = res.Href
	}

	u, err := ObjectUrl(endpoint, objectOid)
	if err != nil {
		return nil, err
	}

	req, err := newClientRequest(method, u.String(), res.Header)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Accept", mediaType)
	return req, nil
}
Example No. 21
func sshAuthenticate(endpoint Endpoint, operation, oid string) (sshAuthResponse, error) {

	// This is only used as a fallback where the Git URL is SSH but the server doesn't support a full SSH binary protocol,
	// so we derive an HTTPS endpoint for binaries instead; authentication is still checked here via SSH.

	res := sshAuthResponse{}
	if len(endpoint.SshUserAndHost) == 0 {
		return res, nil
	}

	tracerx.Printf("ssh: %s git-lfs-authenticate %s %s %s",
		endpoint.SshUserAndHost, endpoint.SshPath, operation, oid)

	exe, args := sshGetExeAndArgs(endpoint)
	args = append(args,
		"git-lfs-authenticate",
		endpoint.SshPath,
		operation, oid)

	cmd := exec.Command(exe, args...)

	out, err := cmd.CombinedOutput()

	if err != nil {
		res.Message = string(out)
	} else {
		err = json.Unmarshal(out, &res)
	}

	return res, err
}
Example No. 22
func uploadPointers(pointers []*lfs.WrappedPointer) *lfs.TransferQueue {
	totalSize := int64(0)
	for _, p := range pointers {
		totalSize += p.Size
	}

	skipObjects := prePushCheckForMissingObjects(pointers)

	uploadQueue := lfs.NewUploadQueue(len(pointers), totalSize, pushDryRun)
	for i, pointer := range pointers {
		if pushDryRun {
			Print("push %s => %s", pointer.Oid, pointer.Name)
			continue
		}

		if _, skip := skipObjects[pointer.Oid]; skip {
			// object missing locally but on server, don't bother
			continue
		}

		tracerx.Printf("prepare upload: %s %s %d/%d", pointer.Oid, pointer.Name, i+1, len(pointers))

		u, err := lfs.NewUploadable(pointer.Oid, pointer.Name)
		if err != nil {
			if Debugging || lfs.IsFatalError(err) {
				Panic(err, err.Error())
			} else {
				Exit(err.Error())
			}
		}
		uploadQueue.Add(u)
	}

	return uploadQueue
}
Example No. 23
func uploadsBetweenRefAndRemote(ctx *uploadContext, refnames []string) {
	tracerx.Printf("Upload refs %v to remote %v", refnames, lfs.Config.CurrentRemote)

	scanOpt := lfs.NewScanRefsOptions()
	scanOpt.ScanMode = lfs.ScanLeftToRemoteMode
	scanOpt.RemoteName = lfs.Config.CurrentRemote

	if pushAll {
		scanOpt.ScanMode = lfs.ScanRefsMode
	}

	refs, err := refsByNames(refnames)
	if err != nil {
		Error(err.Error())
		Exit("Error getting local refs.")
	}

	for _, ref := range refs {
		pointers, err := lfs.ScanRefs(ref.Name, "", scanOpt)
		if err != nil {
			Panic(err, "Error scanning for Git LFS files in the %q ref", ref.Name)
		}

		upload(ctx, pointers)
	}
}
Example No. 24
func uploadsWithObjectIDs(oids []string) *lfs.TransferQueue {
	uploads := []*lfs.Uploadable{}
	totalSize := int64(0)

	for i, oid := range oids {
		if pushDryRun {
			Print("push object ID %s", oid)
			continue
		}
		tracerx.Printf("prepare upload: %s %d/%d", oid, i+1, len(oids))

		u, err := lfs.NewUploadable(oid, "")
		if err != nil {
			if Debugging || lfs.IsFatalError(err) {
				Panic(err, err.Error())
			} else {
				Exit(err.Error())
			}
		}
		uploads = append(uploads, u)
	}

	uploadQueue := lfs.NewUploadQueue(len(oids), totalSize, pushDryRun)

	for _, u := range uploads {
		uploadQueue.Add(u)
	}

	return uploadQueue
}
Example No. 25
// PointerSmudgeObject uses a Pointer and ObjectResource to download the object to the
// media directory. It does not write the file to the working directory.
func PointerSmudgeObject(ptr *Pointer, obj *ObjectResource, cb CopyCallback) error {
	mediafile, err := LocalMediaPath(obj.Oid)
	if err != nil {
		return err
	}

	stat, statErr := os.Stat(mediafile)
	if statErr == nil && stat != nil {
		fileSize := stat.Size()
		if fileSize == 0 || fileSize != obj.Size {
			tracerx.Printf("Removing %s, size %d is invalid", mediafile, fileSize)
			os.RemoveAll(mediafile)
			stat = nil
		}
	}

	if statErr != nil || stat == nil {
		err := downloadObject(ptr, obj, mediafile, cb)

		if err != nil {
			return newSmudgeError(err, obj.Oid, mediafile)
		}
	}

	return nil
}
Example No. 26
func PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download bool, cb CopyCallback) error {
	mediafile, err := LocalMediaPath(ptr.Oid)
	if err != nil {
		return err
	}

	stat, statErr := os.Stat(mediafile)
	if statErr == nil && stat != nil {
		fileSize := stat.Size()
		if fileSize == 0 || fileSize != ptr.Size {
			tracerx.Printf("Removing %s, size %d is invalid", mediafile, fileSize)
			os.RemoveAll(mediafile)
			stat = nil
		}
	}

	if statErr != nil || stat == nil {
		if download {
			err = downloadFile(writer, ptr, workingfile, mediafile, cb)
		} else {
			return newDownloadDeclinedError(nil)
		}
	} else {
		err = readLocalFile(writer, ptr, mediafile, workingfile, cb)
	}

	if err != nil {
		return newSmudgeError(err, ptr.Oid, mediafile)
	}

	return nil
}
Example No. 27
// batchApiRoutine processes the queue of transfers using the batch endpoint,
// making only one POST call for all objects. The results are then handed
// off to the transfer workers.
func (q *TransferQueue) batchApiRoutine() {
	var startProgress sync.Once

	for {
		batch := q.batcher.Next()
		if batch == nil {
			break
		}

		tracerx.Printf("tq: sending batch of size %d", len(batch))

		transfers := make([]*objectResource, 0, len(batch))
		for _, t := range batch {
			transfers = append(transfers, &objectResource{Oid: t.Oid(), Size: t.Size()})
		}

		objects, err := Batch(transfers, q.transferKind)
		if err != nil {
			if IsNotImplementedError(err) {
				configFile := filepath.Join(LocalGitDir, "config")
				git.Config.SetLocal(configFile, "lfs.batch", "false")

				go q.legacyFallback(batch)
				return
			}

			q.errorc <- err
			q.wait.Add(-len(transfers))
			continue
		}

		startProgress.Do(q.meter.Start)

		for _, o := range objects {
			if _, ok := o.Rel(q.transferKind); ok {
				// This object has an error
				if o.Error != nil {
					q.errorc <- Error(o.Error)
					q.meter.Skip(o.Size)
					q.wait.Done()
					continue
				}

				// This object needs to be transferred
				if transfer, ok := q.transferables[o.Oid]; ok {
					transfer.SetObject(o)
					q.meter.Add(transfer.Name())
					q.transferc <- transfer
				} else {
					q.meter.Skip(o.Size) // lookup failed; use the size reported by the batch response
					q.wait.Done()
				}
			} else {
				q.meter.Skip(o.Size)
				q.wait.Done()
			}
		}
	}
}
Example No. 28
// IsGitVersionAtLeast returns whether the installed git version is the one specified or higher.
// The argument is a plain version string separated by '.', e.g. "2.3.1"; minor/patch parts may be omitted.
func (c *gitConfig) IsGitVersionAtLeast(ver string) bool {
	gitver, err := c.Version()
	if err != nil {
		tracerx.Printf("Error getting git version: %v", err)
		return false
	}
	return IsVersionAtLeast(gitver, ver)
}
Example No. 29
// run starts the transfer queue, doing individual or batch transfers depending
// on the Config.BatchTransfer() value. run will transfer files sequentially or
// concurrently depending on the Config.ConcurrentTransfers() value.
func (q *TransferQueue) run() {
	go q.errorCollector()

	tracerx.Printf("tq: starting %d transfer workers", q.workers)
	for i := 0; i < q.workers; i++ {
		go q.transferWorker()
	}

	if Config.BatchTransfer() {
		tracerx.Printf("tq: running as batched queue, batch size of %d", batchSize)
		q.batcher = NewBatcher(batchSize)
		go q.batchApiRoutine()
	} else {
		tracerx.Printf("tq: running as individual queue")
		q.launchIndividualApiRoutines()
	}
}
Example No. 30
// GetAllWorkTreeHEADs returns the refs that all worktrees are using as HEADs
// This returns all worktrees plus the master working copy, and works even if
// working dir is actually in a worktree right now
// Pass in the git storage dir (parent of 'objects') to work from
func GetAllWorkTreeHEADs(storageDir string) ([]*Ref, error) {
	worktreesdir := filepath.Join(storageDir, "worktrees")
	dirf, err := os.Open(worktreesdir)
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}

	var worktrees []*Ref
	if err == nil {
		// There are some worktrees
		defer dirf.Close()
		direntries, err := dirf.Readdir(0)
		if err != nil {
			return nil, err
		}
		for _, dirfi := range direntries {
			if dirfi.IsDir() {
				// to avoid having to chdir and run git commands to identify the commit
				// just read the HEAD file & git rev-parse if necessary
				// Since the git repo is shared the same rev-parse will work from this location
				headfile := filepath.Join(worktreesdir, dirfi.Name(), "HEAD")
				ref, err := parseRefFile(headfile)
				if err != nil {
					tracerx.Printf("Error reading %v for worktree, skipping: %v", headfile, err)
					continue
				}
				worktrees = append(worktrees, ref)
			}
		}
	}

	// This has only established the separate worktrees, not the original checkout
	// If the storageDir contains a HEAD file then there is a main checkout
	// as well; this must be resolvable whether you're in the main checkout or
	// a worktree
	headfile := filepath.Join(storageDir, "HEAD")
	ref, err := parseRefFile(headfile)
	if err == nil {
		worktrees = append(worktrees, ref)
	} else if !os.IsNotExist(err) { // ok if not exists, probably bare repo
		tracerx.Printf("Error reading %v for main checkout, skipping: %v", headfile, err)
	}

	return worktrees, nil
}
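parseRefFile itself is not shown on this page. A rough sketch of what such a helper might do, assuming a HEAD file contains either a symbolic ref line ("ref: refs/heads/master") or a bare commit SHA for a detached HEAD; readHEAD is a hypothetical stand-in, not the git-lfs implementation:

package main

import (
	"fmt"
	"os"
	"strings"
)

// readHEAD returns the target of a HEAD file: either the symbolic ref name
// ("refs/heads/master") or, for a detached HEAD, the bare commit SHA.
func readHEAD(path string) (string, error) {
	by, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	line := strings.TrimSpace(string(by))
	if strings.HasPrefix(line, "ref: ") {
		return strings.TrimPrefix(line, "ref: "), nil // symbolic ref
	}
	return line, nil // detached HEAD
}

func main() {
	target, err := readHEAD(".git/HEAD")
	if err != nil {
		fmt.Println("could not read HEAD:", err)
		return
	}
	fmt.Println("HEAD points at", target)
}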