Example #1
func setCredURLFromNetrc(req *http.Request) bool {
	hostname := req.URL.Host
	var host string

	if strings.Contains(hostname, ":") {
		var err error
		host, _, err = net.SplitHostPort(hostname)
		if err != nil {
			tracerx.Printf("netrc: error parsing %q: %s", hostname, err)
			return false
		}
	} else {
		host = hostname
	}

	machine, err := config.Config.FindNetrcHost(host)
	if err != nil {
		tracerx.Printf("netrc: error finding match for %q: %s", hostname, err)
		return false
	}

	if machine == nil {
		return false
	}

	setRequestAuth(req, machine.Login, machine.Password)
	return true
}
Example #2
func shouldDeleteTempObject(s *LocalStorage, path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false
	}

	if info.IsDir() {
		return false
	}

	base := filepath.Base(path)
	parts := strings.SplitN(base, "-", 2)
	oid := parts[0]
	if len(parts) < 2 || len(oid) != 64 {
		tracerx.Printf("Removing invalid tmp object file: %s", path)
		return true
	}

	fi, err := os.Stat(s.ObjectPath(oid))
	if err == nil && !fi.IsDir() {
		tracerx.Printf("Removing existing tmp object file: %s", path)
		return true
	}

	if time.Since(info.ModTime()) > time.Hour {
		tracerx.Printf("Removing old tmp object file: %s", path)
		return true
	}

	return false
}
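A hedged usage sketch of the predicate above: the function name, the temp-directory argument, and the cleanup loop below are assumptions for illustration, not part of the original example.

// Hypothetical cleanup sketch: applies shouldDeleteTempObject to every entry
// in a temp directory and removes the ones it flags.
func cleanTempObjectsSketch(s *LocalStorage, tmpDir string) {
	entries, err := ioutil.ReadDir(tmpDir)
	if err != nil {
		return
	}
	for _, entry := range entries {
		path := filepath.Join(tmpDir, entry.Name())
		if shouldDeleteTempObject(s, path) {
			os.RemoveAll(path)
		}
	}
}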
Example #3
// Batch calls the batch API and returns object results
func Batch(objects []*ObjectResource, operation string, transferAdapters []string) (objs []*ObjectResource, transferAdapter string, e error) {
	if len(objects) == 0 {
		return nil, "", nil
	}

	o := &batchRequest{Operation: operation, Objects: objects, TransferAdapterNames: transferAdapters}
	by, err := json.Marshal(o)
	if err != nil {
		return nil, "", errutil.Error(err)
	}

	req, err := NewBatchRequest(operation)
	if err != nil {
		return nil, "", errutil.Error(err)
	}

	req.Header.Set("Content-Type", MediaType)
	req.Header.Set("Content-Length", strconv.Itoa(len(by)))
	req.ContentLength = int64(len(by))
	req.Body = tools.NewReadSeekCloserWrapper(bytes.NewReader(by))

	tracerx.Printf("api: batch %d files", len(objects))

	res, bresp, err := DoBatchRequest(req)
	if err != nil {
		if res == nil || res.StatusCode == 0 {
			return nil, "", errutil.NewRetriableError(err)
		}

		if errutil.IsAuthError(err) {
			httputil.SetAuthType(req, res)
			return Batch(objects, operation, transferAdapters)
		}

		switch res.StatusCode {
		case 404, 410:
			tracerx.Printf("api: batch not implemented: %d", res.StatusCode)
			return nil, "", errutil.NewNotImplementedError(nil)
		}

		tracerx.Printf("api error: %s", err)
		return nil, "", errutil.Error(err)
	}
	httputil.LogTransfer("lfs.batch", res)

	if res.StatusCode != 200 {
		return nil, "", errutil.Error(fmt.Errorf("Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode))
	}

	return bresp.Objects, bresp.TransferAdapterName, nil
}
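A minimal caller sketch for Batch. It assumes the object slice has been assembled elsewhere and that "basic" is an acceptable transfer adapter name; neither assumption is shown in the example above.

// Hedged caller sketch; the function name and the "basic" adapter name are assumptions.
func batchDownloadSketch(objects []*ObjectResource) ([]*ObjectResource, error) {
	objs, adapterName, err := Batch(objects, "download", []string{"basic"})
	if err != nil {
		return nil, err
	}
	tracerx.Printf("api: batch returned %d objects via adapter %q", len(objs), adapterName)
	return objs, nil
}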
Example #4
func (a *adapterBase) End() {
	tracerx.Printf("xfer: adapter %q End()", a.Name())
	close(a.jobChan)
	// wait for all transfers to complete
	a.workerWait.Wait()
	if a.outChan != nil {
		close(a.outChan)
	}
	tracerx.Printf("xfer: adapter %q stopped", a.Name())
}
Example #5
// RecentBranches returns branches with commit dates on or after the given date/time.
// It returns full Ref values for easier detection of duplicate SHAs, etc.
// since: refs with commits on or after this date will be included
// includeRemoteBranches: true to include refs on remote branches
// onlyRemote: set to non-blank to only include remote branches from a single remote
func RecentBranches(since time.Time, includeRemoteBranches bool, onlyRemote string) ([]*Ref, error) {
	cmd := subprocess.ExecCommand("git", "for-each-ref",
		`--sort=-committerdate`,
		`--format=%(refname) %(objectname) %(committerdate:iso)`,
		"refs")
	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("Failed to call git for-each-ref: %v", err)
	}
	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("Failed to start git for-each-ref: %v", err)
	}
	defer cmd.Wait()

	scanner := bufio.NewScanner(outp)

	// Output is like this:
	// refs/heads/master f03686b324b29ff480591745dbfbbfa5e5ac1bd5 2015-08-19 16:50:37 +0100
	// refs/remotes/origin/master ad3b29b773e46ad6870fdf08796c33d97190fe93 2015-08-13 16:50:37 +0100

	// Output is ordered by latest commit date first, so we can stop at the threshold
	regex := regexp.MustCompile(`^(refs/[^/]+/\S+)\s+([0-9A-Za-z]{40})\s+(\d{4}-\d{2}-\d{2}\s+\d{2}\:\d{2}\:\d{2}\s+[\+\-]\d{4})`)
	tracerx.Printf("RECENT: Getting refs >= %v", since)
	var ret []*Ref
	for scanner.Scan() {
		line := scanner.Text()
		if match := regex.FindStringSubmatch(line); match != nil {
			fullref := match[1]
			sha := match[2]
			reftype, ref := ParseRefToTypeAndName(fullref)
			if reftype == RefTypeRemoteBranch || reftype == RefTypeRemoteTag {
				if !includeRemoteBranches {
					continue
				}
				if onlyRemote != "" && !strings.HasPrefix(ref, onlyRemote+"/") {
					continue
				}
			}
			// This is a ref we might use
			// Check the date
			commitDate, err := ParseGitDate(match[3])
			if err != nil {
				return ret, err
			}
			if commitDate.Before(since) {
				// the end
				break
			}
			tracerx.Printf("RECENT: %v (%v)", ref, commitDate)
			ret = append(ret, &Ref{ref, reftype, sha})
		}
	}

	return ret, nil
}
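A usage sketch for RecentBranches: list refs with commits from the last week, including branches on the "origin" remote. The Ref field names used below (Name, Sha) are assumed from the positional &Ref{ref, reftype, sha} literal above.

// Hedged usage sketch; Ref field names are an assumption.
func traceRecentBranchesSketch() error {
	since := time.Now().AddDate(0, 0, -7)
	refs, err := RecentBranches(since, true, "origin")
	if err != nil {
		return err
	}
	for _, ref := range refs {
		tracerx.Printf("recent ref: %s (%s)", ref.Name, ref.Sha)
	}
	return nil
}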
Example #6
func (a *customAdapter) WorkerEnding(workerNum int, ctx interface{}) {
	customCtx, ok := ctx.(*customAdapterWorkerContext)
	if !ok {
		tracerx.Printf("Context object for custom transfer %q was of the wrong type", a.name)
		return
	}

	err := a.shutdownWorkerProcess(customCtx)
	if err != nil {
		tracerx.Printf("xfer: error finishing up custom transfer process %q worker %d, aborting: %v", a.path, customCtx.workerNum, err)
		a.abortWorkerProcess(customCtx)
	}
}
Example #7
// run starts the transfer queue, doing individual or batch transfers depending
// on the Config.BatchTransfer() value. run will transfer files sequentially or
// concurrently depending on the Config.ConcurrentTransfers() value.
func (q *TransferQueue) run() {
	go q.errorCollector()
	go q.retryCollector()

	if config.Config.BatchTransfer() {
		tracerx.Printf("tq: running as batched queue, batch size of %d", batchSize)
		q.batcher = NewBatcher(batchSize)
		go q.batchApiRoutine()
	} else {
		tracerx.Printf("tq: running as individual queue")
		q.launchIndividualApiRoutines()
	}
}
Example #8
func (a *adapterBase) Begin(maxConcurrency int, cb TransferProgressCallback, completion chan TransferResult) error {
	a.cb = cb
	a.outChan = completion
	a.jobChan = make(chan *Transfer, 100)

	tracerx.Printf("xfer: adapter %q Begin() with %d workers", a.Name(), maxConcurrency)

	a.workerWait.Add(maxConcurrency)
	a.authWait.Add(1)
	for i := 0; i < maxConcurrency; i++ {
		go a.worker(i)
	}
	tracerx.Printf("xfer: adapter %q started", a.Name())
	return nil
}
Example #9
func uploadsBetweenRefAndRemote(ctx *uploadContext, refnames []string) {
	tracerx.Printf("Upload refs %v to remote %v", refnames, cfg.CurrentRemote)

	scanOpt := lfs.NewScanRefsOptions()
	scanOpt.ScanMode = lfs.ScanLeftToRemoteMode
	scanOpt.RemoteName = cfg.CurrentRemote

	if pushAll {
		scanOpt.ScanMode = lfs.ScanRefsMode
	}

	refs, err := refsByNames(refnames)
	if err != nil {
		Error(err.Error())
		Exit("Error getting local refs.")
	}

	for _, ref := range refs {
		pointers, err := lfs.ScanRefs(ref.Name, "", scanOpt)
		if err != nil {
			Panic(err, "Error scanning for Git LFS files in the %q ref", ref.Name)
		}

		upload(ctx, pointers)
	}
}
Example #10
// SimpleExec is a small wrapper around os/exec.Command.
func SimpleExec(name string, args ...string) (string, error) {
	tracerx.Printf("run_command: '%s' %s", name, strings.Join(args, " "))
	cmd := ExecCommand(name, args...)

	//start copied from Go 1.6 exec.go
	captureErr := cmd.Stderr == nil
	if captureErr {
		cmd.Stderr = &prefixSuffixSaver{N: 32 << 10}
	}
	//end copied from Go 1.6 exec.go

	output, err := cmd.Output()

	if exitError, ok := err.(*exec.ExitError); ok {
		// TODO for min Go 1.6+, replace with ExitError.Stderr
		errorOutput := strings.TrimSpace(string(cmd.Stderr.(*prefixSuffixSaver).Bytes()))
		if errorOutput == "" {
			// some commands might write nothing to stderr but something to stdout in error-conditions, in which case, we'll use that
			// in the error string
			errorOutput = strings.TrimSpace(string(output))
		}
		formattedErr := fmt.Errorf("Error running %s %s: '%s' '%s'", name, args, errorOutput, strings.TrimSpace(exitError.Error()))

		// return "" as output in error case, for callers that don't care about errors but rely on "" returned, in-case stdout != ""
		return "", formattedErr
	}

	return strings.Trim(string(output), " \n"), err
}
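A usage sketch for SimpleExec; on failure the returned error already embeds the captured stderr (or stdout) text, so callers can log it directly. The wrapper function below is illustrative only.

// Hedged usage sketch.
func gitVersionSketch() (string, error) {
	out, err := SimpleExec("git", "version")
	if err != nil {
		tracerx.Printf("run_command failed: %v", err)
		return "", err
	}
	return out, nil
}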
Example #11
func NewRequest(cfg *config.Configuration, method, oid string) (*http.Request, error) {
	objectOid := oid
	operation := "download"
	if method == "POST" {
		if oid != "batch" {
			objectOid = ""
			operation = "upload"
		}
	}

	res, endpoint, err := auth.SshAuthenticate(cfg, operation, oid)
	if err != nil {
		tracerx.Printf("ssh: attempted with %s.  Error: %s",
			endpoint.SshUserAndHost, err.Error(),
		)
		return nil, err
	}

	if len(res.Href) > 0 {
		endpoint.Url = res.Href
	}

	u, err := ObjectUrl(endpoint, objectOid)
	if err != nil {
		return nil, err
	}

	req, err := httputil.NewHttpRequest(method, u.String(), res.Header)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Accept", MediaType)
	return req, nil
}
Example #12
// LocalRefs returns all of the local branches and tags for the current
// repository. Other refs (HEAD, refs/stash, remote refs, git notes) are ignored.
func LocalRefs() ([]*Ref, error) {
	cmd := subprocess.ExecCommand("git", "show-ref", "--heads", "--tags")

	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("Failed to call git show-ref: %v", err)
	}

	var refs []*Ref

	if err := cmd.Start(); err != nil {
		return refs, err
	}

	scanner := bufio.NewScanner(outp)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		parts := strings.SplitN(line, " ", 2)
		if len(parts) != 2 || len(parts[0]) != 40 || len(parts[1]) < 1 {
			tracerx.Printf("Invalid line from git show-ref: %q", line)
			continue
		}

		rtype, name := ParseRefToTypeAndName(parts[1])
		if rtype != RefTypeLocalBranch && rtype != RefTypeLocalTag {
			continue
		}

		refs = append(refs, &Ref{name, rtype, parts[0]})
	}

	return refs, cmd.Wait()
}
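A usage sketch for LocalRefs; the Ref field names below (Name, Sha) are assumed from the positional &Ref{name, rtype, parts[0]} literal in the loop above.

// Hedged usage sketch.
func traceLocalRefsSketch() error {
	refs, err := LocalRefs()
	if err != nil {
		return err
	}
	for _, ref := range refs {
		tracerx.Printf("local ref %s -> %s", ref.Name, ref.Sha)
	}
	return nil
}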
Example #13
func scanObjects(dir string, ch chan<- Object) {
	dirf, err := os.Open(dir)
	if err != nil {
		return
	}
	defer dirf.Close()

	direntries, err := dirf.Readdir(0)
	if err != nil {
		tracerx.Printf("Problem with Readdir in %q: %s", dir, err)
		return
	}

	for _, dirfi := range direntries {
		if dirfi.IsDir() {
			subpath := filepath.Join(dir, dirfi.Name())
			scanObjects(subpath, ch)
		} else {
			// Make sure it's really an object file & not .DS_Store etc
			if oidRE.MatchString(dirfi.Name()) {
				ch <- Object{dirfi.Name(), dirfi.Size()}
			}
		}
	}
}
Example #14
func PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download bool, manifest *transfer.Manifest, cb progress.CopyCallback) error {
	mediafile, err := LocalMediaPath(ptr.Oid)
	if err != nil {
		return err
	}

	LinkOrCopyFromReference(ptr.Oid, ptr.Size)

	stat, statErr := os.Stat(mediafile)
	if statErr == nil && stat != nil {
		fileSize := stat.Size()
		if fileSize == 0 || fileSize != ptr.Size {
			tracerx.Printf("Removing %s, size %d is invalid", mediafile, fileSize)
			os.RemoveAll(mediafile)
			stat = nil
		}
	}

	if statErr != nil || stat == nil {
		if download {
			err = downloadFile(writer, ptr, workingfile, mediafile, manifest, cb)
		} else {
			return errors.NewDownloadDeclinedError(statErr, "smudge")
		}
	} else {
		err = readLocalFile(writer, ptr, mediafile, workingfile, cb)
	}

	if err != nil {
		return errors.NewSmudgeError(err, ptr.Oid, mediafile)
	}

	return nil
}
Example #15
func NewBatchRequest(operation string) (*http.Request, error) {
	endpoint := config.Config.Endpoint(operation)

	res, err := auth.SshAuthenticate(endpoint, operation, "")
	if err != nil {
		tracerx.Printf("ssh: %s attempted with %s.  Error: %s",
			operation, endpoint.SshUserAndHost, err.Error(),
		)
		return nil, err
	}

	if len(res.Href) > 0 {
		endpoint.Url = res.Href
	}

	u, err := ObjectUrl(endpoint, "batch")
	if err != nil {
		return nil, err
	}

	req, err := httputil.NewHttpRequest("POST", u.String(), nil)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Accept", MediaType)
	if res.Header != nil {
		for key, value := range res.Header {
			req.Header.Set(key, value)
		}
	}

	return req, nil
}
Example #16
// startCommand starts up a command and creates a stdin pipe and buffered
// stdout & stderr pipes, wrapped in a wrappedCmd. The stdout and stderr
// buffers are stdoutBufSize bytes each.
func startCommand(command string, args ...string) (*wrappedCmd, error) {
	cmd := exec.Command(command, args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}

	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, err
	}

	tracerx.Printf("run_command: %s %s", command, strings.Join(args, " "))
	if err := cmd.Start(); err != nil {
		return nil, err
	}

	return &wrappedCmd{
		stdin,
		bufio.NewReaderSize(stdout, stdoutBufSize),
		bufio.NewReaderSize(stderr, stdoutBufSize),
		cmd,
	}, nil
}
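A usage sketch for startCommand. The command line here ("git cat-file --batch") is illustrative only, and the wrappedCmd fields are not touched because their names are not shown in the snippet above.

// Hedged usage sketch: start a long-lived child process to talk to over pipes.
func startCatFileSketch() (*wrappedCmd, error) {
	w, err := startCommand("git", "cat-file", "--batch")
	if err != nil {
		return nil, err
	}
	// Requests would be written to the stdin pipe and responses read
	// line-by-line from the buffered stdout reader.
	return w, nil
}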
Example #17
// handleTransferResult is responsible for dealing with the result of a
// successful or failed transfer.
//
// If there was an error associated with the given transfer, "res.Error", and
// it is retriable (see: `q.canRetryObject`), it will be placed in the next
// batch and be retried. If that error is not retriable for any reason, the
// transfer will be marked as having failed, and the error will be reported.
//
// If the transfer was successful, the watchers of this transfer queue will be
// notified, and the transfer will be marked as having been completed.
func (q *TransferQueue) handleTransferResult(res transfer.TransferResult) {
	oid := res.Transfer.Object.Oid

	if res.Error != nil {
		if q.canRetryObject(oid, res.Error) {
			tracerx.Printf("tq: retrying object %s", oid)
			q.trMutex.Lock()
			t, ok := q.transferables[oid]
			q.trMutex.Unlock()
			if ok {
				q.retry(t)
			} else {
				q.errorc <- res.Error
			}
		} else {
			q.errorc <- res.Error
			q.wait.Done()
		}
	} else {
		for _, c := range q.watchers {
			c <- oid
		}

		q.meter.FinishTransfer(res.Transfer.Name)
		q.wait.Done()
	}
}
Example #18
func (c *CountingReadCloser) Read(b []byte) (int, error) {
	n, err := c.ReadCloser.Read(b)
	if err != nil && err != io.EOF {
		return n, err
	}

	c.Count += n

	if c.isTraceableType {
		chunk := string(b[0:n])
		if c.useGitTrace {
			tracerx.Printf("HTTP: %s", chunk)
		}

		if config.Config.IsTracingHttp {
			fmt.Fprint(os.Stderr, chunk)
		}
	}

	if err == io.EOF && config.Config.IsLoggingStats {
		// This httpTransfer is done, we're checking it this way so we can also
		// catch httpTransfers where the caller forgets to Close() the Body.
		if c.response != nil {
			httpTransfersLock.Lock()
			if httpTransfer, ok := httpTransfers[c.response]; ok {
				httpTransfer.responseStats.BodySize = c.Count
				httpTransfer.responseStats.Stop = time.Now()
			}
			httpTransfersLock.Unlock()
		}
	}
	return n, err
}
Example #19
func (q *TransferQueue) ensureAdapterBegun() error {
	q.adapterInitMutex.Lock()
	defer q.adapterInitMutex.Unlock()

	if q.adapterInProgress {
		return nil
	}

	adapterResultChan := make(chan transfer.TransferResult, 20)

	// Progress callback - receives byte updates
	cb := func(name string, total, read int64, current int) error {
		q.meter.TransferBytes(q.transferKind(), name, read, total, current)
		return nil
	}

	tracerx.Printf("tq: starting transfer adapter %q", q.adapter.Name())
	err := q.adapter.Begin(config.Config.ConcurrentTransfers(), cb, adapterResultChan)
	if err != nil {
		return err
	}
	q.adapterInProgress = true

	// Collector for completed transfers
	// q.wait.Done() in handleTransferResult is enough to know when this is complete for all transfers
	go func() {
		for res := range adapterResultChan {
			q.handleTransferResult(res)
		}
	}()

	return nil
}
Example #20
func fillCredentials(req *http.Request, u *url.URL) (Creds, error) {
	path := strings.TrimPrefix(u.Path, "/")
	input := Creds{"protocol": u.Scheme, "host": u.Host, "path": path}
	if u.User != nil && u.User.Username() != "" {
		input["username"] = u.User.Username()
	}

	creds, err := execCreds(input, "fill")
	if creds == nil || len(creds) < 1 {
		errmsg := fmt.Sprintf("Git credentials for %s not found", u)
		if err != nil {
			errmsg = errmsg + ":\n" + err.Error()
		} else {
			errmsg = errmsg + "."
		}
		err = errors.New(errmsg)
	}

	if err != nil {
		return nil, err
	}

	tracerx.Printf("Filled credentials for %s", u)
	setRequestAuth(req, creds["username"], creds["password"])

	return creds, err
}
Example #21
// retryCollector collects objects to retry, increments the number of times that
// they have been retried, and then enqueues them in the next batch, or legacy
// API channel. If the transfer queue is using a batcher, the batch will be
// flushed immediately.
//
// retryCollector runs in its own goroutine.
func (q *TransferQueue) retryCollector() {
	for t := range q.retriesc {
		q.rmu.Lock()
		q.retryCount[t.Oid()]++
		count := q.retryCount[t.Oid()]
		q.rmu.Unlock()

		tracerx.Printf("tq: enqueue retry #%d for %q (size: %d)", count, t.Oid(), t.Size())

		q.Add(t)
		if q.batcher != nil {
			tracerx.Printf("tq: flushing batch in response to retry #%d for %q", count, t.Oid(), t.Size())
			q.batcher.Flush()
		}
	}
	q.retrywait.Done()
}
Example #22
// IsGitVersionAtLeast returns whether the git version is the one specified or higher.
// The argument is a plain version string separated by '.', e.g. "2.3.1", but may omit the minor/patch parts.
func (c *gitConfig) IsGitVersionAtLeast(ver string) bool {
	gitver, err := c.Version()
	if err != nil {
		tracerx.Printf("Error getting git version: %v", err)
		return false
	}
	return IsVersionAtLeast(gitver, ver)
}
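A usage sketch for IsGitVersionAtLeast, assuming a package-level value of the same *gitConfig type the method is defined on; the Config variable name is an assumption for illustration.

// Hedged usage sketch: gate a feature on a minimum git version.
func supportsFeatureSketch() bool {
	return Config.IsGitVersionAtLeast("2.3.0")
}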
Example #23
// GetAllWorkTreeHEADs returns the refs that all worktrees are using as HEADs.
// This returns all worktrees plus the master working copy, and works even if
// the working directory is currently inside a worktree.
// Pass in the git storage dir (the parent of 'objects') to work from.
func GetAllWorkTreeHEADs(storageDir string) ([]*Ref, error) {
	worktreesdir := filepath.Join(storageDir, "worktrees")
	dirf, err := os.Open(worktreesdir)
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}

	var worktrees []*Ref
	if err == nil {
		// There are some worktrees
		defer dirf.Close()
		direntries, err := dirf.Readdir(0)
		if err != nil {
			return nil, err
		}
		for _, dirfi := range direntries {
			if dirfi.IsDir() {
				// to avoid having to chdir and run git commands to identify the commit
				// just read the HEAD file & git rev-parse if necessary
				// Since the git repo is shared the same rev-parse will work from this location
				headfile := filepath.Join(worktreesdir, dirfi.Name(), "HEAD")
				ref, err := parseRefFile(headfile)
				if err != nil {
					tracerx.Printf("Error reading %v for worktree, skipping: %v", headfile, err)
					continue
				}
				worktrees = append(worktrees, ref)
			}
		}
	}

	// This has only established the separate worktrees, not the original checkout.
	// If the storageDir contains a HEAD file then there is a main checkout
	// as well; this must be resolvable whether you're in the main checkout or
	// a worktree.
	headfile := filepath.Join(storageDir, "HEAD")
	ref, err := parseRefFile(headfile)
	if err == nil {
		worktrees = append(worktrees, ref)
	} else if !os.IsNotExist(err) { // ok if not exists, probably bare repo
		tracerx.Printf("Error reading %v for main checkout, skipping: %v", headfile, err)
	}

	return worktrees, nil
}
Example #24
func appendRootCAsFromKeychain(pool *x509.CertPool, name, keychain string) *x509.CertPool {
	cmd := subprocess.ExecCommand("/usr/bin/security", "find-certificate", "-a", "-p", "-c", name, keychain)
	data, err := cmd.Output()
	if err != nil {
		tracerx.Printf("Error reading keychain %q: %v", keychain, err)
		return pool
	}
	return appendCertsFromPEMData(pool, data)
}
Example #25
func (t *traceWriter) Flush() {
	var err error
	for err == nil {
		var s string
		s, err = t.buf.ReadString('\n')
		if len(s) > 0 {
			tracerx.Printf("xfer[%v]: %v", t.processName, strings.TrimSpace(s))
		}
	}
}
Example #26
func (a *customAdapter) readResponse(ctx *customAdapterWorkerContext) (*customAdapterResponseMessage, error) {
	line, err := ctx.bufferedOut.ReadString('\n')
	if err != nil {
		return nil, err
	}
	tracerx.Printf("xfer: Custom adapter worker %d received response: %v", ctx.workerNum, strings.TrimSpace(line))
	resp := &customAdapterResponseMessage{}
	err = json.Unmarshal([]byte(line), resp)
	return resp, err
}
Example #27
// worker is the per-worker transfer loop; many of these run per adapter.
func (a *adapterBase) worker(workerNum int, ctx interface{}) {
	tracerx.Printf("xfer: adapter %q worker %d starting", a.Name(), workerNum)
	waitForAuth := workerNum > 0
	signalAuthOnResponse := workerNum == 0

	// The first worker is the only one allowed to start immediately; the rest
	// wait for a successful response from it, so that only one login prompt is
	// presented if authentication is needed. This wait is deliberately outside
	// the jobChan loop, so worker 0 is guaranteed to process the first item.
	if waitForAuth {
		tracerx.Printf("xfer: adapter %q worker %d waiting for Auth", a.Name(), workerNum)
		a.authWait.Wait()
		tracerx.Printf("xfer: adapter %q worker %d auth signal received", a.Name(), workerNum)
	}

	for t := range a.jobChan {
		var authCallback func()
		if signalAuthOnResponse {
			authCallback = func() {
				a.authWait.Done()
				signalAuthOnResponse = false
			}
		}
		tracerx.Printf("xfer: adapter %q worker %d processing job for %q", a.Name(), workerNum, t.Object.Oid)

		// Actual transfer happens here
		var err error
		if t.Object.IsExpired(time.Now().Add(objectExpirationGracePeriod)) {
			tracerx.Printf("xfer: adapter %q worker %d found job for %q expired, retrying...", a.Name(), workerNum, t.Object.Oid)
			err = errors.NewRetriableError(errors.Errorf("lfs/transfer: object %q has expired", t.Object.Oid))
		} else if t.Object.Size < 0 {
			tracerx.Printf("xfer: adapter %q worker %d found invalid size for %q (got: %d), retrying...", a.Name(), workerNum, t.Object.Oid, t.Object.Size)
			err = fmt.Errorf("Git LFS: object %q has invalid size (got: %d)", t.Object.Oid, t.Object.Size)
		} else {
			err = a.transferImpl.DoTransfer(ctx, t, a.cb, authCallback)
		}

		if a.outChan != nil {
			res := TransferResult{t, err}
			a.outChan <- res
		}

		tracerx.Printf("xfer: adapter %q worker %d finished job for %q", a.Name(), workerNum, t.Object.Oid)
	}
	// This will only happen if no jobs were submitted; just wake up all workers to finish
	if signalAuthOnResponse {
		a.authWait.Done()
	}
	tracerx.Printf("xfer: adapter %q worker %d stopping", a.Name(), workerNum)
	a.transferImpl.WorkerEnding(workerNum, ctx)
	a.workerWait.Done()
}
Example #28
func (a *customAdapter) WorkerStarting(workerNum int) (interface{}, error) {
	// Start a process per worker
	// If concurrent = false we have already dialled back workers to 1
	tracerx.Printf("xfer: starting up custom transfer process %q for worker %d", a.name, workerNum)
	cmd := subprocess.ExecCommand(a.path, a.args)
	outp, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("Failed to get stdout for custom transfer command %q remote: %v", a.path, err)
	}
	inp, err := cmd.StdinPipe()
	if err != nil {
		return nil, fmt.Errorf("Failed to get stdin for custom transfer command %q remote: %v", a.path, err)
	}
	// Capture stderr to trace
	tracer := &traceWriter{}
	tracer.processName = filepath.Base(a.path)
	cmd.Stderr = tracer
	err = cmd.Start()
	if err != nil {
		return nil, fmt.Errorf("Failed to start custom transfer command %q remote: %v", a.path, err)
	}
	// Set up buffered reader/writer since we operate on lines
	ctx := &customAdapterWorkerContext{workerNum, cmd, outp, bufio.NewReader(outp), inp, tracer}

	// send initiate message
	initReq := NewCustomAdapterInitRequest(a.getOperationName(), a.concurrent, a.originalConcurrency)
	resp, err := a.exchangeMessage(ctx, initReq)
	if err != nil {
		a.abortWorkerProcess(ctx)
		return nil, err
	}
	if resp.Error != nil {
		a.abortWorkerProcess(ctx)
		return nil, fmt.Errorf("Error initializing custom adapter %q worker %d: %v", a.name, workerNum, resp.Error)
	}

	tracerx.Printf("xfer: started custom adapter process %q for worker %d OK", a.path, workerNum)

	// Save this process context and use in future callbacks
	return ctx, nil
}
Example #29
func appendCertsFromFilesInDir(pool *x509.CertPool, dir string) *x509.CertPool {
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		tracerx.Printf("Error reading cert dir %q: %v", dir, err)
		return pool
	}
	for _, f := range files {
		pool = appendCertsFromFile(pool, filepath.Join(dir, f.Name()))
	}
	return pool
}
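appendCertsFromFile itself is not shown in this collection; below is a minimal sketch of what such a helper might look like, using the standard library's x509.CertPool.AppendCertsFromPEM. The helper name is hypothetical and this is not the project's actual implementation.

// Hypothetical helper sketch.
func appendCertsFromFileSketch(pool *x509.CertPool, path string) *x509.CertPool {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		tracerx.Printf("Error reading cert file %q: %v", path, err)
		return pool
	}
	if pool == nil {
		pool = x509.NewCertPool()
	}
	// AppendCertsFromPEM ignores anything in the file that is not a PEM certificate block.
	pool.AppendCertsFromPEM(data)
	return pool
}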
Example #30
// sendMessage sends a JSON message to the custom adapter process
func (a *customAdapter) sendMessage(ctx *customAdapterWorkerContext, req interface{}) error {
	b, err := json.Marshal(req)
	if err != nil {
		return err
	}
	tracerx.Printf("xfer: Custom adapter worker %d sending message: %v", ctx.workerNum, string(b))
	// Line oriented JSON
	b = append(b, '\n')
	_, err = ctx.stdin.Write(b)
	return err
}
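Example #28 calls an exchangeMessage helper that is not shown in this collection. Given the line-oriented protocol implemented by sendMessage (above) and readResponse (Example #26), it could plausibly be a simple send-then-read pair; the method below is a sketch built only from those two methods, not the project's actual code.

// Hedged sketch of a send-then-read exchange.
func (a *customAdapter) exchangeMessageSketch(ctx *customAdapterWorkerContext, req interface{}) (*customAdapterResponseMessage, error) {
	if err := a.sendMessage(ctx, req); err != nil {
		return nil, err
	}
	return a.readResponse(ctx)
}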