Example No. 1
// worker function, many of these run per adapter
func (a *adapterBase) worker(workerNum int, ctx interface{}) {

	tracerx.Printf("xfer: adapter %q worker %d starting", a.Name(), workerNum)
	waitForAuth := workerNum > 0
	signalAuthOnResponse := workerNum == 0

	// First worker is the only one allowed to start immediately
	// The rest wait until successful response from 1st worker to
	// make sure only 1 login prompt is presented if necessary
	// Deliberately outside jobChan processing so we know worker 0 will process 1st item
	if waitForAuth {
		tracerx.Printf("xfer: adapter %q worker %d waiting for Auth", a.Name(), workerNum)
		a.authWait.Wait()
		tracerx.Printf("xfer: adapter %q worker %d auth signal received", a.Name(), workerNum)
	}

	for t := range a.jobChan {
		var authCallback func()
		if signalAuthOnResponse {
			authCallback = func() {
				a.authWait.Done()
				signalAuthOnResponse = false
			}
		}
		tracerx.Printf("xfer: adapter %q worker %d processing job for %q", a.Name(), workerNum, t.Object.Oid)

		// Actual transfer happens here
		var err error
		if t.Object.IsExpired(time.Now().Add(objectExpirationGracePeriod)) {
			tracerx.Printf("xfer: adapter %q worker %d found job for %q expired, retrying...", a.Name(), workerNum, t.Object.Oid)
			err = errors.NewRetriableError(errors.Errorf("lfs/transfer: object %q has expired", t.Object.Oid))
		} else if t.Object.Size < 0 {
			tracerx.Printf("xfer: adapter %q worker %d found invalid size for %q (got: %d), retrying...", a.Name(), workerNum, t.Object.Oid, t.Object.Size)
			err = fmt.Errorf("Git LFS: object %q has invalid size (got: %d)", t.Object.Oid, t.Object.Size)
		} else {
			err = a.transferImpl.DoTransfer(ctx, t, a.cb, authCallback)
		}

		if a.outChan != nil {
			res := TransferResult{t, err}
			a.outChan <- res
		}

		tracerx.Printf("xfer: adapter %q worker %d finished job for %q", a.Name(), workerNum, t.Object.Oid)
	}
	// This will only happen if no jobs were submitted; just wake up all workers to finish
	if signalAuthOnResponse {
		a.authWait.Done()
	}
	tracerx.Printf("xfer: adapter %q worker %d stopping", a.Name(), workerNum)
	a.transferImpl.WorkerEnding(workerNum, ctx)
	a.workerWait.Done()
}
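The worker above is one of several goroutines an adapter runs. A minimal sketch of how such workers might be launched follows; it is not taken from the adapter source, and it assumes authWait and workerWait are sync.WaitGroups and that jobChan carries the *Transfer jobs, as the loop above implies (startWorkers and numWorkers are illustrative names only).

func startWorkers(a *adapterBase, numWorkers int) {
	// Worker 0 is the only one allowed to start immediately; the others
	// block on authWait until it reports a successful (authenticated)
	// response, so at most one login prompt is shown.
	a.authWait.Add(1)
	for i := 0; i < numWorkers; i++ {
		a.workerWait.Add(1)
		go a.worker(i, nil) // per-worker ctx left nil in this sketch
	}
}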
Example No. 2
func (r *RetriableReader) Read(b []byte) (int, error) {
	n, err := r.reader.Read(b)

	// EOF is a successful response as it is used to signal a graceful end
	// of input, cf. https://git.io/v6riQ
	//
	// Otherwise, if the error is non-nil and already retriable (in the
	// case that the underlying reader `r.reader` is itself a
	// `*RetriableReader`), return the error wholesale:
	if err == nil || err == io.EOF || errors.IsRetriableError(err) {
		return n, err
	}

	return n, errors.NewRetriableError(err)
}
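In practice this reader is layered over a network stream; Example No. 8 below wraps an HTTP response body the same way. The sketch below shows the intended caller-side effect (the copyBody helper is hypothetical, for illustration only, and assumes the io, net/http, and git-lfs tools/errors packages):

func copyBody(dst io.Writer, res *http.Response) error {
	// Wrap the body so any non-EOF read error surfaces as retriable.
	r := tools.NewRetriableReader(res.Body)
	if _, err := io.Copy(dst, r); err != nil {
		// Callers can check errors.IsRetriableError(err) to decide on a retry.
		return err
	}
	return nil
}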
Example No. 3
// TODO LEGACY API: remove when legacy API removed
func UploadCheck(cfg *config.Configuration, oid string, size int64) (*ObjectResource, error) {
	reqObj := &ObjectResource{
		Oid:  oid,
		Size: size,
	}

	by, err := json.Marshal(reqObj)
	if err != nil {
		return nil, errors.Wrap(err, "upload check")
	}

	req, err := NewRequest(cfg, "POST", oid)
	if err != nil {
		return nil, errors.Wrap(err, "upload check")
	}

	req.Header.Set("Content-Type", MediaType)
	req.Header.Set("Content-Length", strconv.Itoa(len(by)))
	req.ContentLength = int64(len(by))
	req.Body = tools.NewReadSeekCloserWrapper(bytes.NewReader(by))

	tracerx.Printf("api: uploading (%s)", oid)
	res, obj, err := DoLegacyRequest(cfg, req)

	if err != nil {
		if errors.IsAuthError(err) {
			httputil.SetAuthType(cfg, req, res)
			return UploadCheck(cfg, oid, size)
		}

		return nil, errors.NewRetriableError(err)
	}
	httputil.LogTransfer(cfg, "lfs.upload", res)

	if res.StatusCode == 200 {
		return nil, nil
	}

	if obj.Oid == "" {
		obj.Oid = oid
	}
	if obj.Size == 0 {
		obj.Size = reqObj.Size
	}

	return obj, nil
}
Example No. 4
func TestRetriableReaderDoesNotRewrap(t *testing.T) {
	// expected is already "retriable", as would be the case if the
	// underlying reader was a *RetriableReader itself.
	expected := errors.NewRetriableError(errors.New("example error"))

	r := tools.NewRetriableReader(&ErrReader{expected})

	var buf [1]byte
	n, err := r.Read(buf[:])

	assert.Equal(t, 0, n)
	// errors.NewRetriableError wraps the given error with the prefix
	// message "LFS", so these two errors should be equal, indicating that
	// the RetriableReader did not re-wrap the error it received.
	assert.EqualError(t, err, expected.Error())
	assert.True(t, errors.IsRetriableError(err))
}
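ErrReader is not shown in this listing; a minimal sketch of such a test stub, assuming it simply returns a stored error on every Read, could look like:

type ErrReader struct {
	err error
}

func (r *ErrReader) Read(p []byte) (int, error) {
	// Always fail with the configured error and report zero bytes read.
	return 0, r.err
}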
Example No. 5
func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb TransferProgressCallback, authOkFunc func()) error {
	rel, ok := t.Object.Rel("upload")
	if !ok {
		return fmt.Errorf("No upload action for this object.")
	}

	// Note not supporting the Creation extension since the batch API generates URLs
	// Also not supporting Concatenation to support parallel uploads of chunks; forward only

	// 1. Send HEAD request to determine upload start point
	//    Request must include Tus-Resumable header (version)
	tracerx.Printf("xfer: sending tus.io HEAD request for %q", t.Object.Oid)
	req, err := httputil.NewHttpRequest("HEAD", rel.Href, rel.Header)
	if err != nil {
		return err
	}
	req.Header.Set("Tus-Resumable", TusVersion)
	res, err := httputil.DoHttpRequest(config.Config, req, false)
	if err != nil {
		return errors.NewRetriableError(err)
	}

	//    Response will contain Upload-Offset if supported
	offHdr := res.Header.Get("Upload-Offset")
	if len(offHdr) == 0 {
		return fmt.Errorf("Missing Upload-Offset header from tus.io HEAD response at %q, contact server admin", rel.Href)
	}
	offset, err := strconv.ParseInt(offHdr, 10, 64)
	if err != nil || offset < 0 {
		return fmt.Errorf("Invalid Upload-Offset value %q in response from tus.io HEAD at %q, contact server admin", offHdr, rel.Href)
	}
	// Upload-Offset=size means already completed (skip)
	// Batch API will probably already detect this, but handle just in case
	if offset >= t.Object.Size {
		tracerx.Printf("xfer: tus.io HEAD offset %d indicates %q is already fully uploaded, skipping", offset, t.Object.Oid)
		advanceCallbackProgress(cb, t, t.Object.Size)
		return nil
	}

	// Open file for uploading
	f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644)
	if err != nil {
		return errors.Wrap(err, "tus upload")
	}
	defer f.Close()

	// Upload-Offset=0 means start from scratch, but still send PATCH
	if offset == 0 {
		tracerx.Printf("xfer: tus.io uploading %q from start", t.Object.Oid)
	} else {
		tracerx.Printf("xfer: tus.io resuming upload %q from %d", t.Object.Oid, offset)
		advanceCallbackProgress(cb, t, offset)
		_, err := f.Seek(offset, os.SEEK_CUR)
		if err != nil {
			return errors.Wrap(err, "tus upload")
		}
	}

	// 2. Send PATCH request with byte start point (even if 0) in Upload-Offset
	//    Response status must be 204
	//    Response Upload-Offset must be request Upload-Offset plus sent bytes
	//    Response may include an Upload-Expires header; if so, check it has not passed

	tracerx.Printf("xfer: sending tus.io PATCH request for %q", t.Object.Oid)
	req, err = httputil.NewHttpRequest("PATCH", rel.Href, rel.Header)
	if err != nil {
		return err
	}
	req.Header.Set("Tus-Resumable", TusVersion)
	req.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10))
	req.Header.Set("Content-Type", "application/offset+octet-stream")
	req.Header.Set("Content-Length", strconv.FormatInt(t.Object.Size-offset, 10))
	req.ContentLength = t.Object.Size - offset

	// Ensure progress callbacks made while uploading
	// Wrap callback to give name context
	ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		if cb != nil {
			return cb(t.Name, totalSize, readSoFar, readSinceLast)
		}
		return nil
	}
	var reader io.Reader
	reader = &progress.CallbackReader{
		C:         ccb,
		TotalSize: t.Object.Size,
		Reader:    f,
	}

	// Signal auth was ok on first read; this frees up other workers to start
	if authOkFunc != nil {
		reader = newStartCallbackReader(reader, func(*startCallbackReader) {
			authOkFunc()
		})
	}

	req.Body = ioutil.NopCloser(reader)

	res, err = httputil.DoHttpRequest(config.Config, req, false)
	if err != nil {
		return errors.NewRetriableError(err)
	}
	httputil.LogTransfer(config.Config, "lfs.data.upload", res)

	// A status code of 403 likely means that an authentication token for the
	// upload has expired. This can be safely retried.
	if res.StatusCode == 403 {
		err = errors.New("http: received status 403")
		return errors.NewRetriableError(err)
	}

	if res.StatusCode > 299 {
		return errors.Wrapf(nil, "Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode)
	}

	io.Copy(ioutil.Discard, res.Body)
	res.Body.Close()

	return api.VerifyUpload(config.Config, t.Object)
}
Example No. 6
func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb TransferProgressCallback, authOkFunc func()) error {
	rel, ok := t.Object.Rel("upload")
	if !ok {
		return fmt.Errorf("No upload action for this object.")
	}

	req, err := httputil.NewHttpRequest("PUT", rel.Href, rel.Header)
	if err != nil {
		return err
	}

	if len(req.Header.Get("Content-Type")) == 0 {
		req.Header.Set("Content-Type", "application/octet-stream")
	}

	if req.Header.Get("Transfer-Encoding") == "chunked" {
		req.TransferEncoding = []string{"chunked"}
	} else {
		req.Header.Set("Content-Length", strconv.FormatInt(t.Object.Size, 10))
	}

	req.ContentLength = t.Object.Size

	f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644)
	if err != nil {
		return errors.Wrap(err, "basic upload")
	}
	defer f.Close()

	// Ensure progress callbacks made while uploading
	// Wrap callback to give name context
	ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		if cb != nil {
			return cb(t.Name, totalSize, readSoFar, readSinceLast)
		}
		return nil
	}
	var reader io.Reader
	reader = &progress.CallbackReader{
		C:         ccb,
		TotalSize: t.Object.Size,
		Reader:    f,
	}

	// Signal auth was ok on first read; this frees up other workers to start
	if authOkFunc != nil {
		reader = newStartCallbackReader(reader, func(*startCallbackReader) {
			authOkFunc()
		})
	}

	req.Body = ioutil.NopCloser(reader)

	res, err := httputil.DoHttpRequest(config.Config, req, t.Object.NeedsAuth())
	if err != nil {
		return errors.NewRetriableError(err)
	}
	httputil.LogTransfer(config.Config, "lfs.data.upload", res)

	// A status code of 403 likely means that an authentication token for the
	// upload has expired. This can be safely retried.
	if res.StatusCode == 403 {
		err = errors.New("http: received status 403")
		return errors.NewRetriableError(err)
	}

	if res.StatusCode > 299 {
		return errors.Wrapf(nil, "Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode)
	}

	io.Copy(ioutil.Discard, res.Body)
	res.Body.Close()

	return api.VerifyUpload(config.Config, t.Object)
}
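Both upload adapters above report transient failures via errors.NewRetriableError. A caller-side sketch of how those errors might be consumed follows; doWithRetry and maxRetries are illustrative names, not part of the adapter code:

func doWithRetry(a *basicUploadAdapter, ctx interface{}, t *Transfer, cb TransferProgressCallback, maxRetries int) error {
	var err error
	for attempt := 0; attempt <= maxRetries; attempt++ {
		err = a.DoTransfer(ctx, t, cb, nil)
		if err == nil || !errors.IsRetriableError(err) {
			// Success, or a permanent failure that retrying will not fix.
			return err
		}
		tracerx.Printf("xfer: retriable error on attempt %d for %q: %s", attempt+1, t.Object.Oid, err)
	}
	return err
}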
Example No. 7
// Batch calls the batch API and returns object results
func Batch(cfg *config.Configuration, objects []*ObjectResource, operation string, transferAdapters []string) (objs []*ObjectResource, transferAdapter string, e error) {
	if len(objects) == 0 {
		return nil, "", nil
	}

	// Compatibility: omit the transfers list when only "basic" is requested,
	// since older schemas included `additionalproperties=false`
	if len(transferAdapters) == 1 && transferAdapters[0] == "basic" {
		transferAdapters = nil
	}

	o := &batchRequest{Operation: operation, Objects: objects, TransferAdapterNames: transferAdapters}
	by, err := json.Marshal(o)
	if err != nil {
		return nil, "", errors.Wrap(err, "batch request")
	}

	req, err := NewBatchRequest(cfg, operation)
	if err != nil {
		return nil, "", errors.Wrap(err, "batch request")
	}

	req.Header.Set("Content-Type", MediaType)
	req.Header.Set("Content-Length", strconv.Itoa(len(by)))
	req.ContentLength = int64(len(by))
	req.Body = tools.NewReadSeekCloserWrapper(bytes.NewReader(by))

	tracerx.Printf("api: batch %d files", len(objects))

	res, bresp, err := DoBatchRequest(cfg, req)

	if err != nil {
		if res == nil {
			return nil, "", errors.NewRetriableError(err)
		}

		if res.StatusCode == 0 {
			return nil, "", errors.NewRetriableError(err)
		}

		if errors.IsAuthError(err) {
			httputil.SetAuthType(cfg, req, res)
			return Batch(cfg, objects, operation, transferAdapters)
		}

		switch res.StatusCode {
		case 404, 410:
			return nil, "", errors.NewNotImplementedError(errors.Errorf("api: batch not implemented: %d", res.StatusCode))
		}

		tracerx.Printf("api error: %s", err)
		return nil, "", errors.Wrap(err, "batch response")
	}
	httputil.LogTransfer(cfg, "lfs.batch", res)

	if res.StatusCode != 200 {
		return nil, "", errors.Errorf("Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode)
	}

	return bresp.Objects, bresp.TransferAdapterName, nil
}
Example No. 8
// download starts or resumes a download. Always closes dlFile if non-nil.
func (a *basicDownloadAdapter) download(t *Transfer, cb TransferProgressCallback, authOkFunc func(), dlFile *os.File, fromByte int64, hash hash.Hash) error {
	if dlFile != nil {
		// ensure we always close dlFile. Note that this does not conflict with the
		// early close below, as close is idempotent.
		defer dlFile.Close()
	}

	rel, ok := t.Object.Rel("download")
	if !ok {
		return errors.New("Object not found on the server.")
	}

	req, err := httputil.NewHttpRequest("GET", rel.Href, rel.Header)
	if err != nil {
		return err
	}

	if fromByte > 0 {
		if dlFile == nil || hash == nil {
			return fmt.Errorf("Cannot restart %v from %d without a file & hash", t.Object.Oid, fromByte)
		}
		// We could just use a start byte, but since we know the length, be specific
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", fromByte, t.Object.Size-1))
	}

	res, err := httputil.DoHttpRequest(config.Config, req, t.Object.NeedsAuth())
	if err != nil {
		// Special-case status code 416 (Requested Range Not Satisfiable) - fall back
		if fromByte > 0 && dlFile != nil && res.StatusCode == 416 {
			tracerx.Printf("xfer: server rejected resume download request for %q from byte %d; re-downloading from start", t.Object.Oid, fromByte)
			dlFile.Close()
			os.Remove(dlFile.Name())
			return a.download(t, cb, authOkFunc, nil, 0, nil)
		}
		return errors.NewRetriableError(err)
	}
	httputil.LogTransfer(config.Config, "lfs.data.download", res)
	defer res.Body.Close()

	// Range request must return 206 & content range to confirm
	if fromByte > 0 {
		rangeRequestOk := false
		var failReason string
		// check 206 and Content-Range, fall back if either not as expected
		if res.StatusCode == 206 {
			// Probably a successful range request, check Content-Range
			if rangeHdr := res.Header.Get("Content-Range"); rangeHdr != "" {
				regex := regexp.MustCompile(`bytes (\d+)\-.*`)
				match := regex.FindStringSubmatch(rangeHdr)
				if match != nil && len(match) > 1 {
					contentStart, _ := strconv.ParseInt(match[1], 10, 64)
					if contentStart == fromByte {
						rangeRequestOk = true
					} else {
						failReason = fmt.Sprintf("Content-Range start byte incorrect: %s expected %d", match[1], fromByte)
					}
				} else {
					failReason = fmt.Sprintf("badly formatted Content-Range header: %q", rangeHdr)
				}
			} else {
				failReason = "missing Content-Range header in response"
			}
		} else {
			failReason = fmt.Sprintf("expected status code 206, received %d", res.StatusCode)
		}
		if rangeRequestOk {
			tracerx.Printf("xfer: server accepted resume download request: %q from byte %d", t.Object.Oid, fromByte)
			advanceCallbackProgress(cb, t, fromByte)
		} else {
			// Abort resume, perform regular download
			tracerx.Printf("xfer: failed to resume download for %q from byte %d: %s. Re-downloading from start", t.Object.Oid, fromByte, failReason)
			dlFile.Close()
			os.Remove(dlFile.Name())
			if res.StatusCode == 200 {
				// If status code was 200 then server just ignored Range header and
				// sent everything. Don't re-request, use this one from byte 0
				dlFile = nil
				fromByte = 0
				hash = nil
			} else {
				// re-request needed
				return a.download(t, cb, authOkFunc, nil, 0, nil)
			}
		}
	}

	// Signal auth OK on success response, before starting download to free up
	// other workers immediately
	if authOkFunc != nil {
		authOkFunc()
	}

	var hasher *tools.HashingReader
	httpReader := tools.NewRetriableReader(res.Body)

	if fromByte > 0 && hash != nil {
		// pre-load hashing reader with previous content
		hasher = tools.NewHashingReaderPreloadHash(httpReader, hash)
	} else {
		hasher = tools.NewHashingReader(httpReader)
	}

	if dlFile == nil {
		// New file start
		dlFile, err = os.OpenFile(a.downloadFilename(t), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			return err
		}
		defer dlFile.Close()
	}
	dlfilename := dlFile.Name()
	// Wrap callback to give name context
	ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		if cb != nil {
			return cb(t.Name, totalSize, readSoFar+fromByte, readSinceLast)
		}
		return nil
	}
	written, err := tools.CopyWithCallback(dlFile, hasher, res.ContentLength, ccb)
	if err != nil {
		return errors.Wrapf(err, "cannot write data to tempfile %q", dlfilename)
	}
	if err := dlFile.Close(); err != nil {
		return fmt.Errorf("can't close tempfile %q: %v", dlfilename, err)
	}

	if actual := hasher.Hash(); actual != t.Object.Oid {
		return fmt.Errorf("Expected OID %s, got %s after %d bytes written", t.Object.Oid, actual, written)
	}

	return tools.RenameFileCopyPermissions(dlfilename, t.Path)
}