Example #1
// DoHttpRequestWithRedirects runs an HTTP request and follows redirects
func DoHttpRequestWithRedirects(cfg *config.Configuration, req *http.Request, via []*http.Request, useCreds bool) (*http.Response, error) {
	var creds auth.Creds
	if useCreds {
		c, err := auth.GetCreds(cfg, req)
		if err != nil {
			return nil, err
		}
		creds = c
	}

	res, err := doHttpRequest(cfg, req, creds)
	if err != nil {
		return res, err
	}

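	// 307 Temporary Redirect: replay the same method and body against the new Location.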
	if res.StatusCode == 307 {
		redirectTo := res.Header.Get("Location")
		locurl, err := url.Parse(redirectTo)
		if err == nil && !locurl.IsAbs() {
			locurl = req.URL.ResolveReference(locurl)
			redirectTo = locurl.String()
		}

		redirectedReq, err := NewHttpRequest(req.Method, redirectTo, nil)
		if err != nil {
			return res, errors.Wrapf(err, "%s", err.Error())
		}

		via = append(via, req)

		// Avoid seeking and re-wrapping the CountingReadCloser, just get the "real" body
		realBody := req.Body
		if wrappedBody, ok := req.Body.(*CountingReadCloser); ok {
			realBody = wrappedBody.ReadCloser
		}

		seeker, ok := realBody.(io.Seeker)
		if !ok {
			return res, errors.Wrapf(nil, "Request body needs to be an io.Seeker to handle redirects.")
		}

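	// Rewind the body so it can be re-sent in full to the redirect target.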
		if _, err := seeker.Seek(0, 0); err != nil {
			return res, errors.Wrap(err, "request retry")
		}
		redirectedReq.Body = realBody
		redirectedReq.ContentLength = req.ContentLength

		if err = CheckRedirect(redirectedReq, via); err != nil {
			return res, errors.Wrapf(err, "%s", err.Error())
		}

		return DoHttpRequestWithRedirects(cfg, redirectedReq, via, useCreds)
	}

	return res, nil
}
Example #2
// NewUploadable builds the Uploadable from the given information.
// "filename" can be empty if a raw object is pushed (see "object-id" flag in push command)/
func NewUploadable(oid, filename string) (*Uploadable, error) {
	localMediaPath, err := LocalMediaPath(oid)
	if err != nil {
		return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid)
	}

	if len(filename) > 0 {
		if err := ensureFile(filename, localMediaPath); err != nil {
			return nil, err
		}
	}

	fi, err := os.Stat(localMediaPath)
	if err != nil {
		return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid)
	}

	return &Uploadable{oid: oid, OidPath: localMediaPath, Filename: filename, size: fi.Size()}, nil
}
Example #3
func downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, manifest *transfer.Manifest, cb progress.CopyCallback) error {
	fmt.Fprintf(os.Stderr, "Downloading %s (%s)\n", workingfile, pb.FormatBytes(ptr.Size))

	xfers := manifest.GetDownloadAdapterNames()
	obj, adapterName, err := api.BatchOrLegacySingle(config.Config, &api.ObjectResource{Oid: ptr.Oid, Size: ptr.Size}, "download", xfers)
	if err != nil {
		return errors.Wrapf(err, "Error downloading %s: %s", filepath.Base(mediafile), err)
	}

	if ptr.Size == 0 {
		ptr.Size = obj.Size
	}

	adapter := manifest.NewDownloadAdapter(adapterName)
	var tcb transfer.TransferProgressCallback
	if cb != nil {
		tcb = func(name string, totalSize, readSoFar int64, readSinceLast int) error {
			return cb(totalSize, readSoFar, readSinceLast)
		}
	}
	// Single download
	adapterResultChan := make(chan transfer.TransferResult, 1)
	err = adapter.Begin(1, tcb, adapterResultChan)
	if err != nil {
		return err
	}
	adapter.Add(transfer.NewTransfer(filepath.Base(workingfile), obj, mediafile))
	adapter.End()
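	// Collect the result of the single transfer once the adapter has finished.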
	res := <-adapterResultChan

	if res.Error != nil {
		return errors.Wrapf(err, "Error buffering media file: %s", res.Error)
	}

	return readLocalFile(writer, ptr, mediafile, workingfile, nil)
}
Example #4
// DecodeResponse attempts to decode the contents of the response as a JSON object
func DecodeResponse(res *http.Response, obj interface{}) error {
	ctype := res.Header.Get("Content-Type")
	if !(lfsMediaTypeRE.MatchString(ctype) || jsonMediaTypeRE.MatchString(ctype)) {
		return nil
	}

	err := json.NewDecoder(res.Body).Decode(obj)
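	// Drain any remaining body so the underlying connection can be reused.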
	io.Copy(ioutil.Discard, res.Body)
	res.Body.Close()

	if err != nil {
		return errors.Wrapf(err, "Unable to parse HTTP response for %s", TraceHttpReq(res.Request))
	}

	return nil
}
Example #5
func logsBoomtownCommand(cmd *cobra.Command, args []string) {
	Debug("Debug message")
	err := errors.Wrapf(errors.New("Inner error message!"), "Error")
	Panic(err, "Welcome to Boomtown")
	Debug("Never seen")
}
Example #6
// batchApiRoutine processes the queue of transfers using the batch endpoint,
// making only one POST call for all objects. The results are then handed
// off to the transfer workers.
func (q *TransferQueue) batchApiRoutine() {
	var startProgress sync.Once

	transferAdapterNames := q.manifest.GetAdapterNames(q.direction)

	for {
		batch := q.batcher.Next()
		if batch == nil {
			break
		}

		tracerx.Printf("tq: sending batch of size %d", len(batch))

		transfers := make([]*api.ObjectResource, 0, len(batch))
		for _, i := range batch {
			t := i.(Transferable)
			transfers = append(transfers, &api.ObjectResource{Oid: t.Oid(), Size: t.Size()})
		}

		if len(transfers) == 0 {
			continue
		}

		objs, adapterName, err := api.Batch(config.Config, transfers, q.transferKind(), transferAdapterNames)
		if err != nil {
			if errors.IsNotImplementedError(err) {
				git.Config.SetLocal("", "lfs.batch", "false")
				go q.legacyFallback(batch)
				return
			}

			var errOnce sync.Once
			for _, o := range batch {
				t := o.(Transferable)

				if q.canRetryObject(t.Oid(), err) {
					q.retry(t)
				} else {
					q.wait.Done()
					errOnce.Do(func() { q.errorc <- err })
				}
			}

			continue
		}

		q.useAdapter(adapterName)
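		// Start the progress meter only once, on the first successful batch.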
		startProgress.Do(q.meter.Start)

		for _, o := range objs {
			if o.Error != nil {
				q.errorc <- errors.Wrapf(o.Error, "[%v] %v", o.Oid, o.Error.Message)
				q.Skip(o.Size)
				q.wait.Done()
				continue
			}

			if _, ok := o.Rel(q.transferKind()); ok {
				// This object needs to be transferred
				q.trMutex.Lock()
				transfer, ok := q.transferables[o.Oid]
				q.trMutex.Unlock()

				if ok {
					transfer.SetObject(o)
					q.meter.Add(transfer.Name())
					q.addToAdapter(transfer)
				} else {
					q.Skip(transfer.Size())
					q.wait.Done()
				}
			} else {
				q.Skip(o.Size)
				q.wait.Done()
			}
		}
	}
}
Example #7
func readLocalFile(writer io.Writer, ptr *Pointer, mediafile string, workingfile string, cb progress.CopyCallback) error {
	reader, err := os.Open(mediafile)
	if err != nil {
		return errors.Wrapf(err, "Error opening media file.")
	}
	defer reader.Close()

	if ptr.Size == 0 {
		if stat, _ := os.Stat(mediafile); stat != nil {
			ptr.Size = stat.Size()
		}
	}

	if len(ptr.Extensions) > 0 {
		registeredExts := config.Config.Extensions()
		extensions := make(map[string]config.Extension)
		for _, ptrExt := range ptr.Extensions {
			ext, ok := registeredExts[ptrExt.Name]
			if !ok {
				err := fmt.Errorf("Extension '%s' is not configured.", ptrExt.Name)
				return errors.Wrap(err, "smudge")
			}
			ext.Priority = ptrExt.Priority
			extensions[ext.Name] = ext
		}
		exts, err := config.SortExtensions(extensions)
		if err != nil {
			return errors.Wrap(err, "smudge")
		}

		// pipe extensions in reverse order
		var extsR []config.Extension
		for i := range exts {
			ext := exts[len(exts)-1-i]
			extsR = append(extsR, ext)
		}

		request := &pipeRequest{"smudge", reader, workingfile, extsR}

		response, err := pipeExtensions(request)
		if err != nil {
			return errors.Wrap(err, "smudge")
		}

		actualExts := make(map[string]*pipeExtResult)
		for _, result := range response.results {
			actualExts[result.name] = result
		}

		// verify name, order, and oids
		oid := response.results[0].oidIn
		if ptr.Oid != oid {
			err = fmt.Errorf("Actual oid %s during smudge does not match expected %s", oid, ptr.Oid)
			return errors.Wrap(err, "smudge")
		}

		for _, expected := range ptr.Extensions {
			actual := actualExts[expected.Name]
			if actual.name != expected.Name {
				err = fmt.Errorf("Actual extension name '%s' does not match expected '%s'", actual.name, expected.Name)
				return errors.Wrap(err, "smudge")
			}
			if actual.oidOut != expected.Oid {
				err = fmt.Errorf("Actual oid %s for extension '%s' does not match expected %s", actual.oidOut, expected.Name, expected.Oid)
				return errors.Wrap(err, "smudge")
			}
		}

		// setup reader
		reader, err = os.Open(response.file.Name())
		if err != nil {
			return errors.Wrapf(err, "Error opening smudged file: %s", err)
		}
		defer reader.Close()
	}

	_, err = tools.CopyWithCallback(writer, reader, ptr.Size, cb)
	if err != nil {
		return errors.Wrapf(err, "Error reading from media file: %s", err)
	}

	return nil
}
Example #8
func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb TransferProgressCallback, authOkFunc func()) error {
	rel, ok := t.Object.Rel("upload")
	if !ok {
		return fmt.Errorf("No upload action for this object.")
	}

	// Note: not supporting the Creation extension, since the batch API generates the upload URLs
	// Also not supporting the Concatenation extension (which would allow parallel uploads of chunks); uploads are forward-only

	// 1. Send HEAD request to determine upload start point
	//    Request must include Tus-Resumable header (version)
	tracerx.Printf("xfer: sending tus.io HEAD request for %q", t.Object.Oid)
	req, err := httputil.NewHttpRequest("HEAD", rel.Href, rel.Header)
	if err != nil {
		return err
	}
	req.Header.Set("Tus-Resumable", TusVersion)
	res, err := httputil.DoHttpRequest(config.Config, req, false)
	if err != nil {
		return errors.NewRetriableError(err)
	}

	//    Response will contain Upload-Offset if supported
	offHdr := res.Header.Get("Upload-Offset")
	if len(offHdr) == 0 {
		return fmt.Errorf("Missing Upload-Offset header from tus.io HEAD response at %q, contact server admin", rel.Href)
	}
	offset, err := strconv.ParseInt(offHdr, 10, 64)
	if err != nil || offset < 0 {
		return fmt.Errorf("Invalid Upload-Offset value %q in response from tus.io HEAD at %q, contact server admin", offHdr, rel.Href)
	}
	// Upload-Offset=size means already completed (skip)
	// Batch API will probably already detect this, but handle just in case
	if offset >= t.Object.Size {
		tracerx.Printf("xfer: tus.io HEAD offset %d indicates %q is already fully uploaded, skipping", offset, t.Object.Oid)
		advanceCallbackProgress(cb, t, t.Object.Size)
		return nil
	}

	// Open file for uploading
	f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644)
	if err != nil {
		return errors.Wrap(err, "tus upload")
	}
	defer f.Close()

	// Upload-Offset=0 means start from scratch, but still send PATCH
	if offset == 0 {
		tracerx.Printf("xfer: tus.io uploading %q from start", t.Object.Oid)
	} else {
		tracerx.Printf("xfer: tus.io resuming upload %q from %d", t.Object.Oid, offset)
		advanceCallbackProgress(cb, t, offset)
		_, err := f.Seek(offset, os.SEEK_CUR)
		if err != nil {
			return errors.Wrap(err, "tus upload")
		}
	}

	// 2. Send PATCH request with byte start point (even if 0) in Upload-Offset
	//    Response status must be 204
	//    Response Upload-Offset must be request Upload-Offset plus sent bytes
	//    Response may include an Upload-Expires header, in which case check that it has not passed

	tracerx.Printf("xfer: sending tus.io PATCH request for %q", t.Object.Oid)
	req, err = httputil.NewHttpRequest("PATCH", rel.Href, rel.Header)
	if err != nil {
		return err
	}
	req.Header.Set("Tus-Resumable", TusVersion)
	req.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10))
	req.Header.Set("Content-Type", "application/offset+octet-stream")
	req.Header.Set("Content-Length", strconv.FormatInt(t.Object.Size-offset, 10))
	req.ContentLength = t.Object.Size - offset

	// Ensure progress callbacks made while uploading
	// Wrap callback to give name context
	ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		if cb != nil {
			return cb(t.Name, totalSize, readSoFar, readSinceLast)
		}
		return nil
	}
	var reader io.Reader
	reader = &progress.CallbackReader{
		C:         ccb,
		TotalSize: t.Object.Size,
		Reader:    f,
	}

	// Signal auth was ok on first read; this frees up other workers to start
	if authOkFunc != nil {
		reader = newStartCallbackReader(reader, func(*startCallbackReader) {
			authOkFunc()
		})
	}

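	// http.Request.Body must be an io.ReadCloser, so wrap the plain reader.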
	req.Body = ioutil.NopCloser(reader)

	res, err = httputil.DoHttpRequest(config.Config, req, false)
	if err != nil {
		return errors.NewRetriableError(err)
	}
	httputil.LogTransfer(config.Config, "lfs.data.upload", res)

	// A status code of 403 likely means that an authentication token for the
	// upload has expired. This can be safely retried.
	if res.StatusCode == 403 {
		err = errors.New("http: received status 403")
		return errors.NewRetriableError(err)
	}

	if res.StatusCode > 299 {
		return errors.Wrapf(nil, "Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode)
	}

	io.Copy(ioutil.Discard, res.Body)
	res.Body.Close()

	return api.VerifyUpload(config.Config, t.Object)
}
Example #9
func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb TransferProgressCallback, authOkFunc func()) error {
	rel, ok := t.Object.Rel("upload")
	if !ok {
		return fmt.Errorf("No upload action for this object.")
	}

	req, err := httputil.NewHttpRequest("PUT", rel.Href, rel.Header)
	if err != nil {
		return err
	}

	if len(req.Header.Get("Content-Type")) == 0 {
		req.Header.Set("Content-Type", "application/octet-stream")
	}

	if req.Header.Get("Transfer-Encoding") == "chunked" {
		req.TransferEncoding = []string{"chunked"}
	} else {
		req.Header.Set("Content-Length", strconv.FormatInt(t.Object.Size, 10))
	}

	req.ContentLength = t.Object.Size

	f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644)
	if err != nil {
		return errors.Wrap(err, "basic upload")
	}
	defer f.Close()

	// Ensure progress callbacks made while uploading
	// Wrap callback to give name context
	ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		if cb != nil {
			return cb(t.Name, totalSize, readSoFar, readSinceLast)
		}
		return nil
	}
	var reader io.Reader
	reader = &progress.CallbackReader{
		C:         ccb,
		TotalSize: t.Object.Size,
		Reader:    f,
	}

	// Signal auth was ok on first read; this frees up other workers to start
	if authOkFunc != nil {
		reader = newStartCallbackReader(reader, func(*startCallbackReader) {
			authOkFunc()
		})
	}

	req.Body = ioutil.NopCloser(reader)

	res, err := httputil.DoHttpRequest(config.Config, req, t.Object.NeedsAuth())
	if err != nil {
		return errors.NewRetriableError(err)
	}
	httputil.LogTransfer(config.Config, "lfs.data.upload", res)

	// A status code of 403 likely means that an authentication token for the
	// upload has expired. This can be safely retried.
	if res.StatusCode == 403 {
		err = errors.New("http: received status 403")
		return errors.NewRetriableError(err)
	}

	if res.StatusCode > 299 {
		return errors.Wrapf(nil, "Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode)
	}

	io.Copy(ioutil.Discard, res.Body)
	res.Body.Close()

	return api.VerifyUpload(config.Config, t.Object)
}
Example #10
// download starts or resumes a download. Always closes dlFile if non-nil
func (a *basicDownloadAdapter) download(t *Transfer, cb TransferProgressCallback, authOkFunc func(), dlFile *os.File, fromByte int64, hash hash.Hash) error {
	if dlFile != nil {
		// ensure we always close dlFile. Note that this does not conflict with the
		// early close below, as close is idempotent.
		defer dlFile.Close()
	}

	rel, ok := t.Object.Rel("download")
	if !ok {
		return errors.New("Object not found on the server.")
	}

	req, err := httputil.NewHttpRequest("GET", rel.Href, rel.Header)
	if err != nil {
		return err
	}

	if fromByte > 0 {
		if dlFile == nil || hash == nil {
			return fmt.Errorf("Cannot restart %v from %d without a file & hash", t.Object.Oid, fromByte)
		}
		// We could just use a start byte, but since we know the length, be specific
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", fromByte, t.Object.Size-1))
	}

	res, err := httputil.DoHttpRequest(config.Config, req, t.Object.NeedsAuth())
	if err != nil {
		// Special-case status code 416 (Requested Range Not Satisfiable) - fall back to a full download
		if fromByte > 0 && dlFile != nil && res != nil && res.StatusCode == 416 {
			tracerx.Printf("xfer: server rejected resume download request for %q from byte %d; re-downloading from start", t.Object.Oid, fromByte)
			dlFile.Close()
			os.Remove(dlFile.Name())
			return a.download(t, cb, authOkFunc, nil, 0, nil)
		}
		return errors.NewRetriableError(err)
	}
	httputil.LogTransfer(config.Config, "lfs.data.download", res)
	defer res.Body.Close()

	// Range request must return 206 & content range to confirm
	if fromByte > 0 {
		rangeRequestOk := false
		var failReason string
		// check 206 and Content-Range, fall back if either not as expected
		if res.StatusCode == 206 {
			// Probably a successful range request, check Content-Range
			if rangeHdr := res.Header.Get("Content-Range"); rangeHdr != "" {
				regex := regexp.MustCompile(`bytes (\d+)\-.*`)
				match := regex.FindStringSubmatch(rangeHdr)
				if match != nil && len(match) > 1 {
					contentStart, _ := strconv.ParseInt(match[1], 10, 64)
					if contentStart == fromByte {
						rangeRequestOk = true
					} else {
						failReason = fmt.Sprintf("Content-Range start byte incorrect: %s expected %d", match[1], fromByte)
					}
				} else {
					failReason = fmt.Sprintf("badly formatted Content-Range header: %q", rangeHdr)
				}
			} else {
				failReason = "missing Content-Range header in response"
			}
		} else {
			failReason = fmt.Sprintf("expected status code 206, received %d", res.StatusCode)
		}
		if rangeRequestOk {
			tracerx.Printf("xfer: server accepted resume download request: %q from byte %d", t.Object.Oid, fromByte)
			advanceCallbackProgress(cb, t, fromByte)
		} else {
			// Abort resume, perform regular download
			tracerx.Printf("xfer: failed to resume download for %q from byte %d: %s. Re-downloading from start", t.Object.Oid, fromByte, failReason)
			dlFile.Close()
			os.Remove(dlFile.Name())
			if res.StatusCode == 200 {
				// If status code was 200 then server just ignored Range header and
				// sent everything. Don't re-request, use this one from byte 0
				dlFile = nil
				fromByte = 0
				hash = nil
			} else {
				// re-request needed
				return a.download(t, cb, authOkFunc, nil, 0, nil)
			}
		}
	}

	// Signal auth OK on success response, before starting download to free up
	// other workers immediately
	if authOkFunc != nil {
		authOkFunc()
	}

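	// Hash the content as it streams so the OID can be verified once the download completes.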
	var hasher *tools.HashingReader
	httpReader := tools.NewRetriableReader(res.Body)

	if fromByte > 0 && hash != nil {
		// pre-load hashing reader with previous content
		hasher = tools.NewHashingReaderPreloadHash(httpReader, hash)
	} else {
		hasher = tools.NewHashingReader(httpReader)
	}

	if dlFile == nil {
		// New file start
		dlFile, err = os.OpenFile(a.downloadFilename(t), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			return err
		}
		defer dlFile.Close()
	}
	dlfilename := dlFile.Name()
	// Wrap callback to give name context
	ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		if cb != nil {
			return cb(t.Name, totalSize, readSoFar+fromByte, readSinceLast)
		}
		return nil
	}
	written, err := tools.CopyWithCallback(dlFile, hasher, res.ContentLength, ccb)
	if err != nil {
		return errors.Wrapf(err, "cannot write data to tempfile %q", dlfilename)
	}
	if err := dlFile.Close(); err != nil {
		return fmt.Errorf("can't close tempfile %q: %v", dlfilename, err)
	}

	if actual := hasher.Hash(); actual != t.Object.Oid {
		return fmt.Errorf("Expected OID %s, got %s after %d bytes written", t.Object.Oid, actual, written)
	}

	return tools.RenameFileCopyPermissions(dlfilename, t.Path)
}