func NewBatchRequest(operation string) (*http.Request, error) { endpoint := config.Config.Endpoint(operation) res, err := auth.SshAuthenticate(endpoint, operation, "") if err != nil { tracerx.Printf("ssh: %s attempted with %s. Error: %s", operation, endpoint.SshUserAndHost, err.Error(), ) return nil, err } if len(res.Href) > 0 { endpoint.Url = res.Href } u, err := ObjectUrl(endpoint, "batch") if err != nil { return nil, err } req, err := httputil.NewHttpRequest("POST", u.String(), nil) if err != nil { return nil, err } req.Header.Set("Accept", MediaType) if res.Header != nil { for key, value := range res.Header { req.Header.Set(key, value) } } return req, nil }
func NewRequest(cfg *config.Configuration, method, oid string) (*http.Request, error) { objectOid := oid operation := "download" if method == "POST" { if oid != "batch" { objectOid = "" operation = "upload" } } res, endpoint, err := auth.SshAuthenticate(cfg, operation, oid) if err != nil { tracerx.Printf("ssh: attempted with %s. Error: %s", endpoint.SshUserAndHost, err.Error(), ) return nil, err } if len(res.Href) > 0 { endpoint.Url = res.Href } u, err := ObjectUrl(endpoint, objectOid) if err != nil { return nil, err } req, err := httputil.NewHttpRequest(method, u.String(), res.Header) if err != nil { return nil, err } req.Header.Set("Accept", MediaType) return req, nil }
func performDownload(oid string, size int64, a *action, writer, errWriter *bufio.Writer) { // We just use the URLs we're given, so we're just a proxy for the direct method // but this is enough to test intermediate custom adapters req, err := httputil.NewHttpRequest("GET", a.Href, a.Header) if err != nil { sendTransferError(oid, 2, err.Error(), writer, errWriter) return } res, err := httputil.DoHttpRequest(cfg, req, true) if err != nil { sendTransferError(oid, res.StatusCode, err.Error(), writer, errWriter) return } defer res.Body.Close() dlFile, err := ioutil.TempFile("", "lfscustomdl") if err != nil { sendTransferError(oid, 3, err.Error(), writer, errWriter) return } defer dlFile.Close() dlfilename := dlFile.Name() // Turn callback into progress messages cb := func(totalSize int64, readSoFar int64, readSinceLast int) error { sendProgress(oid, readSoFar, readSinceLast, writer, errWriter) return nil } _, err = tools.CopyWithCallback(dlFile, res.Body, res.ContentLength, cb) if err != nil { sendTransferError(oid, 4, fmt.Sprintf("cannot write data to tempfile %q: %v", dlfilename, err), writer, errWriter) os.Remove(dlfilename) return } if err := dlFile.Close(); err != nil { sendTransferError(oid, 5, fmt.Sprintf("can't close tempfile %q: %v", dlfilename, err), writer, errWriter) os.Remove(dlfilename) return } // completed complete := &transferResponse{"complete", oid, dlfilename, nil} err = sendResponse(complete, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send completion message: %v\n", err), errWriter) } }
// TODO LEGACY API: remove when legacy API removed func (o *ObjectResource) NewRequest(relation, method string) (*http.Request, error) { rel, ok := o.Rel(relation) if !ok { if relation == "download" { return nil, errors.New("Object not found on the server.") } return nil, fmt.Errorf("No %q action for this object.", relation) } req, err := httputil.NewHttpRequest(method, rel.Href, rel.Header) if err != nil { return nil, err } return req, nil }
func performUpload(oid string, size int64, a *action, fromPath string, writer, errWriter *bufio.Writer) { // We just use the URLs we're given, so we're just a proxy for the direct method // but this is enough to test intermediate custom adapters req, err := httputil.NewHttpRequest("PUT", a.Href, a.Header) if err != nil { sendTransferError(oid, 2, err.Error(), writer, errWriter) return } if len(req.Header.Get("Content-Type")) == 0 { req.Header.Set("Content-Type", "application/octet-stream") } if req.Header.Get("Transfer-Encoding") == "chunked" { req.TransferEncoding = []string{"chunked"} } else { req.Header.Set("Content-Length", strconv.FormatInt(size, 10)) } req.ContentLength = size f, err := os.OpenFile(fromPath, os.O_RDONLY, 0644) if err != nil { sendTransferError(oid, 3, fmt.Sprintf("Cannot read data from %q: %v", fromPath, err), writer, errWriter) return } defer f.Close() // Turn callback into progress messages cb := func(totalSize int64, readSoFar int64, readSinceLast int) error { sendProgress(oid, readSoFar, readSinceLast, writer, errWriter) return nil } var reader io.Reader reader = &progress.CallbackReader{ C: cb, TotalSize: size, Reader: f, } req.Body = ioutil.NopCloser(reader) res, err := httputil.DoHttpRequest(cfg, req, true) if err != nil { sendTransferError(oid, res.StatusCode, fmt.Sprintf("Error uploading data for %s: %v", oid, err), writer, errWriter) return } if res.StatusCode > 299 { sendTransferError(oid, res.StatusCode, fmt.Sprintf("Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode), writer, errWriter) return } io.Copy(ioutil.Discard, res.Body) res.Body.Close() // completed complete := &transferResponse{"complete", oid, "", nil} err = sendResponse(complete, writer, errWriter) if err != nil { writeToStderr(fmt.Sprintf("Unable to send completion message: %v\n", err), errWriter) } }
// download starts or resumes and download. Always closes dlFile if non-nil
//
// When fromByte > 0 (resume), dlFile and hash must be supplied: a Range
// request is issued and, if the server honors it, progress and the running
// hash are pre-advanced past the already-downloaded prefix. If the server
// rejects or mishandles the Range request, the partial file is discarded and
// the download restarts from byte 0 (recursively, or by reusing a full 200
// response). On success the temp file is renamed into t.Path after its
// SHA-256 is verified against the expected OID.
func (a *basicDownloadAdapter) download(t *Transfer, cb TransferProgressCallback, authOkFunc func(), dlFile *os.File, fromByte int64, hash hash.Hash) error {
	if dlFile != nil {
		// ensure we always close dlFile. Note that this does not conflict with the
		// early close below, as close is idempotent.
		defer dlFile.Close()
	}
	rel, ok := t.Object.Rel("download")
	if !ok {
		return errors.New("Object not found on the server.")
	}

	req, err := httputil.NewHttpRequest("GET", rel.Href, rel.Header)
	if err != nil {
		return err
	}

	if fromByte > 0 {
		if dlFile == nil || hash == nil {
			return fmt.Errorf("Cannot restart %v from %d without a file & hash", t.Object.Oid, fromByte)
		}
		// We could just use a start byte, but since we know the length be specific
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", fromByte, t.Object.Size-1))
	}

	res, err := httputil.DoHttpRequest(req, true)
	if err != nil {
		// Special-case status code 416 () - fall back
		// NOTE(review): res is dereferenced on the error path here — confirm
		// DoHttpRequest always returns a non-nil response alongside its
		// error, otherwise this can nil-panic.
		if fromByte > 0 && dlFile != nil && res.StatusCode == 416 {
			tracerx.Printf("xfer: server rejected resume download request for %q from byte %d; re-downloading from start", t.Object.Oid, fromByte)
			dlFile.Close()
			os.Remove(dlFile.Name())
			return a.download(t, cb, authOkFunc, nil, 0, nil)
		}
		return errutil.NewRetriableError(err)
	}
	httputil.LogTransfer("lfs.data.download", res)
	defer res.Body.Close()

	// Range request must return 206 & content range to confirm
	if fromByte > 0 {
		rangeRequestOk := false
		var failReason string
		// check 206 and Content-Range, fall back if either not as expected
		if res.StatusCode == 206 {
			// Probably a successful range request, check Content-Range
			if rangeHdr := res.Header.Get("Content-Range"); rangeHdr != "" {
				regex := regexp.MustCompile(`bytes (\d+)\-.*`)
				match := regex.FindStringSubmatch(rangeHdr)
				if match != nil && len(match) > 1 {
					contentStart, _ := strconv.ParseInt(match[1], 10, 64)
					if contentStart == fromByte {
						rangeRequestOk = true
					} else {
						failReason = fmt.Sprintf("Content-Range start byte incorrect: %s expected %d", match[1], fromByte)
					}
				} else {
					failReason = fmt.Sprintf("badly formatted Content-Range header: %q", rangeHdr)
				}
			} else {
				failReason = "missing Content-Range header in response"
			}
		} else {
			failReason = fmt.Sprintf("expected status code 206, received %d", res.StatusCode)
		}

		if rangeRequestOk {
			tracerx.Printf("xfer: server accepted resume download request: %q from byte %d", t.Object.Oid, fromByte)
			// Advance progress callback; must split into max int sizes though
			// (readSinceLast is an int, so a >2GiB prefix needs several calls
			// on 32-bit platforms).
			if cb != nil {
				const maxInt = int(^uint(0) >> 1)
				for read := int64(0); read < fromByte; {
					remainder := fromByte - read
					if remainder > int64(maxInt) {
						read += int64(maxInt)
						cb(t.Name, t.Object.Size, read, maxInt)
					} else {
						read += remainder
						cb(t.Name, t.Object.Size, read, int(remainder))
					}
				}
			}
		} else {
			// Abort resume, perform regular download
			tracerx.Printf("xfer: failed to resume download for %q from byte %d: %s. Re-downloading from start", t.Object.Oid, fromByte, failReason)
			dlFile.Close()
			os.Remove(dlFile.Name())
			if res.StatusCode == 200 {
				// If status code was 200 then server just ignored Range header and
				// sent everything. Don't re-request, use this one from byte 0
				// (falls through below with resume state cleared).
				dlFile = nil
				fromByte = 0
				hash = nil
			} else {
				// re-request needed
				return a.download(t, cb, authOkFunc, nil, 0, nil)
			}
		}
	}

	// Signal auth OK on success response, before starting download to free up
	// other workers immediately
	if authOkFunc != nil {
		authOkFunc()
	}

	var hasher *tools.HashingReader
	if fromByte > 0 && hash != nil {
		// pre-load hashing reader with previous content
		hasher = tools.NewHashingReaderPreloadHash(res.Body, hash)
	} else {
		hasher = tools.NewHashingReader(res.Body)
	}

	if dlFile == nil {
		// New file start
		dlFile, err = os.OpenFile(a.downloadFilename(t), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			return err
		}
		defer dlFile.Close()
	}
	dlfilename := dlFile.Name()
	// Wrap callback to give name context; readSoFar is offset by the
	// already-present prefix so progress reflects the whole object.
	ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		if cb != nil {
			return cb(t.Name, totalSize, readSoFar+fromByte, readSinceLast)
		}
		return nil
	}
	written, err := tools.CopyWithCallback(dlFile, hasher, res.ContentLength, ccb)
	if err != nil {
		return fmt.Errorf("cannot write data to tempfile %q: %v", dlfilename, err)
	}
	if err := dlFile.Close(); err != nil {
		return fmt.Errorf("can't close tempfile %q: %v", dlfilename, err)
	}

	// Verify content integrity before moving the file into place.
	if actual := hasher.Hash(); actual != t.Object.Oid {
		return fmt.Errorf("Expected OID %s, got %s after %d bytes written", t.Object.Oid, actual, written)
	}

	return tools.RenameFileCopyPermissions(dlfilename, t.Path)
}
func (a *basicUploadAdapter) DoTransfer(t *Transfer, cb TransferProgressCallback, authOkFunc func()) error { rel, ok := t.Object.Rel("upload") if !ok { return fmt.Errorf("No upload action for this object.") } req, err := httputil.NewHttpRequest("PUT", rel.Href, rel.Header) if err != nil { return err } if len(req.Header.Get("Content-Type")) == 0 { req.Header.Set("Content-Type", "application/octet-stream") } if req.Header.Get("Transfer-Encoding") == "chunked" { req.TransferEncoding = []string{"chunked"} } else { req.Header.Set("Content-Length", strconv.FormatInt(t.Object.Size, 10)) } req.ContentLength = t.Object.Size f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644) if err != nil { return errutil.Error(err) } defer f.Close() // Ensure progress callbacks made while uploading // Wrap callback to give name context ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { if cb != nil { return cb(t.Name, totalSize, readSoFar, readSinceLast) } return nil } var reader io.Reader reader = &progress.CallbackReader{ C: ccb, TotalSize: t.Object.Size, Reader: f, } // Signal auth was ok on first read; this frees up other workers to start if authOkFunc != nil { reader = newStartCallbackReader(reader, func(*startCallbackReader) { authOkFunc() }) } req.Body = ioutil.NopCloser(reader) res, err := httputil.DoHttpRequest(req, true) if err != nil { return errutil.NewRetriableError(err) } httputil.LogTransfer("lfs.data.upload", res) // A status code of 403 likely means that an authentication token for the // upload has expired. This can be safely retried. if res.StatusCode == 403 { return errutil.NewRetriableError(err) } if res.StatusCode > 299 { return errutil.Errorf(nil, "Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode) } io.Copy(ioutil.Discard, res.Body) res.Body.Close() return api.VerifyUpload(t.Object) }
// DoTransfer uploads t via the tus.io resumable-upload protocol: a HEAD
// request discovers how many bytes the server already holds (Upload-Offset),
// then a single PATCH streams the remaining bytes from that offset. Progress
// is pre-advanced for any server-held prefix, and authOkFunc is signalled on
// the first body read so other workers can start.
func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb TransferProgressCallback, authOkFunc func()) error {
	rel, ok := t.Object.Rel("upload")
	if !ok {
		return fmt.Errorf("No upload action for this object.")
	}

	// Note not supporting the Creation extension since the batch API generates URLs
	// Also not supporting Concatenation to support parallel uploads of chunks; forward only

	// 1. Send HEAD request to determine upload start point
	// Request must include Tus-Resumable header (version)
	tracerx.Printf("xfer: sending tus.io HEAD request for %q", t.Object.Oid)
	req, err := httputil.NewHttpRequest("HEAD", rel.Href, rel.Header)
	if err != nil {
		return err
	}
	req.Header.Set("Tus-Resumable", TusVersion)
	res, err := httputil.DoHttpRequest(config.Config, req, false)
	if err != nil {
		return errors.NewRetriableError(err)
	}

	// Response will contain Upload-Offset if supported
	offHdr := res.Header.Get("Upload-Offset")
	if len(offHdr) == 0 {
		return fmt.Errorf("Missing Upload-Offset header from tus.io HEAD response at %q, contact server admin", rel.Href)
	}
	offset, err := strconv.ParseInt(offHdr, 10, 64)
	if err != nil || offset < 0 {
		return fmt.Errorf("Invalid Upload-Offset value %q in response from tus.io HEAD at %q, contact server admin", offHdr, rel.Href)
	}
	// Upload-Offset=size means already completed (skip)
	// Batch API will probably already detect this, but handle just in case
	if offset >= t.Object.Size {
		tracerx.Printf("xfer: tus.io HEAD offset %d indicates %q is already fully uploaded, skipping", offset, t.Object.Oid)
		advanceCallbackProgress(cb, t, t.Object.Size)
		return nil
	}

	// Open file for uploading
	f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644)
	if err != nil {
		return errors.Wrap(err, "tus upload")
	}
	defer f.Close()

	// Upload-Offset=0 means start from scratch, but still send PATCH
	if offset == 0 {
		tracerx.Printf("xfer: tus.io uploading %q from start", t.Object.Oid)
	} else {
		tracerx.Printf("xfer: tus.io resuming upload %q from %d", t.Object.Oid, offset)
		// Count the server-held prefix towards progress before streaming.
		advanceCallbackProgress(cb, t, offset)
		// NOTE(review): SEEK_CUR on a freshly opened file (position 0) is
		// equivalent to SEEK_SET here; os.SEEK_SET would state the intent
		// more clearly — confirm before changing.
		_, err := f.Seek(offset, os.SEEK_CUR)
		if err != nil {
			return errors.Wrap(err, "tus upload")
		}
	}

	// 2. Send PATCH request with byte start point (even if 0) in Upload-Offset
	// Response status must be 204
	// Response Upload-Offset must be request Upload-Offset plus sent bytes
	// Response may include Upload-Expires header in which case check not passed
	tracerx.Printf("xfer: sending tus.io PATCH request for %q", t.Object.Oid)
	req, err = httputil.NewHttpRequest("PATCH", rel.Href, rel.Header)
	if err != nil {
		return err
	}
	req.Header.Set("Tus-Resumable", TusVersion)
	req.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10))
	req.Header.Set("Content-Type", "application/offset+octet-stream")
	req.Header.Set("Content-Length", strconv.FormatInt(t.Object.Size-offset, 10))
	req.ContentLength = t.Object.Size - offset

	// Ensure progress callbacks made while uploading
	// Wrap callback to give name context
	ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
		if cb != nil {
			return cb(t.Name, totalSize, readSoFar, readSinceLast)
		}
		return nil
	}
	var reader io.Reader
	reader = &progress.CallbackReader{
		C:         ccb,
		TotalSize: t.Object.Size,
		Reader:    f,
	}

	// Signal auth was ok on first read; this frees up other workers to start
	if authOkFunc != nil {
		reader = newStartCallbackReader(reader, func(*startCallbackReader) {
			authOkFunc()
		})
	}

	req.Body = ioutil.NopCloser(reader)

	res, err = httputil.DoHttpRequest(config.Config, req, false)
	if err != nil {
		return errors.NewRetriableError(err)
	}
	httputil.LogTransfer(config.Config, "lfs.data.upload", res)

	// A status code of 403 likely means that an authentication token for the
	// upload has expired. This can be safely retried.
	if res.StatusCode == 403 {
		err = errors.New("http: received status 403")
		return errors.NewRetriableError(err)
	}

	if res.StatusCode > 299 {
		return errors.Wrapf(nil, "Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode)
	}

	// Drain and close so the underlying connection can be reused.
	io.Copy(ioutil.Discard, res.Body)
	res.Body.Close()

	return api.VerifyUpload(config.Config, t.Object)
}