Code example #1
File: download.go Project: bbc/mozart-api-common
// downloadPart is an individual goroutine worker reading from the ch channel
// and performing a GetObject request on the data with a given byte range.
//
// If this is the first worker, this operation also resolves the total number
// of bytes to be read so that the worker manager knows when it is finished.
func (d *downloader) downloadPart(ch chan dlchunk) {
	defer d.wg.Done()

	for chunk := range ch { // exits when the manager closes ch
		if d.geterr() == nil {
			// Get the next byte range of data
			in := &s3.GetObjectInput{}
			awsutil.Copy(in, d.in)
			rng := fmt.Sprintf("bytes=%d-%d",
				chunk.start, chunk.start+chunk.size-1)
			in.Range = &rng

			resp, err := d.opts.S3.GetObject(in)
			if err != nil {
				d.seterr(err)
			} else {
				d.setTotalBytes(resp) // Set total if not yet set.

				n, err := io.Copy(&chunk, resp.Body)
				resp.Body.Close()

				if err != nil {
					d.seterr(err)
				}
				d.incrwritten(n)
			}
		}
	}
}
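
The Range header built above uses the inclusive "bytes=start-end" form, so the last byte of a chunk is start+size-1. Below is a minimal, self-contained sketch (not part of the project) of how a download manager might carve an object into such ranges; the sizes in main are illustrative:

package main

import "fmt"

// byteRanges splits an object of totalSize bytes into inclusive
// "bytes=start-end" range strings of at most chunkSize bytes each,
// mirroring the rng computation in downloadPart above.
func byteRanges(totalSize, chunkSize int64) []string {
	var ranges []string
	for start := int64(0); start < totalSize; start += chunkSize {
		end := start + chunkSize - 1
		if end > totalSize-1 {
			end = totalSize - 1 // the final chunk may be short
		}
		ranges = append(ranges, fmt.Sprintf("bytes=%d-%d", start, end))
	}
	return ranges
}

func main() {
	// A 10 MiB object fetched in 4 MiB chunks yields three ranges,
	// the last covering only the remaining 2 MiB.
	for _, r := range byteRanges(10*1024*1024, 4*1024*1024) {
		fmt.Println(r)
	}
}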
Code example #2
File: upload.go Project: bbc/mozart-api-common
// singlePart contains the upload logic for sending a single chunk via
// a regular PutObject request. Multipart uploads need at least two
// parts, and every part except the last must be at least 5MB, so
// smaller payloads take this single-request path.
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.PutObjectInput{}
	awsutil.Copy(params, u.in)
	params.Body = buf

	req, out := u.opts.S3.PutObjectRequest(params)
	if err := req.Send(); err != nil {
		return nil, err
	}

	url := req.HTTPRequest.URL.String()
	return &UploadOutput{
		Location:  url,
		VersionID: out.VersionId,
	}, nil
}
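
The decision singlePart sits behind is whether the payload fits in a single part. A minimal sketch of that dispatch using only the standard library; uploadDispatch and minPartSize are hypothetical names, not the project's API:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// minPartSize mirrors the 5MB floor S3 imposes on every multipart
// part except the last (hypothetical constant, per the doc comment).
const minPartSize = 5 * 1024 * 1024

// uploadDispatch reads up to one part from body. If the read comes up
// short, the whole payload fits in a single PutObject; only a full
// first part warrants a multipart upload.
func uploadDispatch(body io.Reader, partSize int64) (string, error) {
	buf := make([]byte, partSize)
	n, err := io.ReadFull(body, buf)
	switch {
	case err == io.EOF, err == io.ErrUnexpectedEOF:
		return fmt.Sprintf("singlePart (%d bytes)", n), nil
	case err != nil:
		return "", err
	default:
		return "multipart", nil
	}
}

func main() {
	small := bytes.NewReader(make([]byte, 1024))
	route, _ := uploadDispatch(small, minPartSize)
	fmt.Println(route) // singlePart (1024 bytes)
}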
Code example #3
File: upload.go Project: bbc/mozart-api-common
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.CreateMultipartUploadInput{}
	awsutil.Copy(params, u.in)

	// Create the multipart
	resp, err := u.opts.S3.CreateMultipartUpload(params)
	if err != nil {
		return nil, err
	}
	u.uploadID = *resp.UploadId

	// Create the workers
	ch := make(chan chunk, u.opts.Concurrency)
	for i := 0; i < u.opts.Concurrency; i++ {
		u.wg.Add(1)
		go u.readChunk(ch)
	}

	// Send part 1 to the workers
	var num int64 = 1
	ch <- chunk{buf: firstBuf, num: num}

	// Read and queue the rest of the parts
	for u.geterr() == nil {
		// This upload exceeded maximum number of supported parts, error now.
		if num > int64(MaxUploadParts) {
			msg := fmt.Sprintf("exceeded total allowed parts (%d). "+
				"Adjust PartSize to fit in this limit", MaxUploadParts)
			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
			break
		}

		num++

		buf, err := u.nextReader()
		if err == io.EOF { // all input consumed; no more parts
			break
		}

		// Queue the chunk even on a short read (io.ErrUnexpectedEOF):
		// it still carries the final part's data.
		ch <- chunk{buf: buf, num: num}

		if err != nil && err != io.ErrUnexpectedEOF {
			u.seterr(awserr.New(
				"ReadRequestBody",
				"read multipart upload data failed",
				err))
			break
		}
	}

	// Close the channel, wait for workers, and complete upload
	close(ch)
	u.wg.Wait()
	complete := u.complete()

	if err := u.geterr(); err != nil {
		return nil, &multiUploadError{
			awsError: awserr.New(
				"MultipartUpload",
				"upload multipart failed",
				err),
			uploadID: u.uploadID,
		}
	}
	return &UploadOutput{
		Location:  *complete.Location,
		VersionID: complete.VersionId,
		UploadID:  u.uploadID,
	}, nil
}
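
The shutdown sequence that ends upload (close the channel, wait on the WaitGroup, then read the latched error) is a general fan-out pattern. Below is a standard-library-only sketch with the part upload replaced by a print; errLatch stands in for the uploader's geterr/seterr pair, and all names are hypothetical:

package main

import (
	"fmt"
	"sync"
)

// errLatch records only the first error, the same role geterr/seterr
// play in the uploader: workers latch failures, the producer polls.
type errLatch struct {
	mu  sync.Mutex
	err error
}

func (l *errLatch) seterr(e error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.err == nil {
		l.err = e
	}
}

func (l *errLatch) geterr() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.err
}

func main() {
	const concurrency = 3
	var wg sync.WaitGroup
	var latch errLatch

	// Buffered to the worker count, as in upload above.
	ch := make(chan int, concurrency)
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for num := range ch { // exits once ch is closed
				if latch.geterr() == nil {
					fmt.Println("uploading part", num)
				}
			}
		}()
	}

	// The producer stops early if any worker has latched an error.
	for num := 1; num <= 5 && latch.geterr() == nil; num++ {
		ch <- num
	}

	// Same shutdown order as upload: close, wait, then check the error.
	close(ch)
	wg.Wait()
	if err := latch.geterr(); err != nil {
		fmt.Println("upload failed:", err)
	}
}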