Example #1
// downloadChunk downloads the chunk from S3
func (d *downloader) downloadChunk(chunk dlchunk) {
	if d.getErr() != nil {
		return
	}
	// Get the next byte range of data
	in := &s3.GetObjectInput{}
	awsutil.Copy(in, d.in)
	rng := fmt.Sprintf("bytes=%d-%d",
		chunk.start, chunk.start+chunk.size-1)
	in.Range = &rng

	req, resp := d.ctx.S3.GetObjectRequest(in)
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
	err := req.Send()

	if err != nil {
		d.setErr(err)
	} else {
		d.setTotalBytes(resp) // Set total if not yet set.

		n, err := io.Copy(&chunk, resp.Body)
		resp.Body.Close()

		if err != nil {
			d.setErr(err)
		}
		d.incrWritten(n)
	}
}
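The same pattern works outside the downloader: build a ranged GetObjectRequest, push the free-form User-Agent handler onto its Build handlers, and send it. A minimal standalone sketch, assuming aws-sdk-go v1; the bucket, key, and byte range are placeholders:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Request only the first 1 KiB of the object, mirroring the byte range
	// the downloader computes per chunk above.
	rng := fmt.Sprintf("bytes=%d-%d", 0, 1023)
	req, resp := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		Range:  &rng,
	})

	// Tag the request's User-Agent the same way downloadChunk does.
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))

	if err := req.Send(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer resp.Body.Close()

	n, _ := io.Copy(io.Discard, resp.Body)
	fmt.Println("bytes read:", n)
}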
Example #2
func TestMakeAddToUserAgentFreeFormHandler(t *testing.T) {
	fn := request.MakeAddToUserAgentFreeFormHandler("name/version (extra1; extra2)")
	r := &request.Request{HTTPRequest: &http.Request{Header: http.Header{}}}
	r.HTTPRequest.Header.Set("User-Agent", "foo/bar")
	fn(r)

	assert.Equal(t, "foo/bar name/version (extra1; extra2)", r.HTTPRequest.Header.Get("User-Agent"))
}
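The test above confirms the handler simply appends its argument to whatever User-Agent value is already set. In application code the same handler is typically registered once, for example on a session, so that every client built from it carries the suffix. A small sketch, assuming aws-sdk-go v1; the suffix string is only illustrative:

package main

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())

	// Every request built through this session appends the suffix to its
	// User-Agent header during the Build phase.
	sess.Handlers.Build.PushBack(
		request.MakeAddToUserAgentFreeFormHandler("my-app/1.2.3 (extra1; extra2)"))

	_ = s3.New(sess) // clients created from sess inherit the handler list
}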
Example #3
// fail will abort the multipart upload unless LeavePartsOnError is set to true.
func (u *multiuploader) fail() {
	if u.ctx.LeavePartsOnError {
		return
	}

	req, _ := u.ctx.S3.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
		Bucket:   u.in.Bucket,
		Key:      u.in.Key,
		UploadId: &u.uploadID,
	})
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
	req.Send()
}
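fail only issues the AbortMultipartUpload request when LeavePartsOnError is false. With the public s3manager API that flag is set through an Uploader option; a short sketch, assuming aws-sdk-go v1's s3manager package:

package main

import (
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())

	// Keep already-uploaded parts instead of aborting the multipart upload on
	// failure. Useful for debugging, but orphaned parts keep accruing storage
	// charges until they are removed (e.g. by a bucket lifecycle rule).
	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.LeavePartsOnError = true
	})
	_ = uploader
}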
Example #4
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.PutObjectInput{}
	awsutil.Copy(params, u.in)
	params.Body = buf

	req, out := u.ctx.S3.PutObjectRequest(params)
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
	if err := req.Send(); err != nil {
		return nil, err
	}

	url := req.HTTPRequest.URL.String()
	return &UploadOutput{
		Location:  url,
		VersionID: out.VersionId,
	}, nil
}
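Whether an upload takes this singlePart path or the multipart path depends on how much data fits into the first part, which is controlled by the uploader's part size. A configuration sketch, again assuming aws-sdk-go v1's s3manager package:

package main

import (
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())

	// Bodies that fit within a single part are sent with a plain PutObject;
	// anything larger goes through the multipart path. PartSize must be at
	// least s3manager.MinUploadPartSize (5 MB).
	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.PartSize = 10 * 1024 * 1024 // 10 MB parts
	})
	_ = uploader
}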
Example #5
// send performs an UploadPart request and keeps track of the completed
// part information.
func (u *multiuploader) send(c chunk) error {
	req, resp := u.ctx.S3.UploadPartRequest(&s3.UploadPartInput{
		Bucket:     u.in.Bucket,
		Key:        u.in.Key,
		Body:       c.buf,
		UploadId:   &u.uploadID,
		PartNumber: &c.num,
	})
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
	if err := req.Send(); err != nil {
		return err
	}

	n := c.num
	completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}

	u.m.Lock()
	u.parts = append(u.parts, completed)
	u.m.Unlock()

	return nil
}
Example #6
// complete successfully completes a multipart upload and returns the response.
func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
	if u.geterr() != nil {
		u.fail()
		return nil
	}

	// Parts must be sorted in PartNumber order.
	sort.Sort(u.parts)

	req, resp := u.ctx.S3.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
		Bucket:          u.in.Bucket,
		Key:             u.in.Key,
		UploadId:        &u.uploadID,
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
	})
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
	if err := req.Send(); err != nil {
		u.seterr(err)
		u.fail()
	}

	return resp
}
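The call sort.Sort(u.parts) implies u.parts is a slice type that implements sort.Interface ordered by PartNumber. A plausible sketch of such a type; the name completedParts and the main function are illustrative assumptions, not taken from the examples above:

package main

import (
	"fmt"
	"sort"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// completedParts orders *s3.CompletedPart values by PartNumber so they can be
// handed to CompleteMultipartUpload in the order S3 requires.
type completedParts []*s3.CompletedPart

func (a completedParts) Len() int           { return len(a) }
func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }

func main() {
	parts := completedParts{
		{ETag: aws.String(`"etag-2"`), PartNumber: aws.Int64(2)},
		{ETag: aws.String(`"etag-1"`), PartNumber: aws.Int64(1)},
	}
	sort.Sort(parts)
	fmt.Println(*parts[0].PartNumber, *parts[1].PartNumber) // 1 2
}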
Example #7
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.CreateMultipartUploadInput{}
	awsutil.Copy(params, u.in)

	// Create the multipart upload
	req, resp := u.ctx.S3.CreateMultipartUploadRequest(params)
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
	if err := req.Send(); err != nil {
		return nil, err
	}
	u.uploadID = *resp.UploadId

	// Create the workers
	ch := make(chan chunk, u.ctx.Concurrency)
	for i := 0; i < u.ctx.Concurrency; i++ {
		u.wg.Add(1)
		go u.readChunk(ch)
	}

	// Send part 1 to the workers
	var num int64 = 1
	ch <- chunk{buf: firstBuf, num: num}

	// Read and queue the rest of the parts
	for u.geterr() == nil {
		// This upload exceeded the maximum number of supported parts; error now.
		if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
			var msg string
			if num > int64(u.ctx.MaxUploadParts) {
				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					u.ctx.MaxUploadParts)
			} else {
				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					MaxUploadParts)
			}
			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
			break
		}
		num++

		buf, err := u.nextReader()
		if err == io.EOF {
			break
		}

		ch <- chunk{buf: buf, num: num}

		if err != nil && err != io.ErrUnexpectedEOF {
			u.seterr(awserr.New(
				"ReadRequestBody",
				"read multipart upload data failed",
				err))
			break
		}
	}

	// Close the channel, wait for workers, and complete upload
	close(ch)
	u.wg.Wait()
	complete := u.complete()

	if err := u.geterr(); err != nil {
		return nil, &multiUploadError{
			awsError: awserr.New(
				"MultipartUpload",
				"upload multipart failed",
				err),
			uploadID: u.uploadID,
		}
	}
	return &UploadOutput{
		Location:  *complete.Location,
		VersionID: complete.VersionId,
		UploadID:  u.uploadID,
	}, nil
}
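upload fans the parts out over a buffered channel to a fixed pool of readChunk workers, then closes the channel and waits on the WaitGroup. A self-contained sketch of that producer/worker-pool shape, with a placeholder part type standing in for chunk and Println standing in for the actual UploadPart call:

package main

import (
	"fmt"
	"sync"
)

// part stands in for the chunk type used above: a numbered piece of the body.
type part struct{ num int64 }

func main() {
	const concurrency = 3
	ch := make(chan part, concurrency)
	var wg sync.WaitGroup

	// Fixed pool of workers, mirroring the readChunk goroutines started above.
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for p := range ch { // exits when ch is closed
				fmt.Println("uploading part", p.num)
			}
		}()
	}

	// Producer: queue the parts, then close the channel and wait for the
	// workers, the same shutdown sequence upload uses.
	for num := int64(1); num <= 10; num++ {
		ch <- part{num: num}
	}
	close(ch)
	wg.Wait()
}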
Example #8
// Wait waits for an operation to complete, exhaust its maximum attempts, or
// fail. An error is returned if the operation fails.
func (w *Waiter) Wait() error {
	client := reflect.ValueOf(w.Client)
	in := reflect.ValueOf(w.Input)
	method := client.MethodByName(w.Config.Operation + "Request")

	for i := 0; i < w.MaxAttempts; i++ {
		res := method.Call([]reflect.Value{in})
		req := res[0].Interface().(*request.Request)
		req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter"))

		err := req.Send()
		for _, a := range w.Acceptors {
			result := false
			var vals []interface{}
			switch a.Matcher {
			case "pathAll", "path":
				// Require all matches to be equal for the result to match
				vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
				if len(vals) == 0 {
					break
				}
				result = true
				for _, val := range vals {
					if !awsutil.DeepEqual(val, a.Expected) {
						result = false
						break
					}
				}
			case "pathAny":
				// Only a single match needs to be equal for the result to match
				vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
				for _, val := range vals {
					if awsutil.DeepEqual(val, a.Expected) {
						result = true
						break
					}
				}
			case "status":
				s := a.Expected.(int)
				result = s == req.HTTPResponse.StatusCode
			case "error":
				if aerr, ok := err.(awserr.Error); ok {
					result = aerr.Code() == a.Expected.(string)
				}
			case "pathList":
				// ignored matcher
			default:
				logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s",
					w.Config.Operation, a.Matcher)
			}

			if !result {
				// This acceptor did not match the response, so there is nothing
				// more to do for it; if no acceptor matches, the request is retried.
				continue
			}

			switch a.State {
			case "success":
				// waiter completed
				return nil
			case "failure":
				// Waiter failure state triggered
				return awserr.New("ResourceNotReady",
					"failed waiting for successful resource state", err)
			case "retry":
				// clear the error and retry the operation
				err = nil
			default:
				logf(client, "WARNING: Waiter for %s encountered unexpected state: %s",
					w.Config.Operation, a.State)
			}
		}
		if err != nil {
			return err
		}

		time.Sleep(time.Second * time.Duration(w.Delay))
	}

	return awserr.New("ResourceNotReady",
		fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil)
}
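Wait locates the <Operation>Request method on the client by reflection and evaluates each response against the configured acceptors. A sketch of how a caller might assemble such a Waiter for S3's HeadBucket operation; the waiter import path and the exact Config/WaitAcceptor literal shapes are assumptions inferred from the fields referenced above, and the bucket name is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/private/waiter"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Poll HeadBucket until the bucket reports 200, retrying on 404, for at
	// most 20 attempts spaced 5 seconds apart.
	w := waiter.Waiter{
		Client: svc, // any value exposing a <Operation>Request method, found via reflection
		Input:  &s3.HeadBucketInput{Bucket: aws.String("example-bucket")}, // placeholder
		Config: waiter.Config{
			Operation:   "HeadBucket",
			Delay:       5,
			MaxAttempts: 20,
			Acceptors: []waiter.WaitAcceptor{
				{State: "success", Matcher: "status", Expected: 200},
				{State: "retry", Matcher: "status", Expected: 404},
			},
		},
	}
	if err := w.Wait(); err != nil {
		fmt.Println("bucket never became ready:", err)
	}
}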