Example #1
// downloadPart is an individual goroutine worker reading from the ch channel
// and performing a GetObject request on the data with a given byte range.
//
// If this is the first worker, this operation also resolves the total number
// of bytes to be read so that the worker manager knows when it is finished.
func (d *downloader) downloadPart(ch chan dlchunk) {
	defer d.wg.Done()

	for {
		chunk, ok := <-ch

		if !ok {
			break
		}

		if d.geterr() == nil {
			// Get the next byte range of data
			in := &s3.GetObjectInput{}
			awsutil.Copy(in, d.in)
			rng := fmt.Sprintf("bytes=%d-%d",
				chunk.start, chunk.start+chunk.size-1)
			in.Range = &rng

			resp, err := d.opts.S3.GetObject(in)
			if err != nil {
				d.seterr(err)
			} else {
				d.setTotalBytes(resp) // Set total if not yet set.

				n, err := io.Copy(chunk, resp.Body)
				resp.Body.Close()

				if err != nil {
					d.seterr(err)
				}
				d.incrwritten(n)
			}
		}
	}
}
Example #2
// downloadChunk downloads the chunk from S3
func (d *downloader) downloadChunk(chunk dlchunk) {
	if d.getErr() != nil {
		return
	}
	// Get the next byte range of data
	in := &s3.GetObjectInput{}
	awsutil.Copy(in, d.in)
	rng := fmt.Sprintf("bytes=%d-%d",
		chunk.start, chunk.start+chunk.size-1)
	in.Range = &rng

	req, resp := d.ctx.S3.GetObjectRequest(in)
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
	err := req.Send()

	if err != nil {
		d.setErr(err)
	} else {
		d.setTotalBytes(resp) // Set total if not yet set.

		n, err := io.Copy(&chunk, resp.Body)
		resp.Body.Close()

		if err != nil {
			d.setErr(err)
		}
		d.incrWritten(n)
	}
}
Example #3
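// TestCopyDifferentStructs verifies that awsutil.Copy copies only the fields
// the source and destination structs share with matching names and types;
// unique fields, same-named fields of a different type, and unexported
// pointer fields are left at their zero values in the destination.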
func TestCopyDifferentStructs(t *testing.T) {
	type SrcFoo struct {
		A                int
		B                []*string
		C                map[string]*int
		SrcUnique        string
		SameNameDiffType int
		unexportedPtr    *int
		ExportedPtr      *int
	}
	type DstFoo struct {
		A                int
		B                []*string
		C                map[string]*int
		DstUnique        int
		SameNameDiffType string
		unexportedPtr    *int
		ExportedPtr      *int
	}

	// Create the initial value
	str1 := "hello"
	str2 := "bye bye"
	int1 := 1
	int2 := 2
	f1 := &SrcFoo{
		A: 1,
		B: []*string{&str1, &str2},
		C: map[string]*int{
			"A": &int1,
			"B": &int2,
		},
		SrcUnique:        "unique",
		SameNameDiffType: 1,
		unexportedPtr:    &int1,
		ExportedPtr:      &int2,
	}

	// Do the copy
	var f2 DstFoo
	awsutil.Copy(&f2, f1)

	// Values are equal
	assert.Equal(t, f2.A, f1.A)
	assert.Equal(t, f2.B, f1.B)
	assert.Equal(t, f2.C, f1.C)
	assert.Equal(t, "unique", f1.SrcUnique)
	assert.Equal(t, 1, f1.SameNameDiffType)
	assert.Equal(t, 0, f2.DstUnique)
	assert.Equal(t, "", f2.SameNameDiffType)
	assert.Equal(t, int1, *f1.unexportedPtr)
	assert.Nil(t, f2.unexportedPtr)
	assert.Equal(t, int2, *f1.ExportedPtr)
	assert.Equal(t, int2, *f2.ExportedPtr)
}
Example #4
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.PutObjectInput{}
	awsutil.Copy(params, u.in)
	params.Body = buf

	req, _ := u.opts.S3.PutObjectRequest(params)
	if err := req.Send(); err != nil {
		return nil, err
	}

	url := req.HTTPRequest.URL.String()
	return &UploadOutput{Location: url}, nil
}
Example #5
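// TestCopyReader verifies that copying an interface value such as io.Reader
// is shallow: the destination ends up sharing the same underlying reader as
// the source.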
func TestCopyReader(t *testing.T) {
	var buf io.Reader = bytes.NewReader([]byte("hello world"))
	var r io.Reader
	awsutil.Copy(&r, buf)
	b, err := ioutil.ReadAll(r)
	assert.NoError(t, err)
	assert.Equal(t, []byte("hello world"), b)

	// The original reader is now drained because the copy shares the same
	// underlying reader (interface values are not deep copied).
	b, err = ioutil.ReadAll(buf)
	assert.NoError(t, err)
	assert.Equal(t, []byte(""), b)
}
Example #6
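// TestCopyIgnoreNilMembers verifies that nil pointer fields in the source
// stay nil in the destination, for both Copy and CopyOf.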
func TestCopyIgnoreNilMembers(t *testing.T) {
	type Foo struct {
		A *string
	}

	f := &Foo{}
	assert.Nil(t, f.A)

	var f2 Foo
	awsutil.Copy(&f2, f)
	assert.Nil(t, f2.A)

	fcopy := awsutil.CopyOf(f)
	f3 := fcopy.(*Foo)
	assert.Nil(t, f3.A)
}
Example #7
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.PutObjectInput{}
	awsutil.Copy(params, u.in)
	params.Body = buf

	req, out := u.ctx.S3.PutObjectRequest(params)
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
	if err := req.Send(); err != nil {
		return nil, err
	}

	url := req.HTTPRequest.URL.String()
	return &UploadOutput{
		Location:  url,
		VersionID: out.VersionId,
	}, nil
}
Example #8
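// TestCopyNestedWithUnexported verifies that exported fields of a nested
// struct are copied while its unexported fields are skipped.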
func TestCopyNestedWithUnexported(t *testing.T) {
	type Bar struct {
		a int
		B int
	}
	type Foo struct {
		A string
		B Bar
	}

	f1 := &Foo{A: "string", B: Bar{a: 1, B: 2}}

	var f2 Foo
	awsutil.Copy(&f2, f1)

	// Values match
	assert.Equal(t, f2.A, f1.A)
	assert.NotEqual(t, f2.B, f1.B)
	assert.NotEqual(t, f2.B.a, f1.B.a)
	assert.Equal(t, f2.B.B, f1.B.B)
}
Example #9
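// TestCopy verifies that awsutil.Copy performs a deep copy: the copied values
// are equal to the originals, but reassigning the copy's fields, slice
// elements, and map entries does not affect the source.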
func TestCopy(t *testing.T) {
	type Foo struct {
		A int
		B []*string
		C map[string]*int
	}

	// Create the initial value
	str1 := "hello"
	str2 := "bye bye"
	int1 := 1
	int2 := 2
	f1 := &Foo{
		A: 1,
		B: []*string{&str1, &str2},
		C: map[string]*int{
			"A": &int1,
			"B": &int2,
		},
	}

	// Do the copy
	var f2 Foo
	awsutil.Copy(&f2, f1)

	// Values are equal
	assert.Equal(t, f2.A, f1.A)
	assert.Equal(t, f2.B, f1.B)
	assert.Equal(t, f2.C, f1.C)

	// But pointers are not!
	str3 := "nothello"
	int3 := 57
	f2.A = 100
	f2.B[0] = &str3
	f2.C["B"] = &int3
	assert.NotEqual(t, f2.A, f1.A)
	assert.NotEqual(t, f2.B, f1.B)
	assert.NotEqual(t, f2.C, f1.C)
}
Example #10
func ExampleCopy() {
	type Foo struct {
		A int
		B []*string
	}

	// Create the initial value
	str1 := "hello"
	str2 := "bye bye"
	f1 := &Foo{A: 1, B: []*string{&str1, &str2}}

	// Do the copy
	var f2 Foo
	awsutil.Copy(&f2, f1)

	// Print the result
	fmt.Println(awsutil.Prettify(f2))

	// Output:
	// {
	//   A: 1,
	//   B: ["hello","bye bye"]
	// }
}
Example #11
// downloadChunk downloads the chunk from S3
func (d *downloader) downloadChunk(chunk dlchunk) error {
	in := &s3.GetObjectInput{}
	awsutil.Copy(in, d.in)

	// Get the next byte range of data
	rng := fmt.Sprintf("bytes=%d-%d", chunk.start, chunk.start+chunk.size-1)
	in.Range = &rng

	var n int64
	var err error
	for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
		req, resp := d.ctx.S3.GetObjectRequest(in)
		req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))

		err = req.Send()
		if err != nil {
			return err
		}
		d.setTotalBytes(resp) // Set total if not yet set.

		n, err = io.Copy(&chunk, resp.Body)
		resp.Body.Close()
		if err == nil {
			break
		}

		chunk.cur = 0
		logMessage(d.ctx.S3, aws.LogDebugWithRequestRetries,
			fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d",
				aws.StringValue(in.Key), err, retry))
	}

	d.incrWritten(n)

	return err
}
Example #12
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.CreateMultipartUploadInput{}
	awsutil.Copy(params, u.in)

	// Create the multipart
	req, resp := u.ctx.S3.CreateMultipartUploadRequest(params)
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
	if err := req.Send(); err != nil {
		return nil, err
	}
	u.uploadID = *resp.UploadId

	// Create the workers
	ch := make(chan chunk, u.ctx.Concurrency)
	for i := 0; i < u.ctx.Concurrency; i++ {
		u.wg.Add(1)
		go u.readChunk(ch)
	}

	// Send part 1 to the workers
	var num int64 = 1
	ch <- chunk{buf: firstBuf, num: num}

	// Read and queue the rest of the parts
	var err error
	for u.geterr() == nil && err == nil {
		num++
		// This upload exceeded the maximum number of supported parts; error out now.
		if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
			var msg string
			if num > int64(u.ctx.MaxUploadParts) {
				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					u.ctx.MaxUploadParts)
			} else {
				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					MaxUploadParts)
			}
			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
			break
		}

		var reader io.ReadSeeker
		var nextChunkLen int
		reader, nextChunkLen, err = u.nextReader()

		if err != nil && err != io.EOF {
			u.seterr(awserr.New(
				"ReadRequestBody",
				"read multipart upload data failed",
				err))
			break
		}

		if nextChunkLen == 0 {
			// No need to upload an empty part. If the file had been empty to
			// begin with, an empty single part would have been created and a
			// multipart upload never started.
			break
		}

		ch <- chunk{buf: reader, num: num}
	}

	// Close the channel, wait for workers, and complete upload
	close(ch)
	u.wg.Wait()
	complete := u.complete()

	if err := u.geterr(); err != nil {
		return nil, &multiUploadError{
			awsError: awserr.New(
				"MultipartUpload",
				"upload multipart failed",
				err),
			uploadID: u.uploadID,
		}
	}
	return &UploadOutput{
		Location:  aws.StringValue(complete.Location),
		VersionID: complete.VersionId,
		UploadID:  u.uploadID,
	}, nil
}
Example #13
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.CreateMultipartUploadInput{}
	awsutil.Copy(params, u.in)

	// Create the multipart
	resp, err := u.ctx.S3.CreateMultipartUpload(params)
	if err != nil {
		return nil, err
	}
	u.uploadID = *resp.UploadId

	// Create the workers
	ch := make(chan chunk, u.ctx.Concurrency)
	for i := 0; i < u.ctx.Concurrency; i++ {
		u.wg.Add(1)
		go u.readChunk(ch)
	}

	// Send part 1 to the workers
	var num int64 = 1
	ch <- chunk{buf: firstBuf, num: num}

	// Read and queue the rest of the parts
	for u.geterr() == nil {
		// This upload exceeded the maximum number of supported parts; error out now.
		if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
			var msg string
			if num > int64(u.ctx.MaxUploadParts) {
				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					u.ctx.MaxUploadParts)
			} else {
				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					MaxUploadParts)
			}
			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
			break
		}
		num++

		buf, err := u.nextReader()
		if err == io.EOF {
			break
		}

		ch <- chunk{buf: buf, num: num}

		if err != nil && err != io.ErrUnexpectedEOF {
			u.seterr(awserr.New(
				"ReadRequestBody",
				"read multipart upload data failed",
				err))
			break
		}
	}

	// Close the channel, wait for workers, and complete upload
	close(ch)
	u.wg.Wait()
	complete := u.complete()

	if err := u.geterr(); err != nil {
		return nil, &multiUploadError{
			awsError: awserr.New(
				"MultipartUpload",
				"upload multipart failed",
				err),
			uploadID: u.uploadID,
		}
	}
	return &UploadOutput{
		Location:  *complete.Location,
		VersionID: complete.VersionId,
		UploadID:  u.uploadID,
	}, nil
}
Example #14
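// TestCopy1 verifies deep copying of slices, maps, *time.Time, and nested
// struct pointers: values compare equal after the copy, and mutating data
// through the copy's pointers leaves the source unchanged.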
func TestCopy1(t *testing.T) {
	type Bar struct {
		a *int
		B *int
		c int
		D int
	}
	type Foo struct {
		A int
		B []*string
		C map[string]*int
		D *time.Time
		E *Bar
	}

	// Create the initial value
	str1 := "hello"
	str2 := "bye bye"
	int1 := 1
	int2 := 2
	intPtr1 := 1
	intPtr2 := 2
	now := time.Now()
	f1 := &Foo{
		A: 1,
		B: []*string{&str1, &str2},
		C: map[string]*int{
			"A": &int1,
			"B": &int2,
		},
		D: &now,
		E: &Bar{
			&intPtr1,
			&intPtr2,
			2,
			3,
		},
	}

	// Do the copy
	var f2 Foo
	awsutil.Copy(&f2, f1)

	// Values are equal
	assert.Equal(t, f2.A, f1.A)
	assert.Equal(t, f2.B, f1.B)
	assert.Equal(t, f2.C, f1.C)
	assert.Equal(t, f2.D, f1.D)
	assert.Equal(t, f2.E.B, f1.E.B)
	assert.Equal(t, f2.E.D, f1.E.D)

	// But pointers are not!
	str3 := "nothello"
	int3 := 57
	f2.A = 100
	*f2.B[0] = str3
	*f2.C["B"] = int3
	*f2.D = time.Now()
	f2.E.a = &int3
	*f2.E.B = int3
	f2.E.c = 5
	f2.E.D = 5
	assert.NotEqual(t, f2.A, f1.A)
	assert.NotEqual(t, f2.B, f1.B)
	assert.NotEqual(t, f2.C, f1.C)
	assert.NotEqual(t, f2.D, f1.D)
	assert.NotEqual(t, f2.E.a, f1.E.a)
	assert.NotEqual(t, f2.E.B, f1.E.B)
	assert.NotEqual(t, f2.E.c, f1.E.c)
	assert.NotEqual(t, f2.E.D, f1.E.D)
}
Example #15
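// TestCopyNil verifies that copying from a nil source leaves the destination
// at its zero value.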
func TestCopyNil(t *testing.T) {
	var s string
	awsutil.Copy(&s, nil)
	assert.Equal(t, "", s)
}
Example #16
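// TestCopyPrimitive verifies that a pointer to a primitive value is
// dereferenced and its value copied into the destination.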
func TestCopyPrimitive(t *testing.T) {
	str := "hello"
	var s string
	awsutil.Copy(&s, &str)
	assert.Equal(t, "hello", s)
}