Example #1
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.PutObjectInput{}
	awsutil.Copy(params, u.in)
	params.Body = buf

	req, _ := u.s.PutObjectRequest(params)
	if err := req.Send(); err != nil {
		return nil, err
	}

	url := req.HTTPRequest.URL.String()
	return &UploadOutput{Location: url}, nil
}
Example #2
func TestCopyReader(t *testing.T) {
	var buf io.Reader = bytes.NewReader([]byte("hello world"))
	var r io.Reader
	awsutil.Copy(&r, buf)
	b, err := ioutil.ReadAll(r)
	assert.NoError(t, err)
	assert.Equal(t, []byte("hello world"), b)

	// empty bytes because this is not a deep copy
	b, err = ioutil.ReadAll(buf)
	assert.NoError(t, err)
	assert.Equal(t, []byte(""), b)
}
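In other words, Copy does not duplicate the bytes behind an io.Reader: both variables end up holding the same underlying reader, which is why draining the copy also drains the original. A minimal follow-up sketch of that check (the test name is hypothetical; the pointer comparison follows directly from the behaviour asserted above):

func TestCopyReaderSharesState(t *testing.T) {
	src := bytes.NewReader([]byte("hello"))
	var a io.Reader = src
	var b io.Reader
	awsutil.Copy(&b, a)

	// Both interface values hold the same *bytes.Reader, so reads share state.
	assert.True(t, a.(*bytes.Reader) == b.(*bytes.Reader))
}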
Example #3
// initOptions will initialize all default values for UploadOptions,
// working on a copy so the caller's struct is never mutated.
func initOptions(o *UploadOptions) *UploadOptions {
	// Fall back to the package defaults when no options were provided.
	if o == nil {
		o = DefaultUploadOptions
	}

	opts := &UploadOptions{}
	awsutil.Copy(opts, o)

	if opts.Concurrency == 0 {
		opts.Concurrency = DefaultConcurrency
	}
	if opts.PartSize == 0 {
		opts.PartSize = DefaultPartSize
	}
	return opts
}
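A hypothetical caller-side sketch of the function above: zero-valued fields pick up the package defaults while the caller's struct is left untouched, because initOptions only ever writes to its own copy. The function name and the concrete PartSize value here are illustrations, not part of the example:

func exampleInitOptions() {
	in := &UploadOptions{PartSize: 10 * 1024 * 1024} // Concurrency left at its zero value
	opts := initOptions(in)

	// opts.Concurrency is now DefaultConcurrency and opts.PartSize keeps 10MB,
	// while in.Concurrency is still 0: initOptions only wrote to its own copy.
	_ = opts
}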
Example #4
File: copy_test.go Project: ninefive/confd
func TestCopyIgnoreNilMembers(t *testing.T) {
	type Foo struct {
		A *string
	}

	f := &Foo{}
	assert.Nil(t, f.A)

	var f2 Foo
	awsutil.Copy(&f2, f)
	assert.Nil(t, f2.A)

	fcopy := awsutil.CopyOf(f)
	f3 := fcopy.(*Foo)
	assert.Nil(t, f3.A)
}
Example #5
func TestCopyDifferentStructs(t *testing.T) {
	type SrcFoo struct {
		A                int
		B                []*string
		C                map[string]*int
		SrcUnique        string
		SameNameDiffType int
	}
	type DstFoo struct {
		A                int
		B                []*string
		C                map[string]*int
		DstUnique        int
		SameNameDiffType string
	}

	// Create the initial value
	str1 := "hello"
	str2 := "bye bye"
	int1 := 1
	int2 := 2
	f1 := &SrcFoo{
		A: 1,
		B: []*string{&str1, &str2},
		C: map[string]*int{
			"A": &int1,
			"B": &int2,
		},
		SrcUnique:        "unique",
		SameNameDiffType: 1,
	}

	// Do the copy
	var f2 DstFoo
	awsutil.Copy(&f2, f1)

	// Values are equal
	assert.Equal(t, f2.A, f1.A)
	assert.Equal(t, f2.B, f1.B)
	assert.Equal(t, f2.C, f1.C)
	assert.Equal(t, "unique", f1.SrcUnique)
	assert.Equal(t, 1, f1.SameNameDiffType)
	assert.Equal(t, 0, f2.DstUnique)
	assert.Equal(t, "", f2.SameNameDiffType)
}
Example #6
func TestCopy(t *testing.T) {
	type Foo struct {
		A int
		B []*string
		C map[string]*int
	}

	// Create the initial value
	str1 := "hello"
	str2 := "bye bye"
	int1 := 1
	int2 := 2
	f1 := &Foo{
		A: 1,
		B: []*string{&str1, &str2},
		C: map[string]*int{
			"A": &int1,
			"B": &int2,
		},
	}

	// Do the copy
	var f2 Foo
	awsutil.Copy(&f2, f1)

	// Values are equal
	assert.Equal(t, f2.A, f1.A)
	assert.Equal(t, f2.B, f1.B)
	assert.Equal(t, f2.C, f1.C)

	// But pointers are not!
	str3 := "nothello"
	int3 := 57
	f2.A = 100
	f2.B[0] = &str3
	f2.C["B"] = &int3
	assert.NotEqual(t, f2.A, f1.A)
	assert.NotEqual(t, f2.B, f1.B)
	assert.NotEqual(t, f2.C, f1.C)
}
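Inside the test above, before the mutations, the deep-copy claim could be made explicit: the copied slice holds newly allocated *string values, so pointer identities differ even while the pointed-to data matches. A small follow-up sketch, to be read as additional assertions inside TestCopy:

	// f2.B is a fresh slice of fresh pointers; only the data behind them matches.
	assert.True(t, f1.B[0] != f2.B[0])
	assert.Equal(t, *f1.B[0], *f2.B[0])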
Example #7
func fillPresignedURL(r *aws.Request) {
	if !r.ParamsFilled() {
		return
	}

	params := r.Params.(*CopySnapshotInput)

	// Stop if PresignedURL/DestinationRegion is set
	if params.PresignedURL != nil || params.DestinationRegion != nil {
		return
	}

	// First generate a copy of parameters
	r.Params = awsutil.CopyOf(r.Params)
	params = r.Params.(*CopySnapshotInput)

	// Set destination region. Avoids infinite handler loop.
	// Also needed to sign sub-request.
	params.DestinationRegion = &r.Service.Config.Region

	// Create a new client pointing at source region.
	// We will use this to presign the CopySnapshot request against
	// the source region
	var config aws.Config
	awsutil.Copy(&config, r.Service.Config)
	config.Endpoint = ""
	config.Region = *params.SourceRegion
	client := New(&config)

	// Presign a CopySnapshot request with modified params
	req, _ := client.CopySnapshotRequest(params)
	url, err := req.Presign(300 * time.Second) // 5 minutes should be enough.

	if err != nil { // bubble error back up to original request
		r.Error = err
		return
	}

	// We have our URL, set it on params
	params.PresignedURL = &url
}
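For context, a build handler like this has to be attached to the CopySnapshot request before it is signed. A minimal registration sketch, assuming the request exposes an Operation.Name and a Handlers.Build list with PushFront, and assuming an initRequest hook; these names are assumptions, not shown in the example above:

func init() {
	initRequest = func(r *aws.Request) {
		// Only CopySnapshot needs the presigned source-region URL filled in.
		if r.Operation.Name == "CopySnapshot" {
			r.Handlers.Build.PushFront(fillPresignedURL)
		}
	}
}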
Example #8
func ExampleCopy() {
	type Foo struct {
		A int
		B []*string
	}

	// Create the initial value
	str1 := "hello"
	str2 := "bye bye"
	f1 := &Foo{A: 1, B: []*string{&str1, &str2}}

	// Do the copy
	var f2 Foo
	awsutil.Copy(&f2, f1)

	// Print the result
	fmt.Println(awsutil.StringValue(f2))

	// Output:
	// {
	//   A: 1,
	//   B: ["hello","bye bye"]
	// }
}
Example #9
func TestCopyNil(t *testing.T) {
	var s string
	awsutil.Copy(&s, nil)
	assert.Equal(t, "", s)
}
Example #10
func TestCopyPrimitive(t *testing.T) {
	str := "hello"
	var s string
	awsutil.Copy(&s, &str)
	assert.Equal(t, "hello", s)
}
Example #11
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
	params := &s3.CreateMultipartUploadInput{}
	awsutil.Copy(params, u.in)

	// Create the multipart
	resp, err := u.s.CreateMultipartUpload(params)
	if err != nil {
		return nil, err
	}
	u.uploadID = *resp.UploadID

	// Create the workers
	ch := make(chan chunk, u.opts.Concurrency)
	for i := 0; i < u.opts.Concurrency; i++ {
		u.wg.Add(1)
		go u.readChunk(ch)
	}

	// Send part 1 to the workers
	var num int64 = 1
	ch <- chunk{buf: firstBuf, num: num}

	// Read and queue the rest of the parts
	for u.geterr() == nil {
		// This upload exceeded maximum number of supported parts, error now.
		if num > int64(MaxUploadParts) {
			msg := fmt.Sprintf("exceeded total allowed parts (%d). "+
				"Adjust PartSize to fit in this limit", MaxUploadParts)
			u.seterr(apierr.New("TotalPartsExceeded", msg, nil))
			break
		}

		num++

		buf, err := u.nextReader()
		if err == io.EOF {
			break
		}

		ch <- chunk{buf: buf, num: num}

		if err != nil && err != io.ErrUnexpectedEOF {
			u.seterr(apierr.New("ReadRequestBody", "read multipart upload data failed", err))
			break
		}
	}

	// Close the channel, wait for workers, and complete upload
	close(ch)
	u.wg.Wait()
	complete := u.complete()

	if err := u.geterr(); err != nil {
		var berr *apierr.BaseError
		switch t := err.(type) {
		case *apierr.BaseError:
			berr = t
		default:
			berr = apierr.New("MultipartUpload", "upload multipart failed", err)
		}
		return nil, &multiUploadError{
			BaseError: berr,
			uploadID:  u.uploadID,
		}
	}
	return &UploadOutput{
		Location: *complete.Location,
		UploadID: u.uploadID,
	}, nil
}
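The readChunk workers launched above are not shown in this example. A minimal sketch of what such a worker loop might look like, assuming a u.send method that uploads one part and records its ETag (the method name and its behaviour are assumptions, not part of the example):

func (u *multiuploader) readChunk(ch chan chunk) {
	defer u.wg.Done()
	for data := range ch {
		// Once any part has failed there is no point uploading the rest.
		if u.geterr() == nil {
			if err := u.send(data); err != nil { // u.send: assumed per-part upload
				u.seterr(err)
			}
		}
	}
}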