func TestDownloadError(t *testing.T) {
	s, names, _ := dlLoggingSvc([]byte{1, 2, 3})

	num := 0
	s.Handlers.Send.PushBack(func(r *request.Request) {
		num++
		if num > 1 {
			r.HTTPResponse.StatusCode = 400
			r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
		}
	})

	d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) {
		d.Concurrency = 1
		d.PartSize = 1
	})
	w := &aws.WriteAtBuffer{}
	n, err := d.Download(w, &s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	})

	assert.NotNil(t, err)
	assert.Equal(t, int64(1), n)
	assert.Equal(t, []string{"GetObject", "GetObject"}, *names)
	assert.Equal(t, []byte{1}, w.Bytes())
}
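// Note: dlLoggingSvc is a test helper used throughout these examples but not
// shown in this listing. The sketch below is an assumption of its shape, not
// the original: an s3.S3 client whose Send handler is replaced so that every
// GetObject call is recorded (operation name and Range header) and answered
// from an in-memory byte slice. It uses the standard library plus the SDK's
// aws/request and awstesting/unit packages.
func dlLoggingSvcSketch(data []byte) (*s3.S3, *[]string, *[]string) {
	var mu sync.Mutex
	names := []string{}
	ranges := []string{}

	svc := s3.New(unit.Session)
	svc.Handlers.Send.Clear()
	svc.Handlers.Send.PushBack(func(r *request.Request) {
		mu.Lock()
		defer mu.Unlock()

		rng := aws.StringValue(r.Params.(*s3.GetObjectInput).Range)
		names = append(names, r.Operation.Name)
		ranges = append(ranges, rng)

		// Serve the requested byte range from the in-memory object.
		parts := regexp.MustCompile(`bytes=(\d+)-(\d+)`).FindStringSubmatch(rng)
		start, _ := strconv.ParseInt(parts[1], 10, 64)
		end, _ := strconv.ParseInt(parts[2], 10, 64)
		if end >= int64(len(data)) {
			end = int64(len(data)) - 1
		}
		var body []byte
		if start <= end {
			body = data[start : end+1]
		}

		r.HTTPResponse = &http.Response{
			StatusCode: 200,
			Body:       ioutil.NopCloser(bytes.NewReader(body)),
			Header:     http.Header{},
		}
		r.HTTPResponse.Header.Set("Content-Range",
			fmt.Sprintf("bytes %d-%d/%d", start, end, len(data)))
	})

	return svc, &names, &ranges
}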
// Get fetches a blob from an S3-compatible blobstore.
// The destination will be overwritten if it exists.
func (client *S3Blobstore) Get(src string, dest io.WriterAt) error {
	downloader := s3manager.NewDownloaderWithClient(client.s3Client)

	_, err := downloader.Download(dest, &s3.GetObjectInput{
		Bucket: aws.String(client.s3cliConfig.BucketName),
		Key:    aws.String(src),
	})

	return err
}
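// Usage sketch for the Get method above (the S3Blobstore constructor and its
// configuration are not part of this listing and are assumed here): *os.File
// implements io.WriterAt, so a local file can be passed as the destination.
func exampleBlobstoreGet(client *S3Blobstore) error {
	// The destination path is illustrative.
	f, err := os.Create("/tmp/blob.bin")
	if err != nil {
		return err
	}
	defer f.Close()

	// Fetch the object stored under "some/key" from the configured bucket.
	return client.Get("some/key", f)
}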
func TestDownloadZero(t *testing.T) {
	s, names, ranges := dlLoggingSvc([]byte{})

	d := s3manager.NewDownloaderWithClient(s)
	w := &aws.WriteAtBuffer{}
	n, err := d.Download(w, &s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	})

	assert.Nil(t, err)
	assert.Equal(t, int64(0), n)
	assert.Equal(t, []string{"GetObject"}, *names)
	assert.Equal(t, []string{"bytes=0-5242879"}, *ranges)
}
func TestDownloadSetPartSize(t *testing.T) {
	s, names, ranges := dlLoggingSvc([]byte{1, 2, 3})

	d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) {
		d.Concurrency = 1
		d.PartSize = 1
	})
	w := &aws.WriteAtBuffer{}
	n, err := d.Download(w, &s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	})

	assert.Nil(t, err)
	assert.Equal(t, int64(3), n)
	assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names)
	assert.Equal(t, []string{"bytes=0-0", "bytes=1-1", "bytes=2-2"}, *ranges)
	assert.Equal(t, []byte{1, 2, 3}, w.Bytes())
}
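// The functional options used by the tests above work the same way in
// application code. A sketch (the values are illustrative, not recommendations):
func newTunedDownloader(svc s3iface.S3API) *s3manager.Downloader {
	return s3manager.NewDownloaderWithClient(svc, func(d *s3manager.Downloader) {
		d.PartSize = 10 * 1024 * 1024 // 10 MiB per ranged GetObject (default is 5 MiB)
		d.Concurrency = 3             // number of parts fetched in parallel (default is 5)
	})
}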
func TestDownloadPartBodyRetry_FailRetry(t *testing.T) {
	s, names := dlLoggingSvcWithErrReader([]testErrReader{
		{Buf: []byte("ab"), Len: 3, Err: io.ErrUnexpectedEOF},
	})

	d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) {
		d.Concurrency = 1
	})

	w := &aws.WriteAtBuffer{}
	n, err := d.Download(w, &s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	})

	assert.Error(t, err)
	assert.Equal(t, int64(2), n)
	assert.Equal(t, []string{"GetObject"}, *names)
	assert.Equal(t, []byte("ab"), w.Bytes())
}
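// Note: dlLoggingSvcWithErrReader and testErrReader are helpers that are not
// shown in this listing. The sketch below is an assumed shape for the reader:
// it serves Buf while the response advertises Len bytes, then returns Err,
// simulating a part body that is cut short on the wire.
type testErrReaderSketch struct {
	Buf []byte // bytes actually delivered
	Err error  // error returned once Buf is exhausted
	Len int64  // advertised content length (may exceed len(Buf))

	off int
}

func (r *testErrReaderSketch) Read(p []byte) (int, error) {
	n := copy(p, r.Buf[r.off:])
	r.off += n
	if r.off == len(r.Buf) {
		// Fewer bytes than advertised were delivered; surface the error.
		return n, r.Err
	}
	return n, nil
}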
func TestDownloadContentRangeTotalAny(t *testing.T) {
	s, names := dlLoggingSvcContentRangeTotalAny(buf2MB, []int{200, 416})

	d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) {
		d.Concurrency = 1
	})
	w := &aws.WriteAtBuffer{}
	n, err := d.Download(w, &s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	})

	assert.Nil(t, err)
	assert.Equal(t, int64(len(buf2MB)), n)
	assert.Equal(t, []string{"GetObject", "GetObject"}, *names)

	count := 0
	for _, b := range w.Bytes() {
		count += int(b)
	}
	assert.Equal(t, 0, count)
}
func TestDownloadOrder(t *testing.T) {
	s, names, ranges := dlLoggingSvc(buf12MB)

	d := s3manager.NewDownloaderWithClient(s, func(d *s3manager.Downloader) {
		d.Concurrency = 1
	})
	w := &aws.WriteAtBuffer{}
	n, err := d.Download(w, &s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	})

	assert.Nil(t, err)
	assert.Equal(t, int64(len(buf12MB)), n)
	assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names)
	assert.Equal(t, []string{"bytes=0-5242879", "bytes=5242880-10485759", "bytes=10485760-15728639"}, *ranges)

	count := 0
	for _, b := range w.Bytes() {
		count += int(b)
	}
	assert.Equal(t, 0, count)
}
// Pull imports a Docker image from a tar artifact stored on S3
func (s *StorageS3) Pull(name string) error {
	img := imagename.NewFromString(name)

	if img.Storage != imagename.StorageS3 {
		return fmt.Errorf("Can only pull images with s3 storage specified, got: %s", img)
	}

	if img.Registry == "" {
		return fmt.Errorf("Cannot pull image from S3, missing bucket name, got: %s", img)
	}

	// TODO: here we use tmp file, but we can stream from S3 directly to Docker
	tmpf, err := ioutil.TempFile("", "rocker_image_")
	if err != nil {
		return err
	}
	defer os.Remove(tmpf.Name())

	var (
		// Create a downloader with the s3 client and custom options
		downloader = s3manager.NewDownloaderWithClient(s.s3, func(d *s3manager.Downloader) {
			d.PartSize = 64 * 1024 * 1024 // 64MB per part
		})

		imgPath = img.Name + "/" + img.Tag + ".tar"

		downloadParams = &s3.GetObjectInput{
			Bucket: aws.String(img.Registry),
			Key:    aws.String(imgPath),
		}
	)

	log.Infof("| Import %s/%s.tar to %s", img.NameWithRegistry(), img.Tag, tmpf.Name())

	if err := s.retryer.Outer(func() error {
		_, err := downloader.Download(tmpf, downloadParams)
		return err
	}); err != nil {
		return fmt.Errorf("Failed to download object from S3, error: %s", err)
	}

	fd, err := os.Open(tmpf.Name())
	if err != nil {
		return err
	}
	defer fd.Close()

	// Read through the tar reader to patch the repositories file, since we
	// might have a different tag property
	var (
		pipeReader, pipeWriter = io.Pipe()
		tr                     = tar.NewReader(fd)
		tw                     = tar.NewWriter(pipeWriter)
		errch                  = make(chan error, 1)

		loadOptions = docker.LoadImageOptions{
			InputStream: pipeReader,
		}
	)

	go func() {
		errch <- s.client.LoadImage(loadOptions)
	}()

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("Failed to read tar content, error: %s", err)
		}

		// Skip "repositories" file, we will write our own
		if hdr.Name == "repositories" {
			// Read repositories file and pass to JSON decoder
			r1 := Repositories{}
			data, err := ioutil.ReadAll(tr)
			if err != nil {
				return fmt.Errorf("Failed to read `repositories` file content, error: %s", err)
			}
			if err := json.Unmarshal(data, &r1); err != nil {
				return fmt.Errorf("Failed to parse `repositories` file json, error: %s", err)
			}

			var imageID string

			// Take the image ID from the first tag of the first repositories entry
			for _, tags := range r1 {
				for _, id := range tags {
					imageID = id
					break
				}
				break
			}

			// Make a new repositories struct
			r2 := Repositories{
				img.NameWithRegistry(): {
					img.GetTag(): imageID,
				},
			}

			// Write repositories file to the stream
			reposBody, err := json.Marshal(r2)
			if err != nil {
				return fmt.Errorf("Failed to marshal `repositories` file json, error: %s", err)
			}

			hdr := &tar.Header{
				Name: "repositories",
				Mode: 0644,
				Size: int64(len(reposBody)),
			}
			if err := tw.WriteHeader(hdr); err != nil {
				return fmt.Errorf("Failed to write `repositories` file tar header, error: %s", err)
			}
			if _, err := tw.Write(reposBody); err != nil {
				return fmt.Errorf("Failed to write `repositories` file to tar, error: %s", err)
			}

			continue
		}

		// Pass other files through as-is
		if err := tw.WriteHeader(hdr); err != nil {
			return fmt.Errorf("Failed to pass through tar header, error: %s", err)
		}
		if _, err := io.Copy(tw, tr); err != nil {
			return fmt.Errorf("Failed to pass through tar content, error: %s", err)
		}
	}

	// Finish tar
	if err := tw.Close(); err != nil {
		return fmt.Errorf("Failed to close tar writer, error: %s", err)
	}

	// Close pipeWriter
	if err := pipeWriter.Close(); err != nil {
		return fmt.Errorf("Failed to close tar pipeWriter, error: %s", err)
	}

	if err := <-errch; err != nil {
		return fmt.Errorf("Failed to import image, error: %s", err)
	}

	return nil
}
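// Note: the Repositories type used in Pull is not defined in this listing.
// Given how it is unmarshalled and indexed above, a compatible declaration is
// a nested map from image name to tag to image ID, mirroring the layout of
// Docker's legacy `repositories` file (the exact definition is an assumption):
type Repositories map[string]map[string]string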
func newS3Downloader(svc s3iface.S3API) *_s3downloader {
	return &_s3downloader{downloader: s3manager.NewDownloaderWithClient(svc)}
}
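// Note: the _s3downloader type is not shown in this listing. A minimal
// compatible declaration (an assumption) wraps the s3manager Downloader so
// callers depend on a small download wrapper rather than on s3manager directly:
type _s3downloader struct {
	downloader *s3manager.Downloader
}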