Example #1
func GetImagesInGallery(bucket *s3.Bucket, g *Gallery) ([]Image, error) {
	pre := g.S3Prefix + "orig-"
	res, err := bucket.List(pre, "/", "", 1000)
	if err != nil {
		return nil, tracederror.New(err)
	}

	paths := []Image{}
	for _, v := range res.Contents {
		if v.Key[len(v.Key)-1] != '/' {
			raw := strings.TrimPrefix(v.Key, pre)

			t, err := time.Parse("2006-01-02T15:04:05.000Z", v.LastModified)
			if err != nil {
				log.Print("Couldn't parse time from amazon, assuming 'now' for upload date.")
				log.Print(err)

				t = time.Now()
			}

			paths = append(paths, Image{
				Thumb: fmt.Sprintf("%sthumb-%s", g.S3Prefix, raw),
				Orig:  fmt.Sprintf("%sorig-%s", g.S3Prefix, raw),
				Hero:  fmt.Sprintf("%shero-%s", g.S3Prefix, raw),

				ETag:         v.ETag,
				LastModified: t,
			})
		}
	}

	sort.Sort(ByDate(paths))
	return paths, nil
}
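A hypothetical call site (the Gallery value and prefix are made up for illustration; GetImagesInGallery and the Image fields come from the example above):

// Hypothetical: fetch the sorted image list for one gallery.
images, err := GetImagesInGallery(bucket, &Gallery{S3Prefix: "galleries/summer/"})
if err != nil {
	log.Fatal(err)
}
for _, img := range images {
	fmt.Println(img.Orig, img.LastModified)
}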
Example #2
// uploads3fileifnotexists uploads only if the given key does not exist.
func uploads3fileifnotexists(binpath, binfile, contenttype string, bucket *s3.Bucket) error {
	k, _ := bucket.GetKey(binpath)
	if k == nil {
		// Binary does not exist on S3; upload it now.
		return uploads3file(binpath, binfile, contenttype, bucket)
	}
	return nil
}
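Note that the error from GetKey is discarded, so any failure (network, auth) looks like a missing key and triggers an upload. A more defensive sketch, assuming the goamz client surfaces *s3.Error with the HTTP status code; uploadIfMissing is a made-up name, not part of the original example:

// uploadIfMissing is a hedged variant: it treats only an explicit 404
// as "key does not exist" instead of swallowing every GetKey error.
func uploadIfMissing(binpath, binfile, contenttype string, bucket *s3.Bucket) error {
	_, err := bucket.GetKey(binpath)
	if err == nil {
		return nil // key already exists; nothing to do
	}
	if s3err, ok := err.(*s3.Error); ok && s3err.StatusCode == 404 {
		return uploads3file(binpath, binfile, contenttype, bucket)
	}
	return err // network/auth failures should not trigger a blind upload
}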
Example #3
func upload(_file file, uploads chan<- bool, client *s3.S3, bucket *s3.Bucket) {
	err := bucket.Put(_file.path, _file.data, _file.contentType, permissions)
	if err != nil {
		fmt.Printf("UPLOAD ERROR: %+v\n", err)
		panic(err)
	}
	// Log success before signaling completion so the message is not
	// lost if the receiver exits immediately after the send.
	fmt.Printf("Uploaded %s!\n", _file.path)
	uploads <- true
}
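A sketch of how this worker might be driven; files, client, and bucket are assumed to exist at the call site. Since upload panics on failure, a panic inside one of these goroutines tears down the whole process; an error channel is a common alternative:

// Hypothetical driver: one goroutine per file, then wait for all
// uploads by draining the completion channel.
uploads := make(chan bool)
for _, f := range files {
	go upload(f, uploads, client, bucket)
}
for range files {
	<-uploads
}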
Example #4
func s3Upload(bucket *s3.Bucket, path string, im *Image) (string, error) {
	if len(im.Data) == 0 {
		return "", fmt.Errorf("no image data found for %s", path)
	}
	if err := bucket.Put(path, im.Data, im.MimeType(), s3.PublicRead); err != nil {
		return "", err
	}
	return bucket.URL(path), nil
}
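A hypothetical call site (img and the key are illustrative):

// Hypothetical: upload an already-loaded *Image and print its public URL.
url, err := s3Upload(bucket, "covers/1234.jpg", img)
if err != nil {
	log.Fatal(err)
}
fmt.Println("public URL:", url)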
Example #5
// uploads3fileifnotexists uploads only if the given key does not exist.
func uploads3fileifnotexists(binpath, binfile, contenttype string, bucket *s3.Bucket) error {
	k, _ := bucket.GetKey(binpath)
	if k == nil {
		// Binary does not exist on S3; gzip and upload it now.
		finalfile, err := gziptotempfile(binfile)
		if err != nil {
			return err
		}
		return uploads3file(binpath, finalfile, "application/x-gzip", bucket)
	}
	return nil
}
Example #6
func writeS3FileToPath(file string, bucket *s3.Bucket, path string) error {
	data, err := bucket.Get(path)
	if err != nil {
		return err
	}
	perms := os.FileMode(0644)

	return ioutil.WriteFile(file, data, perms)
}
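Get buffers the whole object in memory. goamz also exposes GetReader, which streams the body instead; a sketch of the same helper without the buffering (needs io and os imported; the function name is made up):

// writeS3FileToPathStreaming is a sketch: GetReader returns an
// io.ReadCloser, so large objects never sit fully in memory.
func writeS3FileToPathStreaming(file string, bucket *s3.Bucket, path string) error {
	rc, err := bucket.GetReader(path)
	if err != nil {
		return err
	}
	defer rc.Close()

	out, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, rc)
	return err
}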
Example #7
func killBucket(b *s3.Bucket) {
	var err error
	for attempt := attempts.Start(); attempt.Next(); {
		err = b.DelBucket()
		if err == nil {
			return
		}
		if _, ok := err.(*net.DNSError); ok {
			return
		}
		e, ok := err.(*s3.Error)
		if ok && e.Code == "NoSuchBucket" {
			return
		}
		if ok && e.Code == "BucketNotEmpty" {
			// Errors are ignored here. Just retry.
			resp, err := b.List("", "", "", 1000)
			if err == nil {
				for _, key := range resp.Contents {
					_ = b.Del(key.Key)
				}
			}
			multis, _, _ := b.ListMulti("", "")
			for _, m := range multis {
				_ = m.Abort()
			}
		}
	}
	message := "cannot delete test bucket"
	if err != nil {
		message += ": " + err.Error()
	}
	panic(message)
}
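attempts is not defined in the example; in the goamz test suite it is an aws.AttemptStrategy. A plausible definition (the values are illustrative, not taken from the example):

// Illustrative retry policy: keep trying for up to five seconds,
// pausing 200ms between attempts (aws is the goamz aws package).
var attempts = aws.AttemptStrategy{
	Total: 5 * time.Second,
	Delay: 200 * time.Millisecond,
}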
Example #8
func writeLocalFileToS3(bucket *s3.Bucket, path string, file string) error {
	contType := mime.TypeByExtension(filepath.Ext(file))
	perms := s3.ACL("private")

	data, err := ioutil.ReadFile(file)
	if err != nil {
		return err
	}

	if err := bucket.Put(path, data, contType, perms); err != nil {
		return err
	}

	return nil
}
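mime.TypeByExtension returns an empty string for unknown extensions, so such objects would be stored with no content type. A small hedged fallback for the lookup above:

contType := mime.TypeByExtension(filepath.Ext(file))
if contType == "" {
	// Unknown extension: fall back to a generic binary type so the
	// object is not stored with an empty Content-Type.
	contType = "application/octet-stream"
}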
Example #9
func writeS3FileToS3(sourceBucket, targetBucket *s3.Bucket, sourceKeyPath, targetKeyPath string) error {
	data, err := sourceBucket.Get(sourceKeyPath)
	if err != nil {
		return err
	}

	contType := mime.TypeByExtension(filepath.Ext(sourceKeyPath))
	perms := s3.ACL("private")

	if err := targetBucket.Put(targetKeyPath, data, contType, perms); err != nil {
		return err
	}

	return nil
}
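A hypothetical call site (bucket handles and key names are illustrative). Note that Get followed by Put round-trips the object's bytes through the client rather than copying server-side:

// Hypothetical: mirror one key between two buckets the caller already holds.
err := writeS3FileToS3(srcBucket, dstBucket, "reports/2015.csv", "reports/2015.csv")
if err != nil {
	log.Fatal(err)
}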
Example #10
func uploads3file(path, file, contenttype string, bucket *s3.Bucket) error {
	f, err := os.Open(file)
	if err != nil {
		return err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return err
	}
	return bucket.PutReader(path, f, info.Size(), contenttype, s3.Private)
}
Example #11
func (s3p *s3Provider) rawUpload(opts *Options, b *s3.Bucket, a *artifact.Artifact) error {
	dest := a.FullDest()
	reader, err := a.Reader()
	if err != nil {
		return err
	}

	ctype := a.ContentType()
	size, err := a.Size()
	if err != nil {
		return err
	}

	downloadHost := s3p.getRegion().S3BucketEndpoint
	if downloadHost == "" {
		downloadHost = fmt.Sprintf("https://%s.s3.amazonaws.com", b.Name)
	}
	s3p.log.WithFields(logrus.Fields{
		"download_url": fmt.Sprintf("%s/%s", downloadHost, dest),
	}).Info(fmt.Sprintf("uploading: %s (size: %s)", a.Source, humanize.Bytes(size)))

	s3p.log.WithFields(logrus.Fields{
		"percent_max_size": pctMax(size, opts.MaxSize),
		"max_size":         humanize.Bytes(opts.MaxSize),
		"source":           a.Source,
		"dest":             dest,
		"bucket":           b.Name,
		"content_type":     ctype,
		"cache_control":    opts.CacheControl,
	}).Debug("more artifact details")

	err = b.PutReaderHeader(dest, reader, int64(size),
		map[string][]string{
			"Content-Type":  []string{ctype},
			"Cache-Control": []string{opts.CacheControl},
		}, a.Perm)
	if err != nil {
		return err
	}

	return nil
}
Example #12
// listFiles lists the files in a specific S3 bucket.
func listFiles(prefix, delimiter, marker string, maxKeys int, b *s3.Bucket) (files []s3.Key, err error) {
	resp, err := b.List(prefix, delimiter, marker, maxKeys)
	if err != nil {
		return nil, err
	}

	// append to files
	files = append(files, resp.Contents...)

	// recursion for the recursion god
	if resp.IsTruncated && resp.NextMarker != "" {
		f, err := listFiles(resp.Prefix, resp.Delimiter, resp.NextMarker, resp.MaxKeys, b)
		if err != nil {
			return nil, err
		}

		// append to files
		files = append(files, f...)
	}

	return files, nil
}
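A hypothetical call site that pages through everything under a prefix (the prefix and bucket are illustrative; an empty delimiter lists keys recursively):

// Hypothetical: list every key under "logs/"; listFiles recurses
// through truncated pages via NextMarker.
keys, err := listFiles("logs/", "", "", 1000, bucket)
if err != nil {
	log.Fatal(err)
}
for _, k := range keys {
	fmt.Println(k.Key, k.Size)
}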
Example #13
func loadS3Files(bucket *s3.Bucket, path string, files map[string]string, marker string) (map[string]string, error) {
	log.Debugf("Loading files from 's3://%s/%s'.", bucket.Name, path)
	data, err := bucket.List(path, "", marker, 0)
	if err != nil {
		return files, err
	}

	for _, key := range data.Contents {
		md5sum := strings.Trim(key.ETag, "\"")
		files[key.Key] = md5sum
	}

	// If the results were truncated, keep paging from the last key seen
	// and let the recursive call add the remaining files to the map.
	if data.IsTruncated {
		lastKey := data.Contents[len(data.Contents)-1].Key
		log.Infof("Results truncated, loading additional files via previous last key '%s'.", lastKey)
		if _, err := loadS3Files(bucket, path, files, lastKey); err != nil {
			return files, err
		}
	}

	log.Debugf("Loaded '%d' files from 's3://%s/%s' successfully.", len(files), bucket.Name, path)
	return files, nil
}
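A hypothetical call site: start with an empty map and an empty marker, and the helper returns key-to-MD5 pairs (the prefix is illustrative):

// Hypothetical: collect every key under "backups/" with its MD5 (ETag).
files, err := loadS3Files(bucket, "backups/", map[string]string{}, "")
if err != nil {
	log.Fatal(err)
}
for key, md5sum := range files {
	fmt.Println(key, md5sum)
}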
Example #14
		Ω(err).ShouldNot(HaveOccurred())

		destination = path.Join(tmpdir, "in-dir")

		inCmd = exec.Command(inPath, destination)
	})

	AfterEach(func() {
		os.RemoveAll(tmpdir)
	})

	Context("when executed", func() {
		var request models.InRequest
		var response models.InResponse

		var bucket *s3.Bucket

		BeforeEach(func() {
			guid, err := uuid.NewV4()
			Ω(err).ShouldNot(HaveOccurred())

			key = guid.String()

			auth := aws.Auth{
				AccessKey: accessKeyID,
				SecretKey: secretAccessKey,
			}

			region, ok := aws.Regions[regionName]
			Ω(ok).Should(BeTrue())
Example #15
		Ω(err).ShouldNot(HaveOccurred())

		destination = path.Join(tmpdir, "in-dir")

		checkCmd = exec.Command(checkPath, destination)
	})

	AfterEach(func() {
		os.RemoveAll(tmpdir)
	})

	Context("when executed", func() {
		var request models.CheckRequest
		var response models.CheckResponse

		var bucket *s3.Bucket

		BeforeEach(func() {
			guid, err := uuid.NewV4()
			Ω(err).ShouldNot(HaveOccurred())

			key = guid.String()

			auth := aws.Auth{
				AccessKey: accessKeyID,
				SecretKey: secretAccessKey,
			}

			region, ok := aws.Regions[regionName]
			Ω(ok).Should(BeTrue())
Example #16
		source, err = ioutil.TempDir("", "out-source")
		Ω(err).ShouldNot(HaveOccurred())

		outCmd = exec.Command(outPath, source)
	})

	AfterEach(func() {
		os.RemoveAll(source)
	})

	Context("when executed", func() {
		var request models.OutRequest
		var response models.OutResponse

		var bucket *s3.Bucket

		BeforeEach(func() {
			guid, err := uuid.NewV4()
			Ω(err).ShouldNot(HaveOccurred())

			key = guid.String()

			auth := aws.Auth{
				AccessKey: accessKeyID,
				SecretKey: secretAccessKey,
			}

			region, ok := aws.Regions[regionName]
			Ω(ok).Should(BeTrue())
	"github.com/mitchellh/goamz/aws"
	goamz "github.com/mitchellh/goamz/s3"
	"github.com/pivotal-cf/cf-redis-broker/s3"
	"github.com/pivotal-golang/lager"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/onsi/gomega/gbytes"
)

var _ = Describe("Client", func() {
	var (
		fakeRegion        aws.Region
		goamzBucketClient *goamz.Bucket
		bucketName        string
		log               *gbytes.Buffer
		logger            lager.Logger
	)

	BeforeEach(func() {
		bucketName = "i_am_bucket"

		logger = lager.NewLogger("logger")
		log = gbytes.NewBuffer()
		logger.RegisterSink(lager.NewWriterSink(log, lager.INFO))

		fakeRegion = aws.Region{
			Name:                 "fake_region",
			S3Endpoint:           fakeS3EndpointURL,
			S3LocationConstraint: true,