func uploadFileToS3(bucket *s3.Bucket, fpath, s3path string) error {
	// Try to detect the mime type.
	mimetype := ""
	err := magicmime.Open(magicmime.MAGIC_MIME_TYPE | magicmime.MAGIC_SYMLINK | magicmime.MAGIC_ERROR)
	if err != nil {
		log.Debugf("Opening magicmime failed: %v", err)
	} else {
		mimetype, err = magicmime.TypeByFile(fpath)
		if err != nil {
			log.Debugf("Mime type detection for %s failed: %v", fpath, err)
		}
	}

	contents, err := ioutil.ReadFile(fpath)
	if err != nil {
		return fmt.Errorf("reading %q failed: %v", fpath, err)
	}

	// Push the file to s3.
	log.Debugf("Pushing %s to s3", s3path)
	if err := bucket.Put(s3path, contents, mimetype, "public-read", s3.Options{CacheControl: "no-cache"}); err != nil {
		return err
	}

	log.Infof("Successfully pushed %s to s3", s3path)
	return nil
}
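// The functions in this file operate on a *s3.Bucket from the goamz-style S3
// client. A minimal setup sketch, assuming credentials in the environment
// (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY) via aws.EnvAuth; the bucket
// name is hypothetical.
func exampleBucket() (*s3.Bucket, error) {
	auth, err := aws.EnvAuth()
	if err != nil {
		return nil, err
	}
	conn := s3.New(auth, aws.USEast)
	return conn.Bucket("my-example-bucket"), nil // hypothetical bucket name
}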
// listFiles lists the files in a specific s3 bucket.
func listFiles(prefix, delimiter, marker string, maxKeys int, b *s3.Bucket) (files []s3.Key, err error) {
	resp, err := b.List(prefix, delimiter, marker, maxKeys)
	if err != nil {
		return nil, err
	}

	// Append to files, skipping the keys we never touch.
	for _, fl := range resp.Contents {
		if strings.Contains(fl.Key, "index.html") || strings.Contains(fl.Key, "static") || strings.Contains(fl.Key, "logs") {
			continue
		}
		files = append(files, fl)
	}

	// Recursion for the recursion god: follow the pagination marker.
	if resp.IsTruncated && resp.NextMarker != "" {
		f, err := listFiles(resp.Prefix, resp.Delimiter, resp.NextMarker, resp.MaxKeys, b)
		if err != nil {
			return nil, err
		}
		files = append(files, f...)
	}

	return files, nil
}
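// A minimal usage sketch for listFiles, assuming a configured *s3.Bucket like
// the one from exampleBucket above: fetch up to 1000 keys per page from the
// bucket root and print them. The log calls are illustrative only.
func exampleListFiles(bucket *s3.Bucket) {
	files, err := listFiles("", "", "", 1000, bucket)
	if err != nil {
		log.Fatalf("listing bucket failed: %v", err)
	}
	for _, f := range files {
		log.Printf("key: %s (size %d)", f.Key, f.Size)
	}
}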
func LastS3KeyWithPrefix(bucket *s3.Bucket, prefix *string) (string, error) {
	narrowedPrefix := *prefix
	keyMarker := ""

	// First, try a shortcut for scanning backwards: focus on the last
	// S3_REWIND_IN_DAYS_BEFORE_LONG_LOOP days. Otherwise just loop forward
	// until there aren't any more results.
	currentDay := time.Now()
	for i := 0; i < S3_REWIND_IN_DAYS_BEFORE_LONG_LOOP; i++ {
		testPrefix := fmt.Sprintf("%s%s", *prefix, S3DatePrefix(&currentDay))
		results, err := bucket.List(testPrefix, "", keyMarker, 0)
		if err == nil && len(results.Contents) > 0 {
			narrowedPrefix = testPrefix
			break
		}
		currentDay = currentDay.Add(-1 * time.Duration(DAY_IN_SECONDS) * time.Second)
	}

	lastKey := ""
	moreResults := true
	for moreResults {
		results, err := bucket.List(narrowedPrefix, "", keyMarker, 0)
		if err != nil {
			return lastKey, err
		}
		if len(results.Contents) == 0 {
			// Empty page: return the last key found so far.
			return lastKey, nil
		}

		lastKey = results.Contents[len(results.Contents)-1].Key
		keyMarker = lastKey
		moreResults = results.IsTruncated
	}

	return lastKey, nil
}
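// S3DatePrefix is referenced above (and again in StoreToS3AndRelease below)
// but not defined in this snippet. A minimal sketch, assuming keys are laid
// out under a year/month/day hierarchy; the exact date format is an
// assumption and should match the real key scheme.
func S3DatePrefix(t *time.Time) string {
	return t.Format("2006/01/02/") // hypothetical layout, e.g. "2015/06/17/"
}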
func createIndexFile(bucket *s3.Bucket, bucketpath, html string) error {
	p := path.Join(bucketpath, "index.html")
	contents := strings.Replace(index, "{{ . }}", html, -1)

	// Push the file to s3.
	log.Debugf("Pushing %s to s3", p)
	if err := bucket.Put(p, []byte(contents), "text/html", "public-read", s3.Options{CacheControl: "no-cache"}); err != nil {
		return err
	}

	log.Infof("Successfully pushed %s to s3", p)
	return nil
}
func killBucket(b *s3.Bucket) {
	var err error
	for attempt := attempts.Start(); attempt.Next(); {
		err = b.DelBucket()
		if err == nil {
			return
		}
		if _, ok := err.(*net.DNSError); ok {
			return
		}
		e, ok := err.(*s3.Error)
		if ok && e.Code == "NoSuchBucket" {
			return
		}
		if ok && e.Code == "BucketNotEmpty" {
			// Errors are ignored here. Just retry.
			resp, err := b.List("", "", "", 1000)
			if err == nil {
				for _, key := range resp.Contents {
					_ = b.Del(key.Key)
				}
			}
			multis, _, _ := b.ListMulti("", "")
			for _, m := range multis {
				_ = m.Abort()
			}
		}
	}
	message := "cannot delete test bucket"
	if err != nil {
		message += ": " + err.Error()
	}
	panic(message)
}
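// killBucket retries via an "attempts" strategy that is not defined in this
// snippet. A minimal sketch, assuming goamz's aws.AttemptStrategy; the
// concrete limits are assumptions.
var attempts = aws.AttemptStrategy{
	Min:   5,                      // retry at least five times
	Total: 5 * time.Second,        // give up after five seconds overall
	Delay: 200 * time.Millisecond, // pause between attempts
}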
func CreateManifestInBucket(batch *metadata.LoadBatch, bucket *s3.Bucket) (string, error) {
	manifest, err := makeManifestJson(batch)
	if err != nil {
		return "", err
	}

	url := manifestUrl(bucket.Name, batch.UUID)
	err = bucket.Put(batch.UUID+".json", manifest, "application/json", s3.BucketOwnerRead, s3.Options{})
	if err != nil {
		return "", err
	}

	return url, nil
}
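// manifestUrl is referenced above but not defined here. A minimal sketch,
// assuming the manifest is addressed with an s3:// URL (the form consumed by,
// e.g., Redshift COPY); the exact scheme is an assumption.
func manifestUrl(bucketName, uuid string) string {
	return fmt.Sprintf("s3://%s/%s.json", bucketName, uuid)
}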
func uploadFileToS3(bucket *s3.Bucket, fpath, s3path, contentType string) error {
	contents, err := ioutil.ReadFile(fpath)
	if err != nil {
		return fmt.Errorf("reading %q failed: %v", fpath, err)
	}

	// Push the file to s3.
	logrus.Debugf("Pushing %s to s3", s3path)
	if err := bucket.Put(s3path, contents, contentType, "public-read", s3.Options{CacheControl: "no-cache"}); err != nil {
		return err
	}

	logrus.Infof("Successfully pushed %s to s3", s3path)
	return nil
}
func (chunkBuffer *ChunkBuffer) StoreToS3AndRelease(s3bucket *s3.Bucket) (bool, error) {
	var s3path string
	var err error

	if debug {
		fmt.Printf("Closing bufferfile: %s\n", chunkBuffer.File.Name())
	}
	chunkBuffer.File.Close()

	contents, err := ioutil.ReadFile(chunkBuffer.File.Name())
	if err != nil {
		return false, err
	}

	if len(contents) == 0 {
		if debug {
			fmt.Printf("Nothing to store to s3 for bufferfile: %s\n", chunkBuffer.File.Name())
		}
	} else {
		// Write to s3 under a new, collision-free filename.
		alreadyExists := true
		for alreadyExists {
			writeTime := time.Now()
			s3path = fmt.Sprintf("%s%s%d", S3TopicPartitionPrefix(chunkBuffer.Topic, chunkBuffer.Partition), S3DatePrefix(&writeTime), writeTime.UnixNano())

			alreadyExists, err = s3bucket.Exists(s3path)
			if err != nil {
				return false, err
			}
		}

		fmt.Printf("S3 Put Object: { Bucket: %s, Key: %s, MimeType: %s }\n", s3bucket.Name, s3path, mime.TypeByExtension(filepath.Ext(chunkBuffer.File.Name())))
		err = s3bucket.Put(s3path, contents, mime.TypeByExtension(filepath.Ext(chunkBuffer.File.Name())), s3.Private, s3.Options{})
		if err != nil {
			return false, err
		}
	}

	if !keepBufferFiles {
		if debug {
			fmt.Printf("Deleting bufferfile: %s\n", chunkBuffer.File.Name())
		}
		err = os.Remove(chunkBuffer.File.Name())
		if err != nil {
			fmt.Printf("Error deleting bufferfile %s: %#v\n", chunkBuffer.File.Name(), err)
		}
	}

	return true, nil
}
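// S3TopicPartitionPrefix is referenced above but not defined in this snippet.
// A minimal sketch, assuming Topic is a string and Partition an int64; both
// types and the key layout are assumptions.
func S3TopicPartitionPrefix(topic string, partition int64) string {
	return fmt.Sprintf("%s/%d/", topic, partition) // hypothetical layout
}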
func getS3FileHash(bucket *s3.Bucket, filePath string) (string, error) {
	list, err := bucket.List(filePath, "", "", 1)
	if err != nil {
		return "", err
	}
	if len(list.Contents) < 1 {
		return "", ErrFileDoesNotExistS3
	}

	md5sum := strings.Trim(list.Contents[0].ETag, "\"")
	return md5sum, nil
}
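// The ETag returned above equals the plain MD5 of the object only for
// non-multipart uploads. A minimal comparison sketch under that assumption,
// using crypto/md5 to hash a local file against the S3 ETag.
func fileMatchesS3(bucket *s3.Bucket, localPath, s3Path string) (bool, error) {
	remote, err := getS3FileHash(bucket, s3Path)
	if err != nil {
		return false, err
	}
	contents, err := ioutil.ReadFile(localPath)
	if err != nil {
		return false, err
	}
	local := fmt.Sprintf("%x", md5.Sum(contents))
	return local == remote, nil
}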
func (i *Importer) download(buk *s3.Bucket, id string) error {
	tmpPath := path.Join(env.DIR, "graph", ":artmp:"+id)
	outPath := path.Join(env.DIR, "graph", id)

	if err := os.MkdirAll(tmpPath, 0755); err != nil {
		return err
	}
	if err := os.MkdirAll(path.Join(outPath, "layer"), 0755); err != nil {
		return err
	}

	key := fmt.Sprintf("/binary/repos/%s.layer", id)
	rc, err := buk.GetReader(key)
	if err != nil {
		return err
	}

	// Untar the layer into the temporary directory.
	cmd := exec.Command("tar", "-f", "-", "-C", tmpPath, "-x")
	cmd.Stdin = rc
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	runErr := cmd.Run()
	rc.Close()
	if runErr != nil {
		return runErr
	}

	img := i.extract(id, tmpPath)
	if img.Parent != "" {
		if i.alreadyExists(img.Parent) {
			return fmt.Errorf("parent layer %s already installed, not overwriting", img.Parent)
		}
		fmt.Printf("Moving to download parent %s...\n", img.Parent)
		return i.download(buk, img.Parent)
	}
	return nil
}
func putLog(t transfer, bucket *s3.Bucket, dry bool) {
	data, err := ioutil.ReadFile(t.Src)
	if err != nil {
		// Error reading the source log.
		log.Printf("Error reading source file %s:\n", t.Src)
		log.Fatal(err)
	}

	contType := "binary/octet-stream"
	perm := s3.ACL("private")

	if dry {
		log.Printf("Starting sync of %s to bucket path %s...\n", t.Src, t.Dest)
	} else {
		log.Printf("Starting sync of %s to s3://%s/%s...\n", t.Src, bucket.Name, t.Dest)
		err = bucket.Put(t.Dest, data, contType, perm, s3.Options{})
		if err != nil {
			// Error uploading the log to s3.
			log.Printf("Sync of %s to s3://%s/%s failed:\n", t.Src, bucket.Name, t.Dest)
			log.Fatal(err)
		}
	}
}
func putFile(bucket *s3.Bucket, src string) error {
	contents, err := ioutil.ReadFile(src)
	if err != nil {
		return err
	}
	// Content type and ACL are left empty, as in the original call.
	return bucket.Put(src, contents, "", "", s3.Options{})
}