// putFiles uploads the items received on c to the bucket, using
// options.Concurrency worker goroutines.
func putFiles(bucket *s3.Bucket, c <-chan *Item) {
	var wg sync.WaitGroup
	wg.Add(options.Concurrency)
	for i := 0; i < options.Concurrency; i++ {
		go func() {
			defer wg.Done()
			for item := range c {
				// Inner func so the deferred Close runs once per item.
				func() {
					f, err := os.Open(item.Path)
					if err != nil {
						log.Printf("Pushing %s failed: %s", item.Path, err)
						return
					}
					defer f.Close()
					path := item.Path[len(item.Prefix)+1:]
					err = bucket.PutReader(options.Put.Prefix+path, f, item.FileInfo.Size(),
						mime.TypeByExtension(filepath.Ext(item.Path)), s3.BucketOwnerFull)
					if err != nil {
						log.Printf("Uploading %s failed: %s", path, err)
						return
					}
					log.Printf("Uploading %s done", path)
				}()
			}
		}()
	}
	wg.Wait()
}
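// Neither Item nor options is defined in this listing; putFiles,
// listBucketFiles, and getFiles all assume something along these lines.
// This is a hypothetical sketch, not the original definitions.
type Item struct {
	Prefix   string      // prefix the rest of Path is relative to
	Path     string      // local file path or S3 key
	FileInfo os.FileInfo // set for local items, nil for remote ones
}

var options struct {
	Concurrency int      // number of worker goroutines
	Remainder   []string // positional arguments; [0] is the target directory
	Put         struct{ Prefix string }
	Get         struct{ Prefix string }
}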
// generateThumbnails decodes the uploaded image and stores one JPEG
// thumbnail per entry in THUMBNAIL_SIZES under pathPrefix.
func generateThumbnails(file multipart.File, userId, pathPrefix string, bucket *s3.Bucket) error {
	// Rewind in case the file has already been read (e.g. for hashing).
	if _, err := file.Seek(0, 0); err != nil {
		return err
	}
	img, _, err := image.Decode(file)
	if err != nil {
		log.Println("Error decoding image", err)
		return err
	}
	var buf bytes.Buffer
	for _, i := range THUMBNAIL_SIZES {
		// Resize to width i; height 0 preserves the aspect ratio.
		resized := resize.Resize(uint(i), 0, img, resize.Lanczos3)
		if err = jpeg.Encode(&buf, resized, nil); err != nil {
			return err
		}
		path := fmt.Sprintf("%s/%d.jpg", pathPrefix, i)
		if err = bucket.Put(path, buf.Bytes(), "image/jpeg", s3.PublicRead); err != nil {
			return err
		}
		buf.Reset()
	}
	return nil
}
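// A hypothetical call site for generateThumbnails: pulling the image out
// of a multipart form in an HTTP handler. The field name "avatar" and the
// "avatars/<userId>" key layout are assumptions, not part of the original.
func handleAvatarUpload(w http.ResponseWriter, r *http.Request, bucket *s3.Bucket) {
	file, _, err := r.FormFile("avatar")
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	defer file.Close()
	userId := "42" // placeholder
	if err := generateThumbnails(file, userId, "avatars/"+userId, bucket); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}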
// hashRemote downloads the object at path and returns its hash, or nil
// if the object cannot be fetched.
func hashRemote(bucket *s3.Bucket, path string) []byte {
	file, err := bucket.GetReader(path)
	if err != nil {
		return nil
	}
	defer file.Close()
	return hashFile(file)
}
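// hashFile is not shown in this listing. Assuming it digests the whole
// stream, a minimal sketch (using crypto/sha256 and io) could be:
func hashFile(r io.Reader) []byte {
	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return nil
	}
	return h.Sum(nil)
}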
// listBucketFiles walks the bucket under options.Get.Prefix, following
// the pagination marker, and sends one Item per key on the returned channel.
func listBucketFiles(bucket *s3.Bucket) <-chan *Item {
	c := make(chan *Item)
	go func() {
		defer close(c)
		marker := ""
		for {
			resp, err := bucket.List(options.Get.Prefix, "", marker, 1000)
			if err != nil {
				log.Printf("Could not list items in bucket: %s", err)
				return
			}
			for _, item := range resp.Contents {
				c <- &Item{
					Prefix:   options.Get.Prefix,
					Path:     item.Key,
					FileInfo: nil,
				}
				marker = item.Key
			}
			if !resp.IsTruncated {
				return
			}
		}
	}()
	return c
}
// Get downloads the object at path and writes it to the local file
// with 0644 permissions.
func Get(file string, bucket *s3.Bucket, path string) {
	data, err := bucket.Get(path)
	if err != nil {
		panic(err.Error())
	}
	perms := os.FileMode(0644)
	err = ioutil.WriteFile(file, data, perms)
	if err != nil {
		panic(err.Error())
	}
}
// upload reads the file from directory and stores it in the bucket
// under its base name, leaving the content type empty.
func upload(directory string, f os.FileInfo, bucket *s3.Bucket, permission s3.ACL) {
	log.Println("uploading " + f.Name())
	data, err := ioutil.ReadFile(path.Join(directory, f.Name()))
	if err != nil {
		panic(err.Error())
	}
	err = bucket.Put(f.Name(), data, "", permission)
	if err != nil {
		panic(err.Error())
	}
	log.Println("finished uploading " + f.Name())
}
// upload streams the file to the bucket under "<checksum>/<filename>".
func upload(bucket *s3.Bucket, checksum, filename string) {
	f, err := os.Open(filename)
	handleErr(err)
	defer f.Close()
	path := fmt.Sprintf("%s/%s", checksum, filename)
	fi, err := f.Stat()
	handleErr(err)
	err = bucket.PutReader(path, f, fi.Size(), "binary/octet-stream", s3.PublicRead)
	if err != nil {
		log.Fatalf("Uploading %s failed: %s", path, err)
	}
	log.Printf("Uploaded %s", path)
}
// Put uploads the local file to the bucket at path as a private,
// binary/octet-stream object.
func Put(bucket *s3.Bucket, path string, file string) {
	contType := "binary/octet-stream"
	perms := s3.ACL("private")
	data, err := ioutil.ReadFile(file)
	if err != nil {
		panic(err.Error())
	}
	err = bucket.Put(path, data, contType, perms)
	if err != nil {
		panic(err.Error())
	}
}
// killBucket deletes the bucket, retrying and emptying it (keys and
// in-progress multipart uploads) when a delete fails with BucketNotEmpty.
func killBucket(b *s3.Bucket) {
	var err error
	for attempt := attempts.Start(); attempt.Next(); {
		err = b.DelBucket()
		if err == nil {
			return
		}
		if _, ok := err.(*net.DNSError); ok {
			return
		}
		e, ok := err.(*s3.Error)
		if ok && e.Code == "NoSuchBucket" {
			return
		}
		if ok && e.Code == "BucketNotEmpty" {
			// Errors are ignored here. Just retry.
			resp, err := b.List("", "", "", 1000)
			if err == nil {
				for _, key := range resp.Contents {
					_ = b.Del(key.Key)
				}
			}
			multis, _, _ := b.ListMulti("", "")
			for _, m := range multis {
				_ = m.Abort()
			}
		}
	}
	message := "cannot delete test bucket"
	if err != nil {
		message += ": " + err.Error()
	}
	panic(message)
}
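// attempts is not defined here. In goamz-based test helpers it is
// typically an aws.AttemptStrategy; the exact values below are only an
// example.
var attempts = aws.AttemptStrategy{
	Min:   5,                      // retry at least this many times
	Total: 5 * time.Second,        // give up after this long
	Delay: 200 * time.Millisecond, // pause between attempts
}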
// GetReader opens the gzip-compressed object at path and wraps it in a
// buffered TweetReader.
func GetReader(b *s3.Bucket, path string) (reader *TweetReader, err error) {
	body, err := b.GetReader(path)
	if err != nil {
		return nil, err
	}
	gr, err := gzip.NewReader(body)
	if err != nil {
		body.Close()
		return nil, err
	}
	reader = NewTweetReaderSerial(bufio.NewReaderSize(gr, 16*4096))
	return reader, nil
}
// Sends this upload to the given S3 bucket
func (u *Upload) send(b *s3.Bucket) error {
	f, err := os.Open(u.LocalFile)
	if err != nil {
		return err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return err
	}
	r := bufio.NewReader(f)
	return b.PutReader(u.RemoteFile, r, info.Size(), u.GetContentType(), u.Access)
}
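// The Upload type and GetContentType are not shown. A plausible minimal
// definition, guessing the content type from the file extension, might be:
type Upload struct {
	LocalFile  string // path on disk
	RemoteFile string // key in the bucket
	Access     s3.ACL
}

func (u *Upload) GetContentType() string {
	if t := mime.TypeByExtension(filepath.Ext(u.LocalFile)); t != "" {
		return t
	}
	return "binary/octet-stream"
}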
// getFiles downloads the items received on c into options.Remainder[0],
// using options.Concurrency worker goroutines.
func getFiles(bucket *s3.Bucket, c <-chan *Item) {
	var wg sync.WaitGroup
	wg.Add(options.Concurrency)
	for i := 0; i < options.Concurrency; i++ {
		go func() {
			defer wg.Done()
			for item := range c {
				// Inner func so the deferred Closes run once per item.
				func() {
					itempath := item.Path[len(item.Prefix):]
					dirname, fname := filepath.Split(itempath)
					dirname = filepath.Join(options.Remainder[0], dirname)
					err := os.MkdirAll(dirname, os.FileMode(0755))
					if err != nil {
						log.Printf("Could not create target folder %s: %s", dirname, err)
						return
					}
					f, err := os.Create(filepath.Join(dirname, fname))
					if err != nil {
						log.Printf("Opening %s failed: %s", item.Path, err)
						return
					}
					defer f.Close()
					rc, err := bucket.GetReader(item.Path)
					if err != nil {
						log.Printf("Downloading %s failed: %s", item.Path, err)
						return
					}
					defer rc.Close()
					if _, err := io.Copy(f, rc); err != nil {
						log.Printf("Downloading %s failed: %s", item.Path, err)
						return
					}
					log.Printf("Downloading %s done", item.Path)
				}()
			}
		}()
	}
	wg.Wait()
}
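// Wiring the two halves together: listBucketFiles produces remote Items
// and getFiles consumes them, giving a concurrent bucket download. This
// driver is an illustration, not code from the original listing.
func downloadBucket(bucket *s3.Bucket) {
	getFiles(bucket, listBucketFiles(bucket))
}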
// syncPath uploads a single local file to the bucket, deriving the S3
// key by stripping the local root from the file's path.
func syncPath(acl, localPath, path string, bucket *s3.Bucket) {
	s3Path := strings.TrimPrefix(path, localPath)
	file, err := os.Open(path)
	if err != nil {
		fmt.Println("Error opening local file, failed to sync '" + path + "'")
		return
	}
	defer file.Close()
	info, err := file.Stat()
	if err != nil {
		fmt.Println("Error stating local file, failed to sync '" + path + "'")
		return
	}
	length := info.Size()
	// mime.TypeByExtension expects the leading dot, e.g. ".jpg".
	mtype := mime.TypeByExtension(filepath.Ext(path))
	if err := bucket.PutReader(s3Path, file, length, mtype, s3.ACL(acl)); err != nil {
		fmt.Println("Failed to sync: " + s3Path)
		return
	}
	fmt.Println("Synced: " + s3Path)
}
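// A hypothetical driver for syncPath: walking a local tree and syncing
// every regular file. The "public-read" ACL is an example value.
func syncTree(localPath string, bucket *s3.Bucket) error {
	return filepath.Walk(localPath, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.Mode().IsRegular() {
			syncPath("public-read", localPath, path, bucket)
		}
		return nil
	})
}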