// killBucket deletes the test bucket, retrying per the attempts strategy.
// When S3 reports BucketNotEmpty it first drains the remaining objects and
// aborts any in-flight multipart uploads, then lets the loop retry the
// delete. It panics if the bucket still cannot be removed.
func killBucket(b *s3.Bucket) {
	var err error
	for attempt := attempts.Start(); attempt.Next(); {
		err = b.DelBucket()
		if err == nil {
			return
		}
		// Bail out on DNS errors rather than retrying.
		if _, ok := err.(*net.DNSError); ok {
			return
		}
		e, ok := err.(*s3.Error)
		if ok && e.Code == "NoSuchBucket" {
			return
		}
		if ok && e.Code == "BucketNotEmpty" {
			// Errors from the cleanup calls below are deliberately
			// ignored; the outer loop simply retries the delete.
			resp, err := b.List("", "", "", 1000)
			if err == nil {
				for _, key := range resp.Contents {
					_ = b.Del(key.Key)
				}
			}
			multis, _, _ := b.ListMulti("", "")
			for _, m := range multis {
				_ = m.Abort()
			}
		}
	}
	message := "cannot delete test bucket"
	if err != nil {
		message += ": " + err.Error()
	}
	panic(message)
}
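
// The package-level `attempts` strategy that killBucket iterates over is
// defined elsewhere in the original source. Below is a minimal sketch of
// such a definition, assuming the goamz aws.AttemptStrategy type (and an
// import of "time"); the counts and durations are illustrative guesses,
// not the original values. attempt.Next() sleeps for Delay between
// iterations and returns false once Min and Total are exhausted, which is
// when killBucket gives up and panics.
var attempts = aws.AttemptStrategy{
	Min:   5,                      // make at least five attempts
	Total: 5 * time.Second,        // stop retrying after five seconds overall
	Delay: 200 * time.Millisecond, // sleep between attempts
}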
func copyFile(bucket *s3.Bucket, from string, to string, contentType string, maxAge int) {
	// Server-side copy within the bucket: REPLACE discards the source
	// object's metadata so the new key gets its own content type and
	// cache-control header.
	copyOpts := s3.CopyOptions{
		MetadataDirective: "REPLACE",
		ContentType:       contentType,
		Options: s3.Options{
			CacheControl: fmt.Sprintf("public, max-age=%d", maxAge),
			// ContentEncoding: "gzip",
		},
	}
	// The copy source is addressed as bucket/key, hence the join.
	_, err := bucket.PutCopy(to, s3.PublicRead, copyOpts, filepath.Join(bucket.Name, from))
	if err != nil {
		panic(err)
	}
}
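
// A hedged usage sketch for copyFile; the key names, content type, and
// max-age below are hypothetical, shown only to illustrate the call shape:
// a server-side re-publish of an existing object under a new key with a
// one-day cache lifetime.
func exampleCopy(bucket *s3.Bucket) {
	copyFile(bucket, "build/app.js", "v2/app.js", "application/javascript", 86400)
}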
// uploadFile buffers the source in memory, gzip-compressing it when the
// destination name calls for it, and uploads the result world-readable
// with an MD5 integrity header and a max-age of `caching` seconds. When
// includeHash is true, the key is prefixed with the first 12 hex digits
// of the content hash, so changed content gets a fresh, cache-busting
// URL. It returns the final destination key.
func uploadFile(bucket *s3.Bucket, reader io.Reader, dest string, includeHash bool, caching int) string {
	buffer := bytes.NewBuffer([]byte{})
	compress := shouldCompress(dest)
	if compress {
		writer := gzip.NewWriter(buffer)
		must(io.Copy(writer, reader))
		writer.Close()
	} else {
		must(io.Copy(buffer, reader))
	}
	data := buffer.Bytes()
	hash := hashBytes(data)
	hashPrefix := fmt.Sprintf("%x", hash)[:12]
	s3Opts := s3.Options{
		ContentMD5:   base64.StdEncoding.EncodeToString(hash),
		CacheControl: fmt.Sprintf("public, max-age=%d", caching),
	}
	if compress {
		s3Opts.ContentEncoding = "gzip"
	}
	if includeHash {
		dest = filepath.Join(hashPrefix, dest)
	}
	log.Printf("Uploading to %s in %s (%s) [%d]\n", dest, bucket.Name, hashPrefix, caching)
	err := bucket.PutReader(dest, buffer, int64(len(data)), guessContentType(dest), s3.PublicRead, s3Opts)
	panicIf(err)
	return dest
}
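
// uploadFile leans on several small helpers that are not shown in this
// section. The definitions below are a plausible reconstruction inferred
// from the call sites, not the original implementations: hashBytes is
// assumed to be MD5 (matching the base64 ContentMD5 header above), and
// shouldCompress/guessContentType are assumed to key off the file
// extension. They assume imports of "crypto/md5", "mime", "path", and
// "strings".

// panicIf aborts on any error.
func panicIf(err error) {
	if err != nil {
		panic(err)
	}
}

// must discards io.Copy's byte count and panics on error.
func must(_ int64, err error) {
	panicIf(err)
}

// hashBytes returns the raw MD5 digest of data (16 bytes), which both
// feeds the ContentMD5 header and supplies the 12-hex-digit key prefix.
func hashBytes(data []byte) []byte {
	sum := md5.Sum(data)
	return sum[:]
}

// shouldCompress reports whether the destination's extension marks it as
// a compressible text asset. The extension list here is a guess.
func shouldCompress(dest string) bool {
	switch strings.ToLower(path.Ext(dest)) {
	case ".html", ".css", ".js", ".json", ".svg", ".txt":
		return true
	}
	return false
}

// guessContentType maps the destination's extension to a MIME type,
// falling back to a generic binary type.
func guessContentType(dest string) string {
	if t := mime.TypeByExtension(path.Ext(dest)); t != "" {
		return t
	}
	return "application/octet-stream"
}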