func NewS3(awsId, awsSecret, s3Bucket, s3Region string) Source { cred := credentials.NewStaticCredentials(awsId, awsSecret, "") session := session.New(&aws.Config{Region: aws.String(s3Region), Credentials: cred}) s3manager.NewDownloader(session) return &S3{s3manager.NewDownloader(session), s3Bucket} }
func downloadFromS3(s3path string, localPath string, scanned func(string, bool)) error { outputPath := scratchPath(localPath) sess := session.New(&aws.Config{Region: aws.String("us-west-2")}) downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) { d.PartSize = 5 * 1024 * 1024 // 5MB per part }) file, err := os.Create(outputPath) if err != nil { return err } log.Println("Starting download") svc := s3.New(sess) resp, err := svc.HeadObject(&s3.HeadObjectInput{ Bucket: aws.String("smick-media-output"), Key: aws.String(s3path), }) if err != nil { log.Println("Error getting head:", err) } log.Println(resp) downloader.Download(file, &s3.GetObjectInput{ Bucket: aws.String("smick-media-output"), Key: aws.String(s3path), }, func(d *s3manager.Downloader) { log.Println("yeye") }) log.Println("Downloaded") return nil }
// GetAction implements the CLI "get" command: it parses an s3://bucket/prefix
// argument and a local destination, then lists every matching object and
// feeds each listing page to a Downloader running with the -parallel worker
// count, blocking until all downloads complete.
// NOTE(review): s3manager.NewDownloader(nil) and s3.New(nil) rely on the
// SDK's default configuration being acceptable — confirm against the SDK
// version in use.
func GetAction(c *cli.Context) { if len(c.Args()) != 2 { log.Fatal("get s3path localpath") } s3path, err := NewS3Path(c.Args().Get(0)) if err != nil { log.Fatal(err) } localpath := c.Args().Get(1) if localpath == "" { log.Fatal("get s3path localpath") } log.Printf("s3path Bucket:%v Prefix:%v", s3path.Bucket, s3path.Prefix) parallel := c.Int("parallel") manager := s3manager.NewDownloader(nil) d := NewDownloader(s3path, localpath, parallel, manager) client := s3.New(nil) params := &s3.ListObjectsInput{Bucket: &s3path.Bucket, Prefix: &s3path.Prefix} err = client.ListObjectsPages(params, d.eachPage) if err != nil { log.Fatal(err) } d.Wait() }
// Get downloads the specified release from S3 bucket, as described in // the passed in config, and returns the location of the downloaded // release and an error, if any func Get(conf *S3Config) (string, error) { defaults.DefaultConfig.Region = aws.String(*conf.Region) tmpDir := os.TempDir() destFile := filepath.Join(tmpDir, *conf.Revision) downloadFile, err := os.Create(destFile) if err != nil { return "", err } filePath := filepath.Join(*conf.RevisionPath, *conf.Revision) downloader := s3manager.NewDownloader(nil) _, err = downloader.Download( downloadFile, &s3.GetObjectInput{ Bucket: conf.Bucket, Key: &filePath, }) if err != nil { return "", err } return destFile, nil }
// getRemoteBackup is used to pull backups from S3 func getRemoteBackup(r *Restore, conf *config.Config) { s3Conn := session.New(&aws.Config{Region: aws.String(string(conf.S3Region))}) r.LocalFilePath = fmt.Sprintf("%v/%v", conf.TmpDir, r.RestorePath) localFileDir := filepath.Dir(r.LocalFilePath) err := os.MkdirAll(localFileDir, 0755) if err != nil { log.Fatalf("[ERR] Unable to create local restore directory!: %v", err) } outFile, err := os.Create(r.LocalFilePath) if err != nil { log.Fatalf("[ERR] Unable to create local restore temp file!: %v", err) } // Create the params to pass into the actual downloader params := &s3.GetObjectInput{ Bucket: &conf.S3Bucket, Key: &r.RestorePath, } log.Printf("[INFO] Downloading %v%v from S3 in %v", string(conf.S3Bucket), r.RestorePath, string(conf.S3Region)) downloader := s3manager.NewDownloader(s3Conn) _, err = downloader.Download(outFile, params) if err != nil { log.Fatalf("[ERR] Could not download file from S3!: %v", err) } outFile.Close() log.Print("[INFO] Download completed") }
func (D *Data) GetFile(src string, dst string) error { log.WithFields( log.Fields{"src": src, "dst": dst, "S3 Data": D, }).Info("S3: GetFile") file, err := os.Create(dst) if err != nil { log.WithFields(log.Fields{"dst": dst}).Error("S3: Cannot create file") return err } defer file.Close() s3Param := &s3.GetObjectInput{ Bucket: aws.String(D.BktName), Key: aws.String(src)} downloader := s3manager.NewDownloader(D.s3Sess) n, err := downloader.Download(file, s3Param) if err != nil { log.WithFields( log.Fields{"S3": D, "Key": src, "Error": err}).Error("S3: Cannot download") os.Remove(dst) return err } log.WithFields( log.Fields{"S3": D, "Src": src, "Dst": dst, "Bytes recvd": n}).Debug("S3: GetFile") return nil }
func (client *s3client) DownloadFile( bucketName string, remotePath string, localPath string, ) error { downloader := s3manager.NewDownloader(client.session) localFile, err := os.Create(localPath) if err != nil { return err } defer localFile.Close() getObject := &s3.GetObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(remotePath), } _, err = downloader.Download(localFile, getObject) if err != nil { return err } return nil }
func (client *s3client) DownloadFile(bucketName string, remotePath string, versionID string, localPath string) error { downloader := s3manager.NewDownloader(&s3manager.DownloadOptions{ S3: client.client, }) localFile, err := os.Create(localPath) if err != nil { return err } defer localFile.Close() getObject := &s3.GetObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(remotePath), } if versionID != "" { getObject.VersionId = aws.String(versionID) } _, err = downloader.Download(localFile, getObject) if err != nil { return err } return nil }
// TestDownloadError asserts error propagation from a mid-download failure:
// with PartSize=1 and Concurrency=1 over a 3-byte object, a Send handler
// forces every request after the first to return HTTP 400 with an empty body.
// The downloader must surface an error, report only the 1 byte written by the
// first successful part, record exactly two GetObject calls (the success and
// the failure), and leave just that first byte in the buffer.
func TestDownloadError(t *testing.T) { s, names, _ := dlLoggingSvc([]byte{1, 2, 3}) opts := &s3manager.DownloadOptions{S3: s, PartSize: 1, Concurrency: 1} num := 0 s.Handlers.Send.PushBack(func(r *request.Request) { num++ if num > 1 { r.HTTPResponse.StatusCode = 400 r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) } }) d := s3manager.NewDownloader(opts) w := &aws.WriteAtBuffer{} n, err := d.Download(w, &s3.GetObjectInput{ Bucket: aws.String("bucket"), Key: aws.String("key"), }) assert.NotNil(t, err) assert.Equal(t, int64(1), n) assert.Equal(t, []string{"GetObject", "GetObject"}, *names) assert.Equal(t, []byte{1}, w.Bytes()) }
// validate downloads the object at key from the package-level bucketName and
// asserts that the MD5 of the received bytes matches md5value.
// The destination buffer is fixed at 20MB — presumably an upper bound on the
// integration-test objects; TODO confirm against the uploaders in this suite.
func validate(t *testing.T, key string, md5value string) { mgr := s3manager.NewDownloader(nil) params := &s3.GetObjectInput{Bucket: bucketName, Key: &key} w := newDLWriter(1024 * 1024 * 20) n, err := mgr.Download(w, params) assert.NoError(t, err) assert.Equal(t, md5value, fmt.Sprintf("%x", md5.Sum(w.buf[0:n]))) }
func main() { jobs := make(chan *s3.Object) s3Url := os.Args[1] destDir := os.Args[2] u, err := url.Parse(s3Url) if err != nil { log.Fatal(err) } s3Bucket := u.Host s3Prefix := u.Path[1:] err = os.MkdirAll(destDir, 0700) if err != nil { log.Fatal(err) } session := session.New(&aws.Config{Region: aws.String(awsRegion)}) svc := s3.New(session) downloader := s3manager.NewDownloader(session) params := &s3.ListObjectsInput{ Bucket: aws.String(s3Bucket), Prefix: aws.String(s3Prefix), } var wg sync.WaitGroup for w := 1; w <= concurrency; w++ { wg.Add(1) go func(w int) { worker(w, jobs, downloader, s3Bucket, destDir) defer wg.Done() }(w) } log.Printf("Looking for objects in bucket: %s, prefix: %s", s3Bucket, s3Prefix) err = svc.ListObjectsPages(params, func(page *s3.ListObjectsOutput, lastPage bool) bool { for _, object := range page.Contents { jobs <- object } objectsCount += len(page.Contents) return true }) close(jobs) wg.Wait() if err != nil { log.Fatal(err) } fmt.Printf("Found %d objects to download.\n", objectsCount) }
func NewS3Connection(connection *s3.S3, cacheDirectory string, s3BucketName string, options *s3manager.DownloadOptions) S3Connection { conn := S3Connection{ Connection: connection, BucketName: s3BucketName, CacheDirectory: cacheDirectory, } conn.Downloader = s3manager.NewDownloader(options) return conn }
// Download fetches bucket/key from S3 into a newly created local file at
// path, routing any errors through logger.Process.
// NOTE(review): execution continues past logger.Process — if it does not
// terminate the process on error, the subsequent Download call operates on an
// invalid file handle. Confirm logger.Process's error semantics.
func Download(session *session.Session, bucket, key, path string) { file, err := os.Create(path) logger.Process(err, "Can't create file") defer file.Close() client := s3manager.NewDownloader(session) _, err = client.Download(file, &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), }) logger.Process(err, "Can't retrieve object from S3") }
/* Download downloads a file from S3. This is merely a wrapper around the aws-sdk-go downloader. It allows us to isolate the aws-sdk-go dependencies and unify error handling. */ func Download(file *os.File, bucket, key string) error { downloader := s3manager.NewDownloader(session.New(&aws.Config{Region: aws.String("us-east-1")})) numBytes, err := downloader.Download(file, &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), }) if err != nil { return err } log.Println("Downloaded", numBytes, "bytes") return nil }
func TestDownloadZero(t *testing.T) { s, names, ranges := dlLoggingSvc([]byte{}) opts := &s3manager.DownloadOptions{S3: s} d := s3manager.NewDownloader(opts) w := &aws.WriteAtBuffer{} n, err := d.Download(w, &s3.GetObjectInput{ Bucket: aws.String("bucket"), Key: aws.String("key"), }) assert.Nil(t, err) assert.Equal(t, int64(0), n) assert.Equal(t, []string{"GetObject"}, *names) assert.Equal(t, []string{"bytes=0-5242879"}, *ranges) }
func TestDownloadSetPartSize(t *testing.T) { s, names, ranges := dlLoggingSvc([]byte{1, 2, 3}) opts := &s3manager.DownloadOptions{S3: s, PartSize: 1, Concurrency: 1} d := s3manager.NewDownloader(opts) w := &aws.WriteAtBuffer{} n, err := d.Download(w, &s3.GetObjectInput{ Bucket: aws.String("bucket"), Key: aws.String("key"), }) assert.Nil(t, err) assert.Equal(t, int64(3), n) assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) assert.Equal(t, []string{"bytes=0-0", "bytes=1-1", "bytes=2-2"}, *ranges) assert.Equal(t, []byte{1, 2, 3}, w.Bytes()) }
// Fetch downloads target into opts.Destination via a hidden temp file created
// in the destination's directory. It sends If-None-Match with the MD5 of any
// existing local copy, so an unchanged object comes back as HTTP 304 and the
// local copy is kept (the temp file is removed). Progress is drawn with a pb
// bar when showProgress is set; on success the temp file is atomically
// renamed over the destination, and on any other failure it is removed and
// the error returned.
func Fetch(opts *Fetcher, target string, showProgress bool) error { targetPath := filepath.Dir(opts.Destination) writable, err := targetPathWritable(targetPath) if !writable || err != nil { fmt.Printf("Cannot write to target `%s`. Please check that it exists and is writable.\n", targetPath) return err } temp, err := ioutil.TempFile(targetPath, fmt.Sprintf(".%s-", opts.Project)) if err != nil { return err } defer temp.Close() bar := pb.New64(*targetSize(opts, target)).SetUnits(pb.U_BYTES) if showProgress { bar.Start() } etag := readMD5Sum(opts.Destination) writer := &ProgressWriter{temp, bar} downloader := s3manager.NewDownloader(&s3manager.DownloadOptions{ S3: opts.S3, }) _, err = downloader.Download(writer, &s3.GetObjectInput{ Bucket: aws.String(opts.Bucket), Key: opts.Key(target), IfNoneMatch: aws.String(etag), }) if err != nil { os.Remove(temp.Name()) if reqErr, ok := err.(awserr.RequestFailure); ok { if reqErr.StatusCode() == 304 { bar.Set64(bar.Total) bar.FinishPrint("Using local copy.") return nil } return reqErr } return err } return os.Rename(temp.Name(), opts.Destination) }
func (client *s3client) DownloadFile(bucketName string, remotePath string, versionID string, localPath string) error { headObject := &s3.HeadObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(remotePath), } if versionID != "" { headObject.VersionId = aws.String(versionID) } object, err := client.client.HeadObject(headObject) if err != nil { return err } progress := client.newProgressBar(*object.ContentLength) downloader := s3manager.NewDownloader(client.session) localFile, err := os.Create(localPath) if err != nil { return err } defer localFile.Close() getObject := &s3.GetObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(remotePath), } if versionID != "" { getObject.VersionId = aws.String(versionID) } progress.Start() defer progress.Finish() _, err = downloader.Download(progressWriterAt{localFile, progress}, getObject) if err != nil { return err } return nil }
// main downloads every object found under the -bucket/-prefix flags into
// local files, one at a time, exiting fatally on the first error.
// NOTE(review): only a single ListObjects page is fetched — if the listing is
// truncated this prints a warning but does not paginate; confirm whether
// ListObjectsPages should be used instead. Files are closed explicitly after
// each successful download; on a fatal download error the process exits with
// the current file still open (reclaimed by the OS on exit).
func main() { flag.Parse() argsErr := "Missing required arg:" if *bucket == "" { log.Fatalln(argsErr, "bucket") } if *prefix == "" { log.Fatalln(argsErr, "prefix") } awsSession := session.New(&aws.Config{Region: aws.String(*awsRegion)}) svc := s3.New(awsSession) loi := &s3.ListObjectsInput{ Bucket: bucket, Prefix: prefix, } listObjectsO, err := svc.ListObjects(loi) if err != nil { log.Fatalln("ListObjects failed:", err) } if *listObjectsO.IsTruncated { fmt.Println("ListObjectsOutput is truncated") } downloader := s3manager.NewDownloader(awsSession) fmt.Println("found keys:") for _, v := range listObjectsO.Contents { key := *v.Key fmt.Print("Downloading ", key, " ... ") file := getFile(key) _, err := downloader.Download(file, &s3.GetObjectInput{ Bucket: aws.String(*bucket), Key: aws.String(key), }) if err != nil { log.Fatalln("Failed to download file", err) } fmt.Println("done") file.Close() } fmt.Println("DONE") }
func (s3 *s3driver) GetFileTobeSend(dpconn, dpname, itemlocation, tagdetail string) (filepathname string) { bucket := getAwsInfoFromDpconn(dpconn) e := os.MkdirAll(gDpPath+"/"+bucket+"/"+itemlocation, 0777) if e != nil { log.Error(e) return } filepathname = gDpPath + "/" + bucket + "/" + itemlocation + "/" + tagdetail if true == isFileExists(filepathname) { return } //AWS_SECRET_ACCESS_KEY = Env("AWS_SECRET_ACCESS_KEY", false) //AWS_ACCESS_KEY_ID = Env("AWS_ACCESS_KEY_ID", false) AWS_REGION = Env("AWS_REGION", false) file, err := os.Create(filepathname) if err != nil { log.Error("Failed to create file", err) return "" } defer file.Close() downloader := s3manager.NewDownloader(session.New(&aws.Config{Region: aws.String(AWS_REGION)})) numBytes, err := downloader.Download(file, &s3aws.GetObjectInput{ Bucket: aws.String(bucket), Key: aws.String( /*dpname + "/" + */ itemlocation + "/" + tagdetail), }) if err != nil { log.Info("Failed to download file.", err) os.Remove(filepathname) return } log.Println("Downloaded file", file.Name(), numBytes, "bytes") return }
// ファイルをダウンロードする。 // // 引数: ダウンロードするキー名 // // 戻り値: エラー情報 func downlowdFile(bucket, key, localDir string) (string, error) { fileName := path.Base(key) localPath := filepath.Join(localDir, fileName) file, err := os.Create(localPath) if err != nil { return localPath, err } defer file.Close() fmt.Printf("Downloading s3://%s/%s to %s...\n", bucket, key, localPath) d := s3manager.NewDownloader(nil) params := &s3.GetObjectInput{Bucket: &bucket, Key: &key} if _, err := d.Download(file, params); err != nil { return localPath, err } fmt.Println("Download complete .") return localPath, nil }
func TestDownloadOrder(t *testing.T) { s, names, ranges := dlLoggingSvc(buf12MB) opts := &s3manager.DownloadOptions{S3: s, Concurrency: 1} d := s3manager.NewDownloader(opts) w := &aws.WriteAtBuffer{} n, err := d.Download(w, &s3.GetObjectInput{ Bucket: aws.String("bucket"), Key: aws.String("key"), }) assert.Nil(t, err) assert.Equal(t, int64(len(buf12MB)), n) assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) assert.Equal(t, []string{"bytes=0-5242879", "bytes=5242880-10485759", "bytes=10485760-15728639"}, *ranges) count := 0 for _, b := range w.Bytes() { count += int(b) } assert.Equal(t, 0, count) }
func c_awsBilling(accessKey, secretKey, region, productCodes, bucketName, bucketPath string, purgeDays int) (opentsdb.MultiDataPoint, error) { creds := credentials.NewStaticCredentials(accessKey, secretKey, "") conf := &aws.Config{ Credentials: creds, Region: ®ion, } awsBilling := awsBillingConfig{ bucketName: bucketName, bucketPath: bucketPath, } regCompiled, err := regexp.Compile(productCodes) if err != nil { return nil, err } awsBilling.prodCodesReg = regCompiled awsBilling.s3svc = s3.New(session.New(conf)) //Connect to S3 if awsBilling.s3svc == nil { return nil, fmt.Errorf("unable to connect to S3") } awsBilling.r53svc = route53.New(session.New(conf)) //Connect to R53 if awsBilling.r53svc == nil { return nil, fmt.Errorf("unable to connect to Route 53") } awsBilling.downloader = s3manager.NewDownloader(session.New(conf)) //Gimmie a downloader if awsBilling.downloader == nil { return nil, fmt.Errorf("unable to create S3 downloader") } if purgeDays == 0 { slog.Infof("S3 purging of objects is disabled") awsBilling.purgeOlderThan = time.Date(2999, 12, 31, 23, 59, 59, 0, time.UTC) } else { purgeHours := time.Duration(-1 * 24 * purgeDays) awsBilling.purgeOlderThan = time.Now().Add(purgeHours * time.Hour) } return awsBilling.Check() }
func NewServer(msession *DatabaseSession) *martini.ClassicMartini { // Create the server and set up middleware. m := martini.Classic() m.Use(render.Renderer(render.Options{ IndentJSON: true, })) m.Use(msession.Database()) // Get the first 20 filerequest documnents m.Get("/requestedFiles", func(r render.Render, db *mgo.Database, w http.ResponseWriter, t *http.Request) { requestedFiles, err := fetchAllRequestedFiles(db) if err != nil { r.JSON(401, map[string]string{ "DB Error": err.Error(), }) } else { r.JSON(200, requestedFiles) } }) //get the s3 file from the url stored in the rqeuested file document m.Get("/file", func(r render.Render, db *mgo.Database, w http.ResponseWriter, t *http.Request) { if err := t.ParseForm(); err != nil { r.JSON(400, map[string]string{ "error": "Failed to get parms", }) } requestid := t.URL.Query().Get("requestid") destinationFile := t.URL.Query().Get("destination") if len(destinationFile) < 1 || len(requestid) < 1 { r.JSON(406, map[string]string{ "Error": "destination file (1), requestid (2) must be specified: [(1):" + destinationFile + " (2):" + requestid + "]", }) } else { requestedFile, err := fetchRequestedFile(db, requestid) if err != nil { r.JSON(401, map[string]string{ " DB Error": err.Error(), }) } url := requestedFile.FileURL fbucket := requestedFile.FileBucket fkey := requestedFile.FileKey if len(fbucket) < 1 || len(fkey) < 1 { r.JSON(408, map[string]string{ "Error": "source bucket (1) and key (2) must be specified in requestfile document [(1):" + fbucket + " (2):" + fkey + "]", }) } else { filewriter, err := os.Create(destinationFile) defer filewriter.Close() if err != nil { r.JSON(404, map[string]error{ "File error": err, }) } else { var s3sess = session.New(aws.NewConfig().WithCredentials(credentials.AnonymousCredentials).WithRegion("us-west-2")) downloader := s3manager.NewDownloader(s3sess) numBytes, err := downloader.Download(filewriter, &s3.GetObjectInput{ Bucket: &fbucket, Key: &fkey, }) //if _, err := 
io.Copy(filewriter, rr); err != nil { if err != nil { r.JSON(405, map[string]string{ "error": "Failed to download to file", }) } else { r.JSON(200, map[string]string{ "done": url + " downloaded to " + destinationFile + " [" + strconv.FormatInt(numBytes, 10) + "] bytes", }) } } } } //result.Close() }) // Define the "POST /files" route. i.e pst requestFile documents m.Post("/files", binding.Json(RequestedFile{}), func(requestedFile RequestedFile, r render.Render, db *mgo.Database) { if requestedFile.valid() { // signature is valid, insert into database err := addRequestedFile(db, requestedFile) if err == nil { // insert successful, 201 Created r.JSON(201, requestedFile) } else { // insert failed, 400 Bad Request r.JSON(400, map[string]string{ "error": err.Error(), }) } } else { // signature is invalid, 400 Bad Request r.JSON(400, map[string]string{ "error": "Not a valid requestedFile", }) } }) // Return the server. Call Run() on the server to // begin listening for HTTP requests. return m }
// NewS3DownloadManager returns a *s3manager.Downloader built from the given
// ConfigProvider (e.g. a *session.Session) using the manager's default part
// size and concurrency settings.
func NewS3DownloadManager(c client.ConfigProvider) *s3manager.Downloader { return s3manager.NewDownloader(c) }
func main() { client := s3.New(&aws.Config{ Region: aws.String("us-west-2"), }) downloader := s3manager.NewDownloader(&s3manager.DownloadOptions{ S3: client, }) logFiles, err := LatestLogFilesList(client, downloader, 10) if err != nil { fmt.Println(err.Error()) return } var logs []string logs, err = GetLogLines(client, downloader, logFiles) if err != nil { fmt.Println(err.Error()) return } IPs := map[string]int{} //reConcourse := regexp.MustCompile("^.*user/lattice-concourse.*$") reIP := regexp.MustCompile(`^.* ((\d+\.){3}\d+) .*$`) TotalDownloads := 0 for _, l := range logs { //if reConcourse.MatchString(l) { //continue //} matches := reIP.FindStringSubmatch(l) if len(matches) < 2 { continue } TotalDownloads++ IPs[matches[1]]++ } fmt.Println(IPs) AWSIPRanges, err := GetAWSPublicIPRanges() if err != nil { fmt.Println(err.Error()) return } AWSDownloads := 0 for IP := range IPs { IPValue, err := getIPValue(IP) if err != nil { fmt.Println(err.Error()) return } for _, IPRange := range AWSIPRanges { if IPValue >= IPRange[0] && IPValue < IPRange[1] { AWSDownloads += IPs[IP] break } } } fmt.Println("Total Downloads: ", TotalDownloads) fmt.Println("AWS Downloads: ", AWSDownloads) }