// getObject gets an object from S3 and returns its contents as a string.
func getObject(svc *s3.S3, bucket string, key string) string {
	params := &s3.GetObjectInput{
		Bucket: aws.String(bucket), // Required
		Key:    aws.String(key),    // Required
	}
	resp, err := svc.GetObject(params)
	if err != nil {
		// Print the error; cast err to awserr.Error to get the Code and
		// Message from an error.
		glog.Fatal(err.Error())
	}
	defer resp.Body.Close()

	size := int(*resp.ContentLength)
	buffer := make([]byte, size)
	var bbuffer bytes.Buffer
	for {
		num, rerr := resp.Body.Read(buffer)
		if num > 0 {
			bbuffer.Write(buffer[:num])
		} else if rerr != nil { // io.EOF or any other error ends the read loop
			break
		}
	}
	return bbuffer.String()
}
// GetObject fetches a key from the hard-coded "ecomm-order-items" bucket
// and returns its contents as a string.
func GetObject(svc *s3.S3, key string) string {
	//svc := s3.New(session.New(), &aws.Config{Region: aws.String("us-east-1")})
	params := &s3.GetObjectInput{
		Bucket: aws.String("ecomm-order-items"), // Required
		Key:    aws.String(key),                 // Required
	}
	resp, err := svc.GetObject(params)
	if err != nil {
		// Print the error; cast err to awserr.Error to get the Code and
		// Message from an error.
		glog.Fatal(err.Error())
	}
	defer resp.Body.Close()

	// Pretty-print the response data.
	fmt.Print(resp)
	size := int(*resp.ContentLength)
	fmt.Println("size is", size)

	buffer := make([]byte, size)
	var bbuffer bytes.Buffer
	for {
		num, rerr := resp.Body.Read(buffer)
		if num > 0 {
			bbuffer.Write(buffer[:num])
		} else if rerr != nil { // io.EOF or any other error ends the read loop
			break
		}
	}
	return bbuffer.String()
}
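// The manual read loops in the two functions above can be collapsed into
// io.Copy, which handles short reads and io.EOF itself. A minimal sketch of
// the equivalent body (same aws-sdk-go v1 types; function name is ours):
func getObjectString(svc *s3.S3, bucket, key string) (string, error) {
	resp, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// io.Copy loops internally until EOF, so no explicit Read loop is needed.
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, resp.Body); err != nil {
		return "", err
	}
	return buf.String(), nil
}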
// pull lists every object in the configured bucket and downloads each one
// under the local prefix p, preserving the key's directory structure.
func pull(s3serv *s3.S3, c config, p string) {
	list, e := s3serv.ListObjects(&s3.ListObjectsInput{Bucket: &c.Bucket})
	if e != nil {
		log.Fatal(e)
	}
	for _, l := range list.Contents {
		log.Println(*l.Key)
		obj, e := s3serv.GetObject(&s3.GetObjectInput{Bucket: &c.Bucket, Key: l.Key})
		if e != nil {
			log.Println(e)
			continue
		}
		os.MkdirAll(path.Dir(p+*l.Key), 0700)
		f, e := os.OpenFile(p+*l.Key, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666)
		if e != nil {
			log.Println(e)
			obj.Body.Close()
			continue
		}
		counter := &CountReader{input: obj.Body, co: c}
		counter.Init()
		io.Copy(f, counter)
		// Close explicitly inside the loop; a defer here would hold every
		// file and response body open until pull returns.
		f.Close()
		obj.Body.Close()
		log.Println("written", *l.Key)
	}
}
func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error {
	req := &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}
	if version != "" {
		req.VersionId = aws.String(version)
	}

	resp, err := client.GetObject(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Create all the parent directories
	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
		return err
	}

	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, resp.Body)
	return err
}
// GetS3Object returns the object output for the given object key
func GetS3Object(client *s3.S3, bucket, objKey string) (*s3.GetObjectOutput, error) {
	logger.Debug("GetS3Object: ", bucket, "-", objKey)
	params := &s3.GetObjectInput{
		Bucket: aws.String(bucket), // Required
		Key:    aws.String(objKey), // Required
		// Optional request fields, shown here for reference:
		// IfMatch:                    aws.String("IfMatch"),
		// IfModifiedSince:            aws.Time(time.Now()),
		// IfNoneMatch:                aws.String("IfNoneMatch"),
		// IfUnmodifiedSince:          aws.Time(time.Now()),
		// Range:                      aws.String("Range"),
		// RequestPayer:               aws.String("RequestPayer"),
		// ResponseCacheControl:       aws.String("ResponseCacheControl"),
		// ResponseContentDisposition: aws.String("ResponseContentDisposition"),
		// ResponseContentEncoding:    aws.String("ResponseContentEncoding"),
		// ResponseContentLanguage:    aws.String("ResponseContentLanguage"),
		// ResponseContentType:        aws.String("ResponseContentType"),
		// ResponseExpires:            aws.Time(time.Now()),
		// SSECustomerAlgorithm:       aws.String("SSECustomerAlgorithm"),
		// SSECustomerKey:             aws.String("SSECustomerKey"),
		// SSECustomerKeyMD5:          aws.String("SSECustomerKeyMD5"),
		// VersionId:                  aws.String("ObjectVersionId"),
	}
	return client.GetObject(params)
}
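// A minimal caller sketch for GetS3Object (bucket and key names here are
// hypothetical; assumes the same aws-sdk-go v1 imports used above plus
// io/ioutil). The SDK leaves the response body open, so the caller must
// read and close it.
func exampleReadObject(client *s3.S3) ([]byte, error) {
	out, err := GetS3Object(client, "example-bucket", "path/to/object.json")
	if err != nil {
		return nil, err
	}
	defer out.Body.Close() // caller owns the body
	return ioutil.ReadAll(out.Body)
}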
func getObject(client *s3.S3, bucket string, key *string) (*s3.GetObjectOutput, error) {
	request := &s3.GetObjectInput{
		Bucket: &bucket,
		Key:    key,
	}
	return client.GetObject(request)
}
// get_object returns the object's body as a stream; the caller is
// responsible for closing it.
func get_object(svc *s3.S3, bucket *string, key *string) (io.ReadCloser, error) {
	params := &s3.GetObjectInput{
		Bucket: bucket,
		Key:    key,
	}
	resp, err := svc.GetObject(params)
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
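// A short usage sketch for get_object (hypothetical bucket and key;
// assumes the aws-sdk-go v1 imports used above). Streaming the body
// straight to a writer avoids buffering the whole object in memory.
func exampleStreamObject(svc *s3.S3, w io.Writer) error {
	body, err := get_object(svc, aws.String("example-bucket"), aws.String("logs/app.log"))
	if err != nil {
		return err
	}
	defer body.Close() // the caller owns the stream
	_, err = io.Copy(w, body)
	return err
}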
// getBlob reads the whole object into memory. io.ReadFull is used because
// a single Read call is not guaranteed to fill the buffer.
func getBlob(c *s3.S3, b, k string) ([]byte, error) {
	resp, err := c.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(b),
		Key:    aws.String(k),
	})
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	blob := make([]byte, *resp.ContentLength)
	_, err = io.ReadFull(resp.Body, blob)
	return blob, err
}
func get(service *s3.S3, bucketName, key, path string) {
	obj, err := service.GetObject(&s3.GetObjectInput{
		Bucket: stringPtr(bucketName),
		Key:    stringPtr(key),
	})
	if err != nil {
		panic(err)
	}
	defer obj.Body.Close()

	// O_TRUNC so stale bytes from a previous, longer download cannot
	// survive past the copy.
	f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0755)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if _, err = io.Copy(f, obj.Body); err != nil {
		panic(err)
	}
}
// tryDownload tries to download the file from S3. This uses GetObject,
// which uses a single HTTP stream, rather than an s3manager.Downloader,
// which uses multiple streams. We generally have to calculate both an md5
// and a sha256 checksum on download, and we're choosing to write the file
// and do the checksums all in one pass. The s3manager.Downloader's multiple
// concurrent connections produce faster downloads, but require an
// io.WriterAt, which the hashing algorithms don't provide. When we're
// working with multi-gigabyte files, we really don't want to have to read
// them again to produce the checksums.
func (client *S3Download) tryDownload(service *s3.S3, params *s3.GetObjectInput) error {
	resp, err := service.GetObject(params)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	client.Response = resp

	// Create the download directory and open a file for writing.
	writers := make([]io.Writer, 0)
	if client.LocalPath == os.DevNull {
		writers = append(writers, ioutil.Discard)
	} else {
		err = os.MkdirAll(filepath.Dir(client.LocalPath), 0755)
		if err != nil {
			return err
		}
		outputFile, err := os.Create(client.LocalPath)
		if err != nil {
			return err
		}
		writers = append(writers, outputFile)
		defer outputFile.Close()
	}

	// Create a writer to write the contents to the file, and optionally to
	// pass the bitstream through the md5 and sha256 algorithms while we're
	// at it.
	var multiWriter io.Writer
	var md5Hash hash.Hash
	var sha256Hash hash.Hash
	if client.CalculateMd5 {
		md5Hash = md5.New()
		writers = append(writers, md5Hash)
	}
	if client.CalculateSha256 {
		sha256Hash = sha256.New()
		writers = append(writers, sha256Hash)
	}
	multiWriter = io.MultiWriter(writers...)

	// Copy the file, with several tries. On larger files, we often get a
	// "connection reset by peer" error. Better to retry a few times now
	// than throw this back into the work queue.
	for attemptNumber := 0; attemptNumber < 5; attemptNumber++ {
		client.BytesCopied, err = io.Copy(multiWriter, resp.Body)
		if err == nil {
			break
		}
	}
	if err != nil {
		return err
	}

	// Set the checksums, if needed...
	if client.CalculateMd5 {
		client.Md5Digest = fmt.Sprintf("%x", md5Hash.Sum(nil))
	}
	if client.CalculateSha256 {
		client.Sha256Digest = fmt.Sprintf("%x", sha256Hash.Sum(nil))
	}

	// No errors.
	return nil
}
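// For contrast with the doc comment above: a minimal sketch of the
// s3manager.Downloader alternative it rejects (function name and parameters
// are ours; assumes the aws-sdk-go v1 s3manager package). Downloader is
// faster because it fetches byte ranges concurrently, but it writes through
// an io.WriterAt, which os.File provides and hash.Hash does not, so the
// checksums would require a second pass over the file.
func downloadWithManager(svc *s3.S3, bucket, key, dst string) (int64, error) {
	f, err := os.Create(dst)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	downloader := s3manager.NewDownloaderWithClient(svc)
	// Download writes concurrent chunks via WriteAt, so it cannot stream
	// through an md5/sha256 hash the way tryDownload's MultiWriter does.
	return downloader.Download(f, &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
}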