// listIncompleteInRoutine lists buckets, or incomplete (multipart) uploads under
// the given prefix, non-recursively, streaming each entry over contentCh.
// The channel is closed on return.
func (c *s3Client) listIncompleteInRoutine(contentCh chan client.ContentOnChannel) {
	defer close(contentCh)
	b, o := c.url2BucketAndObject()
	switch {
	case b == "" && o == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				contentCh <- client.ContentOnChannel{
					Content: nil,
					Err:     probe.NewError(bucket.Err),
				}
				return
			}
			content := new(client.Content)
			content.Name = bucket.Stat.Name
			content.Size = 0
			content.Time = bucket.Stat.CreationDate
			content.Type = os.ModeDir
			contentCh <- client.ContentOnChannel{
				Content: content,
				Err:     nil,
			}
		}
	default:
		for object := range c.api.ListIncompleteUploads(b, o, false) {
			if object.Err != nil {
				contentCh <- client.ContentOnChannel{
					Content: nil,
					Err:     probe.NewError(object.Err),
				}
				return
			}
			content := new(client.Content)
			// Strip the requested prefix from the key so the listed name is relative.
			normalizedPrefix := strings.TrimSuffix(o, string(c.hostURL.Separator)) + string(c.hostURL.Separator)
			normalizedKey := object.Stat.Key
			if normalizedPrefix != object.Stat.Key && strings.HasPrefix(object.Stat.Key, normalizedPrefix) {
				normalizedKey = strings.TrimPrefix(object.Stat.Key, normalizedPrefix)
			}
			content.Name = normalizedKey
			switch {
			case strings.HasSuffix(object.Stat.Key, string(c.hostURL.Separator)):
				content.Time = time.Now()
				content.Type = os.ModeDir
			default:
				content.Size = object.Stat.Size
				content.Time = object.Stat.Initiated
				content.Type = os.ModeTemporary
			}
			contentCh <- client.ContentOnChannel{
				Content: content,
				Err:     nil,
			}
		}
	}
}
func (c *s3Client) listIncompleteRecursiveInRoutine(contentCh chan *client.Content) {
	defer close(contentCh)
	// get bucket and object from URL.
	b, o := c.url2BucketAndObject()
	switch {
	case b == "" && o == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				contentCh <- &client.Content{
					Err: probe.NewError(bucket.Err),
				}
				return
			}
			for object := range c.api.ListIncompleteUploads(bucket.Name, o, true) {
				if object.Err != nil {
					contentCh <- &client.Content{
						Err: probe.NewError(object.Err),
					}
					return
				}
				content := new(client.Content)
				url := *c.hostURL
				url.Path = filepath.Join(url.Path, bucket.Name, object.Key)
				content.URL = url
				content.Size = object.Size
				content.Time = object.Initiated
				content.Type = os.ModeTemporary
				contentCh <- content
			}
		}
	default:
		for object := range c.api.ListIncompleteUploads(b, o, true) {
			if object.Err != nil {
				contentCh <- &client.Content{
					Err: probe.NewError(object.Err),
				}
				return
			}
			url := *c.hostURL
			// Join bucket and incoming object key.
			url.Path = filepath.Join(string(url.Separator), b, object.Key)
			if c.virtualStyle {
				url.Path = filepath.Join(string(url.Separator), object.Key)
			}
			content := new(client.Content)
			content.URL = url
			content.Size = object.Size
			content.Time = object.Initiated
			content.Type = os.ModeTemporary
			contentCh <- content
		}
	}
}
// Stat - send a 'HEAD' on a bucket or object to fetch its metadata.
func (c *s3Client) Stat() (*client.Content, *probe.Error) {
	c.mu.Lock()
	objectMetadata := new(client.Content)
	bucket, object := c.url2BucketAndObject()
	switch {
	// valid case for 'ls -r s3/'
	case bucket == "" && object == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				c.mu.Unlock()
				return nil, probe.NewError(bucket.Err)
			}
		}
		c.mu.Unlock()
		return &client.Content{URL: *c.hostURL, Type: os.ModeDir}, nil
	}
	if object != "" {
		metadata, e := c.api.StatObject(bucket, object)
		if e != nil {
			c.mu.Unlock()
			errResponse := minio.ToErrorResponse(e)
			if errResponse != nil {
				if errResponse.Code == "NoSuchKey" {
					// Append "/" to the object name proactively and see if the Listing
					// produces an output. If yes, then we treat it as a directory.
					prefixName := object
					// Trim any trailing separators and add it.
					prefixName = strings.TrimSuffix(prefixName, string(c.hostURL.Separator)) + string(c.hostURL.Separator)
					for objectStat := range c.api.ListObjects(bucket, prefixName, false) {
						if objectStat.Err != nil {
							return nil, probe.NewError(objectStat.Err)
						}
						content := client.Content{}
						content.URL = *c.hostURL
						content.Type = os.ModeDir
						return &content, nil
					}
					return nil, probe.NewError(client.PathNotFound{Path: c.hostURL.Path})
				}
			}
			return nil, probe.NewError(e)
		}
		objectMetadata.URL = *c.hostURL
		objectMetadata.Time = metadata.LastModified
		objectMetadata.Size = metadata.Size
		objectMetadata.Type = os.FileMode(0664)
		c.mu.Unlock()
		return objectMetadata, nil
	}
	e := c.api.BucketExists(bucket)
	if e != nil {
		c.mu.Unlock()
		return nil, probe.NewError(e)
	}
	bucketMetadata := new(client.Content)
	bucketMetadata.URL = *c.hostURL
	bucketMetadata.Type = os.ModeDir
	c.mu.Unlock()
	return bucketMetadata, nil
}
// Stat - send a 'HEAD' on a bucket or object to get its metadata
func (c *s3Client) Stat() (*client.Content, *probe.Error) {
	c.mu.Lock()
	objectMetadata := new(client.Content)
	bucket, object := c.url2BucketAndObject()
	switch {
	// valid case for s3/...
	case bucket == "" && object == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				c.mu.Unlock()
				return nil, probe.NewError(bucket.Err)
			}
		}
		c.mu.Unlock()
		return &client.Content{URL: *c.hostURL, Type: os.ModeDir}, nil
	}
	if object != "" {
		metadata, err := c.api.StatObject(bucket, object)
		if err != nil {
			c.mu.Unlock()
			errResponse := minio.ToErrorResponse(err)
			if errResponse != nil {
				if errResponse.Code == "NoSuchKey" {
					for content := range c.List(false, false) {
						if content.Err != nil {
							return nil, content.Err.Trace()
						}
						content.Content.URL = *c.hostURL
						content.Content.Type = os.ModeDir
						content.Content.Size = 0
						return content.Content, nil
					}
				}
			}
			return nil, probe.NewError(err)
		}
		objectMetadata.URL = *c.hostURL
		objectMetadata.Time = metadata.LastModified
		objectMetadata.Size = metadata.Size
		objectMetadata.Type = os.FileMode(0664)
		c.mu.Unlock()
		return objectMetadata, nil
	}
	err := c.api.BucketExists(bucket)
	if err != nil {
		c.mu.Unlock()
		return nil, probe.NewError(err)
	}
	bucketMetadata := new(client.Content)
	bucketMetadata.URL = *c.hostURL
	bucketMetadata.Type = os.ModeDir
	c.mu.Unlock()
	return bucketMetadata, nil
}
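// Example (illustrative sketch, not part of the original source): one way a
// caller might consume the probe-based Stat variants above. Buckets and
// directory-like prefixes are reported with os.ModeDir, regular objects with
// os.FileMode(0664). Assumes "fmt" is imported and that client.Content.Type
// is an os.FileMode; the helper name statAndDescribe is hypothetical.
func statAndDescribe(c *s3Client) (string, *probe.Error) {
	content, err := c.Stat()
	if err != nil {
		return "", err.Trace()
	}
	if content.Type.IsDir() {
		// Buckets and prefixes carry no object size or modification time here.
		return "directory: " + content.URL.Path, nil
	}
	return fmt.Sprintf("object: %s (%d bytes, modified %s)",
		content.URL.Path, content.Size, content.Time), nil
}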
// Stat - send a 'HEAD' on a bucket or object to get its metadata
func (c *s3Client) Stat() (*client.Content, error) {
	objectMetadata := new(client.Content)
	bucket, object := c.url2BucketAndObject()
	switch {
	// valid case for s3:...
	case bucket == "" && object == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				return nil, iodine.New(bucket.Err, nil)
			}
			return &client.Content{Type: os.ModeDir}, nil
		}
	}
	if object != "" {
		metadata, err := c.api.StatObject(bucket, object)
		if err != nil {
			errResponse := minio.ToErrorResponse(err)
			if errResponse != nil {
				if errResponse.Code == "NoSuchKey" {
					for content := range c.List(false) {
						if content.Err != nil {
							return nil, iodine.New(err, nil)
						}
						content.Content.Type = os.ModeDir
						content.Content.Name = object
						content.Content.Size = 0
						return content.Content, nil
					}
				}
			}
			return nil, iodine.New(err, nil)
		}
		objectMetadata.Name = metadata.Key
		objectMetadata.Time = metadata.LastModified
		objectMetadata.Size = metadata.Size
		objectMetadata.Type = os.FileMode(0664)
		return objectMetadata, nil
	}
	err := c.api.BucketExists(bucket)
	if err != nil {
		return nil, iodine.New(err, nil)
	}
	bucketMetadata := new(client.Content)
	bucketMetadata.Name = bucket
	bucketMetadata.Type = os.ModeDir
	return bucketMetadata, nil
}
func (c *s3Client) listRecursiveInRoutine(contentCh chan client.ContentOnChannel) {
	defer close(contentCh)
	b, o := c.url2BucketAndObject()
	switch {
	case b == "" && o == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				contentCh <- client.ContentOnChannel{
					Content: nil,
					Err:     probe.NewError(bucket.Err),
				}
				return
			}
			for object := range c.api.ListObjects(bucket.Stat.Name, o, true) {
				if object.Err != nil {
					contentCh <- client.ContentOnChannel{
						Content: nil,
						Err:     probe.NewError(object.Err),
					}
					return
				}
				content := new(client.Content)
				content.Name = filepath.Join(bucket.Stat.Name, object.Stat.Key)
				content.Size = object.Stat.Size
				content.Time = object.Stat.LastModified
				content.Type = os.FileMode(0664)
				contentCh <- client.ContentOnChannel{
					Content: content,
					Err:     nil,
				}
			}
		}
	default:
		for object := range c.api.ListObjects(b, o, true) {
			if object.Err != nil {
				contentCh <- client.ContentOnChannel{
					Content: nil,
					Err:     probe.NewError(object.Err),
				}
				return
			}
			content := new(client.Content)
			normalizedKey := object.Stat.Key
			switch {
			case o == "":
				// If no prefix was provided and the URL is not delimited, add the bucket back into the object name.
				if strings.LastIndex(c.hostURL.Path, string(c.hostURL.Separator)) == 0 {
					if c.hostURL.String()[:strings.LastIndex(c.hostURL.String(), string(c.hostURL.Separator))+1] != b {
						normalizedKey = filepath.Join(b, object.Stat.Key)
					}
				}
			default:
				if strings.HasSuffix(o, string(c.hostURL.Separator)) {
					normalizedKey = strings.TrimPrefix(object.Stat.Key, o)
				}
			}
			content.Name = normalizedKey
			content.Size = object.Stat.Size
			content.Time = object.Stat.LastModified
			content.Type = os.FileMode(0664)
			contentCh <- client.ContentOnChannel{
				Content: content,
				Err:     nil,
			}
		}
	}
}
// listInRoutine lists buckets or objects non-recursively and streams each
// entry over contentCh. The channel is closed on return.
func (c *s3Client) listInRoutine(contentCh chan client.ContentOnChannel) {
	defer close(contentCh)
	b, o := c.url2BucketAndObject()
	switch {
	case b == "" && o == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				contentCh <- client.ContentOnChannel{
					Content: nil,
					Err:     probe.NewError(bucket.Err),
				}
				return
			}
			content := new(client.Content)
			content.Name = bucket.Stat.Name
			content.Size = 0
			content.Time = bucket.Stat.CreationDate
			content.Type = os.ModeDir
			contentCh <- client.ContentOnChannel{
				Content: content,
				Err:     nil,
			}
		}
	default:
		metadata, err := c.api.StatObject(b, o)
		switch err.(type) {
		case nil:
			// Exact object match - report it directly.
			content := new(client.Content)
			content.Name = metadata.Key
			content.Time = metadata.LastModified
			content.Size = metadata.Size
			content.Type = os.FileMode(0664)
			contentCh <- client.ContentOnChannel{
				Content: content,
				Err:     nil,
			}
		default:
			// Not an exact object - list with the given prefix instead.
			for object := range c.api.ListObjects(b, o, false) {
				if object.Err != nil {
					contentCh <- client.ContentOnChannel{
						Content: nil,
						Err:     probe.NewError(object.Err),
					}
					return
				}
				content := new(client.Content)
				// Strip the requested prefix from the key so the listed name is relative.
				normalizedPrefix := strings.TrimSuffix(o, string(c.hostURL.Separator)) + string(c.hostURL.Separator)
				normalizedKey := object.Stat.Key
				if normalizedPrefix != object.Stat.Key && strings.HasPrefix(object.Stat.Key, normalizedPrefix) {
					normalizedKey = strings.TrimPrefix(object.Stat.Key, normalizedPrefix)
				}
				content.Name = normalizedKey
				switch {
				case strings.HasSuffix(object.Stat.Key, string(c.hostURL.Separator)):
					content.Time = time.Now()
					content.Type = os.ModeDir
				default:
					content.Size = object.Stat.Size
					content.Time = object.Stat.LastModified
					content.Type = os.FileMode(0664)
				}
				contentCh <- client.ContentOnChannel{
					Content: content,
					Err:     nil,
				}
			}
		}
	}
}
func (c *s3Client) listRecursiveInRoutine(contentCh chan *client.Content) {
	defer close(contentCh)
	// get bucket and object from URL.
	b, o := c.url2BucketAndObject()
	switch {
	case b == "" && o == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				contentCh <- &client.Content{
					Err: probe.NewError(bucket.Err),
				}
				return
			}
			bucketURL := *c.hostURL
			bucketURL.Path = filepath.Join(bucketURL.Path, bucket.Name)
			contentCh <- &client.Content{
				URL:  bucketURL,
				Type: os.ModeDir,
				Time: bucket.CreationDate,
			}
			for object := range c.api.ListObjects(bucket.Name, o, true) {
				if object.Err != nil {
					contentCh <- &client.Content{
						Err: probe.NewError(object.Err),
					}
					continue
				}
				content := new(client.Content)
				objectURL := *c.hostURL
				objectURL.Path = filepath.Join(objectURL.Path, bucket.Name, object.Key)
				content.URL = objectURL
				content.Size = object.Size
				content.Time = object.LastModified
				content.Type = os.FileMode(0664)
				contentCh <- content
			}
		}
	default:
		for object := range c.api.ListObjects(b, o, true) {
			if object.Err != nil {
				contentCh <- &client.Content{
					Err: probe.NewError(object.Err),
				}
				continue
			}
			content := new(client.Content)
			url := *c.hostURL
			// Join bucket and incoming object key.
			url.Path = filepath.Join(string(url.Separator), b, object.Key)
			// If virtualStyle replace the url.Path back.
			if c.virtualStyle {
				url.Path = filepath.Join(string(url.Separator), object.Key)
			}
			content.URL = url
			content.Size = object.Size
			content.Time = object.LastModified
			content.Type = os.FileMode(0664)
			contentCh <- content
		}
	}
}
func (c *s3Client) listInRoutine(contentCh chan *client.Content) {
	defer close(contentCh)
	// get bucket and object from URL.
	b, o := c.url2BucketAndObject()
	switch {
	case b == "" && o == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				contentCh <- &client.Content{
					Err: probe.NewError(bucket.Err),
				}
				return
			}
			url := *c.hostURL
			url.Path = filepath.Join(url.Path, bucket.Name)
			content := new(client.Content)
			content.URL = url
			content.Size = 0
			content.Time = bucket.CreationDate
			content.Type = os.ModeDir
			contentCh <- content
		}
	case b != "" && !strings.HasSuffix(c.hostURL.Path, string(c.hostURL.Separator)) && o == "":
		e := c.api.BucketExists(b)
		if e != nil {
			contentCh <- &client.Content{
				Err: probe.NewError(e),
			}
		}
		content := new(client.Content)
		content.URL = *c.hostURL
		content.Type = os.ModeDir
		contentCh <- content
	default:
		metadata, e := c.api.StatObject(b, o)
		switch e.(type) {
		case nil:
			content := new(client.Content)
			content.URL = *c.hostURL
			content.Time = metadata.LastModified
			content.Size = metadata.Size
			content.Type = os.FileMode(0664)
			contentCh <- content
		default:
			for object := range c.api.ListObjects(b, o, false) {
				if object.Err != nil {
					contentCh <- &client.Content{
						Err: probe.NewError(object.Err),
					}
					return
				}
				content := new(client.Content)
				url := *c.hostURL
				// Join bucket and incoming object key.
				url.Path = filepath.Join(string(url.Separator), b, object.Key)
				if c.virtualStyle {
					url.Path = filepath.Join(string(url.Separator), object.Key)
				}
				switch {
				case strings.HasSuffix(object.Key, string(c.hostURL.Separator)):
					// We need to keep the trailing Separator, do not use filepath.Join().
					content.URL = url
					content.Time = time.Now()
					content.Type = os.ModeDir
				default:
					content.URL = url
					content.Size = object.Size
					content.Time = object.LastModified
					content.Type = os.FileMode(0664)
				}
				contentCh <- content
			}
		}
	}
}
func (c *s3Client) listIncompleteInRoutine(contentCh chan *client.Content) {
	defer close(contentCh)
	// get bucket and object from URL.
	b, o := c.url2BucketAndObject()
	switch {
	case b == "" && o == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				contentCh <- &client.Content{
					Err: probe.NewError(bucket.Err),
				}
				return
			}
			for object := range c.api.ListIncompleteUploads(bucket.Name, o, false) {
				if object.Err != nil {
					contentCh <- &client.Content{
						Err: probe.NewError(object.Err),
					}
					return
				}
				content := new(client.Content)
				url := *c.hostURL
				// Join bucket and incoming object key.
				url.Path = filepath.Join(string(url.Separator), bucket.Name, object.Key)
				if c.virtualStyle {
					url.Path = filepath.Join(string(url.Separator), object.Key)
				}
				switch {
				case strings.HasSuffix(object.Key, string(c.hostURL.Separator)):
					// We need to keep the trailing Separator, do not use filepath.Join().
					content.URL = url
					content.Time = time.Now()
					content.Type = os.ModeDir
				default:
					content.URL = url
					content.Size = object.Size
					content.Time = object.Initiated
					content.Type = os.ModeTemporary
				}
				contentCh <- content
			}
		}
	default:
		for object := range c.api.ListIncompleteUploads(b, o, false) {
			if object.Err != nil {
				contentCh <- &client.Content{
					Err: probe.NewError(object.Err),
				}
				return
			}
			content := new(client.Content)
			url := *c.hostURL
			// Join bucket and incoming object key.
			url.Path = filepath.Join(string(url.Separator), b, object.Key)
			if c.virtualStyle {
				url.Path = filepath.Join(string(url.Separator), object.Key)
			}
			switch {
			case strings.HasSuffix(object.Key, string(c.hostURL.Separator)):
				// We need to keep the trailing Separator, do not use filepath.Join().
				content.URL = url
				content.Time = time.Now()
				content.Type = os.ModeDir
			default:
				content.URL = url
				content.Size = object.Size
				content.Time = object.Initiated
				content.Type = os.ModeTemporary
			}
			contentCh <- content
		}
	}
}
func (c *s3Client) listRecursiveInRoutine(contentCh chan client.ContentOnChannel) {
	defer close(contentCh)
	// get bucket and object from URL
	b, o := c.url2BucketAndObject()
	switch {
	case b == "" && o == "":
		for bucket := range c.api.ListBuckets() {
			if bucket.Err != nil {
				contentCh <- client.ContentOnChannel{
					Content: nil,
					Err:     probe.NewError(bucket.Err),
				}
				return
			}
			for object := range c.api.ListObjects(bucket.Stat.Name, o, true) {
				if object.Err != nil {
					contentCh <- client.ContentOnChannel{
						Content: nil,
						Err:     probe.NewError(object.Err),
					}
					return
				}
				content := new(client.Content)
				url := *c.hostURL
				url.Path = filepath.Join(url.Path, bucket.Stat.Name, object.Stat.Key)
				content.URL = url
				content.Size = object.Stat.Size
				content.Time = object.Stat.LastModified
				content.Type = os.FileMode(0664)
				contentCh <- client.ContentOnChannel{
					Content: content,
					Err:     nil,
				}
			}
		}
	default:
		for object := range c.api.ListObjects(b, o, true) {
			if object.Err != nil {
				contentCh <- client.ContentOnChannel{
					Content: nil,
					Err:     probe.NewError(object.Err),
				}
				return
			}
			content := new(client.Content)
			url := *c.hostURL
			// join bucket and incoming object key.
			url.Path = filepath.Join(string(url.Separator), b, object.Stat.Key)
			// if virtualStyle replace the url.Path back.
			if c.virtualStyle {
				url.Path = filepath.Join(string(url.Separator), object.Stat.Key)
			}
			content.URL = url
			content.Size = object.Stat.Size
			content.Time = object.Stat.LastModified
			content.Type = os.FileMode(0664)
			contentCh <- client.ContentOnChannel{
				Content: content,
				Err:     nil,
			}
		}
	}
}
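// Example (illustrative sketch, not part of the original source): draining the
// channel-based recursive listing directly above. The routine closes contentCh
// when it finishes, and also right after sending the first error, so a plain
// range loop terminates either way. Assumes "fmt" is imported; the helper name
// drainRecursiveListing is hypothetical.
func drainRecursiveListing(c *s3Client) *probe.Error {
	contentCh := make(chan client.ContentOnChannel)
	go c.listRecursiveInRoutine(contentCh)
	for entry := range contentCh {
		if entry.Err != nil {
			// entry.Content is nil on error; hand the error back to the caller.
			return entry.Err.Trace()
		}
		fmt.Println(entry.Content.URL.Path, entry.Content.Size, entry.Content.Time)
	}
	return nil
}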