// GetObjectMetadata - get object metadata from cache func (xl API) GetObjectMetadata(bucket, key string) (ObjectMetadata, *probe.Error) { xl.lock.Lock() defer xl.lock.Unlock() // check if bucket exists if !IsValidBucket(bucket) { return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) { return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key}) } if !xl.storedBuckets.Exists(bucket) { return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) } storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) objectKey := bucket + "/" + key if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok == true { return objMetadata, nil } if len(xl.config.NodeDiskMap) > 0 { objMetadata, err := xl.getObjectMetadata(bucket, key) if err != nil { return ObjectMetadata{}, err.Trace() } // update storedBucket.objectMetadata[objectKey] = objMetadata xl.storedBuckets.Set(bucket, storedBucket) return objMetadata, nil } return ObjectMetadata{}, probe.NewError(ObjectNotFound{Object: key}) }
func (r rpcSignature) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) { // set new calulated payload r.Request.Header.Set("X-Minio-Content-Sha256", hashedPayload) // Add date if not present throw error var date string if date = r.Request.Header.Get(http.CanonicalHeaderKey("x-minio-date")); date == "" { if date = r.Request.Header.Get("Date"); date == "" { return false, probe.NewError(errMissingDateHeader) } } t, err := time.Parse(iso8601Format, date) if err != nil { return false, probe.NewError(err) } canonicalRequest := r.getCanonicalRequest() stringToSign := r.getStringToSign(canonicalRequest, t) signingKey := r.getSigningKey(t) newSignature := r.getSignature(signingKey, stringToSign) if newSignature != r.Signature { return false, nil } return true, nil }
// GetBucketMetadata - get bucket metadata func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) { fs.lock.Lock() defer fs.lock.Unlock() if !IsValidBucketName(bucket) { return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) } // get bucket path bucketDir := filepath.Join(fs.path, bucket) fi, err := os.Stat(bucketDir) if err != nil { // check if bucket exists if os.IsNotExist(err) { return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) } return BucketMetadata{}, probe.NewError(err) } bucketMetadata, ok := fs.buckets.Metadata[bucket] if !ok { bucketMetadata = &BucketMetadata{} bucketMetadata.Name = fi.Name() bucketMetadata.Created = fi.ModTime() bucketMetadata.ACL = BucketACL("private") } return *bucketMetadata, nil }
// AbortMultipartUpload - abort an incomplete multipart session func (donut API) AbortMultipartUpload(bucket, key, uploadID string) *probe.Error { donut.lock.Lock() defer donut.lock.Unlock() if !IsValidBucket(bucket) { return probe.NewError(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(key) { return probe.NewError(ObjectNameInvalid{Object: key}) } // TODO: multipart support for donut is broken, since we haven't finalized the format in which // it can be stored, disabling this for now until we get the underlying layout stable. // // if len(donut.config.NodeDiskMap) > 0 { // return donut.abortMultipartUpload(bucket, key, uploadID) // } if !donut.storedBuckets.Exists(bucket) { return probe.NewError(BucketNotFound{Bucket: bucket}) } storedBucket := donut.storedBuckets.Get(bucket).(storedBucket) if storedBucket.multiPartSession[key].UploadID != uploadID { return probe.NewError(InvalidUploadID{UploadID: uploadID}) } donut.cleanupMultipartSession(bucket, key, uploadID) return nil }
// initPostPresignedPolicyV4 initializing post policy signature verification func initPostPresignedPolicyV4(formValues map[string]string) (*signv4.Signature, *probe.Error) { credentialElements := strings.Split(strings.TrimSpace(formValues["X-Amz-Credential"]), "/") if len(credentialElements) != 5 { return nil, probe.NewError(errCredentialTagMalformed) } accessKeyID := credentialElements[0] if !IsValidAccessKey(accessKeyID) { return nil, probe.NewError(errAccessKeyIDInvalid) } authConfig, perr := LoadConfig() if perr != nil { return nil, perr.Trace() } for _, user := range authConfig.Users { if user.AccessKeyID == accessKeyID { signature := &signv4.Signature{ AccessKeyID: user.AccessKeyID, SecretAccessKey: user.SecretAccessKey, Signature: formValues["X-Amz-Signature"], PresignedPolicy: formValues["Policy"], } return signature, nil } } return nil, probe.NewError(errAccessKeyIDInvalid) }
func deleteObjectPath(basePath, deletePath, bucket, object string) *probe.Error { if basePath == deletePath { return nil } fi, err := os.Stat(deletePath) if err != nil { if os.IsNotExist(err) { return probe.NewError(ObjectNotFound{Bucket: bucket, Object: object}) } return probe.NewError(err) } if fi.IsDir() { empty, err := isDirEmpty(deletePath) if err != nil { return err.Trace() } if !empty { return nil } } if err := os.Remove(deletePath); err != nil { return probe.NewError(err) } if err := deleteObjectPath(basePath, filepath.Dir(deletePath), bucket, object); err != nil { return err.Trace() } return nil }
// GetBucketMetadata - get bucket metadata. func (fs Filesystem) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) { if !IsValidBucketName(bucket) { return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) } bucket = fs.denormalizeBucket(bucket) // Get bucket path. bucketDir := filepath.Join(fs.path, bucket) fi, e := os.Stat(bucketDir) if e != nil { // Check if bucket exists. if os.IsNotExist(e) { return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) } return BucketMetadata{}, probe.NewError(e) } fs.rwLock.RLock() bucketMetadata, ok := fs.buckets.Metadata[bucket] fs.rwLock.RUnlock() // If metadata value is not found, get it from disk. if !ok { bucketMetadata = &BucketMetadata{} bucketMetadata.Name = fi.Name() bucketMetadata.Created = fi.ModTime() bucketMetadata.ACL = BucketACL("private") } return *bucketMetadata, nil }
// initPresignedSignatureV4 initializing presigned signature verification func initPresignedSignatureV4(req *http.Request) (*fs.Signature, *probe.Error) { credentialElements := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-Credential")), "/") if len(credentialElements) != 5 { return nil, probe.NewError(errCredentialTagMalformed) } accessKeyID := credentialElements[0] if !isValidAccessKey(accessKeyID) { return nil, probe.NewError(errAccessKeyIDInvalid) } config, err := loadConfigV2() if err != nil { return nil, err.Trace() } region := credentialElements[2] signedHeaders := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-SignedHeaders")), ";") signature := strings.TrimSpace(req.URL.Query().Get("X-Amz-Signature")) if config.Credentials.AccessKeyID == accessKeyID { signature := &fs.Signature{ AccessKeyID: config.Credentials.AccessKeyID, SecretAccessKey: config.Credentials.SecretAccessKey, Region: region, Signature: signature, SignedHeaders: signedHeaders, Presigned: true, Request: req, } return signature, nil } return nil, probe.NewError(errAccessKeyIDInvalid) }
// Delete removes all the session files. func (s *sessionV2) Delete() *probe.Error { s.mutex.Lock() defer s.mutex.Unlock() if s.DataFP != nil { name := s.DataFP.Name() // close file pro-actively before deleting // ignore any error, it could be possibly that // the file is closed already s.DataFP.Close() err := os.Remove(name) if err != nil { return probe.NewError(err) } } sessionFile, err := getSessionFile(s.SessionID) if err != nil { return err.Trace(s.SessionID) } if err := os.Remove(sessionFile); err != nil { return probe.NewError(err) } return nil }
// New - instantiate new disk func New(diskPath string) (Disk, *probe.Error) { if diskPath == "" { return Disk{}, probe.NewError(InvalidArgument{}) } st, err := os.Stat(diskPath) if err != nil { return Disk{}, probe.NewError(err) } if !st.IsDir() { return Disk{}, probe.NewError(syscall.ENOTDIR) } s := syscall.Statfs_t{} err = syscall.Statfs(diskPath, &s) if err != nil { return Disk{}, probe.NewError(err) } disk := Disk{ lock: &sync.Mutex{}, path: diskPath, fsInfo: make(map[string]string), } if fsType := getFSType(s.Type); fsType != "UNKNOWN" { disk.fsInfo["FSType"] = fsType disk.fsInfo["MountPoint"] = disk.path return disk, nil } return Disk{}, probe.NewError(UnsupportedFilesystem{Type: strconv.FormatInt(int64(s.Type), 10)}) }
// initPostPresignedPolicyV4 initializing post policy signature verification func initPostPresignedPolicyV4(formValues map[string]string) (*fs.Signature, *probe.Error) { credentialElements := strings.Split(strings.TrimSpace(formValues["X-Amz-Credential"]), "/") if len(credentialElements) != 5 { return nil, probe.NewError(errCredentialTagMalformed) } accessKeyID := credentialElements[0] if !isValidAccessKey(accessKeyID) { return nil, probe.NewError(errAccessKeyIDInvalid) } config, perr := loadConfigV2() if perr != nil { return nil, perr.Trace() } region := credentialElements[2] if config.Credentials.AccessKeyID == accessKeyID { signature := &fs.Signature{ AccessKeyID: config.Credentials.AccessKeyID, SecretAccessKey: config.Credentials.SecretAccessKey, Region: region, Signature: formValues["X-Amz-Signature"], PresignedPolicy: formValues["Policy"], } return signature, nil } return nil, probe.NewError(errAccessKeyIDInvalid) }
// GetObjectMetadata - HEAD object func (fs Filesystem) GetObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) { fs.lock.Lock() defer fs.lock.Unlock() if !IsValidBucketName(bucket) { return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) } if !IsValidObjectName(object) { return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: bucket}) } bucket = fs.denormalizeBucket(bucket) bucketPath := filepath.Join(fs.path, bucket) if _, e := os.Stat(bucketPath); e != nil { if os.IsNotExist(e) { return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) } return ObjectMetadata{}, probe.NewError(e) } metadata, err := getMetadata(fs.path, bucket, object) if err != nil { return ObjectMetadata{}, err.Trace(bucket, object) } if metadata.Mode.IsDir() { return ObjectMetadata{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object}) } return metadata, nil }
func configureWebServer(conf cloudServerConfig) (*http.Server, *probe.Error) { // Split the api address into host and port. host, port, e := net.SplitHostPort(conf.Address) if e != nil { return nil, probe.NewError(e) } webPort, e := strconv.Atoi(port) if e != nil { return nil, probe.NewError(e) } // Always choose the next port, based on the API address port. webPort = webPort + 1 webAddress := net.JoinHostPort(host, strconv.Itoa(webPort)) // Minio server config webServer := &http.Server{ Addr: webAddress, Handler: getWebAPIHandler(getNewWebAPI(conf)), MaxHeaderBytes: 1 << 20, } if conf.TLS { var err error webServer.TLSConfig = &tls.Config{} webServer.TLSConfig.Certificates = make([]tls.Certificate, 1) webServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(conf.CertFile, conf.KeyFile) if err != nil { return nil, probe.NewError(err) } } return webServer, nil }
func printServerMsg(serverConf *http.Server) { host, port, e := net.SplitHostPort(serverConf.Addr) fatalIf(probe.NewError(e), "Unable to split host port.", nil) var hosts []string switch { case host != "": hosts = append(hosts, host) default: addrs, e := net.InterfaceAddrs() fatalIf(probe.NewError(e), "Unable to get interface address.", nil) for _, addr := range addrs { if addr.Network() == "ip+net" { host := strings.Split(addr.String(), "/")[0] if ip := net.ParseIP(host); ip.To4() != nil { hosts = append(hosts, host) } } } } for _, host := range hosts { if serverConf.TLSConfig != nil { Printf(" https://%s:%s\n", host, port) } else { Printf(" http://%s:%s\n", host, port) } } }
// Get download an full or part object from bucket // getobject returns a reader, length and nil for no errors // with errors getobject will return nil reader, length and typed errors func (f *fsClient) Get(offset, length int64) (io.ReadCloser, int64, *probe.Error) { if offset < 0 || length < 0 { return nil, 0, probe.NewError(client.InvalidRange{Offset: offset}) } tmppath := f.PathURL.Path // Golang strips trailing / if you clean(..) or // EvalSymlinks(..). Adding '.' prevents it from doing so. if strings.HasSuffix(tmppath, string(f.PathURL.Separator)) { tmppath = tmppath + "." } // Resolve symlinks _, err := filepath.EvalSymlinks(tmppath) if os.IsNotExist(err) { return nil, length, probe.NewError(err) } if err != nil { return nil, length, probe.NewError(err) } if offset == 0 && length == 0 { return f.get() } body, err := os.Open(f.PathURL.Path) if err != nil { return nil, length, probe.NewError(err) } _, err = io.CopyN(ioutil.Discard, body, int64(offset)) if err != nil { return nil, length, probe.NewError(err) } return body, length, nil }
// Load - loads json config from filename for the a given struct data func Load(filename string, data interface{}) (Config, *probe.Error) { _, err := os.Stat(filename) if err != nil { return nil, probe.NewError(err) } fileData, err := ioutil.ReadFile(filename) if err != nil { return nil, probe.NewError(err) } if runtime.GOOS == "windows" { fileData = []byte(strings.Replace(string(fileData), "\r\n", "\n", -1)) } err = json.Unmarshal(fileData, &data) if err != nil { switch err := err.(type) { case *json.SyntaxError: return nil, probe.NewError(FormatJSONSyntaxError(bytes.NewReader(fileData), err)) default: return nil, probe.NewError(err) } } config, perr := New(data) if perr != nil { return nil, perr.Trace() } return config, nil }
func getMetadata(rootPath, bucket, object string) (ObjectMetadata, *probe.Error) { // Do not use filepath.Join() since filepath.Join strips off any object names with '/', use them as is // in a static manner so that we can send a proper 'ObjectNotFound' reply back upon os.Stat() var objectPath string // For windows use its special os.PathSeparator == "\\" if runtime.GOOS == "windows" { objectPath = rootPath + string(os.PathSeparator) + bucket + string(os.PathSeparator) + object } else { objectPath = rootPath + string(os.PathSeparator) + bucket + string(os.PathSeparator) + object } stat, err := os.Stat(objectPath) if err != nil { if os.IsNotExist(err) { return ObjectMetadata{}, probe.NewError(ObjectNotFound{Bucket: bucket, Object: object}) } return ObjectMetadata{}, probe.NewError(err) } contentType := "application/octet-stream" if runtime.GOOS == "windows" { object = sanitizeWindowsPath(object) } metadata := ObjectMetadata{ Bucket: bucket, Object: object, Created: stat.ModTime(), Size: stat.Size(), ContentType: contentType, Mode: stat.Mode(), } return metadata, nil }
// Save writes config data in JSON format to a file. func (d config) Save(filename string) *probe.Error { d.lock.Lock() defer d.lock.Unlock() jsonData, err := json.MarshalIndent(d.data, "", "\t") if err != nil { return probe.NewError(err) } if runtime.GOOS == "windows" { jsonData = []byte(strings.Replace(string(jsonData), "\n", "\r\n", -1)) } atomicFile, err := atomic.FileCreate(filename) if err != nil { return probe.NewError(err) } _, err = atomicFile.Write(jsonData) if err != nil { return probe.NewError(err) } err = atomicFile.Close() if err != nil { return probe.NewError(err) } return nil }
// Match reports whether source has a matching entry in the sorted listing
// decoded incrementally from sl.dec. A match requires equal names, and for
// regular files an equal size as well. The listing is assumed to be sorted
// by name, so the scan only ever advances forward.
func (sl *sortedList) Match(source *client.Content) (bool, *probe.Error) {
	if len(sl.current.Name) == 0 {
		// for the first time read
		if err := sl.dec.Decode(&sl.current); err != nil {
			if err != io.EOF {
				return false, probe.NewError(err)
			}
			// NOTE(review): EOF on the first read yields (false, nil), while
			// EOF mid-scan below is surfaced as an error — confirm this
			// asymmetry is intended.
			return false, nil
		}
	}
	for {
		compare := strings.Compare(source.Name, sl.current.Name)
		if compare == 0 {
			// Names match; regular files must also match in size.
			if source.Type.IsRegular() && sl.current.Type.IsRegular() && source.Size == sl.current.Size {
				return true, nil
			}
			return false, nil
		}
		if compare < 0 {
			// source sorts before the cursor, so it cannot appear later.
			return false, nil
		}
		// assign zero values to fields because if s.current's previous decode had non zero value
		// fields it will not be over written if this loop's decode does not contain those fields
		sl.current = client.Content{}
		if err := sl.dec.Decode(&sl.current); err != nil {
			return false, probe.NewError(err)
		}
	}
}
// ShareUpload - get data for presigned post http form upload. func (c *s3Client) ShareUpload(isRecursive bool, expires time.Duration, contentType string) (map[string]string, *probe.Error) { bucket, object := c.url2BucketAndObject() p := minio.NewPostPolicy() if e := p.SetExpires(time.Now().UTC().Add(expires)); e != nil { return nil, probe.NewError(e) } if strings.TrimSpace(contentType) != "" || contentType != "" { // No need to verify for error here, since we have stripped out spaces. p.SetContentType(contentType) } if e := p.SetBucket(bucket); e != nil { return nil, probe.NewError(e) } if isRecursive { if e := p.SetKeyStartsWith(object); e != nil { return nil, probe.NewError(e) } } else { if e := p.SetKey(object); e != nil { return nil, probe.NewError(e) } } m, e := c.api.PresignedPostPolicy(p) return m, probe.NewError(e) }
func (donut API) mergeMultipart(parts *CompleteMultipartUpload, uploadID string, fullObjectWriter *io.PipeWriter) { for _, part := range parts.Part { recvMD5 := part.ETag object, ok := donut.multiPartObjects[uploadID].Get(part.PartNumber) if ok == false { fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidPart{}))) return } calcMD5Bytes := md5.Sum(object) // complete multi part request header md5sum per part is hex encoded recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\"")) if err != nil { fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidDigest{Md5: recvMD5}))) return } if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) { fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(BadDigest{}))) return } if _, err := io.Copy(fullObjectWriter, bytes.NewReader(object)); err != nil { fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(err))) return } object = nil } fullObjectWriter.Close() return }
// Put - put object. func (c *s3Client) Put(data io.ReadSeeker, size int64, contentType string) *probe.Error { // md5 is purposefully ignored since AmazonS3 does not return proper md5sum // for a multipart upload and there is no need to cross verify, // invidual parts are properly verified fully in transit and also upon completion // of the multipart request. bucket, object := c.url2BucketAndObject() if contentType == "" { contentType = "application/octet-stream" } e := c.api.PutObject(bucket, object, data, size, contentType) if e != nil { errResponse := minio.ToErrorResponse(e) if errResponse != nil { if errResponse.Code == "AccessDenied" { return probe.NewError(client.PathInsufficientPermission{ Path: c.hostURL.String(), }) } if errResponse.Code == "MethodNotAllowed" { return probe.NewError(client.ObjectAlreadyExists{ Object: object, }) } if errResponse.Code == "InvalidArgument" { return probe.NewError(client.ObjectMissing{}) } } return probe.NewError(e) } return nil }
func extractHTTPFormValues(reader *multipart.Reader) (io.Reader, map[string]string, *probe.Error) { /// HTML Form values formValues := make(map[string]string) filePart := new(bytes.Buffer) var err error for err == nil { var part *multipart.Part part, err = reader.NextPart() if part != nil { if part.FileName() == "" { buffer, err := ioutil.ReadAll(part) if err != nil { return nil, nil, probe.NewError(err) } formValues[http.CanonicalHeaderKey(part.FormName())] = string(buffer) } else { _, err := io.Copy(filePart, part) if err != nil { return nil, nil, probe.NewError(err) } } } } return filePart, formValues, nil }
// Stat - send a 'HEAD' on a bucket or object to fetch its metadata. func (c *s3Client) Stat() (*client.Content, *probe.Error) { c.mu.Lock() objectMetadata := new(client.Content) bucket, object := c.url2BucketAndObject() switch { // valid case for 'ls -r s3/' case bucket == "" && object == "": for bucket := range c.api.ListBuckets() { if bucket.Err != nil { c.mu.Unlock() return nil, probe.NewError(bucket.Err) } } c.mu.Unlock() return &client.Content{URL: *c.hostURL, Type: os.ModeDir}, nil } if object != "" { metadata, e := c.api.StatObject(bucket, object) if e != nil { c.mu.Unlock() errResponse := minio.ToErrorResponse(e) if errResponse != nil { if errResponse.Code == "NoSuchKey" { // Append "/" to the object name proactively and see if the Listing // produces an output. If yes, then we treat it as a directory. prefixName := object // Trim any trailing separators and add it. prefixName = strings.TrimSuffix(prefixName, string(c.hostURL.Separator)) + string(c.hostURL.Separator) for objectStat := range c.api.ListObjects(bucket, prefixName, false) { if objectStat.Err != nil { return nil, probe.NewError(objectStat.Err) } content := client.Content{} content.URL = *c.hostURL content.Type = os.ModeDir return &content, nil } return nil, probe.NewError(client.PathNotFound{Path: c.hostURL.Path}) } } return nil, probe.NewError(e) } objectMetadata.URL = *c.hostURL objectMetadata.Time = metadata.LastModified objectMetadata.Size = metadata.Size objectMetadata.Type = os.FileMode(0664) c.mu.Unlock() return objectMetadata, nil } e := c.api.BucketExists(bucket) if e != nil { c.mu.Unlock() return nil, probe.NewError(e) } bucketMetadata := new(client.Content) bucketMetadata.URL = *c.hostURL bucketMetadata.Type = os.ModeDir c.mu.Unlock() return bucketMetadata, nil }
// initPresignedSignatureV4 initializing presigned signature verification func initPresignedSignatureV4(req *http.Request) (*signv4.Signature, *probe.Error) { credentialElements := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-Credential")), "/") if len(credentialElements) != 5 { return nil, probe.NewError(errCredentialTagMalformed) } accessKeyID := credentialElements[0] if !IsValidAccessKey(accessKeyID) { return nil, probe.NewError(errAccessKeyIDInvalid) } authConfig, err := LoadConfig() if err != nil { return nil, err.Trace() } signedHeaders := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-SignedHeaders")), ";") signature := strings.TrimSpace(req.URL.Query().Get("X-Amz-Signature")) for _, user := range authConfig.Users { if user.AccessKeyID == accessKeyID { signature := &signv4.Signature{ AccessKeyID: user.AccessKeyID, SecretAccessKey: user.SecretAccessKey, Signature: signature, SignedHeaders: signedHeaders, Presigned: true, Request: req, } return signature, nil } } return nil, probe.NewError(errAccessKeyIDInvalid) }
// getNewWebAPI instantiate a new WebAPI. func getNewWebAPI(conf cloudServerConfig) *WebAPI { // Split host port. host, port, e := net.SplitHostPort(conf.Address) fatalIf(probe.NewError(e), "Unable to parse web addess.", nil) // Default host is 'localhost', if no host present. if host == "" { host = "localhost" } // Initialize minio client for AWS Signature Version '4' inSecure := !conf.TLS // Insecure true when TLS is false. client, e := minio.NewV4(net.JoinHostPort(host, port), conf.AccessKeyID, conf.SecretAccessKey, inSecure) fatalIf(probe.NewError(e), "Unable to initialize minio client", nil) web := &WebAPI{ FSPath: conf.Path, AccessLog: conf.AccessLog, Client: client, inSecure: inSecure, apiAddress: conf.Address, accessKeyID: conf.AccessKeyID, secretAccessKey: conf.SecretAccessKey, } return web }
// fsStat - wrapper function to get file stat. func (f *fsClient) fsStat() (os.FileInfo, *probe.Error) { fpath := f.PathURL.Path // Golang strips trailing / if you clean(..) or // EvalSymlinks(..). Adding '.' prevents it from doing so. if strings.HasSuffix(fpath, string(f.PathURL.Separator)) { fpath = fpath + "." } fpath, e := filepath.EvalSymlinks(fpath) if e != nil { if os.IsPermission(e) { if runtime.GOOS == "windows" { return f.handleWindowsSymlinks(f.PathURL.Path) } return nil, probe.NewError(client.PathInsufficientPermission{Path: f.PathURL.Path}) } err := f.toClientError(e, f.PathURL.Path) return nil, err.Trace(fpath) } st, e := os.Stat(fpath) if e != nil { if os.IsPermission(e) { if runtime.GOOS == "windows" { return f.handleWindowsSymlinks(fpath) } return nil, probe.NewError(client.PathInsufficientPermission{Path: f.PathURL.Path}) } if os.IsNotExist(e) { return nil, probe.NewError(client.PathNotFound{Path: f.PathURL.Path}) } return nil, probe.NewError(e) } return st, nil }
// Put - create a new file func (f *fsClient) Put(size int64, data io.Reader) *probe.Error { objectDir, _ := filepath.Split(f.PathURL.Path) objectPath := f.PathURL.Path if objectDir != "" { if err := os.MkdirAll(objectDir, 0700); err != nil { return probe.NewError(err) } } fs, err := os.Create(objectPath) if err != nil { return probe.NewError(err) } defer fs.Close() // even if size is zero try to read from source if size > 0 { _, err = io.CopyN(fs, data, int64(size)) if err != nil { return probe.NewError(err) } } else { // size could be 0 for virtual files on certain filesystems // for example /proc, so read till EOF for such files _, err = io.Copy(fs, data) if err != nil { return probe.NewError(err) } } return nil }
// SetBucketMetadata - set bucket metadata func (fs Filesystem) SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error { fs.lock.Lock() defer fs.lock.Unlock() if !IsValidBucketName(bucket) { return probe.NewError(BucketNameInvalid{Bucket: bucket}) } acl := metadata["acl"] if !IsValidBucketACL(acl) { return probe.NewError(InvalidACL{ACL: acl}) } if strings.TrimSpace(acl) == "" { acl = "private" } bucketDir := filepath.Join(fs.path, bucket) fi, err := os.Stat(bucketDir) if err != nil { // check if bucket exists if os.IsNotExist(err) { return probe.NewError(BucketNotFound{Bucket: bucket}) } return probe.NewError(err) } bucketMetadata, ok := fs.buckets.Metadata[bucket] if !ok { bucketMetadata = &BucketMetadata{} bucketMetadata.Name = fi.Name() bucketMetadata.Created = fi.ModTime() } bucketMetadata.ACL = BucketACL(acl) fs.buckets.Metadata[bucket] = bucketMetadata if err := saveBucketsMetadata(fs.buckets); err != nil { return err.Trace(bucket) } return nil }
// deleteObjectPath - delete object path if its empty. func deleteObjectPath(basePath, deletePath, bucket, object string) *probe.Error { if basePath == deletePath { return nil } // Verify if the path exists. pathSt, e := os.Stat(deletePath) if e != nil { if os.IsNotExist(e) { return probe.NewError(ObjectNotFound{Bucket: bucket, Object: object}) } return probe.NewError(e) } if pathSt.IsDir() { // Verify if directory is empty. empty, e := ioutils.IsDirEmpty(deletePath) if e != nil { return probe.NewError(e) } if !empty { return nil } } // Attempt to remove path. if e := os.Remove(deletePath); e != nil { return probe.NewError(e) } // Recursively go down the next path and delete again. if err := deleteObjectPath(basePath, filepath.Dir(deletePath), bucket, object); err != nil { return err.Trace(basePath, deletePath, bucket, object) } return nil }