// CompleteMultipartUpload - complete a multipart upload and persist the data.
func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) {
	// Check bucket name is valid.
	if !IsValidBucketName(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	// Verify this is a valid upload ID for the incoming object.
	if !fs.isValidUploadID(object, uploadID) {
		return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e := os.Stat(bucketPath); e != nil {
		// Check bucket exists.
		if os.IsNotExist(e) {
			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectMetadata{}, probe.NewError(InternalError{})
	}

	objectPath := filepath.Join(bucketPath, object)
	file, e := atomic.FileCreateWithPrefix(objectPath, "$tmpobject")
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}
	md5Hasher := md5.New()
	objectWriter := io.MultiWriter(file, md5Hasher)

	partBytes, e := ioutil.ReadAll(data)
	if e != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(e)
	}
	if signature != nil {
		sh := sha256.New()
		sh.Write(partBytes)
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
		if err != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
		}
	}

	completeMultipartUpload := &CompleteMultipartUpload{}
	if e := xml.Unmarshal(partBytes, completeMultipartUpload); e != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(MalformedXML{})
	}
	if !sort.IsSorted(completedParts(completeMultipartUpload.Part)) {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
	}

	// Save parts for verification.
	parts := completeMultipartUpload.Part

	// Critical region requiring read lock.
	fs.rwLock.RLock()
	savedParts := fs.multiparts.ActiveSession[uploadID].Parts
	fs.rwLock.RUnlock()

	if !doPartsMatch(parts, savedParts) {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(InvalidPart{})
	}

	// Parts successfully validated, save all the parts.
	partPathPrefix := objectPath + uploadID
	if err := saveParts(partPathPrefix, objectWriter, parts); err != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, err.Trace(partPathPrefix)
	}
	// Successfully saved, remove all parts.
	removeParts(partPathPrefix, savedParts)

	// Critical region requiring write lock.
	fs.rwLock.Lock()
	delete(fs.multiparts.ActiveSession, uploadID)
	if err := saveMultipartsSession(*fs.multiparts); err != nil {
		fs.rwLock.Unlock()
		file.CloseAndPurge()
		return ObjectMetadata{}, err.Trace(partPathPrefix)
	}
	file.Close()
	fs.rwLock.Unlock()

	// Stat the file again to fetch the freshly written object metadata.
	st, e := os.Stat(objectPath)
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}
	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(objectPath); objectExt != "" {
		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
		if ok {
			contentType = content.ContentType
		}
	}
	newObject := ObjectMetadata{
		Bucket:      bucket,
		Object:      object,
		Created:     st.ModTime(),
		Size:        st.Size(),
		ContentType: contentType,
		MD5:         hex.EncodeToString(md5Hasher.Sum(nil)),
	}
	return newObject, nil
}
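// doPartsMatch is called above but is not part of this excerpt. The following
// is a minimal sketch of the comparison it needs to perform, assuming
// CompletePart carries PartNumber and ETag fields and that savedParts is kept
// sorted by part number (assumptions, not confirmed by this excerpt).
func doPartsMatch(parts []CompletePart, savedParts []PartMetadata) bool {
	if len(parts) != len(savedParts) {
		return false
	}
	for i, part := range parts {
		if part.PartNumber != savedParts[i].PartNumber {
			return false
		}
		// Clients commonly send quoted ETags; strip quotes before comparing.
		if strings.Trim(part.ETag, "\"") != savedParts[i].ETag {
			return false
		}
	}
	return true
}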
// CreateObject - create an object.
func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) {
	di, e := disk.GetInfo(fs.path)
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}

	// Remove 5% from total space for cumulative disk space used for
	// journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return ObjectMetadata{}, probe.NewError(RootPathFull{Path: fs.path})
	}

	// Check bucket name is valid.
	if !IsValidBucketName(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e = os.Stat(bucketPath); e != nil {
		if os.IsNotExist(e) {
			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectMetadata{}, probe.NewError(e)
	}

	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Get object path.
	objectPath := filepath.Join(bucketPath, object)
	if strings.TrimSpace(expectedMD5Sum) != "" {
		var expectedMD5SumBytes []byte
		expectedMD5SumBytes, e = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if e != nil {
			// Pro-actively close the connection.
			return ObjectMetadata{}, probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	// Write object.
	file, e := atomic.FileCreateWithPrefix(objectPath, "$tmpobject")
	if e != nil {
		switch e := e.(type) {
		case *os.PathError:
			if e.Op == "mkdir" {
				if strings.Contains(e.Error(), "not a directory") {
					return ObjectMetadata{}, probe.NewError(ObjectExistsAsPrefix{Bucket: bucket, Prefix: object})
				}
			}
			return ObjectMetadata{}, probe.NewError(e)
		default:
			return ObjectMetadata{}, probe.NewError(e)
		}
	}

	// Instantiate checksum hashers and create a multiwriter.
	md5Hasher := md5.New()
	sha256Hasher := sha256.New()
	objectWriter := io.MultiWriter(file, md5Hasher, sha256Hasher)

	if size > 0 {
		if _, e = io.CopyN(objectWriter, data, size); e != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(e)
		}
	} else {
		if _, e = io.Copy(objectWriter, data); e != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(e)
		}
	}

	md5Sum := hex.EncodeToString(md5Hasher.Sum(nil))
	// Verify that the written object matches what was expected, but only
	// if a digest was requested.
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum) {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
		}
	}
	sha256Sum := hex.EncodeToString(sha256Hasher.Sum(nil))
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(sha256Sum)
		if err != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
		}
	}
	file.Close()

	// Stat the file again to fetch the latest metadata.
	st, e := os.Stat(objectPath)
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}
	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(objectPath); objectExt != "" {
		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
		if ok {
			contentType = content.ContentType
		}
	}
	newObject := ObjectMetadata{
		Bucket:      bucket,
		Object:      object,
		Created:     st.ModTime(),
		Size:        st.Size(),
		ContentType: contentType,
		MD5:         md5Sum,
	}
	return newObject, nil
}
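// isMD5SumEqual is used by CreateObject and CreateObjectPart but is not shown
// in this excerpt. A plausible sketch, assuming both arguments are hex-encoded
// digests (an assumption based on the hex.EncodeToString calls above): decode
// both sides and compare the raw bytes, rejecting anything that fails to
// decode. Note bytes.Equal requires the standard-library bytes import, which
// this excerpt omits.
func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool {
	if expectedMD5Sum == "" || actualMD5Sum == "" {
		return false
	}
	expectedBytes, e := hex.DecodeString(expectedMD5Sum)
	if e != nil {
		return false
	}
	actualBytes, e := hex.DecodeString(actualMD5Sum)
	if e != nil {
		return false
	}
	// Comparing decoded bytes makes the check insensitive to hex casing.
	return bytes.Equal(expectedBytes, actualBytes)
}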
// CreateObjectPart - create a part in a multipart session.
func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string, partID int, size int64, data io.Reader, signature *signV4.Signature) (string, *probe.Error) {
	di, err := disk.GetInfo(fs.path)
	if err != nil {
		return "", probe.NewError(err)
	}

	// Remove 5% from total space for cumulative disk space used for
	// journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return "", probe.NewError(RootPathFull{Path: fs.path})
	}

	// Part id cannot be zero or negative.
	if partID <= 0 {
		return "", probe.NewError(errors.New("invalid part id, cannot be zero or negative"))
	}
	// Check bucket name is valid.
	if !IsValidBucketName(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	// Verify upload is valid for the incoming object.
	if !fs.isValidUploadID(object, uploadID) {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	if strings.TrimSpace(expectedMD5Sum) != "" {
		var expectedMD5SumBytes []byte
		expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// Pro-actively close the connection.
			return "", probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, err = os.Stat(bucketPath); err != nil {
		// Check bucket exists.
		if os.IsNotExist(err) {
			return "", probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return "", probe.NewError(err)
	}

	objectPath := filepath.Join(bucketPath, object)
	partPathPrefix := objectPath + uploadID
	partPath := partPathPrefix + expectedMD5Sum + fmt.Sprintf("$%d-$multiparts", partID)
	partFile, e := atomic.FileCreateWithPrefix(partPath, "$multiparts")
	if e != nil {
		return "", probe.NewError(e)
	}

	md5Hasher := md5.New()
	sha256Hasher := sha256.New()
	partWriter := io.MultiWriter(partFile, md5Hasher, sha256Hasher)
	if _, e = io.CopyN(partWriter, data, size); e != nil {
		partFile.CloseAndPurge()
		return "", probe.NewError(e)
	}

	md5sum := hex.EncodeToString(md5Hasher.Sum(nil))
	// Verify that the written part matches what was expected, but only
	// if a digest was requested.
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5sum) {
			partFile.CloseAndPurge()
			return "", probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
		}
	}
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256Hasher.Sum(nil)))
		if err != nil {
			partFile.CloseAndPurge()
			return "", err.Trace()
		}
		if !ok {
			partFile.CloseAndPurge()
			return "", probe.NewError(signV4.SigDoesNotMatch{})
		}
	}
	partFile.Close()

	fi, e := os.Stat(partPath)
	if e != nil {
		return "", probe.NewError(e)
	}
	partMetadata := PartMetadata{}
	partMetadata.ETag = md5sum
	partMetadata.PartNumber = partID
	partMetadata.Size = fi.Size()
	partMetadata.LastModified = fi.ModTime()

	// Critical region requiring read lock.
	fs.rwLock.RLock()
	deserializedMultipartSession, ok := fs.multiparts.ActiveSession[uploadID]
	fs.rwLock.RUnlock()
	if !ok {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	// Append the incoming part to the session.
	deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, partMetadata)
	// Remove duplicate parts, keeping the most recently uploaded.
	deserializedMultipartSession.Parts = removeDuplicateParts(deserializedMultipartSession.Parts)
	// Save total parts uploaded.
	deserializedMultipartSession.TotalParts = len(deserializedMultipartSession.Parts)
	// Sort by part number before saving.
	sort.Sort(partNumber(deserializedMultipartSession.Parts))

	// Critical region requiring write lock.
	fs.rwLock.Lock()
	fs.multiparts.ActiveSession[uploadID] = deserializedMultipartSession
	if err := saveMultipartsSession(*fs.multiparts); err != nil {
		fs.rwLock.Unlock()
		return "", err.Trace(partPathPrefix)
	}
	fs.rwLock.Unlock()

	// Return the etag of the part.
	return partMetadata.ETag, nil
}
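// partNumber and removeDuplicateParts are referenced above but not included in
// this excerpt. The sketches below assume that the newest occurrence of a
// duplicate part number should win, consistent with the "most recently
// uploaded" comment above; beyond that, their behavior is an assumption.

// partNumber implements sort.Interface over part metadata, ordering by
// ascending part number.
type partNumber []PartMetadata

func (a partNumber) Len() int           { return len(a) }
func (a partNumber) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

// removeDuplicateParts walks the slice from newest to oldest, keeping only the
// most recent entry seen for each part number. The result comes out in reverse
// upload order, which is harmless because the caller re-sorts by part number.
func removeDuplicateParts(parts []PartMetadata) []PartMetadata {
	seen := make(map[int]bool)
	var unique []PartMetadata
	for i := len(parts) - 1; i >= 0; i-- {
		if !seen[parts[i].PartNumber] {
			seen[parts[i].PartNumber] = true
			unique = append(unique, parts[i])
		}
	}
	return unique
}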