// sum256Reader calculates the sha256 sum of an input read seeker.
func sum256Reader(reader io.ReadSeeker) ([]byte, error) {
	h := sha256.New()
	var err error
	// Remember the current offset and restore it once the sum is computed.
	start, _ := reader.Seek(0, 1) // whence 1 == seek relative to current offset
	defer reader.Seek(start, 0)   // whence 0 == seek relative to start of file
	for err == nil {
		length := 0
		byteBuffer := make([]byte, 1024*1024)
		length, err = reader.Read(byteBuffer)
		byteBuffer = byteBuffer[0:length]
		h.Write(byteBuffer)
	}
	if err != io.EOF {
		return nil, err
	}
	return h.Sum(nil), nil
}
// CompleteMultipartUpload - complete a multipart upload and persist the data
func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	// check bucket name valid
	if !IsValidBucketName(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	// verify object path legal
	if !IsValidObjectName(object) {
		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	if !fs.isValidUploadID(object, uploadID) {
		return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, err := os.Stat(bucketPath); err != nil {
		// check bucket exists
		if os.IsNotExist(err) {
			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectMetadata{}, probe.NewError(InternalError{})
	}

	objectPath := filepath.Join(bucketPath, object)
	file, err := atomic.FileCreateWithPrefix(objectPath, "")
	if err != nil {
		return ObjectMetadata{}, probe.NewError(err)
	}
	h := md5.New()
	mw := io.MultiWriter(file, h)

	partBytes, err := ioutil.ReadAll(data)
	if err != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(err)
	}
	if signature != nil {
		sh := sha256.New()
		sh.Write(partBytes)
		ok, perr := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
		if perr != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, perr.Trace()
		}
		if !ok {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
		}
	}
	parts := &CompleteMultipartUpload{}
	if err := xml.Unmarshal(partBytes, parts); err != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(MalformedXML{})
	}
	if !sort.IsSorted(completedParts(parts.Part)) {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
	}

	if err := fs.concatParts(parts, objectPath, mw); err != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, err.Trace()
	}

	delete(fs.multiparts.ActiveSession, object)
	for _, part := range parts.Part {
		err = os.Remove(objectPath + fmt.Sprintf("$%d-$multiparts", part.PartNumber))
		if err != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(err)
		}
	}
	if err := os.Remove(objectPath + "$multiparts"); err != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(err)
	}
	if err := saveMultipartsSession(fs.multiparts); err != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, err.Trace()
	}
	file.Close()

	st, err := os.Stat(objectPath)
	if err != nil {
		return ObjectMetadata{}, probe.NewError(err)
	}
	newObject := ObjectMetadata{
		Bucket:      bucket,
		Object:      object,
		Created:     st.ModTime(),
		Size:        st.Size(),
		ContentType: "application/octet-stream",
		Md5:         hex.EncodeToString(h.Sum(nil)),
	}
	return newObject, nil
}
// CreateObjectPart - create a part in a multipart session
func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string, partID int, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	di, err := disk.GetInfo(fs.path)
	if err != nil {
		return "", probe.NewError(err)
	}

	// Remove 5% from total space for cumulative disk space used for journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return "", probe.NewError(RootPathFull{Path: fs.path})
	}

	if partID <= 0 {
		return "", probe.NewError(errors.New("invalid part id, cannot be zero or less than zero"))
	}
	// check bucket name valid
	if !IsValidBucketName(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	// verify object path legal
	if !IsValidObjectName(object) {
		return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}
	if !fs.isValidUploadID(object, uploadID) {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	if strings.TrimSpace(expectedMD5Sum) != "" {
		var expectedMD5SumBytes []byte
		expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// pro-actively close the connection
			return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, err = os.Stat(bucketPath); err != nil {
		// check bucket exists
		if os.IsNotExist(err) {
			return "", probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return "", probe.NewError(err)
	}

	objectPath := filepath.Join(bucketPath, object)
	partPath := objectPath + fmt.Sprintf("$%d-$multiparts", partID)
	partFile, err := atomic.FileCreateWithPrefix(partPath, "$multiparts")
	if err != nil {
		return "", probe.NewError(err)
	}
	h := md5.New()
	sh := sha256.New()
	mw := io.MultiWriter(partFile, h, sh)
	_, err = io.CopyN(mw, data, size)
	if err != nil {
		partFile.CloseAndPurge()
		return "", probe.NewError(err)
	}

	md5sum := hex.EncodeToString(h.Sum(nil))
	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5sum); err != nil {
			partFile.CloseAndPurge()
			return "", probe.NewError(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Object: object})
		}
	}
	if signature != nil {
		ok, perr := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
		if perr != nil {
			partFile.CloseAndPurge()
			return "", perr.Trace()
		}
		if !ok {
			partFile.CloseAndPurge()
			return "", probe.NewError(SignatureDoesNotMatch{})
		}
	}
	partFile.Close()

	fi, err := os.Stat(partPath)
	if err != nil {
		return "", probe.NewError(err)
	}
	partMetadata := PartMetadata{}
	partMetadata.ETag = md5sum
	partMetadata.PartNumber = partID
	partMetadata.Size = fi.Size()
	partMetadata.LastModified = fi.ModTime()

	multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDWR|os.O_APPEND, 0600)
	if err != nil {
		return "", probe.NewError(err)
	}
	defer multiPartfile.Close()

	var deserializedMultipartSession MultipartSession
	decoder := json.NewDecoder(multiPartfile)
	err = decoder.Decode(&deserializedMultipartSession)
	if err != nil {
		return "", probe.NewError(err)
	}
	deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, &partMetadata)
	deserializedMultipartSession.TotalParts++
	fs.multiparts.ActiveSession[object] = &deserializedMultipartSession

	sort.Sort(partNumber(deserializedMultipartSession.Parts))
	encoder := json.NewEncoder(multiPartfile)
	err = encoder.Encode(&deserializedMultipartSession)
	if err != nil {
		return "", probe.NewError(err)
	}
	return partMetadata.ETag, nil
}
// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for authenticated request
func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	bucket := vars["bucket"]

	if !api.Anonymous {
		if isRequestRequiresACLCheck(req) {
			writeErrorResponse(w, req, AccessDenied, req.URL.Path)
			return
		}
	}

	// read from 'x-amz-acl'
	aclType := getACLType(req)
	if aclType == unsupportedACLType {
		writeErrorResponse(w, req, NotImplemented, req.URL.Path)
		return
	}

	var signature *fs.Signature
	if !api.Anonymous {
		// Init signature V4 verification
		if isRequestSignatureV4(req) {
			var err *probe.Error
			signature, err = initSignatureV4(req)
			if err != nil {
				switch err.ToGoError() {
				case errInvalidRegion:
					errorIf(err.Trace(), "Unknown region in authorization header.", nil)
					writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
					return
				case errAccessKeyIDInvalid:
					errorIf(err.Trace(), "Invalid access key id.", nil)
					writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
					return
				default:
					errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
					writeErrorResponse(w, req, InternalError, req.URL.Path)
					return
				}
			}
		}
	}

	// if body of request is non-nil then check for validity of Content-Length
	if req.Body != nil {
		/// if Content-Length is unknown/missing, deny the request
		if req.ContentLength == -1 {
			writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
			return
		}
		if signature != nil {
			locationBytes, err := ioutil.ReadAll(req.Body)
			// verify the signature only when the body was read successfully
			if err == nil {
				sh := sha256.New()
				sh.Write(locationBytes)
				ok, perr := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
				if perr != nil {
					errorIf(perr.Trace(), "MakeBucket failed.", nil)
					writeErrorResponse(w, req, InternalError, req.URL.Path)
					return
				}
				if !ok {
					writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
					return
				}
			}
		}
	}

	err := api.Filesystem.MakeBucket(bucket, getACLTypeString(aclType))
	if err != nil {
		errorIf(err.Trace(), "MakeBucket failed.", nil)
		switch err.ToGoError().(type) {
		case fs.BucketNameInvalid:
			writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
		case fs.BucketExists:
			writeErrorResponse(w, req, BucketAlreadyExists, req.URL.Path)
		default:
			writeErrorResponse(w, req, InternalError, req.URL.Path)
		}
		return
	}
	// Make sure to add Location information here only for bucket
	w.Header().Set("Location", "/"+bucket)
	writeSuccessResponse(w, nil)
}
// sum256 calculates the sha256 sum of an input byte array.
func sum256(data []byte) []byte {
	hash := sha256.New()
	hash.Write(data)
	return hash.Sum(nil)
}
// createObjectPart - internal wrapper function called by CreateObjectPart
func (donut API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (string, *probe.Error) {
	if !IsValidBucket(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(key) {
		return "", probe.NewError(ObjectNameInvalid{Object: key})
	}
	// TODO: multipart support for donut is broken, since we haven't finalized the format in which
	// it can be stored, disabling this for now until we get the underlying layout stable.
	//
	/*
		if len(donut.config.NodeDiskMap) > 0 {
			metadata := make(map[string]string)
			if contentType == "" {
				contentType = "application/octet-stream"
			}
			contentType = strings.TrimSpace(contentType)
			metadata["contentType"] = contentType
			if strings.TrimSpace(expectedMD5Sum) != "" {
				expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
				if err != nil {
					// pro-actively close the connection
					return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
				}
				expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
			}
			partMetadata, err := donut.putObjectPart(bucket, key, expectedMD5Sum, uploadID, partID, data, size, metadata, signature)
			if err != nil {
				return "", err.Trace()
			}
			return partMetadata.ETag, nil
		}
	*/

	if !donut.storedBuckets.Exists(bucket) {
		return "", probe.NewError(BucketNotFound{Bucket: bucket})
	}
	strBucket := donut.storedBuckets.Get(bucket).(storedBucket)
	// Verify upload id
	if strBucket.multiPartSession[key].UploadID != uploadID {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	// get object key
	parts := strBucket.partMetadata[key]
	if _, ok := parts[partID]; ok {
		return parts[partID].ETag, nil
	}

	if contentType == "" {
		contentType = "application/octet-stream"
	}
	contentType = strings.TrimSpace(contentType)
	if strings.TrimSpace(expectedMD5Sum) != "" {
		expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// pro-actively close the connection
			return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	// calculate md5
	hash := md5.New()
	sha256hash := sha256.New()

	var totalLength int64
	var err error
	for err == nil {
		var length int
		byteBuffer := make([]byte, 1024*1024)
		// do not return the read error here, we will handle this error later
		length, err = data.Read(byteBuffer)
		if length != 0 {
			hash.Write(byteBuffer[0:length])
			sha256hash.Write(byteBuffer[0:length])
			ok := donut.multiPartObjects[uploadID].Append(partID, byteBuffer[0:length])
			if !ok {
				return "", probe.NewError(InternalError{})
			}
			totalLength += int64(length)
			go debug.FreeOSMemory()
		}
	}
	if totalLength != size {
		donut.multiPartObjects[uploadID].Delete(partID)
		return "", probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
	}
	if err != io.EOF {
		return "", probe.NewError(err)
	}

	md5SumBytes := hash.Sum(nil)
	md5Sum := hex.EncodeToString(md5SumBytes)
	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
			return "", err.Trace()
		}
	}
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
		if err != nil {
			return "", err.Trace()
		}
		if !ok {
			return "", probe.NewError(signv4.DoesNotMatch{})
		}
	}
	newPart := PartMetadata{
		PartNumber:   partID,
		LastModified: time.Now().UTC(),
		ETag:         md5Sum,
		Size:         totalLength,
	}

	parts[partID] = newPart
	strBucket.partMetadata[key] = parts
	multiPartSession := strBucket.multiPartSession[key]
	multiPartSession.TotalParts++
	strBucket.multiPartSession[key] = multiPartSession
	donut.storedBuckets.Set(bucket, strBucket)
	return md5Sum, nil
}
// CreateObjectPart - create a part in a multipart session
func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string, partID int, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
	di, err := disk.GetInfo(fs.path)
	if err != nil {
		return "", probe.NewError(err)
	}

	// Remove 5% from total space for cumulative disk space used for
	// journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return "", probe.NewError(RootPathFull{Path: fs.path})
	}

	// Part id cannot be negative.
	if partID <= 0 {
		return "", probe.NewError(errors.New("invalid part id, cannot be zero or less than zero"))
	}

	// Check bucket name valid.
	if !IsValidBucketName(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	// Verify object path legal.
	if !IsValidObjectName(object) {
		return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Verify upload is valid for the incoming object.
	if !fs.isValidUploadID(object, uploadID) {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	if strings.TrimSpace(expectedMD5Sum) != "" {
		var expectedMD5SumBytes []byte
		expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// Pro-actively close the connection
			return "", probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, err = os.Stat(bucketPath); err != nil {
		// Check bucket exists.
		if os.IsNotExist(err) {
			return "", probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return "", probe.NewError(err)
	}

	objectPath := filepath.Join(bucketPath, object)
	partPathPrefix := objectPath + uploadID
	partPath := partPathPrefix + expectedMD5Sum + fmt.Sprintf("$%d-$multiparts", partID)
	partFile, e := atomic.FileCreateWithPrefix(partPath, "$multiparts")
	if e != nil {
		return "", probe.NewError(e)
	}

	md5Hasher := md5.New()
	sha256Hasher := sha256.New()
	partWriter := io.MultiWriter(partFile, md5Hasher, sha256Hasher)
	if _, e = io.CopyN(partWriter, data, size); e != nil {
		partFile.CloseAndPurge()
		return "", probe.NewError(e)
	}

	md5sum := hex.EncodeToString(md5Hasher.Sum(nil))
	// Verify if the written object is equal to what is expected, only
	// if it is requested as such.
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5sum) {
			partFile.CloseAndPurge()
			return "", probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
		}
	}
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256Hasher.Sum(nil)))
		if err != nil {
			partFile.CloseAndPurge()
			return "", err.Trace()
		}
		if !ok {
			partFile.CloseAndPurge()
			return "", probe.NewError(SignatureDoesNotMatch{})
		}
	}
	partFile.Close()

	fi, e := os.Stat(partPath)
	if e != nil {
		return "", probe.NewError(e)
	}
	partMetadata := PartMetadata{}
	partMetadata.ETag = md5sum
	partMetadata.PartNumber = partID
	partMetadata.Size = fi.Size()
	partMetadata.LastModified = fi.ModTime()

	// Critical region requiring read lock.
	fs.rwLock.RLock()
	deserializedMultipartSession, ok := fs.multiparts.ActiveSession[uploadID]
	fs.rwLock.RUnlock()
	if !ok {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	// Replace metadata for any pre-existing part number, otherwise
	// append to the list.
	if len(deserializedMultipartSession.Parts) < partID {
		deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, partMetadata)
	} else {
		deserializedMultipartSession.Parts[partID-1] = partMetadata
	}
	deserializedMultipartSession.TotalParts = len(deserializedMultipartSession.Parts)
	// Sort by part number before saving.
	sort.Sort(partNumber(deserializedMultipartSession.Parts))

	// Critical region requiring write lock.
	fs.rwLock.Lock()
	fs.multiparts.ActiveSession[uploadID] = deserializedMultipartSession
	if err := saveMultipartsSession(*fs.multiparts); err != nil {
		fs.rwLock.Unlock()
		return "", err.Trace(partPathPrefix)
	}
	fs.rwLock.Unlock()

	// Return etag.
	return partMetadata.ETag, nil
}
// CompleteMultipartUpload - complete a multipart upload and persist the data
func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
	// Check bucket name is valid.
	if !IsValidBucketName(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Verify if valid upload for incoming object.
	if !fs.isValidUploadID(object, uploadID) {
		return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e := os.Stat(bucketPath); e != nil {
		// Check bucket exists.
		if os.IsNotExist(e) {
			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectMetadata{}, probe.NewError(InternalError{})
	}

	objectPath := filepath.Join(bucketPath, object)
	file, e := atomic.FileCreateWithPrefix(objectPath, "$tmpobject")
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}
	md5Hasher := md5.New()
	objectWriter := io.MultiWriter(file, md5Hasher)

	partBytes, e := ioutil.ReadAll(data)
	if e != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(e)
	}
	if signature != nil {
		sh := sha256.New()
		sh.Write(partBytes)
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
		if err != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
		}
	}
	completeMultipartUpload := &CompleteMultipartUpload{}
	if e := xml.Unmarshal(partBytes, completeMultipartUpload); e != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(MalformedXML{})
	}
	if !sort.IsSorted(completedParts(completeMultipartUpload.Part)) {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
	}

	// Save parts for verification.
	parts := completeMultipartUpload.Part

	// Critical region requiring read lock.
	fs.rwLock.RLock()
	savedParts := fs.multiparts.ActiveSession[uploadID].Parts
	fs.rwLock.RUnlock()

	if !doPartsMatch(parts, savedParts) {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(InvalidPart{})
	}

	// Parts successfully validated, save all the parts.
	partPathPrefix := objectPath + uploadID
	if err := saveParts(partPathPrefix, objectWriter, parts); err != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, err.Trace(partPathPrefix)
	}
	// Successfully saved, remove all parts.
	removeParts(partPathPrefix, savedParts)

	// Critical region requiring write lock.
	fs.rwLock.Lock()
	delete(fs.multiparts.ActiveSession, uploadID)
	if err := saveMultipartsSession(*fs.multiparts); err != nil {
		fs.rwLock.Unlock()
		file.CloseAndPurge()
		return ObjectMetadata{}, err.Trace(partPathPrefix)
	}
	file.Close()
	fs.rwLock.Unlock()

	// Send stat again to get object metadata.
	st, e := os.Stat(objectPath)
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}

	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(objectPath); objectExt != "" {
		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
		if ok {
			contentType = content.ContentType
		}
	}
	newObject := ObjectMetadata{
		Bucket:      bucket,
		Object:      object,
		Created:     st.ModTime(),
		Size:        st.Size(),
		ContentType: contentType,
		MD5:         hex.EncodeToString(md5Hasher.Sum(nil)),
	}
	return newObject, nil
}
// CreateObject - PUT object
func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	stfs, err := disk.Stat(fs.path)
	if err != nil {
		return ObjectMetadata{}, probe.NewError(err)
	}

	// Remove 5% from total space for cumulative disk space used for journalling, inodes etc.
	availableDiskSpace := (float64(stfs.Free) / (float64(stfs.Total) - (0.05 * float64(stfs.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return ObjectMetadata{}, probe.NewError(RootPathFull{Path: fs.path})
	}

	// check bucket name valid
	if !IsValidBucket(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	// check bucket exists
	if _, err = os.Stat(filepath.Join(fs.path, bucket)); os.IsNotExist(err) {
		return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
	}
	// verify object path legal
	if !IsValidObjectName(object) {
		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// get object path
	objectPath := filepath.Join(fs.path, bucket, object)
	if strings.TrimSpace(expectedMD5Sum) != "" {
		var expectedMD5SumBytes []byte
		expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// pro-actively close the connection
			return ObjectMetadata{}, probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	// write object
	file, err := atomic.FileCreate(objectPath)
	if err != nil {
		return ObjectMetadata{}, probe.NewError(err)
	}
	h := md5.New()
	sh := sha256.New()
	mw := io.MultiWriter(file, h, sh)

	if size > 0 {
		_, err = io.CopyN(mw, data, size)
		if err != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(err)
		}
	} else {
		_, err = io.Copy(mw, data)
		if err != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(err)
		}
	}

	md5Sum := hex.EncodeToString(h.Sum(nil))
	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Object: object})
		}
	}
	sha256Sum := hex.EncodeToString(sh.Sum(nil))
	if signature != nil {
		ok, perr := signature.DoesSignatureMatch(sha256Sum)
		if perr != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, perr.Trace()
		}
		if !ok {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
		}
	}
	file.File.Sync()
	file.Close()

	st, err := os.Stat(objectPath)
	if err != nil {
		return ObjectMetadata{}, probe.NewError(err)
	}
	newObject := ObjectMetadata{
		Bucket:      bucket,
		Object:      object,
		Created:     st.ModTime(),
		Size:        st.Size(),
		ContentType: "application/octet-stream",
		Md5:         md5Sum,
	}
	return newObject, nil
}
// readObjectData - read object data from its readers, verify checksums and stream the result to the pipe writer.
func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
	readers, err := b.getObjectReaders(objectName, "data")
	if err != nil {
		writer.CloseWithError(probe.WrapError(err))
		return
	}
	for _, reader := range readers {
		defer reader.Close()
	}

	var expected512Sum, expectedMd5sum []byte
	{
		var err error
		expectedMd5sum, err = hex.DecodeString(objMetadata.MD5Sum)
		if err != nil {
			writer.CloseWithError(probe.WrapError(probe.NewError(err)))
			return
		}
		expected512Sum, err = hex.DecodeString(objMetadata.SHA512Sum)
		if err != nil {
			writer.CloseWithError(probe.WrapError(probe.NewError(err)))
			return
		}
	}

	hasher := md5.New()
	// Use sha512 here so the sum can be compared against the stored SHA512Sum.
	sum512hasher := sha512.New()
	mwriter := io.MultiWriter(writer, hasher, sum512hasher)

	switch len(readers) > 1 {
	case true:
		encoder, err := newEncoder(objMetadata.DataDisks, objMetadata.ParityDisks)
		if err != nil {
			writer.CloseWithError(probe.WrapError(err))
			return
		}
		totalLeft := objMetadata.Size
		for i := 0; i < objMetadata.ChunkCount; i++ {
			decodedData, err := b.decodeEncodedData(totalLeft, int64(objMetadata.BlockSize), readers, encoder, writer)
			if err != nil {
				writer.CloseWithError(probe.WrapError(err))
				return
			}
			if _, err := io.Copy(mwriter, bytes.NewReader(decodedData)); err != nil {
				writer.CloseWithError(probe.WrapError(probe.NewError(err)))
				return
			}
			totalLeft = totalLeft - int64(objMetadata.BlockSize)
		}
	case false:
		// Copy through the multiwriter so the checksums are computed as well.
		_, err := io.Copy(mwriter, readers[0])
		if err != nil {
			writer.CloseWithError(probe.WrapError(probe.NewError(err)))
			return
		}
	}

	// check if decodedData md5sum matches
	if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
		writer.CloseWithError(probe.WrapError(probe.NewError(ChecksumMismatch{})))
		return
	}
	if !bytes.Equal(expected512Sum, sum512hasher.Sum(nil)) {
		writer.CloseWithError(probe.WrapError(probe.NewError(ChecksumMismatch{})))
		return
	}
	writer.Close()
	return
}
// WriteObject - write a new object into bucket
func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
	b.lock.Lock()
	defer b.lock.Unlock()
	if objectName == "" || objectData == nil {
		return ObjectMetadata{}, probe.NewError(InvalidArgument{})
	}
	writers, err := b.getObjectWriters(normalizeObjectName(objectName), "data")
	if err != nil {
		return ObjectMetadata{}, err.Trace()
	}
	sumMD5 := md5.New()
	sum512 := sha512.New()
	var sum256 hash.Hash
	var mwriter io.Writer

	if signature != nil {
		sum256 = sha256.New()
		mwriter = io.MultiWriter(sumMD5, sum256, sum512)
	} else {
		mwriter = io.MultiWriter(sumMD5, sum512)
	}
	objMetadata := ObjectMetadata{}
	objMetadata.Version = objectMetadataVersion
	objMetadata.Created = time.Now().UTC()
	// if total writers are only '1' do not compute erasure
	switch len(writers) == 1 {
	case true:
		mw := io.MultiWriter(writers[0], mwriter)
		totalLength, err := io.Copy(mw, objectData)
		if err != nil {
			CleanupWritersOnError(writers)
			return ObjectMetadata{}, probe.NewError(err)
		}
		objMetadata.Size = totalLength
	case false:
		// calculate data and parity dictated by total number of writers
		k, m, err := b.getDataAndParity(len(writers))
		if err != nil {
			CleanupWritersOnError(writers)
			return ObjectMetadata{}, err.Trace()
		}
		// write encoded data with k, m and writers
		chunkCount, totalLength, err := b.writeObjectData(k, m, writers, objectData, size, mwriter)
		if err != nil {
			CleanupWritersOnError(writers)
			return ObjectMetadata{}, err.Trace()
		}
		/// donutMetadata section
		objMetadata.BlockSize = blockSize
		objMetadata.ChunkCount = chunkCount
		objMetadata.DataDisks = k
		objMetadata.ParityDisks = m
		objMetadata.Size = int64(totalLength)
	}
	objMetadata.Bucket = b.getBucketName()
	objMetadata.Object = objectName
	dataMD5sum := sumMD5.Sum(nil)
	dataSHA512sum := sum512.Sum(nil)
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sum256.Sum(nil)))
		if err != nil {
			// error occurred while doing signature calculation, we return and also cleanup any temporary writers.
			CleanupWritersOnError(writers)
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			// purge all writers, when control flow reaches here
			//
			// Signature mismatch occurred all temp files to be removed and all data purged.
			CleanupWritersOnError(writers)
			return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
		}
	}
	objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
	objMetadata.SHA512Sum = hex.EncodeToString(dataSHA512sum)

	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objMetadata.MD5Sum); err != nil {
			return ObjectMetadata{}, err.Trace()
		}
	}
	objMetadata.Metadata = metadata
	// write object specific metadata
	if err := b.writeObjectMetadata(normalizeObjectName(objectName), objMetadata); err != nil {
		// purge all writers, when control flow reaches here
		CleanupWritersOnError(writers)
		return ObjectMetadata{}, err.Trace()
	}
	// close all writers, when control flow reaches here
	for _, writer := range writers {
		writer.Close()
	}
	return objMetadata, nil
}
// createObject - PUT object to cache buffer
func (xl API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
	if len(xl.config.NodeDiskMap) == 0 {
		if size > int64(xl.config.MaxSize) {
			generic := GenericObjectError{Bucket: bucket, Object: key}
			return ObjectMetadata{}, probe.NewError(EntityTooLarge{
				GenericObjectError: generic,
				Size:               strconv.FormatInt(size, 10),
				MaxSize:            strconv.FormatUint(xl.config.MaxSize, 10),
			})
		}
	}
	if !IsValidBucket(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(key) {
		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
	}
	if !xl.storedBuckets.Exists(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
	}
	storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
	// get object key
	objectKey := bucket + "/" + key
	if _, ok := storedBucket.objectMetadata[objectKey]; ok {
		return ObjectMetadata{}, probe.NewError(ObjectExists{Object: key})
	}

	if contentType == "" {
		contentType = "application/octet-stream"
	}
	contentType = strings.TrimSpace(contentType)
	if strings.TrimSpace(expectedMD5Sum) != "" {
		expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// pro-actively close the connection
			return ObjectMetadata{}, probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	if len(xl.config.NodeDiskMap) > 0 {
		objMetadata, err := xl.putObject(
			bucket,
			key,
			expectedMD5Sum,
			data,
			size,
			map[string]string{
				"contentType":   contentType,
				"contentLength": strconv.FormatInt(size, 10),
			},
			signature,
		)
		if err != nil {
			return ObjectMetadata{}, err.Trace()
		}
		storedBucket.objectMetadata[objectKey] = objMetadata
		xl.storedBuckets.Set(bucket, storedBucket)
		return objMetadata, nil
	}

	// calculate md5
	hash := md5.New()
	sha256hash := sha256.New()

	var err error
	var totalLength int64
	for err == nil {
		var length int
		byteBuffer := make([]byte, 1024*1024)
		length, err = data.Read(byteBuffer)
		if length != 0 {
			hash.Write(byteBuffer[0:length])
			sha256hash.Write(byteBuffer[0:length])
			ok := xl.objects.Append(objectKey, byteBuffer[0:length])
			if !ok {
				return ObjectMetadata{}, probe.NewError(InternalError{})
			}
			totalLength += int64(length)
			go debug.FreeOSMemory()
		}
	}
	if size != 0 {
		if totalLength != size {
			// Delete perhaps the object is already saved, due to the nature of append()
			xl.objects.Delete(objectKey)
			return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
		}
	}
	if err != io.EOF {
		return ObjectMetadata{}, probe.NewError(err)
	}
	md5SumBytes := hash.Sum(nil)
	md5Sum := hex.EncodeToString(md5SumBytes)
	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
			// Delete perhaps the object is already saved, due to the nature of append()
			xl.objects.Delete(objectKey)
			return ObjectMetadata{}, probe.NewError(BadDigest{})
		}
	}
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
		if err != nil {
			// Delete perhaps the object is already saved, due to the nature of append()
			xl.objects.Delete(objectKey)
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			// Delete perhaps the object is already saved, due to the nature of append()
			xl.objects.Delete(objectKey)
			return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
		}
	}

	m := make(map[string]string)
	m["contentType"] = contentType
	newObject := ObjectMetadata{
		Bucket:   bucket,
		Object:   key,
		Metadata: m,
		Created:  time.Now().UTC(),
		MD5Sum:   md5Sum,
		Size:     int64(totalLength),
	}
	storedBucket.objectMetadata[objectKey] = newObject
	xl.storedBuckets.Set(bucket, storedBucket)
	return newObject, nil
}
// CreateObject - create an object.
func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) {
	di, e := disk.GetInfo(fs.path)
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}

	// Remove 5% from total space for cumulative disk space used for
	// journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return ObjectMetadata{}, probe.NewError(RootPathFull{Path: fs.path})
	}

	// Check bucket name valid.
	if !IsValidBucketName(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e = os.Stat(bucketPath); e != nil {
		if os.IsNotExist(e) {
			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectMetadata{}, probe.NewError(e)
	}

	// Verify object path legal.
	if !IsValidObjectName(object) {
		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Get object path.
	objectPath := filepath.Join(bucketPath, object)
	if strings.TrimSpace(expectedMD5Sum) != "" {
		var expectedMD5SumBytes []byte
		expectedMD5SumBytes, e = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if e != nil {
			// Pro-actively close the connection.
			return ObjectMetadata{}, probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	// Write object.
	file, e := atomic.FileCreateWithPrefix(objectPath, "$tmpobject")
	if e != nil {
		switch e := e.(type) {
		case *os.PathError:
			if e.Op == "mkdir" {
				if strings.Contains(e.Error(), "not a directory") {
					return ObjectMetadata{}, probe.NewError(ObjectExistsAsPrefix{Bucket: bucket, Prefix: object})
				}
			}
			return ObjectMetadata{}, probe.NewError(e)
		default:
			return ObjectMetadata{}, probe.NewError(e)
		}
	}

	// Instantiate checksum hashers and create a multiwriter.
	md5Hasher := md5.New()
	sha256Hasher := sha256.New()
	objectWriter := io.MultiWriter(file, md5Hasher, sha256Hasher)

	if size > 0 {
		if _, e = io.CopyN(objectWriter, data, size); e != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(e)
		}
	} else {
		if _, e = io.Copy(objectWriter, data); e != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(e)
		}
	}

	md5Sum := hex.EncodeToString(md5Hasher.Sum(nil))
	// Verify if the written object is equal to what is expected, only
	// if it is requested as such.
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum) {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
		}
	}
	sha256Sum := hex.EncodeToString(sha256Hasher.Sum(nil))
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(sha256Sum)
		if err != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{})
		}
	}
	file.Close()

	// Stat again to get the latest object metadata.
	st, e := os.Stat(objectPath)
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}
	if e = fs.checksyncstatus(objectPath); e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}

	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(objectPath); objectExt != "" {
		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
		if ok {
			contentType = content.ContentType
		}
	}
	newObject := ObjectMetadata{
		Bucket:      bucket,
		Object:      object,
		Created:     st.ModTime(),
		Size:        st.Size(),
		ContentType: contentType,
		MD5:         md5Sum,
	}
	return newObject, nil
}