// MakeBucket - create bucket in cache
func (xl API) MakeBucket(bucketName, acl string, location io.Reader, signature *signV4.Signature) *probe.Error {
	xl.lock.Lock()
	defer xl.lock.Unlock()

	// We do not need to parse the location constraint; it is read here only for signature verification.
	locationSum := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	if location != nil {
		locationConstraintBytes, err := ioutil.ReadAll(location)
		if err != nil {
			return probe.NewError(InternalError{})
		}
		locationConstraintHashBytes := sha256.Sum256(locationConstraintBytes)
		locationSum = hex.EncodeToString(locationConstraintHashBytes[:])
	}

	if signature != nil {
		ok, err := signature.DoesSignatureMatch(locationSum)
		if err != nil {
			return err.Trace()
		}
		if !ok {
			return probe.NewError(SignDoesNotMatch{})
		}
	}

	if xl.storedBuckets.Stats().Items == totalBuckets {
		return probe.NewError(TooManyBuckets{Bucket: bucketName})
	}
	if !IsValidBucket(bucketName) {
		return probe.NewError(BucketNameInvalid{Bucket: bucketName})
	}
	if !IsValidBucketACL(acl) {
		return probe.NewError(InvalidACL{ACL: acl})
	}
	if xl.storedBuckets.Exists(bucketName) {
		return probe.NewError(BucketExists{Bucket: bucketName})
	}

	if strings.TrimSpace(acl) == "" {
		// default is private
		acl = "private"
	}
	if len(xl.config.NodeDiskMap) > 0 {
		if err := xl.makeBucket(bucketName, BucketACL(acl)); err != nil {
			return err.Trace()
		}
	}
	var newBucket = storedBucket{}
	newBucket.objectMetadata = make(map[string]ObjectMetadata)
	newBucket.multiPartSession = make(map[string]MultiPartSession)
	newBucket.partMetadata = make(map[string]map[int]PartMetadata)
	newBucket.bucketMetadata = BucketMetadata{}
	newBucket.bucketMetadata.Name = bucketName
	newBucket.bucketMetadata.Created = time.Now().UTC()
	newBucket.bucketMetadata.ACL = BucketACL(acl)
	xl.storedBuckets.Set(bucketName, newBucket)
	return nil
}
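// Illustrative aside (not part of the original source, a minimal sketch): the
// hard-coded locationSum above is simply the SHA-256 of an empty payload, i.e.
// what a Signature V4 request covers when no location constraint body is sent.
// A self-contained check:
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	emptySum := sha256.Sum256([]byte(""))
	fmt.Println(hex.EncodeToString(emptySum[:]))
	// Prints: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}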
// completeMultipartUploadV2 - validate and complete an incomplete multipart upload,
// returning a reader over the merged object.
func (xl API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *signV4.Signature) (io.Reader, *probe.Error) {
	if !IsValidBucket(bucket) {
		return nil, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(key) {
		return nil, probe.NewError(ObjectNameInvalid{Object: key})
	}

	// TODO: multipart support for xl is broken, since we haven't finalized the format in which
	//       it can be stored, disabling this for now until we get the underlying layout stable.
	//
	// if len(xl.config.NodeDiskMap) > 0 {
	//	xl.lock.Unlock()
	//	return xl.completeMultipartUpload(bucket, key, uploadID, data, signature)
	// }

	if !xl.storedBuckets.Exists(bucket) {
		return nil, probe.NewError(BucketNotFound{Bucket: bucket})
	}
	storedBucket := xl.storedBuckets.Get(bucket).(storedBucket)
	// Verify upload id
	if storedBucket.multiPartSession[key].UploadID != uploadID {
		return nil, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	partBytes, err := ioutil.ReadAll(data)
	if err != nil {
		return nil, probe.NewError(err)
	}
	if signature != nil {
		partHashBytes := sha256.Sum256(partBytes)
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(partHashBytes[:]))
		if err != nil {
			return nil, err.Trace()
		}
		if !ok {
			return nil, probe.NewError(signV4.SigDoesNotMatch{})
		}
	}
	parts := &CompleteMultipartUpload{}
	if err := xml.Unmarshal(partBytes, parts); err != nil {
		return nil, probe.NewError(MalformedXML{})
	}
	if !sort.IsSorted(completedParts(parts.Part)) {
		return nil, probe.NewError(InvalidPartOrder{})
	}

	fullObjectReader, fullObjectWriter := io.Pipe()
	go xl.mergeMultipart(parts, uploadID, fullObjectWriter)

	return fullObjectReader, nil
}
// Verify if request has valid AWS Signature Version '4'.
func isSignV4ReqAuthenticated(sign *signV4.Signature, r *http.Request) bool {
	auth := sign.SetHTTPRequestToVerify(r)
	if isRequestSignatureV4(r) {
		dummyPayload := sha256.Sum256([]byte(""))
		ok, err := auth.DoesSignatureMatch(hex.EncodeToString(dummyPayload[:]))
		if err != nil {
			errorIf(err.Trace(), "Signature verification failed.", nil)
			return false
		}
		return ok
	} else if isRequestPresignedSignatureV4(r) {
		ok, err := auth.DoesPresignedSignatureMatch()
		if err != nil {
			errorIf(err.Trace(), "Presigned signature verification failed.", nil)
			return false
		}
		return ok
	}
	return false
}
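// Minimal usage sketch (not in the original source): a hypothetical middleware that
// gates a handler on isSignV4ReqAuthenticated. The wrapper name is an assumption;
// writeErrorResponse and SignatureDoesNotMatch follow the conventions used by the
// handlers later in this section.
func requireSignV4(sign *signV4.Signature, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if !isSignV4ReqAuthenticated(sign, r) {
			// Reject requests whose V4 (or presigned V4) signature does not verify.
			writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
			return
		}
		next(w, r)
	}
}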
// completeMultipartUpload - complete an incomplete multipart upload
func (xl API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) {
	if bucket == "" || strings.TrimSpace(bucket) == "" {
		return ObjectMetadata{}, probe.NewError(InvalidArgument{})
	}
	if object == "" || strings.TrimSpace(object) == "" {
		return ObjectMetadata{}, probe.NewError(InvalidArgument{})
	}
	if err := xl.listXLBuckets(); err != nil {
		return ObjectMetadata{}, err.Trace()
	}
	if _, ok := xl.buckets[bucket]; !ok {
		return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
	}
	allBuckets, err := xl.getXLBucketMetadata()
	if err != nil {
		return ObjectMetadata{}, err.Trace()
	}
	bucketMetadata := allBuckets.Buckets[bucket]
	if _, ok := bucketMetadata.Multiparts[object]; !ok {
		return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}
	if bucketMetadata.Multiparts[object].UploadID != uploadID {
		return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	var partBytes []byte
	{
		var err error
		partBytes, err = ioutil.ReadAll(data)
		if err != nil {
			return ObjectMetadata{}, probe.NewError(err)
		}
	}
	if signature != nil {
		partHashBytes := sha256.Sum256(partBytes)
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(partHashBytes[:]))
		if err != nil {
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
		}
	}
	parts := &CompleteMultipartUpload{}
	if err := xml.Unmarshal(partBytes, parts); err != nil {
		return ObjectMetadata{}, probe.NewError(MalformedXML{})
	}
	if !sort.IsSorted(completedParts(parts.Part)) {
		return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
	}
	for _, part := range parts.Part {
		if strings.Trim(part.ETag, "\"") != bucketMetadata.Multiparts[object].Parts[strconv.Itoa(part.PartNumber)].ETag {
			return ObjectMetadata{}, probe.NewError(InvalidPart{})
		}
	}

	var finalETagBytes []byte
	var finalSize int64
	totalParts := strconv.Itoa(bucketMetadata.Multiparts[object].TotalParts)
	for _, part := range bucketMetadata.Multiparts[object].Parts {
		partETagBytes, err := hex.DecodeString(part.ETag)
		if err != nil {
			return ObjectMetadata{}, probe.NewError(err)
		}
		finalETagBytes = append(finalETagBytes, partETagBytes...)
		finalSize += part.Size
	}
	finalETag := hex.EncodeToString(finalETagBytes)
	objMetadata := ObjectMetadata{}
	objMetadata.MD5Sum = finalETag + "-" + totalParts
	objMetadata.Object = object
	objMetadata.Bucket = bucket
	objMetadata.Size = finalSize
	objMetadata.Created = bucketMetadata.Multiparts[object].Parts[totalParts].LastModified
	return objMetadata, nil
}
// WriteObject - write a new object into bucket
func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
	b.lock.Lock()
	defer b.lock.Unlock()
	if objectName == "" || objectData == nil {
		return ObjectMetadata{}, probe.NewError(InvalidArgument{})
	}
	writers, err := b.getObjectWriters(normalizeObjectName(objectName), "data")
	if err != nil {
		return ObjectMetadata{}, err.Trace()
	}
	sumMD5 := md5.New()
	sum512 := sha512.New()
	var sum256 hash.Hash
	var mwriter io.Writer

	if signature != nil {
		sum256 = sha256.New()
		mwriter = io.MultiWriter(sumMD5, sum256, sum512)
	} else {
		mwriter = io.MultiWriter(sumMD5, sum512)
	}

	objMetadata := ObjectMetadata{}
	objMetadata.Version = objectMetadataVersion
	objMetadata.Created = time.Now().UTC()

	// if total writers are only '1' do not compute erasure
	switch len(writers) == 1 {
	case true:
		mw := io.MultiWriter(writers[0], mwriter)
		totalLength, err := io.Copy(mw, objectData)
		if err != nil {
			CleanupWritersOnError(writers)
			return ObjectMetadata{}, probe.NewError(err)
		}
		objMetadata.Size = totalLength
	case false:
		// calculate data and parity dictated by total number of writers
		k, m, err := b.getDataAndParity(len(writers))
		if err != nil {
			CleanupWritersOnError(writers)
			return ObjectMetadata{}, err.Trace()
		}
		// write encoded data with k, m and writers
		chunkCount, totalLength, err := b.writeObjectData(k, m, writers, objectData, size, mwriter)
		if err != nil {
			CleanupWritersOnError(writers)
			return ObjectMetadata{}, err.Trace()
		}
		/// donutMetadata section
		objMetadata.BlockSize = blockSize
		objMetadata.ChunkCount = chunkCount
		objMetadata.DataDisks = k
		objMetadata.ParityDisks = m
		objMetadata.Size = int64(totalLength)
	}
	objMetadata.Bucket = b.getBucketName()
	objMetadata.Object = objectName
	dataMD5sum := sumMD5.Sum(nil)
	dataSHA512sum := sum512.Sum(nil)
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sum256.Sum(nil)))
		if err != nil {
			// An error occurred during signature calculation; return and clean up any temporary writers.
			CleanupWritersOnError(writers)
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			// Signature mismatch occurred; all temporary files are removed and all data purged.
			CleanupWritersOnError(writers)
			return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
		}
	}
	objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
	objMetadata.SHA512Sum = hex.EncodeToString(dataSHA512sum)

	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objMetadata.MD5Sum); err != nil {
			return ObjectMetadata{}, err.Trace()
		}
	}
	objMetadata.Metadata = metadata
	// write object specific metadata
	if err := b.writeObjectMetadata(normalizeObjectName(objectName), objMetadata); err != nil {
		// purge all writers, when control flow reaches here
		CleanupWritersOnError(writers)
		return ObjectMetadata{}, err.Trace()
	}
	// close all writers, when control flow reaches here
	for _, writer := range writers {
		writer.Close()
	}
	return objMetadata, nil
}
// createObject - PUT object to cache buffer
func (donut API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
	if len(donut.config.NodeDiskMap) == 0 {
		if size > int64(donut.config.MaxSize) {
			generic := GenericObjectError{Bucket: bucket, Object: key}
			return ObjectMetadata{}, probe.NewError(EntityTooLarge{
				GenericObjectError: generic,
				Size:               strconv.FormatInt(size, 10),
				MaxSize:            strconv.FormatUint(donut.config.MaxSize, 10),
			})
		}
	}
	if !IsValidBucket(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(key) {
		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key})
	}
	if !donut.storedBuckets.Exists(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
	}
	storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
	// get object key
	objectKey := bucket + "/" + key
	if _, ok := storedBucket.objectMetadata[objectKey]; ok {
		return ObjectMetadata{}, probe.NewError(ObjectExists{Object: key})
	}

	if contentType == "" {
		contentType = "application/octet-stream"
	}
	contentType = strings.TrimSpace(contentType)
	if strings.TrimSpace(expectedMD5Sum) != "" {
		expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// pro-actively close the connection
			return ObjectMetadata{}, probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	if len(donut.config.NodeDiskMap) > 0 {
		objMetadata, err := donut.putObject(
			bucket,
			key,
			expectedMD5Sum,
			data,
			size,
			map[string]string{
				"contentType":   contentType,
				"contentLength": strconv.FormatInt(size, 10),
			},
			signature,
		)
		if err != nil {
			return ObjectMetadata{}, err.Trace()
		}
		storedBucket.objectMetadata[objectKey] = objMetadata
		donut.storedBuckets.Set(bucket, storedBucket)
		return objMetadata, nil
	}

	// calculate md5
	hash := md5.New()
	sha256hash := sha256.New()

	var err error
	var totalLength int64
	for err == nil {
		var length int
		byteBuffer := make([]byte, 1024*1024)
		length, err = data.Read(byteBuffer)
		if length != 0 {
			hash.Write(byteBuffer[0:length])
			sha256hash.Write(byteBuffer[0:length])
			ok := donut.objects.Append(objectKey, byteBuffer[0:length])
			if !ok {
				return ObjectMetadata{}, probe.NewError(InternalError{})
			}
			totalLength += int64(length)
			go debug.FreeOSMemory()
		}
	}
	if size != 0 {
		if totalLength != size {
			// Delete the object; due to the nature of append() it may already be partially saved.
			donut.objects.Delete(objectKey)
			return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
		}
	}
	if err != io.EOF {
		return ObjectMetadata{}, probe.NewError(err)
	}
	md5SumBytes := hash.Sum(nil)
	md5Sum := hex.EncodeToString(md5SumBytes)
	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
			// Delete the object; due to the nature of append() it may already be partially saved.
			donut.objects.Delete(objectKey)
			return ObjectMetadata{}, probe.NewError(BadDigest{})
		}
	}
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
		if err != nil {
			// Delete the object; due to the nature of append() it may already be partially saved.
			donut.objects.Delete(objectKey)
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			// Delete the object; due to the nature of append() it may already be partially saved.
			donut.objects.Delete(objectKey)
			return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
		}
	}

	m := make(map[string]string)
	m["contentType"] = contentType
	newObject := ObjectMetadata{
		Bucket:   bucket,
		Object:   key,
		Metadata: m,
		Created:  time.Now().UTC(),
		MD5Sum:   md5Sum,
		Size:     int64(totalLength),
	}

	storedBucket.objectMetadata[objectKey] = newObject
	donut.storedBuckets.Set(bucket, storedBucket)
	return newObject, nil
}
// CreateObject - create an object.
func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) {
	di, e := disk.GetInfo(fs.path)
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}

	// Remove 5% from total space for cumulative disk space used for
	// journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return ObjectMetadata{}, probe.NewError(RootPathFull{Path: fs.path})
	}

	// Check bucket name valid.
	if !IsValidBucketName(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e = os.Stat(bucketPath); e != nil {
		if os.IsNotExist(e) {
			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectMetadata{}, probe.NewError(e)
	}

	// Verify object path legal.
	if !IsValidObjectName(object) {
		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Get object path.
	objectPath := filepath.Join(bucketPath, object)
	if strings.TrimSpace(expectedMD5Sum) != "" {
		var expectedMD5SumBytes []byte
		expectedMD5SumBytes, e = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if e != nil {
			// Pro-actively close the connection.
			return ObjectMetadata{}, probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	// Write object.
	file, e := atomic.FileCreateWithPrefix(objectPath, "$tmpobject")
	if e != nil {
		switch e := e.(type) {
		case *os.PathError:
			if e.Op == "mkdir" {
				if strings.Contains(e.Error(), "not a directory") {
					return ObjectMetadata{}, probe.NewError(ObjectExistsAsPrefix{Bucket: bucket, Prefix: object})
				}
			}
			return ObjectMetadata{}, probe.NewError(e)
		default:
			return ObjectMetadata{}, probe.NewError(e)
		}
	}

	// Instantiate checksum hashers and create a multiwriter.
	md5Hasher := md5.New()
	sha256Hasher := sha256.New()
	objectWriter := io.MultiWriter(file, md5Hasher, sha256Hasher)

	if size > 0 {
		if _, e = io.CopyN(objectWriter, data, size); e != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(e)
		}
	} else {
		if _, e = io.Copy(objectWriter, data); e != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(e)
		}
	}

	md5Sum := hex.EncodeToString(md5Hasher.Sum(nil))
	// Verify if the written object is equal to what is expected, only
	// if it is requested as such.
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum) {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
		}
	}
	sha256Sum := hex.EncodeToString(sha256Hasher.Sum(nil))
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(sha256Sum)
		if err != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
		}
	}
	file.Close()

	// Set stat again to get the latest metadata.
	st, e := os.Stat(objectPath)
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}
	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(objectPath); objectExt != "" {
		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
		if ok {
			contentType = content.ContentType
		}
	}
	newObject := ObjectMetadata{
		Bucket:      bucket,
		Object:      object,
		Created:     st.ModTime(),
		Size:        st.Size(),
		ContentType: contentType,
		MD5:         md5Sum,
	}
	return newObject, nil
}
// CompleteMultipartUpload - complete a multipart upload and persist the data
func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) {
	// Check bucket name is valid.
	if !IsValidBucketName(bucket) {
		return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	// Verify object path is legal.
	if !IsValidObjectName(object) {
		return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Verify if valid upload for incoming object.
	if !fs.isValidUploadID(object, uploadID) {
		return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, e := os.Stat(bucketPath); e != nil {
		// Check bucket exists.
		if os.IsNotExist(e) {
			return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return ObjectMetadata{}, probe.NewError(InternalError{})
	}

	objectPath := filepath.Join(bucketPath, object)
	file, e := atomic.FileCreateWithPrefix(objectPath, "$tmpobject")
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}
	md5Hasher := md5.New()
	objectWriter := io.MultiWriter(file, md5Hasher)

	partBytes, e := ioutil.ReadAll(data)
	if e != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(e)
	}
	if signature != nil {
		sh := sha256.New()
		sh.Write(partBytes)
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
		if err != nil {
			file.CloseAndPurge()
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			file.CloseAndPurge()
			return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
		}
	}
	completeMultipartUpload := &CompleteMultipartUpload{}
	if e := xml.Unmarshal(partBytes, completeMultipartUpload); e != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(MalformedXML{})
	}
	if !sort.IsSorted(completedParts(completeMultipartUpload.Part)) {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
	}

	// Save parts for verification.
	parts := completeMultipartUpload.Part

	// Critical region requiring read lock.
	fs.rwLock.RLock()
	savedParts := fs.multiparts.ActiveSession[uploadID].Parts
	fs.rwLock.RUnlock()

	if !doPartsMatch(parts, savedParts) {
		file.CloseAndPurge()
		return ObjectMetadata{}, probe.NewError(InvalidPart{})
	}

	// Parts successfully validated, save all the parts.
	partPathPrefix := objectPath + uploadID
	if err := saveParts(partPathPrefix, objectWriter, parts); err != nil {
		file.CloseAndPurge()
		return ObjectMetadata{}, err.Trace(partPathPrefix)
	}
	// Successfully saved, remove all parts.
	removeParts(partPathPrefix, savedParts)

	// Critical region requiring write lock.
	fs.rwLock.Lock()
	delete(fs.multiparts.ActiveSession, uploadID)
	if err := saveMultipartsSession(*fs.multiparts); err != nil {
		fs.rwLock.Unlock()
		file.CloseAndPurge()
		return ObjectMetadata{}, err.Trace(partPathPrefix)
	}
	file.Close()
	fs.rwLock.Unlock()

	// Stat again to get the latest object metadata.
	st, e := os.Stat(objectPath)
	if e != nil {
		return ObjectMetadata{}, probe.NewError(e)
	}
	contentType := "application/octet-stream"
	if objectExt := filepath.Ext(objectPath); objectExt != "" {
		content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
		if ok {
			contentType = content.ContentType
		}
	}
	newObject := ObjectMetadata{
		Bucket:      bucket,
		Object:      object,
		Created:     st.ModTime(),
		Size:        st.Size(),
		ContentType: contentType,
		MD5:         hex.EncodeToString(md5Hasher.Sum(nil)),
	}
	return newObject, nil
}
// CreateObjectPart - create a part in a multipart session
func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string, partID int, size int64, data io.Reader, signature *signV4.Signature) (string, *probe.Error) {
	di, err := disk.GetInfo(fs.path)
	if err != nil {
		return "", probe.NewError(err)
	}

	// Remove 5% from total space for cumulative disk space used for
	// journalling, inodes etc.
	availableDiskSpace := (float64(di.Free) / (float64(di.Total) - (0.05 * float64(di.Total)))) * 100
	if int64(availableDiskSpace) <= fs.minFreeDisk {
		return "", probe.NewError(RootPathFull{Path: fs.path})
	}

	// Part id cannot be zero or negative.
	if partID <= 0 {
		return "", probe.NewError(errors.New("invalid part id, cannot be zero or less than zero"))
	}

	// Check bucket name valid.
	if !IsValidBucketName(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	// Verify object path legal.
	if !IsValidObjectName(object) {
		return "", probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: object})
	}

	// Verify upload is valid for the incoming object.
	if !fs.isValidUploadID(object, uploadID) {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	if strings.TrimSpace(expectedMD5Sum) != "" {
		var expectedMD5SumBytes []byte
		expectedMD5SumBytes, err = base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// Pro-actively close the connection
			return "", probe.NewError(InvalidDigest{MD5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	bucket = fs.denormalizeBucket(bucket)
	bucketPath := filepath.Join(fs.path, bucket)
	if _, err = os.Stat(bucketPath); err != nil {
		// Check bucket exists.
		if os.IsNotExist(err) {
			return "", probe.NewError(BucketNotFound{Bucket: bucket})
		}
		return "", probe.NewError(err)
	}

	objectPath := filepath.Join(bucketPath, object)
	partPathPrefix := objectPath + uploadID
	partPath := partPathPrefix + expectedMD5Sum + fmt.Sprintf("$%d-$multiparts", partID)
	partFile, e := atomic.FileCreateWithPrefix(partPath, "$multiparts")
	if e != nil {
		return "", probe.NewError(e)
	}
	md5Hasher := md5.New()
	sha256Hasher := sha256.New()
	partWriter := io.MultiWriter(partFile, md5Hasher, sha256Hasher)
	if _, e = io.CopyN(partWriter, data, size); e != nil {
		partFile.CloseAndPurge()
		return "", probe.NewError(e)
	}

	md5sum := hex.EncodeToString(md5Hasher.Sum(nil))
	// Verify if the written object is equal to what is expected, only
	// if it is requested as such.
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if !isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5sum) {
			partFile.CloseAndPurge()
			return "", probe.NewError(BadDigest{MD5: expectedMD5Sum, Bucket: bucket, Object: object})
		}
	}
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256Hasher.Sum(nil)))
		if err != nil {
			partFile.CloseAndPurge()
			return "", err.Trace()
		}
		if !ok {
			partFile.CloseAndPurge()
			return "", probe.NewError(signV4.SigDoesNotMatch{})
		}
	}
	partFile.Close()

	fi, e := os.Stat(partPath)
	if e != nil {
		return "", probe.NewError(e)
	}
	partMetadata := PartMetadata{}
	partMetadata.ETag = md5sum
	partMetadata.PartNumber = partID
	partMetadata.Size = fi.Size()
	partMetadata.LastModified = fi.ModTime()

	// Critical region requiring read lock.
	fs.rwLock.RLock()
	deserializedMultipartSession, ok := fs.multiparts.ActiveSession[uploadID]
	fs.rwLock.RUnlock()
	if !ok {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	// Add all incoming parts.
	deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, partMetadata)

	// Remove duplicate parts, keeping the most recently uploaded.
	deserializedMultipartSession.Parts = removeDuplicateParts(deserializedMultipartSession.Parts)
	// Save total parts uploaded.
	deserializedMultipartSession.TotalParts = len(deserializedMultipartSession.Parts)

	// Sort by part number before saving.
	sort.Sort(partNumber(deserializedMultipartSession.Parts))

	// Critical region requiring write lock.
	fs.rwLock.Lock()
	fs.multiparts.ActiveSession[uploadID] = deserializedMultipartSession
	if err := saveMultipartsSession(*fs.multiparts); err != nil {
		fs.rwLock.Unlock()
		return "", err.Trace(partPathPrefix)
	}
	fs.rwLock.Unlock()

	// Return etag.
	return partMetadata.ETag, nil
}
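// Plausible shape of the partNumber sort helper used above (an assumption, not the
// original definition): a sort.Interface over []PartMetadata ordered by PartNumber,
// which is what sort.Sort and sort.IsSorted rely on in this section.
type partNumber []PartMetadata

func (a partNumber) Len() int           { return len(a) }
func (a partNumber) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }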
// ServeHTTP - verify AWS Signature Version '4' (or presigned V4) before passing the request on.
func (s signatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var signature *signv4.Signature
	if isRequestSignatureV4(r) {
		// For PUT and POST requests with payload, send the call upwards for verification.
		// For PUT and POST requests without payload, verify here.
		if (r.Body == nil && (r.Method == "PUT" || r.Method == "POST")) || (r.Method != "PUT" && r.Method != "POST") {
			// Init signature V4 verification
			var err *probe.Error
			signature, err = initSignatureV4(r)
			if err != nil {
				switch err.ToGoError() {
				case errInvalidRegion:
					errorIf(err.Trace(), "Unknown region in authorization header.", nil)
					writeErrorResponse(w, r, AuthorizationHeaderMalformed, r.URL.Path)
					return
				case errAccessKeyIDInvalid:
					errorIf(err.Trace(), "Invalid access key id.", nil)
					writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
					return
				default:
					errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
					writeErrorResponse(w, r, InternalError, r.URL.Path)
					return
				}
			}
			// Requests without a payload are verified against the SHA-256 of an empty body.
			emptyPayload := sha256.Sum256([]byte(""))
			ok, err := signature.DoesSignatureMatch(hex.EncodeToString(emptyPayload[:]))
			if err != nil {
				errorIf(err.Trace(), "Unable to verify signature.", nil)
				writeErrorResponse(w, r, InternalError, r.URL.Path)
				return
			}
			if !ok {
				writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
				return
			}
		}
		s.handler.ServeHTTP(w, r)
		return
	}
	if isRequestPresignedSignatureV4(r) {
		var err *probe.Error
		signature, err = initPresignedSignatureV4(r)
		if err != nil {
			switch err.ToGoError() {
			case errAccessKeyIDInvalid:
				errorIf(err.Trace(), "Invalid access key id requested.", nil)
				writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
				return
			default:
				errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
				writeErrorResponse(w, r, InternalError, r.URL.Path)
				return
			}
		}
		ok, err := signature.DoesPresignedSignatureMatch()
		if err != nil {
			errorIf(err.Trace(), "Unable to verify signature.", nil)
			writeErrorResponse(w, r, InternalError, r.URL.Path)
			return
		}
		if !ok {
			writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
			return
		}
		s.handler.ServeHTTP(w, r)
		return
	}
	writeErrorResponse(w, r, AccessDenied, r.URL.Path)
}
// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for an authenticated request
func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	bucket := vars["bucket"]

	if isRequestRequiresACLCheck(req) {
		writeErrorResponse(w, req, AccessDenied, req.URL.Path)
		return
	}

	// read from 'x-amz-acl'
	aclType := getACLType(req)
	if aclType == unsupportedACLType {
		writeErrorResponse(w, req, NotImplemented, req.URL.Path)
		return
	}

	var signature *v4.Signature
	// Init signature V4 verification
	if isRequestSignatureV4(req) {
		var err *probe.Error
		signature, err = initSignatureV4(req)
		if err != nil {
			switch err.ToGoError() {
			case errInvalidRegion:
				errorIf(err.Trace(), "Unknown region in authorization header.", nil)
				writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
				return
			case errAccessKeyIDInvalid:
				errorIf(err.Trace(), "Invalid access key id.", nil)
				writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
				return
			default:
				errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
				writeErrorResponse(w, req, InternalError, req.URL.Path)
				return
			}
		}
	}

	// if body of request is non-nil then check for validity of Content-Length
	if req.Body != nil {
		/// if Content-Length is unknown/missing, deny the request
		if req.ContentLength == -1 && !contains(req.TransferEncoding, "chunked") {
			writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
			return
		}
		if signature != nil {
			locationBytes, e := ioutil.ReadAll(req.Body)
			if e != nil {
				errorIf(probe.NewError(e), "MakeBucket failed.", nil)
				writeErrorResponse(w, req, InternalError, req.URL.Path)
				return
			}
			sh := sha256.New()
			sh.Write(locationBytes)
			ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
			if err != nil {
				errorIf(err.Trace(), "MakeBucket failed.", nil)
				writeErrorResponse(w, req, InternalError, req.URL.Path)
				return
			}
			if !ok {
				writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
				return
			}
		}
	}

	err := api.Filesystem.MakeBucket(bucket, getACLTypeString(aclType))
	if err != nil {
		errorIf(err.Trace(), "MakeBucket failed.", nil)
		switch err.ToGoError().(type) {
		case fs.BucketNameInvalid:
			writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
		case fs.BucketExists:
			writeErrorResponse(w, req, BucketAlreadyExists, req.URL.Path)
		default:
			writeErrorResponse(w, req, InternalError, req.URL.Path)
		}
		return
	}
	// Make sure to add Location information here only for bucket
	w.Header().Set("Location", "/"+bucket)
	writeSuccessResponse(w, nil)
}
// createObjectPart - internal wrapper function called by CreateObjectPart
func (xl API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signV4.Signature) (string, *probe.Error) {
	if !IsValidBucket(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(key) {
		return "", probe.NewError(ObjectNameInvalid{Object: key})
	}

	// TODO: multipart support for xl is broken, since we haven't finalized the format in which
	//       it can be stored, disabling this for now until we get the underlying layout stable.
	//
	/*
		if len(xl.config.NodeDiskMap) > 0 {
			metadata := make(map[string]string)
			if contentType == "" {
				contentType = "application/octet-stream"
			}
			contentType = strings.TrimSpace(contentType)
			metadata["contentType"] = contentType
			if strings.TrimSpace(expectedMD5Sum) != "" {
				expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
				if err != nil {
					// pro-actively close the connection
					return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
				}
				expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
			}
			partMetadata, err := xl.putObjectPart(bucket, key, expectedMD5Sum, uploadID, partID, data, size, metadata, signature)
			if err != nil {
				return "", err.Trace()
			}
			return partMetadata.ETag, nil
		}
	*/

	if !xl.storedBuckets.Exists(bucket) {
		return "", probe.NewError(BucketNotFound{Bucket: bucket})
	}
	strBucket := xl.storedBuckets.Get(bucket).(storedBucket)
	// Verify upload id
	if strBucket.multiPartSession[key].UploadID != uploadID {
		return "", probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	// get object key
	parts := strBucket.partMetadata[key]
	if _, ok := parts[partID]; ok {
		return parts[partID].ETag, nil
	}

	if contentType == "" {
		contentType = "application/octet-stream"
	}
	contentType = strings.TrimSpace(contentType)
	if strings.TrimSpace(expectedMD5Sum) != "" {
		expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum))
		if err != nil {
			// pro-actively close the connection
			return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum})
		}
		expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes)
	}

	// calculate md5
	hash := md5.New()
	sha256hash := sha256.New()

	var totalLength int64
	var err error
	for err == nil {
		var length int
		byteBuffer := make([]byte, 1024*1024)
		// do not return the read error here; it is handled after the loop
		length, err = data.Read(byteBuffer)
		if length != 0 {
			hash.Write(byteBuffer[0:length])
			sha256hash.Write(byteBuffer[0:length])
			ok := xl.multiPartObjects[uploadID].Append(partID, byteBuffer[0:length])
			if !ok {
				return "", probe.NewError(InternalError{})
			}
			totalLength += int64(length)
			go debug.FreeOSMemory()
		}
	}
	if totalLength != size {
		xl.multiPartObjects[uploadID].Delete(partID)
		return "", probe.NewError(IncompleteBody{Bucket: bucket, Object: key})
	}
	if err != io.EOF {
		return "", probe.NewError(err)
	}

	md5SumBytes := hash.Sum(nil)
	md5Sum := hex.EncodeToString(md5SumBytes)
	// Verify if the written object is equal to what is expected, only if it is requested as such
	if strings.TrimSpace(expectedMD5Sum) != "" {
		if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil {
			return "", err.Trace()
		}
	}
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil)))
		if err != nil {
			return "", err.Trace()
		}
		if !ok {
			return "", probe.NewError(signV4.SigDoesNotMatch{})
		}
	}

	newPart := PartMetadata{
		PartNumber:   partID,
		LastModified: time.Now().UTC(),
		ETag:         md5Sum,
		Size:         totalLength,
	}

	parts[partID] = newPart
	strBucket.partMetadata[key] = parts
	multiPartSession := strBucket.multiPartSession[key]
	multiPartSession.TotalParts++
	strBucket.multiPartSession[key] = multiPartSession
	xl.storedBuckets.Set(bucket, strBucket)
	return md5Sum, nil
}