// CreateObject creates a new object func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedMD5Sum string, size int64, reader io.Reader) (string, error) { errParams := map[string]string{ "bucketName": bucketName, "objectName": objectName, "contentType": contentType, } if d.donut == nil { return "", iodine.New(drivers.InternalError{}, errParams) } if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") { return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) } if !drivers.IsValidObjectName(objectName) || strings.TrimSpace(objectName) == "" { return "", iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil) } if strings.TrimSpace(contentType) == "" { contentType = "application/octet-stream" } metadata := make(map[string]string) metadata["contentType"] = strings.TrimSpace(contentType) metadata["contentLength"] = strconv.FormatInt(size, 10) if strings.TrimSpace(expectedMD5Sum) != "" { expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { return "", iodine.New(err, nil) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } calculatedMD5Sum, err := d.donut.PutObject(bucketName, objectName, expectedMD5Sum, ioutil.NopCloser(reader), metadata) if err != nil { return "", iodine.New(err, errParams) } return calculatedMD5Sum, nil }
// GetObjectMetadata retrieves an object's metadata func (d donutDriver) GetObjectMetadata(bucketName, objectName string) (drivers.ObjectMetadata, error) { d.lock.RLock() defer d.lock.RUnlock() errParams := map[string]string{ "bucketName": bucketName, "objectName": objectName, } if d.donut == nil { return drivers.ObjectMetadata{}, iodine.New(drivers.InternalError{}, errParams) } if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") { return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, errParams) } if !drivers.IsValidObjectName(objectName) || strings.TrimSpace(objectName) == "" { return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, errParams) } metadata, err := d.donut.GetObjectMetadata(bucketName, objectName) if err != nil { return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{ Bucket: bucketName, Object: objectName, }, errParams) } objectMetadata := drivers.ObjectMetadata{ Bucket: bucketName, Key: objectName, ContentType: metadata.Metadata["contentType"], Created: metadata.Created, Md5: metadata.MD5Sum, Size: metadata.Size, } return objectMetadata, nil }
// GetPartialObject - GET object from cache buffer range func (cache *cacheDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { errParams := map[string]string{ "bucket": bucket, "object": object, "start": strconv.FormatInt(start, 10), "length": strconv.FormatInt(length, 10), } cache.lock.RLock() if !drivers.IsValidBucket(bucket) { cache.lock.RUnlock() return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, errParams) } if !drivers.IsValidObjectName(object) { cache.lock.RUnlock() return 0, iodine.New(drivers.ObjectNameInvalid{Object: object}, errParams) } if start < 0 { return 0, iodine.New(drivers.InvalidRange{ Start: start, Length: length, }, errParams) } objectKey := bucket + "/" + object data, ok := cache.objects.Get(objectKey) if !ok { cache.lock.RUnlock() return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, errParams) } written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length) cache.lock.RUnlock() return written, iodine.New(err, nil) }
// GetObjectMetadata - HEAD object func (fs *fsDriver) GetObjectMetadata(bucket, object string) (drivers.ObjectMetadata, error) { if drivers.IsValidBucket(bucket) == false { return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } if drivers.IsValidObjectName(object) == false { return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: bucket}, nil) } // Do not use filepath.Join() since filepath.Join strips off any object names with '/', use them as is // in a static manner so that we can send a proper 'ObjectNotFound' reply back upon os.Stat() objectPath := fs.root + "/" + bucket + "/" + object stat, err := os.Stat(objectPath) if os.IsNotExist(err) { return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) } _, err = os.Stat(objectPath + "$metadata") if os.IsNotExist(err) { return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) } file, err := os.Open(objectPath + "$metadata") defer file.Close() if err != nil { return drivers.ObjectMetadata{}, iodine.New(err, nil) } var deserializedMetadata Metadata decoder := json.NewDecoder(file) err = decoder.Decode(&deserializedMetadata) if err != nil { return drivers.ObjectMetadata{}, iodine.New(err, nil) } contentType := "application/octet-stream" if deserializedMetadata.ContentType != "" { contentType = deserializedMetadata.ContentType } contentType = strings.TrimSpace(contentType) etag := bucket + "#" + filepath.Base(object) if len(deserializedMetadata.Md5sum) != 0 { etag = hex.EncodeToString(deserializedMetadata.Md5sum) } metadata := drivers.ObjectMetadata{ Bucket: bucket, Key: object, Created: stat.ModTime(), Size: stat.Size(), Md5: etag, ContentType: contentType, } return metadata, nil }
// ListObjects - GET bucket (list objects) func (fs *fsDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { p := bucketDir{} p.files = make(map[string]os.FileInfo) if drivers.IsValidBucket(bucket) == false { return []drivers.ObjectMetadata{}, resources, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } if resources.Prefix != "" && drivers.IsValidObjectName(resources.Prefix) == false { return []drivers.ObjectMetadata{}, resources, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix}, nil) } rootPrefix := filepath.Join(fs.root, bucket) // check bucket exists if _, err := os.Stat(rootPrefix); os.IsNotExist(err) { return []drivers.ObjectMetadata{}, resources, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } p.root = rootPrefix err := filepath.Walk(rootPrefix, p.getAllFiles) if err != nil { return []drivers.ObjectMetadata{}, resources, iodine.New(err, nil) } var metadataList []drivers.ObjectMetadata var metadata drivers.ObjectMetadata // Populate filtering mode resources.Mode = drivers.GetMode(resources) var fileNames []string for name := range p.files { fileNames = append(fileNames, name) } sort.Strings(fileNames) for _, name := range fileNames { if len(metadataList) >= resources.Maxkeys { resources.IsTruncated = true if resources.IsTruncated && resources.IsDelimiterSet() { resources.NextMarker = metadataList[len(metadataList)-1].Key } break } if name > resources.Marker { metadata, resources, err = fs.filterObjects(bucket, name, p.files[name], resources) if err != nil { return []drivers.ObjectMetadata{}, resources, iodine.New(err, nil) } if metadata.Bucket != "" { metadataList = append(metadataList, metadata) } } } sort.Sort(byObjectKey(metadataList)) return metadataList, resources, nil }
func (fs *fsDriver) AbortMultipartUpload(bucket, key, uploadID string) error { fs.lock.Lock() defer fs.lock.Unlock() // check bucket name valid if drivers.IsValidBucket(bucket) == false { return iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } // verify object path legal if drivers.IsValidObjectName(key) == false { return iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil) } if !fs.isValidUploadID(key, uploadID) { return iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil) } bucketPath := filepath.Join(fs.root, bucket) _, err := os.Stat(bucketPath) // check bucket exists if os.IsNotExist(err) { return iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } if err != nil { return iodine.New(drivers.InternalError{}, nil) } objectPath := filepath.Join(bucketPath, key) multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDWR, 0600) if err != nil { return iodine.New(err, nil) } var deserializedMultipartSession MultipartSession decoder := json.NewDecoder(multiPartfile) err = decoder.Decode(&deserializedMultipartSession) if err != nil { return iodine.New(err, nil) } multiPartfile.Close() // close it right here, since we will delete it subsequently delete(fs.multiparts.ActiveSession, key) for _, part := range deserializedMultipartSession.Parts { err = os.RemoveAll(objectPath + fmt.Sprintf("$%d", part.PartNumber)) if err != nil { return iodine.New(err, nil) } } err = os.RemoveAll(objectPath + "$multiparts") if err != nil { return iodine.New(err, nil) } return nil }
// GetObject retrieves an object and writes it to a writer func (d donutDriver) GetObject(w io.Writer, bucketName, objectName string) (int64, error) { if d.donut == nil { return 0, iodine.New(drivers.InternalError{}, nil) } if !drivers.IsValidBucket(bucketName) { return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) } if !drivers.IsValidObjectName(objectName) { return 0, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil) } if _, ok := d.storedBuckets[bucketName]; ok == false { return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil) } d.lock.RLock() defer d.lock.RUnlock() objectKey := bucketName + "/" + objectName data, ok := d.objects.Get(objectKey) if !ok { reader, size, err := d.donut.GetObject(bucketName, objectName) if err != nil { switch iodine.ToError(err).(type) { case donut.BucketNotFound: return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil) case donut.ObjectNotFound: return 0, iodine.New(drivers.ObjectNotFound{ Bucket: bucketName, Object: objectName, }, nil) default: return 0, iodine.New(drivers.InternalError{}, nil) } } pw := newProxyWriter(w) n, err := io.CopyN(pw, reader, size) if err != nil { return 0, iodine.New(err, nil) } // Save in memory for future reads d.objects.Set(objectKey, pw.writtenBytes) // free up pw.writtenBytes = nil go debug.FreeOSMemory() return n, nil } written, err := io.Copy(w, bytes.NewBuffer(data)) if err != nil { return 0, iodine.New(err, nil) } return written, nil }
// GetPartialObject retrieves an object range and writes it to a writer func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string, start, length int64) (int64, error) { d.lock.RLock() defer d.lock.RUnlock() if d.donut == nil { return 0, iodine.New(drivers.InternalError{}, nil) } errParams := map[string]string{ "bucketName": bucketName, "objectName": objectName, "start": strconv.FormatInt(start, 10), "length": strconv.FormatInt(length, 10), } if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") { return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, errParams) } if !drivers.IsValidObjectName(objectName) || strings.TrimSpace(objectName) == "" { return 0, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, errParams) } if start < 0 { return 0, iodine.New(drivers.InvalidRange{ Start: start, Length: length, }, errParams) } reader, size, err := d.donut.GetObject(bucketName, objectName) if err != nil { return 0, iodine.New(drivers.ObjectNotFound{ Bucket: bucketName, Object: objectName, }, nil) } defer reader.Close() if start > size || (start+length-1) > size { return 0, iodine.New(drivers.InvalidRange{ Start: start, Length: length, }, errParams) } _, err = io.CopyN(ioutil.Discard, reader, start) if err != nil { return 0, iodine.New(err, errParams) } n, err := io.CopyN(w, reader, length) if err != nil { return 0, iodine.New(err, errParams) } return n, nil }
// ListObjects - returns list of objects func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { errParams := map[string]string{ "bucketName": bucketName, } if d.donut == nil { return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.InternalError{}, errParams) } if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") { return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) } if !drivers.IsValidObjectName(resources.Prefix) { return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: resources.Prefix}, nil) } actualObjects, commonPrefixes, isTruncated, err := d.donut.ListObjects(bucketName, resources.Prefix, resources.Marker, resources.Delimiter, resources.Maxkeys) if err != nil { return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams) } resources.CommonPrefixes = commonPrefixes resources.IsTruncated = isTruncated if resources.IsTruncated && resources.IsDelimiterSet() { resources.NextMarker = actualObjects[len(actualObjects)-1] } var results []drivers.ObjectMetadata for _, objectName := range actualObjects { objectMetadata, err := d.donut.GetObjectMetadata(bucketName, objectName) if err != nil { return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams) } t, err := time.Parse(time.RFC3339Nano, objectMetadata["created"]) if err != nil { return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, nil) } size, err := strconv.ParseInt(objectMetadata["size"], 10, 64) if err != nil { return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, nil) } metadata := drivers.ObjectMetadata{ Key: objectName, Created: t, Size: size, } results = append(results, metadata) } sort.Sort(byObjectKey(results)) return results, resources, nil }
// ListObjects - list objects from cache func (cache *cacheDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { cache.lock.RLock() defer cache.lock.RUnlock() if !drivers.IsValidBucket(bucket) { return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } if !drivers.IsValidObjectName(resources.Prefix) { return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.ObjectNameInvalid{Object: resources.Prefix}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { return nil, drivers.BucketResourcesMetadata{IsTruncated: false}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } var results []drivers.ObjectMetadata var keys []string storedBucket := cache.storedBuckets[bucket] for key := range storedBucket.objectMetadata { if strings.HasPrefix(key, bucket+"/") { key = key[len(bucket)+1:] keys, resources = cache.listObjects(keys, key, resources) } } var newKeys []string switch { case resources.Marker != "": for _, key := range keys { if key > resources.Marker { newKeys = appendUniq(newKeys, key) } } default: newKeys = keys } sort.Strings(newKeys) for _, key := range newKeys { if len(results) == resources.Maxkeys { resources.IsTruncated = true if resources.IsTruncated && resources.IsDelimiterSet() { resources.NextMarker = results[len(results)-1].Key } return results, resources, nil } object := storedBucket.objectMetadata[bucket+"/"+key] results = append(results, object) } return results, resources, nil }
// GetPartialObject - GET object from range func (fs *fsDriver) GetPartialObject(w io.Writer, bucket, object string, start, length int64) (int64, error) { // validate bucket if drivers.IsValidBucket(bucket) == false { return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } // validate object if drivers.IsValidObjectName(object) == false { return 0, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: object}, nil) } objectPath := filepath.Join(fs.root, bucket, object) filestat, err := os.Stat(objectPath) switch err := err.(type) { case nil: { if filestat.IsDir() { return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) } } default: { if os.IsNotExist(err) { return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) } return 0, iodine.New(err, nil) } } file, err := os.Open(objectPath) if err != nil { return 0, iodine.New(err, nil) } defer file.Close() _, err = file.Seek(start, os.SEEK_SET) if err != nil { return 0, iodine.New(err, nil) } count, err := io.CopyN(w, file, length) if err != nil { return count, iodine.New(err, nil) } return count, nil }
// GetObjectMetadata retrieves an object's metadata func (d donutDriver) GetObjectMetadata(bucketName, objectName string) (drivers.ObjectMetadata, error) { d.lock.RLock() defer d.lock.RUnlock() errParams := map[string]string{ "bucketName": bucketName, "objectName": objectName, } if d.donut == nil { return drivers.ObjectMetadata{}, iodine.New(drivers.InternalError{}, errParams) } if !drivers.IsValidBucket(bucketName) { return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, errParams) } if !drivers.IsValidObjectName(objectName) { return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, errParams) } if _, ok := d.storedBuckets[bucketName]; ok { storedBucket := d.storedBuckets[bucketName] objectKey := bucketName + "/" + objectName if object, ok := storedBucket.objectMetadata[objectKey]; ok { return object, nil } } metadata, err := d.donut.GetObjectMetadata(bucketName, objectName) if err != nil { return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{ Bucket: bucketName, Object: objectName, }, errParams) } objectMetadata := drivers.ObjectMetadata{ Bucket: bucketName, Key: objectName, ContentType: metadata.Metadata["contentType"], Created: metadata.Created, Md5: metadata.MD5Sum, Size: metadata.Size, } return objectMetadata, nil }
// GetObjectMetadata - get object metadata from cache func (cache *cacheDriver) GetObjectMetadata(bucket, key string) (drivers.ObjectMetadata, error) { cache.lock.RLock() defer cache.lock.RUnlock() // check if bucket exists if !drivers.IsValidBucket(bucket) { return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } if !drivers.IsValidObjectName(key) { return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: key}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } storedBucket := cache.storedBuckets[bucket] objectKey := bucket + "/" + key if object, ok := storedBucket.objectMetadata[objectKey]; ok == true { return object, nil } return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: key}, nil) }
// GetObject retrieves an object and writes it to a writer func (d donutDriver) GetObject(target io.Writer, bucketName, objectName string) (int64, error) { if d.donut == nil { return 0, iodine.New(drivers.InternalError{}, nil) } if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") { return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) } if !drivers.IsValidObjectName(objectName) || strings.TrimSpace(objectName) == "" { return 0, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil) } reader, size, err := d.donut.GetObject(bucketName, objectName) if err != nil { return 0, iodine.New(drivers.ObjectNotFound{ Bucket: bucketName, Object: objectName, }, nil) } n, err := io.CopyN(target, reader, size) return n, iodine.New(err, nil) }
// GetObjectMetadata retrieves an object's metadata func (d donutDriver) GetObjectMetadata(bucketName, objectName string) (drivers.ObjectMetadata, error) { errParams := map[string]string{ "bucketName": bucketName, "objectName": objectName, } if d.donut == nil { return drivers.ObjectMetadata{}, iodine.New(drivers.InternalError{}, errParams) } if !drivers.IsValidBucket(bucketName) || strings.Contains(bucketName, ".") { return drivers.ObjectMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, errParams) } if !drivers.IsValidObjectName(objectName) || strings.TrimSpace(objectName) == "" { return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, errParams) } metadata, err := d.donut.GetObjectMetadata(bucketName, objectName) if err != nil { return drivers.ObjectMetadata{}, iodine.New(drivers.ObjectNotFound{ Bucket: bucketName, Object: objectName, }, errParams) } created, err := time.Parse(time.RFC3339Nano, metadata["created"]) if err != nil { return drivers.ObjectMetadata{}, iodine.New(err, errParams) } size, err := strconv.ParseInt(metadata["size"], 10, 64) if err != nil { return drivers.ObjectMetadata{}, iodine.New(err, errParams) } objectMetadata := drivers.ObjectMetadata{ Bucket: bucketName, Key: objectName, ContentType: metadata["contentType"], Created: created, Md5: metadata["md5"], Size: size, } return objectMetadata, nil }
// ListObjects - returns list of objects func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketResourcesMetadata) ([]drivers.ObjectMetadata, drivers.BucketResourcesMetadata, error) { d.lock.RLock() defer d.lock.RUnlock() errParams := map[string]string{ "bucketName": bucketName, } if d.donut == nil { return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.InternalError{}, errParams) } if !drivers.IsValidBucket(bucketName) { return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) } if !drivers.IsValidObjectName(resources.Prefix) { return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: resources.Prefix}, nil) } listObjects, err := d.donut.ListObjects(bucketName, resources.Prefix, resources.Marker, resources.Delimiter, resources.Maxkeys) if err != nil { return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams) } resources.CommonPrefixes = listObjects.CommonPrefixes resources.IsTruncated = listObjects.IsTruncated var results []drivers.ObjectMetadata for _, objMetadata := range listObjects.Objects { metadata := drivers.ObjectMetadata{ Key: objMetadata.Object, Created: objMetadata.Created, Size: objMetadata.Size, } results = append(results, metadata) } sort.Sort(byObjectName(results)) if resources.IsTruncated && resources.IsDelimiterSet() { resources.NextMarker = results[len(results)-1].Key } return results, resources, nil }
// GetObject - GET object from cache buffer func (cache *cacheDriver) GetObject(w io.Writer, bucket string, object string) (int64, error) { cache.lock.RLock() if !drivers.IsValidBucket(bucket) { cache.lock.RUnlock() return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } if !drivers.IsValidObjectName(object) { cache.lock.RUnlock() return 0, iodine.New(drivers.ObjectNameInvalid{Object: object}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { cache.lock.RUnlock() return 0, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } objectKey := bucket + "/" + object data, ok := cache.objects.Get(objectKey) if !ok { cache.lock.RUnlock() return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil) } written, err := io.Copy(w, bytes.NewBuffer(data)) cache.lock.RUnlock() return written, iodine.New(err, nil) }
func (memory *memoryDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) { memory.lock.RLock() if !drivers.IsValidBucket(bucket) { memory.lock.RUnlock() return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } if !drivers.IsValidObjectName(key) { memory.lock.RUnlock() return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil) } if _, ok := memory.storedBuckets[bucket]; ok == false { memory.lock.RUnlock() return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } storedBucket := memory.storedBuckets[bucket] objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { memory.lock.RUnlock() return "", iodine.New(drivers.ObjectExists{Bucket: bucket, Object: key}, nil) } memory.lock.RUnlock() memory.lock.Lock() id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String()) uploadIDSum := sha512.Sum512(id) uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47] memory.storedBuckets[bucket].multiPartSession[key] = multiPartSession{ uploadID: uploadID, initiated: time.Now(), totalParts: 0, } memory.lock.Unlock() return uploadID, nil }
// GetPartialObject retrieves an object range and writes it to a writer func (d donutDriver) GetPartialObject(w io.Writer, bucketName, objectName string, start, length int64) (int64, error) { d.lock.RLock() defer d.lock.RUnlock() if d.donut == nil { return 0, iodine.New(drivers.InternalError{}, nil) } errParams := map[string]string{ "bucketName": bucketName, "objectName": objectName, "start": strconv.FormatInt(start, 10), "length": strconv.FormatInt(length, 10), } if !drivers.IsValidBucket(bucketName) { return 0, iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, errParams) } if !drivers.IsValidObjectName(objectName) { return 0, iodine.New(drivers.ObjectNameInvalid{Object: objectName}, errParams) } if start < 0 { return 0, iodine.New(drivers.InvalidRange{ Start: start, Length: length, }, errParams) } if _, ok := d.storedBuckets[bucketName]; ok == false { return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil) } objectKey := bucketName + "/" + objectName data, ok := d.objects.Get(objectKey) if !ok { reader, size, err := d.donut.GetObject(bucketName, objectName) if err != nil { switch iodine.ToError(err).(type) { case donut.BucketNotFound: return 0, iodine.New(drivers.BucketNotFound{Bucket: bucketName}, nil) case donut.ObjectNotFound: return 0, iodine.New(drivers.ObjectNotFound{ Bucket: bucketName, Object: objectName, }, nil) default: return 0, iodine.New(drivers.InternalError{}, nil) } } defer reader.Close() if start > size || (start+length-1) > size { return 0, iodine.New(drivers.InvalidRange{ Start: start, Length: length, }, errParams) } _, err = io.CopyN(ioutil.Discard, reader, start) if err != nil { return 0, iodine.New(err, errParams) } n, err := io.CopyN(w, reader, length) if err != nil { return 0, iodine.New(err, errParams) } return n, nil } written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length) return written, iodine.New(err, nil) }
// createObject - PUT object to memory buffer func (memory *memoryDriver) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { memory.lock.RLock() if !drivers.IsValidBucket(bucket) { memory.lock.RUnlock() return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } if !drivers.IsValidObjectName(key) { memory.lock.RUnlock() return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil) } if _, ok := memory.storedBuckets[bucket]; ok == false { memory.lock.RUnlock() return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } storedBucket := memory.storedBuckets[bucket] // get object key partKey := bucket + "/" + getMultipartKey(key, uploadID, partID) if _, ok := storedBucket.partMetadata[partKey]; ok == true { memory.lock.RUnlock() return storedBucket.partMetadata[partKey].ETag, nil } memory.lock.RUnlock() if contentType == "" { contentType = "application/octet-stream" } contentType = strings.TrimSpace(contentType) if strings.TrimSpace(expectedMD5Sum) != "" { expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { // pro-actively close the connection return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } // calculate md5 hash := md5.New() var readBytes []byte var err error var length int for err == nil { byteBuffer := make([]byte, 1024*1024) length, err = data.Read(byteBuffer) // While hash.Write() wouldn't mind a Nil byteBuffer // It is necessary for us to verify this and break if length == 0 { break } hash.Write(byteBuffer[0:length]) readBytes = append(readBytes, byteBuffer[0:length]...) 
} if err != io.EOF { return "", iodine.New(err, nil) } go debug.FreeOSMemory() md5SumBytes := hash.Sum(nil) totalLength := int64(len(readBytes)) memory.lock.Lock() memory.multiPartObjects.Set(partKey, readBytes) memory.lock.Unlock() // setting up for de-allocation readBytes = nil md5Sum := hex.EncodeToString(md5SumBytes) // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) } } newPart := drivers.PartMetadata{ PartNumber: partID, LastModified: time.Now().UTC(), ETag: md5Sum, Size: totalLength, } memory.lock.Lock() storedBucket.partMetadata[partKey] = newPart multiPartSession := storedBucket.multiPartSession[key] multiPartSession.totalParts++ storedBucket.multiPartSession[key] = multiPartSession memory.storedBuckets[bucket] = storedBucket memory.lock.Unlock() return md5Sum, nil }
func (memory *memoryDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) { if !drivers.IsValidBucket(bucket) { return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } if !drivers.IsValidObjectName(key) { return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil) } // Verify upload id memory.lock.RLock() if _, ok := memory.storedBuckets[bucket]; ok == false { memory.lock.RUnlock() return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } storedBucket := memory.storedBuckets[bucket] if storedBucket.multiPartSession[key].uploadID != uploadID { memory.lock.RUnlock() return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil) } memory.lock.RUnlock() memory.lock.Lock() var size int64 var fullObject bytes.Buffer for i := 1; i <= len(parts); i++ { recvMD5 := parts[i] object, ok := memory.multiPartObjects.Get(bucket + "/" + getMultipartKey(key, uploadID, i)) if ok == false { memory.lock.Unlock() return "", iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil) } size += int64(len(object)) calcMD5Bytes := md5.Sum(object) // complete multi part request header md5sum per part is hex encoded recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\"")) if err != nil { return "", iodine.New(drivers.InvalidDigest{Md5: recvMD5}, nil) } if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) { return "", iodine.New(drivers.BadDigest{Md5: recvMD5, Bucket: bucket, Key: getMultipartKey(key, uploadID, i)}, nil) } _, err = io.Copy(&fullObject, bytes.NewBuffer(object)) if err != nil { return "", iodine.New(err, nil) } object = nil go debug.FreeOSMemory() } memory.lock.Unlock() md5sumSlice := md5.Sum(fullObject.Bytes()) // this is needed for final verification inside CreateObject, do not convert this to hex md5sum := base64.StdEncoding.EncodeToString(md5sumSlice[:]) etag, err := memory.CreateObject(bucket, key, "", md5sum, size, &fullObject) if err != nil { // No need to call internal cleanup 
functions here, caller will call AbortMultipartUpload() // which would in-turn cleanup properly in accordance with S3 Spec return "", iodine.New(err, nil) } fullObject.Reset() memory.cleanupMultiparts(bucket, key, uploadID) memory.cleanupMultipartSession(bucket, key, uploadID) return etag, nil }
// CreateObject - PUT object func (fs *fsDriver) CreateObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { fs.lock.Lock() defer fs.lock.Unlock() // check bucket name valid if drivers.IsValidBucket(bucket) == false { return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } // check bucket exists if _, err := os.Stat(filepath.Join(fs.root, bucket)); os.IsNotExist(err) { return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } // verify object path legal if drivers.IsValidObjectName(key) == false { return "", iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil) } // verify content type if contentType == "" { contentType = "application/octet-stream" } contentType = strings.TrimSpace(contentType) // get object path objectPath := filepath.Join(fs.root, bucket, key) objectDir := filepath.Dir(objectPath) if _, err := os.Stat(objectDir); os.IsNotExist(err) { err = os.MkdirAll(objectDir, 0700) if err != nil { return "", iodine.New(err, nil) } } // check if object exists if _, err := os.Stat(objectPath); !os.IsNotExist(err) { return "", iodine.New(drivers.ObjectExists{ Bucket: bucket, Object: key, }, nil) } if strings.TrimSpace(expectedMD5Sum) != "" { expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { // pro-actively close the connection return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } // write object file, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600) if err != nil { return "", iodine.New(err, nil) } defer file.Close() h := md5.New() mw := io.MultiWriter(file, h) _, err = io.CopyN(mw, data, size) if err != nil { return "", iodine.New(err, nil) } file, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600) if err != nil { return "", iodine.New(err, nil) } defer file.Close() metadata := &Metadata{ 
ContentType: contentType, Md5sum: h.Sum(nil), } // serialize metadata to json encoder := json.NewEncoder(file) err = encoder.Encode(metadata) md5Sum := hex.EncodeToString(metadata.Md5sum) // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) } } return md5Sum, nil }
// NewMultipartUpload - initiates a multipart upload session for key and
// returns a freshly generated upload id. The session is persisted twice:
// per-object in "<objectPath>$multiparts" and bucket-wide in
// "<bucketPath>$activeSession", and mirrored in fs.multiparts.ActiveSession.
func (fs *fsDriver) NewMultipartUpload(bucket, key, contentType string) (string, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	if !drivers.IsValidBucket(bucket) {
		return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}
	if !drivers.IsValidObjectName(key) {
		return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil)
	}
	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return "", iodine.New(drivers.InternalError{}, nil)
	}
	objectPath := filepath.Join(bucketPath, key)
	objectDir := filepath.Dir(objectPath)
	if _, err := os.Stat(objectDir); os.IsNotExist(err) {
		err = os.MkdirAll(objectDir, 0700)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}
	// check if object exists; an upload may not target an existing object
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return "", iodine.New(drivers.ObjectExists{
			Bucket: bucket,
			Object: key,
		}, nil)
	}
	var activeSessionFile *os.File
	_, err = os.Stat(bucketPath + "$activeSession")
	switch {
	case os.IsNotExist(err):
		// first session for this bucket: create the bookkeeping file
		activeSessionFile, err = os.OpenFile(bucketPath+"$activeSession", os.O_WRONLY|os.O_CREATE, 0600)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	default:
		// NOTE(review): O_APPEND writes a whole new JSON snapshot of the
		// ActiveSession map after the previous one; a json.Decoder reading
		// this file only sees the first (stale) value — confirm intended.
		activeSessionFile, err = os.OpenFile(bucketPath+"$activeSession", os.O_WRONLY|os.O_APPEND, 0600)
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}
	defer activeSessionFile.Close()
	// upload id: sha512 over (random int64, bucket, key, timestamp), URL-safe
	// base64 encoded, truncated to 47 characters
	id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String())
	uploadIDSum := sha512.Sum512(id)
	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]
	multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer multiPartfile.Close()
	// fresh session: no parts yet
	mpartSession := new(MultipartSession)
	mpartSession.TotalParts = 0
	mpartSession.UploadID = uploadID
	mpartSession.Initiated = time.Now().UTC()
	var parts []*drivers.PartMetadata
	mpartSession.Parts = parts
	fs.multiparts.ActiveSession[key] = mpartSession
	// persist per-object session, then the bucket-wide active session map
	encoder := json.NewEncoder(multiPartfile)
	err = encoder.Encode(mpartSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	encoder = json.NewEncoder(activeSessionFile)
	err = encoder.Encode(fs.multiparts.ActiveSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	return uploadID, nil
}
func (fs *fsDriver) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { fs.lock.Lock() defer fs.lock.Unlock() if partID <= 0 { return "", iodine.New(errors.New("invalid part id, cannot be zero or less than zero"), nil) } // check bucket name valid if drivers.IsValidBucket(bucket) == false { return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } // verify object path legal if drivers.IsValidObjectName(key) == false { return "", iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil) } if !fs.isValidUploadID(key, uploadID) { return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil) } if strings.TrimSpace(expectedMD5Sum) != "" { expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { // pro-actively close the connection return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } bucketPath := filepath.Join(fs.root, bucket) _, err := os.Stat(bucketPath) // check bucket exists if os.IsNotExist(err) { return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } if err != nil { return "", iodine.New(drivers.InternalError{}, nil) } objectPath := filepath.Join(bucketPath, key) objectDir := filepath.Dir(objectPath) if _, err := os.Stat(objectDir); os.IsNotExist(err) { err = os.MkdirAll(objectDir, 0700) if err != nil { return "", iodine.New(err, nil) } } // check if object exists if _, err := os.Stat(objectPath); !os.IsNotExist(err) { return "", iodine.New(drivers.ObjectExists{ Bucket: bucket, Object: key, }, nil) } partMetadata, err := fs.writePart(objectPath, partID, size, data) if err != nil { return "", iodine.New(err, nil) } // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := 
isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), partMetadata.ETag); err != nil { return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) } } multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDWR|os.O_APPEND, 0600) if err != nil { return "", iodine.New(err, nil) } defer multiPartfile.Close() var deserializedMultipartSession MultipartSession decoder := json.NewDecoder(multiPartfile) err = decoder.Decode(&deserializedMultipartSession) if err != nil { return "", iodine.New(err, nil) } deserializedMultipartSession.Parts = append(deserializedMultipartSession.Parts, &partMetadata) deserializedMultipartSession.TotalParts++ fs.multiparts.ActiveSession[key] = &deserializedMultipartSession sort.Sort(partNumber(deserializedMultipartSession.Parts)) encoder := json.NewEncoder(multiPartfile) err = encoder.Encode(&deserializedMultipartSession) if err != nil { return "", iodine.New(err, nil) } return partMetadata.ETag, nil }
// CompleteMultipartUpload - concatenates all uploaded parts into the final
// object file while computing its md5, writes the sidecar $metadata file,
// removes the per-part scratch files and the $multiparts session file, and
// rewrites the bucket-wide $activeSession file. Returns the object's
// hex-encoded md5sum.
func (fs *fsDriver) CompleteMultipartUpload(bucket, key, uploadID string, parts map[int]string) (string, error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()
	// check bucket name valid
	if drivers.IsValidBucket(bucket) == false {
		return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil)
	}
	// verify object path legal
	if drivers.IsValidObjectName(key) == false {
		return "", iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil)
	}
	if !fs.isValidUploadID(key, uploadID) {
		return "", iodine.New(drivers.InvalidUploadID{UploadID: uploadID}, nil)
	}
	bucketPath := filepath.Join(fs.root, bucket)
	_, err := os.Stat(bucketPath)
	// check bucket exists
	if os.IsNotExist(err) {
		return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil)
	}
	if err != nil {
		return "", iodine.New(drivers.InternalError{}, nil)
	}
	objectPath := filepath.Join(bucketPath, key)
	// check if object exists; completion must not clobber an existing object
	if _, err := os.Stat(objectPath); !os.IsNotExist(err) {
		return "", iodine.New(drivers.ObjectExists{
			Bucket: bucket,
			Object: key,
		}, nil)
	}
	file, err := os.OpenFile(objectPath, os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer file.Close()
	// stream every part into the object file while hashing
	h := md5.New()
	mw := io.MultiWriter(file, h)
	err = fs.concatParts(parts, objectPath, mw)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	md5sum := hex.EncodeToString(h.Sum(nil))
	// session is complete: drop it from memory and delete part scratch files
	delete(fs.multiparts.ActiveSession, key)
	for partNumber := range parts {
		err = os.Remove(objectPath + fmt.Sprintf("$%d", partNumber))
		if err != nil {
			return "", iodine.New(err, nil)
		}
	}
	err = os.Remove(objectPath + "$multiparts")
	if err != nil {
		return "", iodine.New(err, nil)
	}
	file, err = os.OpenFile(objectPath+"$metadata", os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer file.Close()
	// NOTE(review): the content type supplied at NewMultipartUpload time is
	// not persisted here; the completed object always gets
	// application/octet-stream — confirm intended.
	metadata := &Metadata{
		ContentType: "application/octet-stream",
		Md5sum:      h.Sum(nil),
	}
	// serialize metadata to json
	encoder := json.NewEncoder(file)
	err = encoder.Encode(metadata)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	// rewrite (O_TRUNC) the bucket-wide active session file with this
	// session removed
	activeSessionFile, err := os.OpenFile(bucketPath+"$activeSession", os.O_WRONLY|os.O_TRUNC, 0600)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	defer activeSessionFile.Close()
	encoder = json.NewEncoder(activeSessionFile)
	err = encoder.Encode(fs.multiparts.ActiveSession)
	if err != nil {
		return "", iodine.New(err, nil)
	}
	return md5sum, nil
}
func (fs *fsDriver) ListObjectParts(bucket, key string, resources drivers.ObjectResourcesMetadata) (drivers.ObjectResourcesMetadata, error) { fs.lock.Lock() defer fs.lock.Unlock() // load from disk fs.loadActiveSessions(bucket) // check bucket name valid if drivers.IsValidBucket(bucket) == false { return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } // verify object path legal if drivers.IsValidObjectName(key) == false { return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.ObjectNameInvalid{Bucket: bucket, Object: key}, nil) } if !fs.isValidUploadID(key, resources.UploadID) { return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.InvalidUploadID{UploadID: resources.UploadID}, nil) } objectResourcesMetadata := resources objectResourcesMetadata.Bucket = bucket objectResourcesMetadata.Key = key var startPartNumber int switch { case objectResourcesMetadata.PartNumberMarker == 0: startPartNumber = 1 default: startPartNumber = objectResourcesMetadata.PartNumberMarker } bucketPath := filepath.Join(fs.root, bucket) _, err := os.Stat(bucketPath) // check bucket exists if os.IsNotExist(err) { return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } if err != nil { return drivers.ObjectResourcesMetadata{}, iodine.New(drivers.InternalError{}, nil) } objectPath := filepath.Join(bucketPath, key) multiPartfile, err := os.OpenFile(objectPath+"$multiparts", os.O_RDONLY, 0600) if err != nil { return drivers.ObjectResourcesMetadata{}, iodine.New(err, nil) } defer multiPartfile.Close() var deserializedMultipartSession MultipartSession decoder := json.NewDecoder(multiPartfile) err = decoder.Decode(&deserializedMultipartSession) if err != nil { return drivers.ObjectResourcesMetadata{}, iodine.New(err, nil) } var parts []*drivers.PartMetadata for i := startPartNumber; i <= deserializedMultipartSession.TotalParts; i++ { if len(parts) > objectResourcesMetadata.MaxParts { 
sort.Sort(partNumber(parts)) objectResourcesMetadata.IsTruncated = true objectResourcesMetadata.Part = parts objectResourcesMetadata.NextPartNumberMarker = i return objectResourcesMetadata, nil } parts = append(parts, deserializedMultipartSession.Parts[i-1]) } sort.Sort(partNumber(parts)) objectResourcesMetadata.Part = parts return objectResourcesMetadata, nil }
// createObject - PUT object to cache buffer func (cache *cacheDriver) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader) (string, error) { cache.lock.RLock() if !drivers.IsValidBucket(bucket) { cache.lock.RUnlock() return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucket}, nil) } if !drivers.IsValidObjectName(key) { cache.lock.RUnlock() return "", iodine.New(drivers.ObjectNameInvalid{Object: key}, nil) } if _, ok := cache.storedBuckets[bucket]; ok == false { cache.lock.RUnlock() return "", iodine.New(drivers.BucketNotFound{Bucket: bucket}, nil) } storedBucket := cache.storedBuckets[bucket] // get object key objectKey := bucket + "/" + key if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { cache.lock.RUnlock() return "", iodine.New(drivers.ObjectExists{Bucket: bucket, Object: key}, nil) } cache.lock.RUnlock() if contentType == "" { contentType = "application/octet-stream" } contentType = strings.TrimSpace(contentType) if strings.TrimSpace(expectedMD5Sum) != "" { expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { // pro-actively close the connection return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } // calculate md5 hash := md5.New() var readBytes []byte var err error var length int for err == nil { byteBuffer := make([]byte, 1024*1024) length, err = data.Read(byteBuffer) // While hash.Write() wouldn't mind a Nil byteBuffer // It is necessary for us to verify this and break if length == 0 { break } hash.Write(byteBuffer[0:length]) readBytes = append(readBytes, byteBuffer[0:length]...) 
} if err != io.EOF { return "", iodine.New(err, nil) } md5SumBytes := hash.Sum(nil) totalLength := len(readBytes) cache.lock.Lock() ok := cache.objects.Set(objectKey, readBytes) // setting up for de-allocation readBytes = nil go debug.FreeOSMemory() cache.lock.Unlock() if !ok { return "", iodine.New(drivers.InternalError{}, nil) } md5Sum := hex.EncodeToString(md5SumBytes) // Verify if the written object is equal to what is expected, only if it is requested as such if strings.TrimSpace(expectedMD5Sum) != "" { if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: bucket, Key: key}, nil) } } newObject := drivers.ObjectMetadata{ Bucket: bucket, Key: key, ContentType: contentType, Created: time.Now().UTC(), Md5: md5Sum, Size: int64(totalLength), } cache.lock.Lock() storedBucket.objectMetadata[objectKey] = newObject cache.storedBuckets[bucket] = storedBucket cache.lock.Unlock() return newObject.Md5, nil }
// CreateObject creates a new object func (d donutDriver) CreateObject(bucketName, objectName, contentType, expectedMD5Sum string, size int64, reader io.Reader) (string, error) { d.lock.Lock() defer d.lock.Unlock() errParams := map[string]string{ "bucketName": bucketName, "objectName": objectName, "contentType": contentType, } if d.donut == nil { return "", iodine.New(drivers.InternalError{}, errParams) } // TODO - Should be able to write bigger than cache if size > int64(d.maxSize) { generic := drivers.GenericObjectError{Bucket: bucketName, Object: objectName} return "", iodine.New(drivers.EntityTooLarge{ GenericObjectError: generic, Size: strconv.FormatInt(size, 10), MaxSize: strconv.FormatUint(d.maxSize, 10), }, nil) } if !drivers.IsValidBucket(bucketName) { return "", iodine.New(drivers.BucketNameInvalid{Bucket: bucketName}, nil) } if !drivers.IsValidObjectName(objectName) { return "", iodine.New(drivers.ObjectNameInvalid{Object: objectName}, nil) } storedBucket := d.storedBuckets[bucketName] // get object key objectKey := bucketName + "/" + objectName if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { return "", iodine.New(drivers.ObjectExists{Bucket: bucketName, Object: objectName}, nil) } if strings.TrimSpace(contentType) == "" { contentType = "application/octet-stream" } metadata := make(map[string]string) metadata["contentType"] = strings.TrimSpace(contentType) metadata["contentLength"] = strconv.FormatInt(size, 10) if strings.TrimSpace(expectedMD5Sum) != "" { expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) if err != nil { return "", iodine.New(drivers.InvalidDigest{Md5: expectedMD5Sum}, nil) } expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) } objMetadata, err := d.donut.PutObject(bucketName, objectName, expectedMD5Sum, reader, metadata) if err != nil { switch iodine.ToError(err).(type) { case donut.BadDigest: return "", iodine.New(drivers.BadDigest{Md5: expectedMD5Sum, Bucket: 
bucketName, Key: objectName}, nil) } return "", iodine.New(err, errParams) } newObject := drivers.ObjectMetadata{ Bucket: bucketName, Key: objectName, ContentType: objMetadata.Metadata["contentType"], Created: objMetadata.Created, Md5: objMetadata.MD5Sum, Size: objMetadata.Size, } storedBucket.objectMetadata[objectKey] = newObject d.storedBuckets[bucketName] = storedBucket return newObject.Md5, nil }