// NewMultipartUpload - initiate a new multipart session
func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, error) {
	donut.lock.Lock()
	defer donut.lock.Unlock()

	if !IsValidBucket(bucket) {
		return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
	}
	if !IsValidObjectName(key) {
		return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
	}
	if !donut.storedBuckets.Exists(bucket) {
		return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
	}
	storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
	objectKey := bucket + "/" + key
	if _, ok := storedBucket.objectMetadata[objectKey]; ok {
		return "", iodine.New(ObjectExists{Object: key}, nil)
	}

	id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String())
	uploadIDSum := sha512.Sum512(id)
	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]

	storedBucket.multiPartSession[key] = MultiPartSession{
		uploadID:   uploadID,
		initiated:  time.Now(),
		totalParts: 0,
	}
	multiPartCache := data.NewCache(0)
	multiPartCache.OnEvicted = donut.evictedPart
	donut.multiPartObjects[uploadID] = multiPartCache
	donut.storedBuckets.Set(bucket, storedBucket)
	return uploadID, nil
}
// New instantiates a new donut
func New() (Interface, *probe.Error) {
	var conf *Config
	var err *probe.Error
	conf, err = LoadConfig()
	if err != nil {
		// Fall back to a default configuration when LoadConfig fails; with a nil
		// NodeDiskMap the API operates purely in memory.
		conf = &Config{
			Version:     "0.0.1",
			MaxSize:     512000000,
			NodeDiskMap: nil,
			DonutName:   "",
		}
		if err := quick.CheckData(conf); err != nil {
			return nil, err.Trace()
		}
	}
	a := API{config: conf}
	a.storedBuckets = metadata.NewCache()
	a.nodes = make(map[string]node)
	a.buckets = make(map[string]bucket)
	a.objects = data.NewCache(a.config.MaxSize)
	a.multiPartObjects = make(map[string]*data.Cache)
	a.objects.OnEvicted = a.evictedObject
	a.lock = new(sync.Mutex)

	if len(a.config.NodeDiskMap) > 0 {
		// Attach every configured node and its disks before serving requests.
		for k, v := range a.config.NodeDiskMap {
			if len(v) == 0 {
				return nil, probe.NewError(InvalidDisksArgument{})
			}
			err := a.AttachNode(k, v)
			if err != nil {
				return nil, err.Trace()
			}
		}
		/// Initialization, populate all buckets into memory
		buckets, err := a.listBuckets()
		if err != nil {
			return nil, err.Trace()
		}
		for k, v := range buckets {
			var newBucket = storedBucket{}
			newBucket.bucketMetadata = v
			newBucket.objectMetadata = make(map[string]ObjectMetadata)
			newBucket.multiPartSession = make(map[string]MultiPartSession)
			newBucket.partMetadata = make(map[string]map[int]PartMetadata)
			a.storedBuckets.Set(k, newBucket)
		}
		a.Heal()
	}
	return a, nil
}
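// Usage sketch for the constructor above: a minimal, hypothetical caller. It
// assumes the package is imported as "donut" and that the Interface returned by
// New exposes NewMultipartUpload in its probe-based, signature-aware form shown
// below; the import path, bucket and object names are illustrative assumptions.
package main

import (
	"log"

	"github.com/minio/minio/pkg/donut" // assumed import path
)

func main() {
	// Instantiate the donut API; with an empty NodeDiskMap it runs purely in memory.
	api, perr := donut.New()
	if perr != nil {
		log.Fatalln(perr)
	}

	// Begin a multipart session; a nil signature skips request verification.
	uploadID, perr := api.NewMultipartUpload("my-bucket", "my-object", "application/octet-stream", nil)
	if perr != nil {
		log.Fatalln(perr)
	}
	log.Println("multipart upload ID:", uploadID)
}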
// NewMultipartUpload - initiate a new multipart session
func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, *probe.Error) {
	donut.lock.Lock()
	defer donut.lock.Unlock()

	if !IsValidBucket(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(key) {
		return "", probe.NewError(ObjectNameInvalid{Object: key})
	}
	if signature != nil {
		// Verify the request signature against the SHA-256 digest of an empty payload.
		ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
		if err != nil {
			return "", err.Trace()
		}
		if !ok {
			return "", probe.NewError(SignatureDoesNotMatch{})
		}
	}
	if len(donut.config.NodeDiskMap) > 0 {
		// Disks are attached: delegate to the disk-backed implementation.
		return donut.newMultipartUpload(bucket, key, contentType)
	}
	if !donut.storedBuckets.Exists(bucket) {
		return "", probe.NewError(BucketNotFound{Bucket: bucket})
	}
	storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
	objectKey := bucket + "/" + key
	if _, ok := storedBucket.objectMetadata[objectKey]; ok {
		return "", probe.NewError(ObjectExists{Object: key})
	}

	// Derive a pseudo-random upload ID from the bucket, key and current UTC time.
	id := []byte(strconv.Itoa(rand.Int()) + bucket + key + time.Now().UTC().String())
	uploadIDSum := sha512.Sum512(id)
	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]

	storedBucket.multiPartSession[key] = MultiPartSession{
		UploadID:   uploadID,
		Initiated:  time.Now().UTC(),
		TotalParts: 0,
	}
	storedBucket.partMetadata[key] = make(map[int]PartMetadata)
	multiPartCache := data.NewCache(0)
	multiPartCache.OnEvicted = donut.evictedPart
	donut.multiPartObjects[uploadID] = multiPartCache
	donut.storedBuckets.Set(bucket, storedBucket)
	return uploadID, nil
}
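// Aside on the signature check above: the hex constant passed to
// DoesSignatureMatch is the SHA-256 digest of an empty payload. This standalone
// sketch only demonstrates that equivalence with the standard library; it is
// not part of the donut API.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte{})
	// Prints true: the empty-payload digest matches the constant used above.
	fmt.Println(hex.EncodeToString(sum[:]) == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
}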
// NewMultipartUpload - initiate a new multipart session
func (donut API) NewMultipartUpload(bucket, key, contentType string, signature *Signature) (string, error) {
	donut.lock.Lock()
	defer donut.lock.Unlock()

	if signature != nil {
		ok, err := signature.DoesSignatureMatch("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
		if err != nil {
			return "", iodine.New(err, nil)
		}
		if !ok {
			return "", iodine.New(SignatureDoesNotMatch{}, nil)
		}
	}
	if !IsValidBucket(bucket) {
		return "", iodine.New(BucketNameInvalid{Bucket: bucket}, nil)
	}
	if !IsValidObjectName(key) {
		return "", iodine.New(ObjectNameInvalid{Object: key}, nil)
	}
	if !donut.storedBuckets.Exists(bucket) {
		return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
	}
	storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
	objectKey := bucket + "/" + key
	if _, ok := storedBucket.objectMetadata[objectKey]; ok {
		return "", iodine.New(ObjectExists{Object: key}, nil)
	}

	id := []byte(strconv.FormatInt(rand.Int63(), 10) + bucket + key + time.Now().String())
	uploadIDSum := sha512.Sum512(id)
	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]

	storedBucket.multiPartSession[key] = MultiPartSession{
		uploadID:   uploadID,
		initiated:  time.Now(),
		totalParts: 0,
	}
	multiPartCache := data.NewCache(0)
	multiPartCache.OnEvicted = donut.evictedPart
	donut.multiPartObjects[uploadID] = multiPartCache
	donut.storedBuckets.Set(bucket, storedBucket)
	return uploadID, nil
}
// NewMultipartUpload - initiate a new multipart session
func (donut API) NewMultipartUpload(bucket, key, contentType string) (string, *probe.Error) {
	donut.lock.Lock()
	defer donut.lock.Unlock()

	if !IsValidBucket(bucket) {
		return "", probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(key) {
		return "", probe.NewError(ObjectNameInvalid{Object: key})
	}
	// if len(donut.config.NodeDiskMap) > 0 {
	// 	return donut.newMultipartUpload(bucket, key, contentType)
	// }
	if !donut.storedBuckets.Exists(bucket) {
		return "", probe.NewError(BucketNotFound{Bucket: bucket})
	}
	storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
	objectKey := bucket + "/" + key
	if _, ok := storedBucket.objectMetadata[objectKey]; ok {
		return "", probe.NewError(ObjectExists{Object: key})
	}

	id := []byte(strconv.Itoa(rand.Int()) + bucket + key + time.Now().UTC().String())
	uploadIDSum := sha512.Sum512(id)
	uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47]

	storedBucket.multiPartSession[key] = MultiPartSession{
		UploadID:   uploadID,
		Initiated:  time.Now().UTC(),
		TotalParts: 0,
	}
	storedBucket.partMetadata[key] = make(map[int]PartMetadata)
	multiPartCache := data.NewCache(0)
	multiPartCache.OnEvicted = donut.evictedPart
	donut.multiPartObjects[uploadID] = multiPartCache
	donut.storedBuckets.Set(bucket, storedBucket)
	return uploadID, nil
}
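// Upload-ID derivation, pulled out as an illustrative standalone sketch. It
// mirrors the scheme used by the in-memory variants above: hash a pseudo-random
// number, the bucket, the key and the current UTC time with SHA-512, URL-safe
// base64 encode the digest and keep the first 47 characters. The helper name
// newUploadID is hypothetical, not part of the donut package.
package main

import (
	"crypto/sha512"
	"encoding/base64"
	"fmt"
	"math/rand"
	"strconv"
	"time"
)

// newUploadID reproduces the upload-ID scheme from NewMultipartUpload above.
func newUploadID(bucket, key string) string {
	id := []byte(strconv.Itoa(rand.Int()) + bucket + key + time.Now().UTC().String())
	sum := sha512.Sum512(id)
	// The 64-byte digest encodes to 88 URL-safe characters; the first 47 are kept.
	return base64.URLEncoding.EncodeToString(sum[:])[:47]
}

func main() {
	fmt.Println(newUploadID("my-bucket", "my-object"))
}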