// getStringToSign constructs the signature V4 string to sign: the algorithm prefix, the request
// timestamp, the credential scope and the hex-encoded SHA-256 of the canonical request.
func (r rpcSignature) getStringToSign(canonicalRequest string, t time.Time) string {
	stringToSign := rpcAuthHeaderPrefix + "\n" + t.Format(iso8601Format) + "\n"
	stringToSign = stringToSign + r.getScope(t) + "\n"
	canonicalReqHashBytes := sha256.Sum256([]byte(canonicalRequest))
	stringToSign = stringToSign + hex.EncodeToString(canonicalReqHashBytes[:])
	return stringToSign
}
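// The string to sign is only half of signature V4: it is then MACed with a key derived from the
// secret access key, the request date, the region and the service, and the hex-encoded result is
// the signature carried in the Authorization header. The helpers below are a minimal illustrative
// sketch of that derivation (using crypto/hmac, crypto/sha256, encoding/hex and time); the
// function names and the "minio" service string are assumptions for the example, not this
// package's API.
func sumHMAC(key []byte, data []byte) []byte {
	hash := hmac.New(sha256.New, key)
	hash.Write(data)
	return hash.Sum(nil)
}

// getSigningKey chains HMAC-SHA256 over date, region and service, terminated with "aws4_request",
// as described by the signature V4 specification.
func getSigningKey(secretKey, region string, t time.Time) []byte {
	dateKey := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format("20060102")))
	regionKey := sumHMAC(dateKey, []byte(region))
	serviceKey := sumHMAC(regionKey, []byte("minio")) // service name assumed for the sketch
	return sumHMAC(serviceKey, []byte("aws4_request"))
}

// getSignature is the final step: hex-encoded HMAC-SHA256 of the string to sign under the signing key.
func getSignature(signingKey []byte, stringToSign string) string {
	return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}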
// MakeBucket - create bucket in cache.
func (xl API) MakeBucket(bucketName, acl string, location io.Reader, signature *signv4.Signature) *probe.Error {
	xl.lock.Lock()
	defer xl.lock.Unlock()

	// The location constraint does not need to be parsed here; its payload is read only for
	// signature verification. Default to the SHA-256 of an empty payload.
	locationSum := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	if location != nil {
		locationConstraintBytes, err := ioutil.ReadAll(location)
		if err != nil {
			return probe.NewError(InternalError{})
		}
		locationConstraintSum := sha256.Sum256(locationConstraintBytes)
		locationSum = hex.EncodeToString(locationConstraintSum[:])
	}
	if signature != nil {
		ok, err := signature.DoesSignatureMatch(locationSum)
		if err != nil {
			return err.Trace()
		}
		if !ok {
			return probe.NewError(signv4.DoesNotMatch{})
		}
	}

	if xl.storedBuckets.Stats().Items == totalBuckets {
		return probe.NewError(TooManyBuckets{Bucket: bucketName})
	}
	if !IsValidBucket(bucketName) {
		return probe.NewError(BucketNameInvalid{Bucket: bucketName})
	}
	if !IsValidBucketACL(acl) {
		return probe.NewError(InvalidACL{ACL: acl})
	}
	if xl.storedBuckets.Exists(bucketName) {
		return probe.NewError(BucketExists{Bucket: bucketName})
	}
	if strings.TrimSpace(acl) == "" {
		// Default to a private ACL.
		acl = "private"
	}
	if len(xl.config.NodeDiskMap) > 0 {
		if err := xl.makeBucket(bucketName, BucketACL(acl)); err != nil {
			return err.Trace()
		}
	}
	var newBucket = storedBucket{}
	newBucket.objectMetadata = make(map[string]ObjectMetadata)
	newBucket.multiPartSession = make(map[string]MultiPartSession)
	newBucket.partMetadata = make(map[string]map[int]PartMetadata)
	newBucket.bucketMetadata = BucketMetadata{}
	newBucket.bucketMetadata.Name = bucketName
	newBucket.bucketMetadata.Created = time.Now().UTC()
	newBucket.bucketMetadata.ACL = BucketACL(acl)
	xl.storedBuckets.Set(bucketName, newBucket)
	return nil
}
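// The hard-coded locationSum above is the well-known hex-encoded SHA-256 of an empty payload,
// which is what signature V4 hashes when a request carries no body. A standalone sketch showing
// the relationship; it is example code, not part of the API above.
func emptyPayloadSHA256Hex() string {
	sum := sha256.Sum256([]byte{}) // SHA-256 over zero bytes
	// Returns "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855".
	return hex.EncodeToString(sum[:])
}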
// completeMultipartUploadV2 completes an in-memory multipart upload and returns a reader over
// the merged object.
func (donut API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *signv4.Signature) (io.Reader, *probe.Error) {
	if !IsValidBucket(bucket) {
		return nil, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if !IsValidObjectName(key) {
		return nil, probe.NewError(ObjectNameInvalid{Object: key})
	}

	// TODO: multipart support for donut is broken, since we haven't finalized the format in which
	// it can be stored, disabling this for now until we get the underlying layout stable.
	//
	// if len(donut.config.NodeDiskMap) > 0 {
	//	donut.lock.Unlock()
	//	return donut.completeMultipartUpload(bucket, key, uploadID, data, signature)
	// }

	if !donut.storedBuckets.Exists(bucket) {
		return nil, probe.NewError(BucketNotFound{Bucket: bucket})
	}
	storedBucket := donut.storedBuckets.Get(bucket).(storedBucket)
	// Verify upload id.
	if storedBucket.multiPartSession[key].UploadID != uploadID {
		return nil, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}
	partBytes, err := ioutil.ReadAll(data)
	if err != nil {
		return nil, probe.NewError(err)
	}
	if signature != nil {
		partHashBytes := sha256.Sum256(partBytes)
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(partHashBytes[:]))
		if err != nil {
			return nil, err.Trace()
		}
		if !ok {
			return nil, probe.NewError(signv4.DoesNotMatch{})
		}
	}
	parts := &CompleteMultipartUpload{}
	if err := xml.Unmarshal(partBytes, parts); err != nil {
		return nil, probe.NewError(MalformedXML{})
	}
	if !sort.IsSorted(completedParts(parts.Part)) {
		return nil, probe.NewError(InvalidPartOrder{})
	}

	fullObjectReader, fullObjectWriter := io.Pipe()
	go donut.mergeMultipart(parts, uploadID, fullObjectWriter)

	return fullObjectReader, nil
}
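// The xml.Unmarshal call above expects the S3 CompleteMultipartUpload request body, roughly:
//
//	<CompleteMultipartUpload>
//	  <Part><PartNumber>1</PartNumber><ETag>"etag-of-part-1"</ETag></Part>
//	  <Part><PartNumber>2</PartNumber><ETag>"etag-of-part-2"</ETag></Part>
//	</CompleteMultipartUpload>
//
// The types below are an illustrative sketch of what CompleteMultipartUpload and the
// completedParts wrapper passed to sort.IsSorted could look like; the package's real definitions
// may differ in field names and XML tags.
type CompletePart struct {
	PartNumber int
	ETag       string
}

type CompleteMultipartUpload struct {
	Part []CompletePart
}

// completedParts orders parts by part number so sort.IsSorted can reject out-of-order lists.
type completedParts []CompletePart

func (a completedParts) Len() int           { return len(a) }
func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }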
func (s rpcSignatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var signature *rpcSignature
	if isRequestSignatureRPC(r) {
		// Init signature V4 verification
		var err *probe.Error
		signature, err = initSignatureRPC(r)
		if err != nil {
			switch err.ToGoError() {
			case errInvalidRegion:
				errorIf(err.Trace(), "Unknown region in authorization header.", nil)
				writeErrorResponse(w, r, AuthorizationHeaderMalformed, r.URL.Path)
				return
			case errAccessKeyIDInvalid:
				errorIf(err.Trace(), "Invalid access key id.", nil)
				writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
				return
			default:
				errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
				writeErrorResponse(w, r, InternalError, r.URL.Path)
				return
			}
		}
		buffer := new(bytes.Buffer)
		if _, err := io.Copy(buffer, r.Body); err != nil {
			errorIf(probe.NewError(err), "Unable to read payload from request body.", nil)
			writeErrorResponse(w, r, InternalError, r.URL.Path)
			return
		}
		value := sha256.Sum256(buffer.Bytes())
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(value[:]))
		if err != nil {
			errorIf(err.Trace(), "Unable to verify signature.", nil)
			writeErrorResponse(w, r, InternalError, r.URL.Path)
			return
		}
		if !ok {
			writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
			return
		}
		// Copy the buffer back into request body to be read by the RPC service callers
		r.Body = ioutil.NopCloser(buffer)
		s.handler.ServeHTTP(w, r)
	} else {
		writeErrorResponse(w, r, AccessDenied, r.URL.Path)
	}
}
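// rpcSignatureHandler follows the standard http.Handler middleware pattern: it wraps an inner
// handler and forwards only requests whose signature verifies. A minimal wiring sketch, assuming
// the struct can be constructed with just the handler field visible in ServeHTTP above and that
// no other fields are required; the real type may need a proper constructor.
func exampleServeRPCWithSignature(inner http.Handler) error {
	verified := rpcSignatureHandler{handler: inner}
	// Every request that reaches `inner` has had its body's SHA-256 checked against its signature.
	return http.ListenAndServe(":9000", verified)
}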
func (s signatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if isRequestPostPolicySignatureV4(r) && r.Method == "POST" {
		s.handler.ServeHTTP(w, r)
		return
	}
	var signature *signv4.Signature
	if isRequestSignatureV4(r) {
		// PUT and POST requests that carry a payload are verified upstream, where the body is
		// available to hash. PUT and POST requests without a body, and all other methods, are
		// verified here against the SHA-256 of an empty payload.
		if (r.Body == nil && (r.Method == "PUT" || r.Method == "POST")) || (r.Method != "PUT" && r.Method != "POST") {
			// Init signature V4 verification
			var err *probe.Error
			signature, err = initSignatureV4(r)
			if err != nil {
				switch err.ToGoError() {
				case errInvalidRegion:
					errorIf(err.Trace(), "Unknown region in authorization header.", nil)
					writeErrorResponse(w, r, AuthorizationHeaderMalformed, r.URL.Path)
					return
				case errAccessKeyIDInvalid:
					errorIf(err.Trace(), "Invalid access key id.", nil)
					writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
					return
				default:
					errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
					writeErrorResponse(w, r, InternalError, r.URL.Path)
					return
				}
			}
			emptyPayloadSum := sha256.Sum256([]byte(""))
			ok, err := signature.DoesSignatureMatch(hex.EncodeToString(emptyPayloadSum[:]))
			if err != nil {
				errorIf(err.Trace(), "Unable to verify signature.", nil)
				writeErrorResponse(w, r, InternalError, r.URL.Path)
				return
			}
			if !ok {
				writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
				return
			}
		}
		s.handler.ServeHTTP(w, r)
		return
	}
	if isRequestPresignedSignatureV4(r) {
		var err *probe.Error
		signature, err = initPresignedSignatureV4(r)
		if err != nil {
			switch err.ToGoError() {
			case errAccessKeyIDInvalid:
				errorIf(err.Trace(), "Invalid access key id requested.", nil)
				writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
				return
			default:
				errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
				writeErrorResponse(w, r, InternalError, r.URL.Path)
				return
			}
		}
		ok, err := signature.DoesPresignedSignatureMatch()
		if err != nil {
			errorIf(err.Trace(), "Unable to verify signature.", nil)
			writeErrorResponse(w, r, InternalError, r.URL.Path)
			return
		}
		if !ok {
			writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
			return
		}
		s.handler.ServeHTTP(w, r)
		return
	}
	writeErrorResponse(w, r, AccessDenied, r.URL.Path)
}
// completeMultipartUpload completes an in-progress multipart upload.
func (xl API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signv4.Signature) (ObjectMetadata, *probe.Error) {
	if bucket == "" || strings.TrimSpace(bucket) == "" {
		return ObjectMetadata{}, probe.NewError(InvalidArgument{})
	}
	if object == "" || strings.TrimSpace(object) == "" {
		return ObjectMetadata{}, probe.NewError(InvalidArgument{})
	}
	if err := xl.listXLBuckets(); err != nil {
		return ObjectMetadata{}, err.Trace()
	}
	if _, ok := xl.buckets[bucket]; !ok {
		return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket})
	}
	allBuckets, err := xl.getXLBucketMetadata()
	if err != nil {
		return ObjectMetadata{}, err.Trace()
	}
	bucketMetadata := allBuckets.Buckets[bucket]
	if _, ok := bucketMetadata.Multiparts[object]; !ok {
		return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}
	if bucketMetadata.Multiparts[object].UploadID != uploadID {
		return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID})
	}

	var partBytes []byte
	{
		var err error
		partBytes, err = ioutil.ReadAll(data)
		if err != nil {
			return ObjectMetadata{}, probe.NewError(err)
		}
	}
	if signature != nil {
		partHashBytes := sha256.Sum256(partBytes)
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(partHashBytes[:]))
		if err != nil {
			return ObjectMetadata{}, err.Trace()
		}
		if !ok {
			return ObjectMetadata{}, probe.NewError(signv4.DoesNotMatch{})
		}
	}
	parts := &CompleteMultipartUpload{}
	if err := xml.Unmarshal(partBytes, parts); err != nil {
		return ObjectMetadata{}, probe.NewError(MalformedXML{})
	}
	if !sort.IsSorted(completedParts(parts.Part)) {
		return ObjectMetadata{}, probe.NewError(InvalidPartOrder{})
	}
	// Verify that each part's ETag matches what was recorded when the part was uploaded.
	for _, part := range parts.Part {
		if strings.Trim(part.ETag, "\"") != bucketMetadata.Multiparts[object].Parts[strconv.Itoa(part.PartNumber)].ETag {
			return ObjectMetadata{}, probe.NewError(InvalidPart{})
		}
	}

	var finalETagBytes []byte
	var finalSize int64
	totalParts := strconv.Itoa(bucketMetadata.Multiparts[object].TotalParts)
	for _, part := range bucketMetadata.Multiparts[object].Parts {
		partETagBytes, err := hex.DecodeString(part.ETag)
		if err != nil {
			return ObjectMetadata{}, probe.NewError(err)
		}
		finalETagBytes = append(finalETagBytes, partETagBytes...)
		finalSize += part.Size
	}
	finalETag := hex.EncodeToString(finalETagBytes)
	objMetadata := ObjectMetadata{}
	objMetadata.MD5Sum = finalETag + "-" + totalParts
	objMetadata.Object = object
	objMetadata.Bucket = bucket
	objMetadata.Size = finalSize
	objMetadata.Created = bucketMetadata.Multiparts[object].Parts[totalParts].LastModified
	return objMetadata, nil
}
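// A client-side sketch of producing the body completeMultipartUpload consumes: marshal the parts
// in ascending PartNumber order, since the sort.IsSorted check above rejects anything else. The
// CompletePart and CompleteMultipartUpload types are the illustrative ones sketched earlier; a
// real client would use the package's own definitions and XML tags.
func buildCompleteMultipartBody(parts []CompletePart) (io.Reader, error) {
	sort.Slice(parts, func(i, j int) bool { return parts[i].PartNumber < parts[j].PartNumber })
	body, err := xml.Marshal(CompleteMultipartUpload{Part: parts})
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(body), nil
}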