// mainCopy is bound to sub-command
func mainCopy(ctx *cli.Context) {
	checkCopySyntax(ctx)

	console.SetCustomTheme(map[string]*color.Color{
		"Copy": color.New(color.FgGreen, color.Bold),
	})

	session := newSessionV2()

	var e error
	session.Header.CommandType = "cp"
	session.Header.RootPath, e = os.Getwd()
	if e != nil {
		session.Delete()
		fatalIf(probe.NewError(e), "Unable to get current working folder.")
	}

	// extract URLs.
	var err *probe.Error
	session.Header.CommandArgs, err = args2URLs(ctx.Args())
	if err != nil {
		session.Delete()
		fatalIf(err.Trace(), "One or more unknown URL types passed.")
	}

	doCopyCmdSession(session)
	session.Delete()
}
func mainMirror(ctx *cli.Context) {
	checkMirrorSyntax(ctx)

	console.SetCustomTheme(map[string]*color.Color{
		"Mirror": color.New(color.FgGreen, color.Bold),
	})

	var e error
	session := newSessionV2()
	session.Header.CommandType = "mirror"
	session.Header.RootPath, e = os.Getwd()
	if e != nil {
		session.Close()
		session.Delete()
		fatalIf(probe.NewError(e), "Unable to get current working folder.")
	}

	// extract URLs.
	var err *probe.Error
	session.Header.CommandArgs, err = args2URLs(ctx.Args())
	if err != nil {
		session.Close()
		session.Delete()
		fatalIf(err.Trace(ctx.Args()...), fmt.Sprintf("One or more unknown argument types found in ‘%s’.", ctx.Args()))
	}

	doMirrorCmdSession(session)
	session.Close()
	session.Delete()
}
// errorIf is synonymous with fatalIf, but doesn't exit when err != nil.
func errorIf(err *probe.Error, msg string) {
	if err == nil {
		return
	}
	if globalJSONFlag {
		errorMessage := ErrorMessage{
			Message: msg,
			Type:    "error",
			Cause:   err.ToGoError(),
			SysInfo: err.SysInfo,
		}
		if globalDebugFlag {
			errorMessage.CallTrace = err.CallTrace
		}
		json, err := json.Marshal(struct {
			Error ErrorMessage `json:"error"`
		}{
			Error: errorMessage,
		})
		if err != nil {
			console.Fatalln(probe.NewError(err))
		}
		console.Println(string(json))
		return
	}
	if !globalDebugFlag {
		console.Errorln(fmt.Sprintf("%s %s", msg, err.ToGoError()))
		return
	}
	console.Errorln(fmt.Sprintf("%s %s", msg, err))
}
// mainCopy is bound to sub-command
func mainCopy(ctx *cli.Context) {
	checkCopySyntax(ctx)
	setCopyPalette(ctx.GlobalString("colors"))

	session := newSessionV2()

	var e error
	session.Header.CommandType = "cp"
	session.Header.RootPath, e = os.Getwd()
	if e != nil {
		session.Delete()
		fatalIf(probe.NewError(e), "Unable to get current working folder.")
	}

	// extract URLs.
	var err *probe.Error
	session.Header.CommandArgs, err = args2URLs(ctx.Args())
	if err != nil {
		session.Delete()
		fatalIf(err.Trace(), "One or more unknown URL types passed.")
	}

	doCopySession(session)
	session.Delete()
}
// getBucketMetadataReaders - readers are returned in a map rather than a slice
func (xl API) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) {
	readers := make(map[int]io.ReadCloser)
	disks := make(map[int]block.Block)
	var err *probe.Error
	for _, node := range xl.nodes {
		var nDisks map[int]block.Block
		nDisks, err = node.ListDisks()
		if err != nil {
			return nil, err.Trace()
		}
		for k, v := range nDisks {
			disks[k] = v
		}
	}
	var bucketMetaDataReader io.ReadCloser
	for order, disk := range disks {
		bucketMetaDataReader, err = disk.Open(filepath.Join(xl.config.XLName, bucketMetadataConfig))
		if err != nil {
			continue
		}
		readers[order] = bucketMetaDataReader
	}
	if err != nil {
		return nil, err.Trace()
	}
	return readers, nil
}
// fatalIf is a wrapper which takes an error and selectively prints stack frames if debug is enabled.
func fatalIf(err *probe.Error, msg string) {
	if err == nil {
		return
	}
	if globalJSON {
		errorMsg := errorMessage{
			Message: msg,
			Type:    "fatal",
			Cause: causeMessage{
				Message: err.ToGoError().Error(),
				Error:   err.ToGoError(),
			},
			SysInfo: err.SysInfo,
		}
		if globalDebug {
			errorMsg.CallTrace = err.CallTrace
		}
		json, e := json.Marshal(struct {
			Status string       `json:"status"`
			Error  errorMessage `json:"error"`
		}{
			Status: "error",
			Error:  errorMsg,
		})
		if e != nil {
			console.Fatalln(probe.NewError(e))
		}
		console.Println(string(json))
		console.Fatalln()
	}
	if !globalDebug {
		console.Fatalln(fmt.Sprintf("%s %s", msg, err.ToGoError()))
	}
	console.Fatalln(fmt.Sprintf("%s %s", msg, err))
}
func mainMirror(ctx *cli.Context) {
	checkMirrorSyntax(ctx)
	setMirrorPalette(ctx.GlobalString("colors"))

	var e error
	session := newSessionV2()
	session.Header.CommandType = "mirror"
	session.Header.RootPath, e = os.Getwd()
	if e != nil {
		session.Delete()
		fatalIf(probe.NewError(e), "Unable to get current working folder.")
	}

	// extract URLs.
	var err *probe.Error
	session.Header.CommandArgs, err = args2URLs(ctx.Args())
	if err != nil {
		session.Delete()
		fatalIf(err.Trace(ctx.Args()...), fmt.Sprintf("One or more unknown argument types found in ‘%s’.", ctx.Args()))
	}

	doMirrorSession(session)
	session.Delete()
}
// getObjectReaders - open a reader for the object slice on every disk that has it.
func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.ReadCloser, *probe.Error) {
	readers := make(map[int]io.ReadCloser)
	var disks map[int]disk.Disk
	var err *probe.Error
	nodeSlice := 0
	for _, node := range b.nodes {
		disks, err = node.ListDisks()
		if err != nil {
			return nil, err.Trace()
		}
		for order, disk := range disks {
			var objectSlice io.ReadCloser
			bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
			objectPath := filepath.Join(b.donutName, bucketSlice, objectName, objectMeta)
			objectSlice, err = disk.Open(objectPath)
			if err == nil {
				readers[order] = objectSlice
			}
		}
		nodeSlice++
	}
	if err != nil {
		return nil, err.Trace()
	}
	return readers, nil
}
// CompleteMultipartUploadHandler - Complete multipart upload
func (api API) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
		<-op.ProceedCh
	}

	vars := mux.Vars(req)
	bucket := vars["bucket"]
	object := vars["object"]

	objectResourcesMetadata := getObjectResources(req.URL.Query())

	var signature *signv4.Signature
	if !api.Anonymous {
		if _, ok := req.Header["Authorization"]; ok {
			// Init signature V4 verification
			var err *probe.Error
			signature, err = initSignatureV4(req)
			if err != nil {
				errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
				writeErrorResponse(w, req, InternalError, req.URL.Path)
				return
			}
		}
	}

	metadata, err := api.Donut.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, req.Body, signature)
	if err != nil {
		errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
		switch err.ToGoError().(type) {
		case donut.InvalidUploadID:
			writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
		case donut.InvalidPart:
			writeErrorResponse(w, req, InvalidPart, req.URL.Path)
		case donut.InvalidPartOrder:
			writeErrorResponse(w, req, InvalidPartOrder, req.URL.Path)
		case signv4.MissingDateHeader:
			writeErrorResponse(w, req, RequestTimeTooSkewed, req.URL.Path)
		case signv4.DoesNotMatch:
			writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
		case donut.IncompleteBody:
			writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
		case donut.MalformedXML:
			writeErrorResponse(w, req, MalformedXML, req.URL.Path)
		default:
			writeErrorResponse(w, req, InternalError, req.URL.Path)
		}
		return
	}
	response := generateCompleteMultpartUploadResponse(bucket, object, "", metadata.MD5Sum)
	encodedSuccessResponse := encodeSuccessResponse(response)
	// write headers
	setCommonHeaders(w, len(encodedSuccessResponse))
	// write body
	w.Write(encodedSuccessResponse)
}
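// The handlers in these snippets all enter a "Ticket master block": they send
// an APIOperation on api.OP and wait on ProceedCh before serving the request.
// The dispatcher that admits them is not shown here; the loop below is a
// minimal sketch of what it might look like (the function name is
// hypothetical), admitting one waiting handler at a time in arrival order.
func ticketMaster(op <-chan APIOperation) {
	for o := range op {
		// Closing ProceedCh unblocks the handler waiting on <-op.ProceedCh.
		close(o.ProceedCh)
	}
}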
// NewMultipartUploadHandler - New multipart upload
func (api MinioAPI) NewMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
		<-op.ProceedCh
	}

	acceptsContentType := getContentType(req)
	if !api.isValidOp(w, req, acceptsContentType) {
		return
	}

	if !isRequestUploads(req.URL.Query()) {
		writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path)
		return
	}

	var object, bucket string
	vars := mux.Vars(req)
	bucket = vars["bucket"]
	object = vars["object"]

	var signature *donut.Signature
	if _, ok := req.Header["Authorization"]; ok {
		// Init signature V4 verification
		var err *probe.Error
		signature, err = initSignatureV4(req)
		if err != nil {
			errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
			writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
			return
		}
	}

	uploadID, err := api.Donut.NewMultipartUpload(bucket, object, req.Header.Get("Content-Type"), signature)
	if err != nil {
		errorIf(err.Trace(), "NewMultipartUpload failed.", nil)
		switch err.ToGoError().(type) {
		case donut.SignatureDoesNotMatch:
			writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
		case donut.ObjectExists:
			writeErrorResponse(w, req, MethodNotAllowed, acceptsContentType, req.URL.Path)
		default:
			writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
		}
		return
	}
	response := generateInitiateMultipartUploadResponse(bucket, object, uploadID)
	encodedSuccessResponse := encodeSuccessResponse(response, acceptsContentType)
	// write headers
	setCommonHeaders(w, getContentTypeString(acceptsContentType), len(encodedSuccessResponse))
	// write body
	w.Write(encodedSuccessResponse)
}
// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api MinioAPI) HeadObjectHandler(w http.ResponseWriter, req *http.Request) {
	// ticket master block
	{
		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
		<-op.ProceedCh
	}

	acceptsContentType := getContentType(req)
	if !api.isValidOp(w, req, acceptsContentType) {
		return
	}

	var object, bucket string
	vars := mux.Vars(req)
	bucket = vars["bucket"]
	object = vars["object"]

	var signature *donut.Signature
	if _, ok := req.Header["Authorization"]; ok {
		// Init signature V4 verification
		var err *probe.Error
		signature, err = initSignatureV4(req)
		if err != nil {
			errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
			writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
			return
		}
	}

	metadata, err := api.Donut.GetObjectMetadata(bucket, object, signature)
	if err != nil {
		errorIf(err.Trace(), "GetObjectMetadata failed.", nil)
		switch err.ToGoError().(type) {
		case donut.SignatureDoesNotMatch:
			writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
		case donut.BucketNameInvalid:
			writeErrorResponse(w, req, InvalidBucketName, acceptsContentType, req.URL.Path)
		case donut.BucketNotFound:
			writeErrorResponse(w, req, NoSuchBucket, acceptsContentType, req.URL.Path)
		case donut.ObjectNotFound:
			writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path)
		case donut.ObjectNameInvalid:
			writeErrorResponse(w, req, NoSuchKey, acceptsContentType, req.URL.Path)
		default:
			writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
		}
		return
	}
	setObjectHeaders(w, metadata, nil)
	w.WriteHeader(http.StatusOK)
}
func fatalIf(err *probe.Error, msg string, fields map[string]interface{}) {
	if err == nil {
		return
	}
	if fields == nil {
		fields = make(map[string]interface{})
	}
	fields["error"] = err.ToGoError()
	if jsonErr, e := json.Marshal(err); e == nil {
		fields["probe"] = string(jsonErr)
	}
	log.WithFields(fields).Fatal(msg)
}
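// A hypothetical call site for the structured fatalIf above; the message and
// the field keys are illustrative only, not taken from the sources.
func exampleFatalIf() {
	if _, e := os.Getwd(); e != nil {
		fatalIf(probe.NewError(e), "Unable to get current working folder.",
			map[string]interface{}{"command": "session"})
	}
}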
// AbortMultipartUploadHandler - Abort multipart upload
func (api MinioAPI) AbortMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
		<-op.ProceedCh
	}

	acceptsContentType := getContentType(req)
	if !api.isValidOp(w, req, acceptsContentType) {
		return
	}

	vars := mux.Vars(req)
	bucket := vars["bucket"]
	object := vars["object"]

	objectResourcesMetadata := getObjectResources(req.URL.Query())

	var signature *donut.Signature
	if _, ok := req.Header["Authorization"]; ok {
		// Init signature V4 verification
		var err *probe.Error
		signature, err = initSignatureV4(req)
		if err != nil {
			errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
			writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
			return
		}
	}

	err := api.Donut.AbortMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, signature)
	if err != nil {
		errorIf(err.Trace(), "AbortMultipartUpload failed.", nil)
		switch err.ToGoError().(type) {
		case donut.SignatureDoesNotMatch:
			writeErrorResponse(w, req, SignatureDoesNotMatch, acceptsContentType, req.URL.Path)
		case donut.InvalidUploadID:
			writeErrorResponse(w, req, NoSuchUpload, acceptsContentType, req.URL.Path)
		default:
			writeErrorResponse(w, req, InternalError, acceptsContentType, req.URL.Path)
		}
		return
	}
	setCommonHeaders(w, getContentTypeString(acceptsContentType), 0)
	w.WriteHeader(http.StatusNoContent)
}
// ListObjects - GET bucket (list objects)
func (fs API) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
	if !IsValidBucket(bucket) {
		return nil, resources, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}
	if resources.Prefix != "" && !IsValidObjectName(resources.Prefix) {
		return nil, resources, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix})
	}

	p := bucketDir{}
	rootPrefix := filepath.Join(fs.path, bucket)
	// check bucket exists
	if _, err := os.Stat(rootPrefix); os.IsNotExist(err) {
		return nil, resources, probe.NewError(BucketNotFound{Bucket: bucket})
	}

	p.root = rootPrefix
	err := filepath.Walk(rootPrefix, p.getAllFiles)
	if err != nil {
		return nil, resources, probe.NewError(err)
	}

	var metadataList []ObjectMetadata
	var metadata ObjectMetadata

	// Populate filtering mode
	for _, content := range p.files {
		if len(metadataList) == resources.Maxkeys {
			resources.IsTruncated = true
			if resources.IsTruncated && resources.Delimiter != "" {
				resources.NextMarker = metadataList[len(metadataList)-1].Object
			}
			break
		}
		if content.Prefix > resources.Marker {
			var err *probe.Error
			metadata, resources, err = fs.filterObjects(bucket, content, resources)
			if err != nil {
				return nil, resources, err.Trace()
			}
			if metadata.Bucket != "" {
				metadataList = append(metadataList, metadata)
			}
		}
	}
	return metadataList, resources, nil
}
// listXLBuckets - list buckets from any available disk.
func (xl API) listXLBuckets() *probe.Error {
	var disks map[int]block.Block
	var err *probe.Error
	for _, node := range xl.nodes {
		disks, err = node.ListDisks()
		if err != nil {
			return err.Trace()
		}
	}
	var dirs []os.FileInfo
	for _, disk := range disks {
		dirs, err = disk.ListDir(xl.config.XLName)
		if err == nil {
			break
		}
	}
	// if all disks are missing then return error
	if err != nil {
		return err.Trace()
	}
	for _, dir := range dirs {
		splitDir := strings.Split(dir.Name(), "$")
		if len(splitDir) < 3 {
			return probe.NewError(CorruptedBackend{Backend: dir.Name()})
		}
		bucketName := splitDir[0]
		// we don't need this once we cache from makeXLBucket()
		bkt, _, err := newBucket(bucketName, "private", xl.config.XLName, xl.nodes)
		if err != nil {
			return err.Trace()
		}
		xl.buckets[bucketName] = bkt
	}
	return nil
}
// New instantiates a new donut
func New(path string) (CloudStorage, *probe.Error) {
	var err *probe.Error
	// load multiparts session from disk
	var multiparts *Multiparts
	multiparts, err = loadMultipartsSession()
	if err != nil {
		if os.IsNotExist(err.ToGoError()) {
			multiparts = &Multiparts{
				Version:       "1",
				ActiveSession: make(map[string]*MultipartSession),
			}
			if err := SaveMultipartsSession(multiparts); err != nil {
				return nil, err.Trace()
			}
		} else {
			return nil, err.Trace()
		}
	}
	a := API{
		path: path,
		lock: new(sync.Mutex),
	}
	a.multiparts = multiparts
	return a, nil
}
// getNewClient gives a new client interface
func getNewClient(urlStr string, auth hostConfig) (client.Client, *probe.Error) {
	url := client.NewURL(urlStr)
	switch url.Type {
	case client.Object: // Minio and S3 compatible cloud storage
		s3Config := new(client.Config)
		s3Config.AccessKeyID = func() string {
			if auth.AccessKeyID == globalAccessKeyID {
				return ""
			}
			return auth.AccessKeyID
		}()
		s3Config.SecretAccessKey = func() string {
			if auth.SecretAccessKey == globalSecretAccessKey {
				return ""
			}
			return auth.SecretAccessKey
		}()
		s3Config.AppName = "Minio"
		s3Config.AppVersion = globalMCVersion
		s3Config.AppComments = []string{os.Args[0], runtime.GOOS, runtime.GOARCH}
		s3Config.HostURL = urlStr
		s3Config.Debug = globalDebugFlag

		var s3Client client.Client
		var err *probe.Error
		if auth.API == "S3v2" {
			s3Client, err = s3v2.New(s3Config)
		} else {
			s3Client, err = s3v4.New(s3Config)
		}
		if err != nil {
			return nil, err.Trace()
		}
		return s3Client, nil
	case client.Filesystem:
		fsClient, err := fs.New(urlStr)
		if err != nil {
			return nil, err.Trace()
		}
		return fsClient, nil
	}
	return nil, errInitClient(urlStr).Trace()
}
// doList - list all entities inside a folder.
func doList(clnt client.Client, recursive, multipleArgs bool) *probe.Error {
	var err *probe.Error
	var parentContent *client.Content
	parentContent, err = clnt.Stat()
	if err != nil {
		return err.Trace(clnt.URL().String())
	}
	for contentCh := range clnt.List(recursive) {
		if contentCh.Err != nil {
			switch contentCh.Err.ToGoError().(type) {
			// handle this specifically for filesystem
			case client.BrokenSymlink:
				errorIf(contentCh.Err.Trace(), "Unable to list broken link.")
				continue
			case client.TooManyLevelsSymlink:
				errorIf(contentCh.Err.Trace(), "Unable to list too many levels link.")
				continue
			}
			if os.IsNotExist(contentCh.Err.ToGoError()) || os.IsPermission(contentCh.Err.ToGoError()) {
				if contentCh.Content != nil {
					if contentCh.Content.Type.IsDir() && (contentCh.Content.Type&os.ModeSymlink == os.ModeSymlink) {
						errorIf(contentCh.Err.Trace(), "Unable to list broken folder link.")
						continue
					}
				}
				errorIf(contentCh.Err.Trace(), "Unable to list.")
				continue
			}
			err = contentCh.Err.Trace()
			break
		}
		if multipleArgs && parentContent.Type.IsDir() {
			contentCh.Content.Name = filepath.Join(parentContent.Name,
				strings.TrimPrefix(contentCh.Content.Name, parentContent.Name))
		}
		Prints("%s\n", parseContent(contentCh.Content))
	}
	if err != nil {
		return err.Trace()
	}
	return nil
}
// getBucketMetadata - fetch bucket metadata from the first reader that decodes cleanly.
func (b bucket) getBucketMetadata() (*AllBuckets, *probe.Error) {
	metadata := new(AllBuckets)
	var readers map[int]io.ReadCloser
	{
		var err *probe.Error
		readers, err = b.getBucketMetadataReaders()
		if err != nil {
			return nil, err.Trace()
		}
	}
	for _, reader := range readers {
		defer reader.Close()
	}
	var err error
	for _, reader := range readers {
		jenc := json.NewDecoder(reader)
		if err = jenc.Decode(metadata); err == nil {
			return metadata, nil
		}
	}
	return nil, probe.NewError(err)
}
// getBucketMetadataReaders - open bucket metadata readers on all available disks.
func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) {
	readers := make(map[int]io.ReadCloser)
	var disks map[int]disk.Disk
	var err *probe.Error
	for _, node := range b.nodes {
		disks, err = node.ListDisks()
		if err != nil {
			return nil, err.Trace()
		}
	}
	var bucketMetaDataReader io.ReadCloser
	for order, disk := range disks {
		bucketMetaDataReader, err = disk.Open(filepath.Join(b.donutName, bucketMetadataConfig))
		if err != nil {
			continue
		}
		readers[order] = bucketMetaDataReader
	}
	if err != nil {
		return nil, err.Trace()
	}
	return readers, nil
}
// catURL displays contents of a URL to stdout.
func catURL(sourceURL string) *probe.Error {
	var reader io.Reader
	size := int64(-1)
	switch sourceURL {
	case "-":
		reader = os.Stdin
	default:
		var err *probe.Error
		client, content, err := url2Stat(sourceURL)
		if err != nil {
			return err.Trace(sourceURL)
		}
		// Ignore size for filesystem objects since os.Stat() would not
		// return proper size all the time, for example with /proc files.
		if client.GetURL().Type == objectStorage {
			size = content.Size
		}
		if reader, err = getSourceStream(sourceURL); err != nil {
			return err.Trace(sourceURL)
		}
	}
	return catOut(reader, size).Trace(sourceURL)
}
// doList - list all entities inside a folder.
func doList(clnt client.Client, recursive bool) *probe.Error {
	var err *probe.Error
	for contentCh := range clnt.List(recursive) {
		if contentCh.Err != nil {
			switch contentCh.Err.ToGoError().(type) {
			// handle this specifically for filesystem
			case client.ISBrokenSymlink:
				errorIf(contentCh.Err.Trace(), "Unable to list broken link.")
				continue
			}
			if os.IsNotExist(contentCh.Err.ToGoError()) || os.IsPermission(contentCh.Err.ToGoError()) {
				errorIf(contentCh.Err.Trace(), "Unable to list.")
				continue
			}
			err = contentCh.Err.Trace()
			break
		}
		console.Println(parseContent(contentCh.Content))
	}
	if err != nil {
		return err.Trace()
	}
	return nil
}
// getInheritedListeners - look for LISTEN_FDS in environment variables and populate listeners accordingly
func (n *minNet) getInheritedListeners() *probe.Error {
	var retErr *probe.Error
	n.inheritOnce.Do(func() {
		n.mutex.Lock()
		defer n.mutex.Unlock()
		countStr := os.Getenv(envCountKey)
		if countStr == "" {
			return
		}
		count, err := strconv.Atoi(countStr)
		if err != nil {
			retErr = probe.NewError(fmt.Errorf("found invalid count value: %s=%s", envCountKey, countStr))
			return
		}
		// inherited file descriptors begin after stdin, stdout and stderr
		fdStart := 3
		for i := fdStart; i < fdStart+count; i++ {
			file := os.NewFile(uintptr(i), "listener")
			l, err := net.FileListener(file)
			if err != nil {
				file.Close()
				retErr = probe.NewError(err)
				return
			}
			if err := file.Close(); err != nil {
				retErr = probe.NewError(err)
				return
			}
			n.inheritedListeners = append(n.inheritedListeners, l)
		}
	})
	if retErr != nil {
		return retErr.Trace()
	}
	return nil
}
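// For context, the parent side of this fd-inheritance scheme (not shown in the
// snippet above) would pass the listener files starting at descriptor 3 and
// export their count. A rough sketch, assuming envCountKey is the
// LISTEN_FDS-style variable read by getInheritedListeners; the function name
// is hypothetical and "os/exec" is required:
func startChildWithListeners(binary string, files []*os.File) error {
	cmd := exec.Command(binary)
	// ExtraFiles entries land at descriptors 3, 4, ... in the child process.
	cmd.ExtraFiles = files
	cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%d", envCountKey, len(files)))
	return cmd.Start()
}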
// New instantiates a new donut
func New() (Interface, *probe.Error) {
	var conf *Config
	var err *probe.Error
	conf, err = LoadConfig()
	if err != nil {
		conf = &Config{
			Version:     "0.0.1",
			MaxSize:     512000000,
			NodeDiskMap: nil,
			DonutName:   "",
		}
		if err := quick.CheckData(conf); err != nil {
			return nil, err.Trace()
		}
	}
	a := API{config: conf}
	a.storedBuckets = metadata.NewCache()
	a.nodes = make(map[string]node)
	a.buckets = make(map[string]bucket)
	a.objects = data.NewCache(a.config.MaxSize)
	a.multiPartObjects = make(map[string]*data.Cache)
	a.objects.OnEvicted = a.evictedObject
	a.lock = new(sync.Mutex)

	if len(a.config.NodeDiskMap) > 0 {
		for k, v := range a.config.NodeDiskMap {
			if len(v) == 0 {
				return nil, probe.NewError(InvalidDisksArgument{})
			}
			err := a.AttachNode(k, v)
			if err != nil {
				return nil, err.Trace()
			}
		}
		/// Initialization, populate all buckets into memory
		buckets, err := a.listBuckets()
		if err != nil {
			return nil, err.Trace()
		}
		for k, v := range buckets {
			var newBucket = storedBucket{}
			newBucket.bucketMetadata = v
			newBucket.objectMetadata = make(map[string]ObjectMetadata)
			newBucket.multiPartSession = make(map[string]MultiPartSession)
			newBucket.partMetadata = make(map[string]map[int]PartMetadata)
			a.storedBuckets.Set(k, newBucket)
		}
		a.Heal()
	}
	return a, nil
}
func isErrIgnored(err *probe.Error) (ignored bool) {
	// For all non critical errors we can continue for the remaining files.
	switch err.ToGoError().(type) {
	// Handle these specifically for filesystem related errors.
	case BrokenSymlink, TooManyLevelsSymlink, PathNotFound, PathInsufficientPermission:
		ignored = true
	// Handle these specifically for object storage related errors.
	case BucketNameEmpty, ObjectMissing, ObjectAlreadyExists:
		ignored = true
	case ObjectAlreadyExistsAsDirectory, BucketDoesNotExist, BucketInvalid, ObjectOnGlacier:
		ignored = true
	default:
		ignored = false
	}
	return ignored
}
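// A sketch of the intended call pattern for isErrIgnored: skip non-critical
// per-file errors and keep processing the remaining entries. The channel type
// and the process callback are hypothetical stand-ins for illustration.
func consumeContent(contentCh <-chan contentMessage, process func(contentMessage)) *probe.Error {
	for content := range contentCh {
		if content.Err != nil {
			if isErrIgnored(content.Err) {
				continue // non-critical, move on to the remaining files
			}
			return content.Err.Trace()
		}
		process(content)
	}
	return nil
}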
// Parses signature version '4' header of the following form.
//
//	Authorization: algorithm Credential=access key ID/credential scope, \
//	SignedHeaders=SignedHeaders, Signature=signature
//
func parseSignV4(v4Auth string) (signValues, *probe.Error) {
	// Replace all spaced strings, some clients can send spaced
	// parameters and some won't. So we pro-actively remove any spaces
	// to make parsing easier.
	v4Auth = strings.Replace(v4Auth, " ", "", -1)
	if v4Auth == "" {
		return signValues{}, ErrAuthHeaderEmpty("Auth header empty.").Trace(v4Auth)
	}

	// Verify if the header algorithm is supported or not.
	if !strings.HasPrefix(v4Auth, signV4Algorithm) {
		return signValues{}, ErrUnsuppSignAlgo("Unsupported algorithm in authorization header.", v4Auth).Trace(v4Auth)
	}

	// Strip off the Algorithm prefix.
	v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
	authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
	if len(authFields) != 3 {
		return signValues{}, ErrMissingFields("Missing fields in authorization header.", v4Auth).Trace(v4Auth)
	}

	// Initialize signature version '4' structured header.
	signV4Values := signValues{}

	var err *probe.Error
	// Save credential values.
	signV4Values.Creds, err = parseCredential(authFields[0])
	if err != nil {
		return signValues{}, err.Trace(v4Auth)
	}

	// Save signed headers.
	signV4Values.SignedHeaders, err = parseSignedHeaders(authFields[1])
	if err != nil {
		return signValues{}, err.Trace(v4Auth)
	}

	// Save signature.
	signV4Values.Signature, err = parseSignature(authFields[2])
	if err != nil {
		return signValues{}, err.Trace(v4Auth)
	}

	// Return the structure here.
	return signV4Values, nil
}
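// For reference, a concrete Authorization header in the shape parseSignV4
// expects might look like this (the credential and signature values are
// fabricated for illustration):
//
//	Authorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,
//		SignedHeaders=host;x-amz-content-sha256;x-amz-date,
//		Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024
//
// A minimal sketch of feeding it from an incoming request; the wrapper name
// below is hypothetical:
func exampleParseAuthHeader(req *http.Request) (signValues, *probe.Error) {
	return parseSignV4(req.Header.Get("Authorization"))
}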
// Parses signature version '4' query string of the following form.
//
//	querystring = X-Amz-Algorithm=algorithm
//	querystring += &X-Amz-Credential=urlencode(access_key_ID + '/' + credential_scope)
//	querystring += &X-Amz-Date=date
//	querystring += &X-Amz-Expires=timeout interval
//	querystring += &X-Amz-SignedHeaders=signed_headers
//	querystring += &X-Amz-Signature=signature
//
func parsePreSignV4(query url.Values) (preSignValues, *probe.Error) {
	// Verify if the query algorithm is supported or not.
	if query.Get("X-Amz-Algorithm") != signV4Algorithm {
		return preSignValues{}, ErrUnsuppSignAlgo("Unsupported algorithm in query string.", query.Get("X-Amz-Algorithm"))
	}

	// Initialize signature version '4' structured header.
	preSignV4Values := preSignValues{}

	var err *probe.Error
	// Save credential values.
	preSignV4Values.Creds, err = parseCredential("Credential=" + query.Get("X-Amz-Credential"))
	if err != nil {
		return preSignValues{}, err.Trace(query.Get("X-Amz-Credential"))
	}

	var e error
	// Save date in native time.Time.
	preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
	if e != nil {
		return preSignValues{}, ErrMalformedDate("Malformed date string.", query.Get("X-Amz-Date")).Trace(query.Get("X-Amz-Date"))
	}

	// Save expires in native time.Duration.
	preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
	if e != nil {
		return preSignValues{}, ErrMalformedExpires("Malformed expires string.", query.Get("X-Amz-Expires")).Trace(query.Get("X-Amz-Expires"))
	}

	// Save signed headers.
	preSignV4Values.SignedHeaders, err = parseSignedHeaders("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
	if err != nil {
		return preSignValues{}, err.Trace(query.Get("X-Amz-SignedHeaders"))
	}

	// Save signature.
	preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
	if err != nil {
		return preSignValues{}, err.Trace(query.Get("X-Amz-Signature"))
	}

	// Return structured form of signature query string.
	return preSignV4Values, nil
}
// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
func (api API) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
		<-op.ProceedCh
	}

	if !api.isValidOp(w, req) {
		return
	}

	var object, bucket string
	vars := mux.Vars(req)
	bucket = vars["bucket"]
	object = vars["object"]

	// get Content-MD5 sent by client and verify if valid
	md5 := req.Header.Get("Content-MD5")
	if !isValidMD5(md5) {
		writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
		return
	}
	/// if Content-Length missing, deny the request
	size := req.Header.Get("Content-Length")
	if size == "" {
		writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
		return
	}
	/// maximum Upload size for objects in a single operation
	if isMaxObjectSize(size) {
		writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
		return
	}
	/// minimum Upload size for objects in a single operation
	//
	// Surprisingly, while Amazon's documentation states that S3 objects
	// have a 1-byte minimum size, they do not seem to enforce it; one can
	// successfully create a 0-byte object using a regular putObject()
	// operation.
	//
	// if isMinObjectSize(size) {
	// 	writeErrorResponse(w, req, EntityTooSmall, req.URL.Path)
	// 	return
	// }
	var sizeInt64 int64
	{
		var err error
		sizeInt64, err = strconv.ParseInt(size, 10, 64)
		if err != nil {
			writeErrorResponse(w, req, InvalidRequest, req.URL.Path)
			return
		}
	}

	var signature *signv4.Signature
	if !api.Anonymous {
		if _, ok := req.Header["Authorization"]; ok {
			// Init signature V4 verification
			var err *probe.Error
			signature, err = initSignatureV4(req)
			if err != nil {
				errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
				writeErrorResponse(w, req, InternalError, req.URL.Path)
				return
			}
		}
	}

	metadata, err := api.Donut.CreateObject(bucket, object, md5, sizeInt64, req.Body, nil, signature)
	if err != nil {
		errorIf(err.Trace(), "CreateObject failed.", nil)
		switch err.ToGoError().(type) {
		case donut.BucketNotFound:
			writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
		case donut.BucketNameInvalid:
			writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
		case donut.ObjectExists:
			writeErrorResponse(w, req, MethodNotAllowed, req.URL.Path)
		case donut.BadDigest:
			writeErrorResponse(w, req, BadDigest, req.URL.Path)
		case signv4.MissingDateHeader:
			writeErrorResponse(w, req, RequestTimeTooSkewed, req.URL.Path)
		case signv4.DoesNotMatch:
			writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
		case donut.IncompleteBody:
			writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
		case donut.EntityTooLarge:
			writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
		case donut.InvalidDigest:
			writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
		default:
			writeErrorResponse(w, req, InternalError, req.URL.Path)
		}
		return
	}
	w.Header().Set("ETag", metadata.MD5Sum)
	writeSuccessResponse(w)
}
func (s rpcSignatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var signature *rpcSignature
	if isRequestSignatureRPC(r) {
		// Init signature V4 verification
		var err *probe.Error
		signature, err = initSignatureRPC(r)
		if err != nil {
			switch err.ToGoError() {
			case errInvalidRegion:
				errorIf(err.Trace(), "Unknown region in authorization header.", nil)
				writeErrorResponse(w, r, AuthorizationHeaderMalformed, r.URL.Path)
				return
			case errAccessKeyIDInvalid:
				errorIf(err.Trace(), "Invalid access key id.", nil)
				writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
				return
			default:
				errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
				writeErrorResponse(w, r, InternalError, r.URL.Path)
				return
			}
		}
		buffer := new(bytes.Buffer)
		if _, err := io.Copy(buffer, r.Body); err != nil {
			errorIf(probe.NewError(err), "Unable to read payload from request body.", nil)
			writeErrorResponse(w, r, InternalError, r.URL.Path)
			return
		}
		value := sha256.Sum256(buffer.Bytes())
		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(value[:]))
		if err != nil {
			errorIf(err.Trace(), "Unable to verify signature.", nil)
			writeErrorResponse(w, r, InternalError, r.URL.Path)
			return
		}
		if !ok {
			writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
			return
		}
		s.handler.ServeHTTP(w, r)
		return
	}
	writeErrorResponse(w, r, AccessDenied, r.URL.Path)
}
// PutObjectPartHandler - Upload part
func (api API) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
	// Ticket master block
	{
		op := APIOperation{}
		op.ProceedCh = make(chan struct{})
		api.OP <- op
		// block until Ticket master gives us a go
		<-op.ProceedCh
	}

	if !api.isValidOp(w, req) {
		return
	}

	// get Content-MD5 sent by client and verify if valid
	md5 := req.Header.Get("Content-MD5")
	if !isValidMD5(md5) {
		writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
		return
	}
	/// if Content-Length missing, throw away
	size := req.Header.Get("Content-Length")
	if size == "" {
		writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
		return
	}
	/// maximum Upload size for multipart objects in a single operation
	if isMaxObjectSize(size) {
		writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
		return
	}
	var sizeInt64 int64
	{
		var err error
		sizeInt64, err = strconv.ParseInt(size, 10, 64)
		if err != nil {
			writeErrorResponse(w, req, InvalidRequest, req.URL.Path)
			return
		}
	}

	vars := mux.Vars(req)
	bucket := vars["bucket"]
	object := vars["object"]

	uploadID := req.URL.Query().Get("uploadId")
	partIDString := req.URL.Query().Get("partNumber")

	var partID int
	{
		var err error
		partID, err = strconv.Atoi(partIDString)
		if err != nil {
			writeErrorResponse(w, req, InvalidPart, req.URL.Path)
			return
		}
	}

	var signature *signv4.Signature
	if !api.Anonymous {
		if _, ok := req.Header["Authorization"]; ok {
			// Init signature V4 verification
			var err *probe.Error
			signature, err = initSignatureV4(req)
			if err != nil {
				errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
				writeErrorResponse(w, req, InternalError, req.URL.Path)
				return
			}
		}
	}

	calculatedMD5, err := api.Donut.CreateObjectPart(bucket, object, uploadID, partID, "", md5, sizeInt64, req.Body, signature)
	if err != nil {
		errorIf(err.Trace(), "CreateObjectPart failed.", nil)
		switch err.ToGoError().(type) {
		case donut.InvalidUploadID:
			writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
		case donut.ObjectExists:
			writeErrorResponse(w, req, MethodNotAllowed, req.URL.Path)
		case donut.BadDigest:
			writeErrorResponse(w, req, BadDigest, req.URL.Path)
		case signv4.DoesNotMatch:
			writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
		case donut.IncompleteBody:
			writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
		case donut.EntityTooLarge:
			writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
		case donut.InvalidDigest:
			writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
		default:
			writeErrorResponse(w, req, InternalError, req.URL.Path)
		}
		return
	}
	w.Header().Set("ETag", calculatedMD5)
	writeSuccessResponse(w)
}