func mainMirror(ctx *cli.Context) {
    checkMirrorSyntax(ctx)

    // Additional command specific theme customization.
    console.SetColor("Mirror", color.New(color.FgGreen, color.Bold))

    var e error
    session := newSessionV3()
    session.Header.CommandType = "mirror"
    session.Header.RootPath, e = os.Getwd()
    if e != nil {
        session.Delete()
        fatalIf(probe.NewError(e), "Unable to get current working folder.")
    }

    // If the force flag is set, save it within the session.
    session.Header.CommandBoolFlag.Key = "force"
    session.Header.CommandBoolFlag.Value = ctx.Bool("force")

    // extract URLs.
    var err *probe.Error
    session.Header.CommandArgs, err = args2URLs(ctx.Args())
    if err != nil {
        session.Delete()
        fatalIf(err.Trace(ctx.Args()...), fmt.Sprintf("One or more unknown argument types found in ‘%s’.", ctx.Args()))
    }

    doMirrorSession(session)
    session.Delete()
}
// CompleteMultipartUploadHandler - Complete multipart upload
func (api API) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
    // Ticket master block
    {
        op := APIOperation{}
        op.ProceedCh = make(chan struct{})
        api.OP <- op
        // block until Ticket master gives us a go
        <-op.ProceedCh
    }

    vars := mux.Vars(req)
    bucket := vars["bucket"]
    object := vars["object"]

    objectResourcesMetadata := getObjectResources(req.URL.Query())

    var signature *signv4.Signature
    if !api.Anonymous {
        if _, ok := req.Header["Authorization"]; ok {
            // Init signature V4 verification
            var err *probe.Error
            signature, err = initSignatureV4(req)
            if err != nil {
                errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                writeErrorResponse(w, req, InternalError, req.URL.Path)
                return
            }
        }
    }

    metadata, err := api.XL.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, req.Body, signature)
    if err != nil {
        errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
        switch err.ToGoError().(type) {
        case xl.InvalidUploadID:
            writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
        case xl.InvalidPart:
            writeErrorResponse(w, req, InvalidPart, req.URL.Path)
        case xl.InvalidPartOrder:
            writeErrorResponse(w, req, InvalidPartOrder, req.URL.Path)
        case signv4.MissingDateHeader:
            writeErrorResponse(w, req, RequestTimeTooSkewed, req.URL.Path)
        case signv4.DoesNotMatch:
            writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
        case xl.IncompleteBody:
            writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
        case xl.MalformedXML:
            writeErrorResponse(w, req, MalformedXML, req.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
        }
        return
    }
    response := generateCompleteMultpartUploadResponse(bucket, object, "", metadata.MD5Sum)
    encodedSuccessResponse := encodeSuccessResponse(response)
    // write headers
    setCommonHeaders(w, len(encodedSuccessResponse))
    // write body
    w.Write(encodedSuccessResponse)
}
// getObjectReaders - open one reader per disk slice for the given object entry.
func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.ReadCloser, *probe.Error) {
    readers := make(map[int]io.ReadCloser)
    var disks map[int]disk.Disk
    var err *probe.Error
    nodeSlice := 0
    for _, node := range b.nodes {
        disks, err = node.ListDisks()
        if err != nil {
            return nil, err.Trace()
        }
        for order, disk := range disks {
            var objectSlice io.ReadCloser
            bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
            objectPath := filepath.Join(b.donutName, bucketSlice, objectName, objectMeta)
            objectSlice, err = disk.Open(objectPath)
            if err == nil {
                readers[order] = objectSlice
            }
        }
        nodeSlice = nodeSlice + 1
    }
    if err != nil {
        return nil, err.Trace()
    }
    return readers, nil
}
func mainMirror(ctx *cli.Context) {
    checkMirrorSyntax(ctx)

    setMirrorPalette(ctx.GlobalString("colors"))

    var e error
    session := newSessionV2()
    session.Header.CommandType = "mirror"
    session.Header.RootPath, e = os.Getwd()
    if e != nil {
        session.Delete()
        fatalIf(probe.NewError(e), "Unable to get current working folder.")
    }

    // extract URLs.
    var err *probe.Error
    session.Header.CommandArgs, err = args2URLs(ctx.Args())
    if err != nil {
        session.Delete()
        fatalIf(err.Trace(ctx.Args()...), fmt.Sprintf("One or more unknown argument types found in ‘%s’.", ctx.Args()))
    }

    doMirrorSession(session)
    session.Delete()
}
// mainCopy is bound to the 'cp' sub-command.
func mainCopy(ctx *cli.Context) {
    checkCopySyntax(ctx)

    setCopyPalette(ctx.GlobalString("colors"))

    session := newSessionV2()

    var e error
    session.Header.CommandType = "cp"
    session.Header.RootPath, e = os.Getwd()
    if e != nil {
        session.Delete()
        fatalIf(probe.NewError(e), "Unable to get current working folder.")
    }

    // extract URLs.
    var err *probe.Error
    session.Header.CommandArgs, err = args2URLs(ctx.Args())
    if err != nil {
        session.Delete()
        fatalIf(err.Trace(), "One or more unknown URL types passed.")
    }

    doCopySession(session)
    session.Delete()
}
// getBucketMetadataReaders - readers are returned in a map rather than a slice.
func (xl API) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) {
    readers := make(map[int]io.ReadCloser)
    disks := make(map[int]disk.Disk)
    var err *probe.Error
    for _, node := range xl.nodes {
        nDisks := make(map[int]disk.Disk)
        nDisks, err = node.ListDisks()
        if err != nil {
            return nil, err.Trace()
        }
        for k, v := range nDisks {
            disks[k] = v
        }
    }
    var bucketMetaDataReader io.ReadCloser
    for order, disk := range disks {
        bucketMetaDataReader, err = disk.Open(filepath.Join(xl.config.XLName, bucketMetadataConfig))
        if err != nil {
            continue
        }
        readers[order] = bucketMetaDataReader
    }
    if err != nil {
        return nil, err.Trace()
    }
    return readers, nil
}
// New instantiates a new donut.
func New(rootPath string, minFreeDisk int64) (Filesystem, *probe.Error) {
    setFSBucketsMetadataPath(filepath.Join(rootPath, "$buckets.json"))
    setFSMultipartsMetadataPath(filepath.Join(rootPath, "$multiparts-session.json"))

    var err *probe.Error
    // load multiparts session from disk
    var multiparts *Multiparts
    multiparts, err = loadMultipartsSession()
    if err != nil {
        if os.IsNotExist(err.ToGoError()) {
            multiparts = &Multiparts{
                Version:       "1",
                ActiveSession: make(map[string]*MultipartSession),
            }
            if err := saveMultipartsSession(*multiparts); err != nil {
                return Filesystem{}, err.Trace()
            }
        } else {
            return Filesystem{}, err.Trace()
        }
    }

    var buckets *Buckets
    buckets, err = loadBucketsMetadata()
    if err != nil {
        if os.IsNotExist(err.ToGoError()) {
            buckets = &Buckets{
                Version:  "1",
                Metadata: make(map[string]*BucketMetadata),
            }
            if err := saveBucketsMetadata(*buckets); err != nil {
                return Filesystem{}, err.Trace()
            }
        } else {
            return Filesystem{}, err.Trace()
        }
    }

    fs := Filesystem{
        rwLock: &sync.RWMutex{},
    }
    fs.path = rootPath
    fs.multiparts = multiparts
    fs.buckets = buckets

    /// Defaults

    // minimum free disk required for i/o operations to succeed.
    fs.minFreeDisk = minFreeDisk

    // Start list goroutine.
    if err = fs.listObjectsService(); err != nil {
        return Filesystem{}, err.Trace(rootPath)
    }

    // Return here.
    return fs, nil
}
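A minimal usage sketch for New above; the root path and the 10% minimum-free-disk threshold are illustrative placeholders, and exampleNewFilesystem is a hypothetical wrapper, not part of the original code.

// exampleNewFilesystem - hypothetical usage sketch for New above.
// "/export/minio" and the threshold of 10 are illustrative values only.
func exampleNewFilesystem() *probe.Error {
    fs, err := New("/export/minio", 10)
    if err != nil {
        return err.Trace("/export/minio")
    }
    // fs is now ready to serve bucket and object operations.
    _ = fs
    return nil
}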
// catURL displays contents of a URL to stdout.
func catURL(sourceURL string) *probe.Error {
    var reader io.ReadSeeker
    switch sourceURL {
    case "-":
        reader = os.Stdin
    default:
        // Ignore size, since os.Stat() does not always return the proper
        // size for local filesystems, for example /proc files.
        var err *probe.Error
        if reader, err = getSource(sourceURL); err != nil {
            return err.Trace(sourceURL)
        }
    }
    return catOut(reader).Trace(sourceURL)
}
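A minimal usage sketch for catURL above, assuming the mc helpers fatalIf and probe-style Trace shown elsewhere in this section; the source paths and the exampleCat wrapper are placeholders.

// exampleCat - hypothetical usage sketch: stream a local file and then
// stdin ("-") to stdout via catURL.
func exampleCat() {
    for _, sourceURL := range []string{"/etc/hosts", "-"} {
        fatalIf(catURL(sourceURL).Trace(sourceURL), "Unable to read source ‘"+sourceURL+"’.")
    }
}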
// getNewClient gives a new client interface
func getNewClient(urlStr string, auth hostConfig) (client.Client, *probe.Error) {
    url := client.NewURL(urlStr)
    switch url.Type {
    case client.Object: // Minio and S3 compatible cloud storage
        s3Config := new(client.Config)
        s3Config.AccessKeyID = func() string {
            if auth.AccessKeyID == globalAccessKeyID {
                return ""
            }
            return auth.AccessKeyID
        }()
        s3Config.SecretAccessKey = func() string {
            if auth.SecretAccessKey == globalSecretAccessKey {
                return ""
            }
            return auth.SecretAccessKey
        }()
        s3Config.AppName = "Minio"
        s3Config.AppVersion = globalMCVersion
        s3Config.AppComments = []string{os.Args[0], runtime.GOOS, runtime.GOARCH}
        s3Config.HostURL = urlStr
        s3Config.Debug = globalDebugFlag

        var s3Client client.Client
        var err *probe.Error
        if auth.API == "S3v2" {
            s3Client, err = s3v2.New(s3Config)
        } else {
            s3Client, err = s3v4.New(s3Config)
        }
        if err != nil {
            return nil, err.Trace()
        }
        return s3Client, nil
    case client.Filesystem:
        fsClient, err := fs.New(urlStr)
        if err != nil {
            return nil, err.Trace()
        }
        return fsClient, nil
    }
    return nil, errInitClient(urlStr).Trace()
}
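A minimal usage sketch for getNewClient above. The credentials and URL are placeholders; the hostConfig field names mirror the ones getNewClient reads, and the "S3v4" API value is an assumption (the code only tests for "S3v2" and falls back to v4 otherwise).

// exampleClient - hypothetical usage sketch: build a client for an
// S3-compatible host, then stat and print the target.
func exampleClient() {
    auth := hostConfig{
        AccessKeyID:     "ACCESSKEY", // placeholder credentials
        SecretAccessKey: "SECRETKEY",
        API:             "S3v4",
    }
    clnt, err := getNewClient("https://s3.amazonaws.com/mybucket", auth)
    fatalIf(err.Trace(), "Unable to initialize client.")

    content, err := clnt.Stat()
    fatalIf(err.Trace(), "Unable to stat ‘"+clnt.URL().String()+"’.")
    Prints("%s\n", parseContent(content))
}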
// doListIncomplete - list all incomplete upload entities inside a folder.
func doListIncomplete(clnt client.Client, recursive, multipleArgs bool) *probe.Error {
    var err *probe.Error
    var parentContent *client.Content
    parentContent, err = clnt.Stat()
    if err != nil {
        return err.Trace(clnt.URL().String())
    }
    for contentCh := range clnt.List(recursive, true) {
        if contentCh.Err != nil {
            switch contentCh.Err.ToGoError().(type) {
            // handle this specifically for filesystem
            case client.BrokenSymlink:
                errorIf(contentCh.Err.Trace(), "Unable to list broken link.")
                continue
            case client.TooManyLevelsSymlink:
                errorIf(contentCh.Err.Trace(), "Unable to list too many levels link.")
                continue
            }
            if os.IsNotExist(contentCh.Err.ToGoError()) || os.IsPermission(contentCh.Err.ToGoError()) {
                if contentCh.Content != nil {
                    if contentCh.Content.Type.IsDir() && (contentCh.Content.Type&os.ModeSymlink == os.ModeSymlink) {
                        errorIf(contentCh.Err.Trace(), "Unable to list broken folder link.")
                        continue
                    }
                }
                errorIf(contentCh.Err.Trace(), "Unable to list.")
                continue
            }
            err = contentCh.Err.Trace()
            break
        }
        if multipleArgs && parentContent.Type.IsDir() {
            contentCh.Content.Name = filepath.Join(parentContent.Name, strings.TrimPrefix(contentCh.Content.Name, parentContent.Name))
        }
        Prints("%s\n", parseContent(contentCh.Content))
    }
    if err != nil {
        return err.Trace()
    }
    return nil
}
// listXLBuckets - list all XL buckets found on disk.
func (xl API) listXLBuckets() *probe.Error {
    var disks map[int]disk.Disk
    var err *probe.Error
    for _, node := range xl.nodes {
        disks, err = node.ListDisks()
        if err != nil {
            return err.Trace()
        }
    }
    var dirs []os.FileInfo
    for _, disk := range disks {
        dirs, err = disk.ListDir(xl.config.XLName)
        if err == nil {
            break
        }
    }
    // if all disks are missing then return error
    if err != nil {
        return err.Trace()
    }
    for _, dir := range dirs {
        splitDir := strings.Split(dir.Name(), "$")
        if len(splitDir) < 3 {
            return probe.NewError(CorruptedBackend{Backend: dir.Name()})
        }
        bucketName := splitDir[0]
        // we don't need this once we cache from makeXLBucket()
        bkt, _, err := newBucket(bucketName, "private", xl.config.XLName, xl.nodes)
        if err != nil {
            return err.Trace()
        }
        xl.buckets[bucketName] = bkt
    }
    return nil
}
// getBucketMetadata - load bucket metadata, returning the first copy that decodes cleanly.
func (b bucket) getBucketMetadata() (*AllBuckets, *probe.Error) {
    metadata := new(AllBuckets)
    var readers map[int]io.ReadCloser
    {
        var err *probe.Error
        readers, err = b.getBucketMetadataReaders()
        if err != nil {
            return nil, err.Trace()
        }
    }
    for _, reader := range readers {
        defer reader.Close()
    }
    var err error
    for _, reader := range readers {
        jdec := json.NewDecoder(reader)
        if err = jdec.Decode(metadata); err == nil {
            return metadata, nil
        }
    }
    return nil, probe.NewError(err)
}
// New instantiates a new donut.
func New() (Filesystem, *probe.Error) {
    var err *probe.Error
    // load multiparts session from disk
    var multiparts *Multiparts
    multiparts, err = loadMultipartsSession()
    if err != nil {
        if os.IsNotExist(err.ToGoError()) {
            multiparts = &Multiparts{
                Version:       "1",
                ActiveSession: make(map[string]*MultipartSession),
            }
            if err := SaveMultipartsSession(multiparts); err != nil {
                return Filesystem{}, err.Trace()
            }
        } else {
            return Filesystem{}, err.Trace()
        }
    }
    a := Filesystem{lock: new(sync.Mutex)}
    a.multiparts = multiparts
    return a, nil
}
// getBucketMetadataReaders - open a bucket metadata reader on every available disk.
func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) {
    readers := make(map[int]io.ReadCloser)
    var disks map[int]disk.Disk
    var err *probe.Error
    for _, node := range b.nodes {
        disks, err = node.ListDisks()
        if err != nil {
            return nil, err.Trace()
        }
    }
    var bucketMetaDataReader io.ReadCloser
    for order, disk := range disks {
        bucketMetaDataReader, err = disk.Open(filepath.Join(b.donutName, bucketMetadataConfig))
        if err != nil {
            continue
        }
        readers[order] = bucketMetaDataReader
    }
    if err != nil {
        return nil, err.Trace()
    }
    return readers, nil
}
func (s rpcSignatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    var signature *rpcSignature
    if isRequestSignatureRPC(r) {
        // Init signature V4 verification
        var err *probe.Error
        signature, err = initSignatureRPC(r)
        if err != nil {
            switch err.ToGoError() {
            case errInvalidRegion:
                errorIf(err.Trace(), "Unknown region in authorization header.", nil)
                writeErrorResponse(w, r, AuthorizationHeaderMalformed, r.URL.Path)
                return
            case errAccessKeyIDInvalid:
                errorIf(err.Trace(), "Invalid access key id.", nil)
                writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
                return
            default:
                errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                writeErrorResponse(w, r, InternalError, r.URL.Path)
                return
            }
        }
        buffer := new(bytes.Buffer)
        if _, err := io.Copy(buffer, r.Body); err != nil {
            errorIf(probe.NewError(err), "Unable to read payload from request body.", nil)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
            return
        }
        value := sha256.Sum256(buffer.Bytes())
        ok, err := signature.DoesSignatureMatch(hex.EncodeToString(value[:]))
        if err != nil {
            errorIf(err.Trace(), "Unable to verify signature.", nil)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
            return
        }
        if !ok {
            writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
            return
        }
        // Copy the buffer back into the request body to be read by the RPC service callers.
        r.Body = ioutil.NopCloser(buffer)
        s.handler.ServeHTTP(w, r)
    } else {
        writeErrorResponse(w, r, AccessDenied, r.URL.Path)
    }
}
// getInheritedListeners - look for LISTEN_FDS in environment variables and populate listeners accordingly.
func (n *minNet) getInheritedListeners() *probe.Error {
    var retErr *probe.Error
    n.inheritOnce.Do(func() {
        n.mutex.Lock()
        defer n.mutex.Unlock()
        countStr := os.Getenv(envCountKey)
        if countStr == "" {
            return
        }
        count, err := strconv.Atoi(countStr)
        if err != nil {
            retErr = probe.NewError(fmt.Errorf("found invalid count value: %s=%s", envCountKey, countStr))
            return
        }
        fdStart := 3
        for i := fdStart; i < fdStart+count; i++ {
            file := os.NewFile(uintptr(i), "listener")
            l, err := net.FileListener(file)
            if err != nil {
                file.Close()
                retErr = probe.NewError(err)
                return
            }
            if err := file.Close(); err != nil {
                retErr = probe.NewError(err)
                return
            }
            n.inheritedListeners = append(n.inheritedListeners, l)
        }
    })
    if retErr != nil {
        return retErr.Trace()
    }
    return nil
}
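For context, a hypothetical parent-side counterpart to getInheritedListeners: the parent passes each listener's file starting at fd 3 via exec.Cmd.ExtraFiles and records the count under envCountKey. Only envCountKey and the fd 3 convention come from the code above; the rest is illustrative.

// exampleSpawnChild - hypothetical parent-side sketch of the LISTEN_FDS-style
// handoff consumed by getInheritedListeners: hand one TCP listener to a child.
func exampleSpawnChild() error {
    l, err := net.Listen("tcp", ":9000")
    if err != nil {
        return err
    }
    file, err := l.(*net.TCPListener).File()
    if err != nil {
        return err
    }
    cmd := exec.Command(os.Args[0])
    cmd.ExtraFiles = []*os.File{file} // the child sees this as fd 3
    cmd.Env = append(os.Environ(), envCountKey+"=1")
    return cmd.Start()
}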
// New instantiates a new donut.
func New(rootPath string) (Filesystem, *probe.Error) {
    setFSBucketsConfigPath(filepath.Join(rootPath, "$buckets.json"))
    setFSMultipartsConfigPath(filepath.Join(rootPath, "$multiparts-session.json"))
    var err *probe.Error
    // load multiparts session from disk
    var multiparts *Multiparts
    multiparts, err = loadMultipartsSession()
    if err != nil {
        if os.IsNotExist(err.ToGoError()) {
            multiparts = &Multiparts{
                Version:       "1",
                ActiveSession: make(map[string]*MultipartSession),
            }
            if err := saveMultipartsSession(multiparts); err != nil {
                return Filesystem{}, err.Trace()
            }
        } else {
            return Filesystem{}, err.Trace()
        }
    }
    var buckets *Buckets
    buckets, err = loadBucketsMetadata()
    if err != nil {
        if os.IsNotExist(err.ToGoError()) {
            buckets = &Buckets{
                Version:  "1",
                Metadata: make(map[string]*BucketMetadata),
            }
            if err := saveBucketsMetadata(buckets); err != nil {
                return Filesystem{}, err.Trace()
            }
        } else {
            return Filesystem{}, err.Trace()
        }
    }
    a := Filesystem{lock: new(sync.Mutex)}
    a.path = rootPath
    a.multiparts = multiparts
    a.buckets = buckets
    return a, nil
}
// New instantiates a new xl.
func New() (Interface, *probe.Error) {
    var conf *Config
    var err *probe.Error
    conf, err = LoadConfig()
    if err != nil {
        conf = &Config{
            Version:     "0.0.1",
            MaxSize:     512000000,
            NodeDiskMap: nil,
            XLName:      "",
        }
        if err := quick.CheckData(conf); err != nil {
            return nil, err.Trace()
        }
    }
    a := API{config: conf}
    a.storedBuckets = metadata.NewCache()
    a.nodes = make(map[string]node)
    a.buckets = make(map[string]bucket)
    a.objects = data.NewCache(a.config.MaxSize)
    a.multiPartObjects = make(map[string]*data.Cache)
    a.objects.OnEvicted = a.evictedObject
    a.lock = new(sync.Mutex)

    if len(a.config.NodeDiskMap) > 0 {
        for k, v := range a.config.NodeDiskMap {
            if len(v) == 0 {
                return nil, probe.NewError(InvalidDisksArgument{})
            }
            err := a.AttachNode(k, v)
            if err != nil {
                return nil, err.Trace()
            }
        }
        /// Initialization, populate all buckets into memory
        buckets, err := a.listBuckets()
        if err != nil {
            return nil, err.Trace()
        }
        for k, v := range buckets {
            var newBucket = storedBucket{}
            newBucket.bucketMetadata = v
            newBucket.objectMetadata = make(map[string]ObjectMetadata)
            newBucket.multiPartSession = make(map[string]MultiPartSession)
            newBucket.partMetadata = make(map[string]map[int]PartMetadata)
            a.storedBuckets.Set(k, newBucket)
        }
        a.Heal()
    }
    return a, nil
}
// PutObjectPartHandler - Upload part
func (api API) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
    // Ticket master block
    {
        op := APIOperation{}
        op.ProceedCh = make(chan struct{})
        api.OP <- op
        // block until Ticket master gives us a go
        <-op.ProceedCh
    }

    // get Content-MD5 sent by client and verify if valid
    md5 := req.Header.Get("Content-MD5")
    if !isValidMD5(md5) {
        writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
        return
    }

    /// if Content-Length is missing, deny the request
    size := req.Header.Get("Content-Length")
    if size == "" {
        writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
        return
    }

    /// maximum upload size for multipart objects in a single operation
    if isMaxObjectSize(size) {
        writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
        return
    }

    var sizeInt64 int64
    {
        var err error
        sizeInt64, err = strconv.ParseInt(size, 10, 64)
        if err != nil {
            writeErrorResponse(w, req, InvalidRequest, req.URL.Path)
            return
        }
    }

    vars := mux.Vars(req)
    bucket := vars["bucket"]
    object := vars["object"]

    uploadID := req.URL.Query().Get("uploadId")
    partIDString := req.URL.Query().Get("partNumber")

    var partID int
    {
        var err error
        partID, err = strconv.Atoi(partIDString)
        if err != nil {
            writeErrorResponse(w, req, InvalidPart, req.URL.Path)
            return
        }
    }

    var signature *signv4.Signature
    if !api.Anonymous {
        if _, ok := req.Header["Authorization"]; ok {
            // Init signature V4 verification
            var err *probe.Error
            signature, err = initSignatureV4(req)
            if err != nil {
                errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                writeErrorResponse(w, req, InternalError, req.URL.Path)
                return
            }
        }
    }

    calculatedMD5, err := api.XL.CreateObjectPart(bucket, object, uploadID, partID, "", md5, sizeInt64, req.Body, signature)
    if err != nil {
        errorIf(err.Trace(), "CreateObjectPart failed.", nil)
        switch err.ToGoError().(type) {
        case xl.InvalidUploadID:
            writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
        case xl.ObjectExists:
            writeErrorResponse(w, req, MutableWriteNotAllowed, req.URL.Path)
        case xl.BadDigest:
            writeErrorResponse(w, req, BadDigest, req.URL.Path)
        case signv4.DoesNotMatch:
            writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
        case xl.IncompleteBody:
            writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
        case xl.EntityTooLarge:
            writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
        case xl.InvalidDigest:
            writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
        }
        return
    }
    w.Header().Set("ETag", calculatedMD5)
    writeSuccessResponse(w)
}
// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for an authenticated request.
func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
    bucket := vars["bucket"]

    if !api.Anonymous {
        if isRequestRequiresACLCheck(req) {
            writeErrorResponse(w, req, AccessDenied, req.URL.Path)
            return
        }
    }

    // read from 'x-amz-acl'
    aclType := getACLType(req)
    if aclType == unsupportedACLType {
        writeErrorResponse(w, req, NotImplemented, req.URL.Path)
        return
    }

    var signature *fs.Signature
    if !api.Anonymous {
        // Init signature V4 verification
        if isRequestSignatureV4(req) {
            var err *probe.Error
            signature, err = initSignatureV4(req)
            if err != nil {
                switch err.ToGoError() {
                case errInvalidRegion:
                    errorIf(err.Trace(), "Unknown region in authorization header.", nil)
                    writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
                    return
                case errAccessKeyIDInvalid:
                    errorIf(err.Trace(), "Invalid access key id.", nil)
                    writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
                    return
                default:
                    errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                    writeErrorResponse(w, req, InternalError, req.URL.Path)
                    return
                }
            }
        }
    }

    // if body of request is non-nil then check for validity of Content-Length
    if req.Body != nil {
        /// if Content-Length is unknown/missing, deny the request
        if req.ContentLength == -1 {
            writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
            return
        }
        if signature != nil {
            locationBytes, err := ioutil.ReadAll(req.Body)
            // verify the signature only when the body was read successfully
            // (the original condition was inverted).
            if err == nil {
                sh := sha256.New()
                sh.Write(locationBytes)
                ok, perr := signature.DoesSignatureMatch(hex.EncodeToString(sh.Sum(nil)))
                if perr != nil {
                    errorIf(perr.Trace(), "MakeBucket failed.", nil)
                    writeErrorResponse(w, req, InternalError, req.URL.Path)
                    return
                }
                if !ok {
                    writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
                    return
                }
            }
        }
    }

    err := api.Filesystem.MakeBucket(bucket, getACLTypeString(aclType))
    if err != nil {
        errorIf(err.Trace(), "MakeBucket failed.", nil)
        switch err.ToGoError().(type) {
        case fs.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
        case fs.BucketExists:
            writeErrorResponse(w, req, BucketAlreadyExists, req.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
        }
        return
    }
    // Make sure to add Location information here only for bucket
    w.Header().Set("Location", "/"+bucket)
    writeSuccessResponse(w, nil)
}
func serverMain(c *cli.Context) {
    checkServerSyntax(c)

    perr := initServer()
    fatalIf(perr.Trace(), "Failed to read config for minio.", nil)

    certFile := c.GlobalString("cert")
    keyFile := c.GlobalString("key")
    if (certFile != "" && keyFile == "") || (certFile == "" && keyFile != "") {
        fatalIf(probe.NewError(errInvalidArgument), "Both certificate and key are required to enable https.", nil)
    }

    var minFreeDisk int64
    minFreeDiskSet := false
    // Default
    minFreeDisk = 10

    var expiration time.Duration
    expirationSet := false

    args := c.Args()
    for len(args) >= 2 {
        switch args.First() {
        case "min-free-disk":
            if minFreeDiskSet {
                fatalIf(probe.NewError(errInvalidArgument), "Minimum free disk should be set only once.", nil)
            }
            args = args.Tail()
            var err *probe.Error
            minFreeDisk, err = parsePercentToInt(args.First(), 64)
            fatalIf(err.Trace(args.First()), "Invalid minimum free disk size "+args.First()+" passed.", nil)
            args = args.Tail()
            minFreeDiskSet = true
        case "expiry":
            if expirationSet {
                fatalIf(probe.NewError(errInvalidArgument), "Expiration should be set only once.", nil)
            }
            args = args.Tail()
            var err error
            expiration, err = time.ParseDuration(args.First())
            fatalIf(probe.NewError(err), "Invalid expiration time "+args.First()+" passed.", nil)
            args = args.Tail()
            expirationSet = true
        default:
            cli.ShowCommandHelpAndExit(c, "server", 1) // last argument is exit code
        }
    }

    path := strings.TrimSpace(c.Args().Last()) // Last argument is always the path.
    if _, err := os.Stat(path); err != nil {
        fatalIf(probe.NewError(err), "Unable to validate the path.", nil)
    }
    tls := (certFile != "" && keyFile != "")
    apiServerConfig := cloudServerConfig{
        Address:     c.GlobalString("address"),
        AccessLog:   c.GlobalBool("enable-accesslog"),
        Anonymous:   c.GlobalBool("anonymous"),
        Path:        path,
        MinFreeDisk: minFreeDisk,
        Expiry:      expiration,
        TLS:         tls,
        CertFile:    certFile,
        KeyFile:     keyFile,
        RateLimit:   c.GlobalInt("ratelimit"),
    }
    perr = startServer(apiServerConfig)
    errorIf(perr.Trace(), "Failed to start the minio server.", nil)
}
// mainList - is the handler for the 'mc ls' command.
func mainList(ctx *cli.Context) {
    setListPalette(ctx.GlobalString("colors"))
    checkListSyntax(ctx)

    args := ctx.Args()
    // Operating system tool behavior.
    if globalMimicFlag && !ctx.Args().Present() {
        args = []string{"."}
    }

    var targetURLs []string
    var err *probe.Error
    if args.First() == "incomplete" {
        targetURLs, err = args2URLs(args.Tail())
        fatalIf(err.Trace(args...), "One or more unknown URL types passed.")
        for _, targetURL := range targetURLs {
            // if recursive, strip off the "..."
            var clnt client.Client
            clnt, err = url2Client(stripRecursiveURL(targetURL))
            fatalIf(err.Trace(targetURL), "Unable to initialize target ‘"+targetURL+"’.")
            err = doListIncomplete(clnt, isURLRecursive(targetURL), len(targetURLs) > 1)
            fatalIf(err.Trace(clnt.URL().String()), "Unable to list target ‘"+clnt.URL().String()+"’.")
        }
    } else {
        targetURLs, err = args2URLs(args)
        fatalIf(err.Trace(args...), "One or more unknown URL types passed.")
        for _, targetURL := range targetURLs {
            // if recursive, strip off the "..."
            var clnt client.Client
            clnt, err = url2Client(stripRecursiveURL(targetURL))
            fatalIf(err.Trace(targetURL), "Unable to initialize target ‘"+targetURL+"’.")
            err = doList(clnt, isURLRecursive(targetURL), len(targetURLs) > 1)
            fatalIf(err.Trace(clnt.URL().String()), "Unable to list target ‘"+clnt.URL().String()+"’.")
        }
    }
}
func (s signatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    if isRequestPostPolicySignatureV4(r) && r.Method == "POST" {
        s.handler.ServeHTTP(w, r)
        return
    }
    var signature *signv4.Signature
    if isRequestSignatureV4(r) {
        // For PUT and POST requests with a payload, send the call upwards for verification.
        // For PUT and POST requests without a payload, verify here.
        if (r.Body == nil && (r.Method == "PUT" || r.Method == "POST")) || (r.Method != "PUT" && r.Method != "POST") {
            // Init signature V4 verification
            var err *probe.Error
            signature, err = initSignatureV4(r)
            if err != nil {
                switch err.ToGoError() {
                case errInvalidRegion:
                    errorIf(err.Trace(), "Unknown region in authorization header.", nil)
                    writeErrorResponse(w, r, AuthorizationHeaderMalformed, r.URL.Path)
                    return
                case errAccessKeyIDInvalid:
                    errorIf(err.Trace(), "Invalid access key id.", nil)
                    writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
                    return
                default:
                    errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                    writeErrorResponse(w, r, InternalError, r.URL.Path)
                    return
                }
            }
            // Verify against the sha256 of an empty payload.
            // (sha256.Sum256 returns a [32]byte array, so it must be sliced
            // before hex encoding; the original call would not compile.)
            sum := sha256.Sum256([]byte(""))
            ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sum[:]))
            if err != nil {
                errorIf(err.Trace(), "Unable to verify signature.", nil)
                writeErrorResponse(w, r, InternalError, r.URL.Path)
                return
            }
            if !ok {
                writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
                return
            }
        }
        s.handler.ServeHTTP(w, r)
        return
    }
    if isRequestPresignedSignatureV4(r) {
        var err *probe.Error
        signature, err = initPresignedSignatureV4(r)
        if err != nil {
            switch err.ToGoError() {
            case errAccessKeyIDInvalid:
                errorIf(err.Trace(), "Invalid access key id requested.", nil)
                writeErrorResponse(w, r, InvalidAccessKeyID, r.URL.Path)
                return
            default:
                errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                writeErrorResponse(w, r, InternalError, r.URL.Path)
                return
            }
        }
        ok, err := signature.DoesPresignedSignatureMatch()
        if err != nil {
            errorIf(err.Trace(), "Unable to verify signature.", nil)
            writeErrorResponse(w, r, InternalError, r.URL.Path)
            return
        }
        if !ok {
            writeErrorResponse(w, r, SignatureDoesNotMatch, r.URL.Path)
            return
        }
        s.handler.ServeHTTP(w, r)
        return
    }
    writeErrorResponse(w, r, AccessDenied, r.URL.Path)
}
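A hypothetical wiring sketch for the middleware above. Only the embedded handler field is visible in this snippet, so the composite-literal construction assumes same-package access, and the downstream API handler is a placeholder.

// exampleRegisterSignatureHandler - hypothetical wiring sketch: wrap an API
// handler so every request passes signature verification first.
func exampleRegisterSignatureHandler(apiHandler http.Handler) error {
    var h http.Handler = signatureHandler{handler: apiHandler}
    return http.ListenAndServe(":9000", h)
}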
// CompleteMultipartUploadHandler - Complete multipart upload
func (api CloudStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
    bucket := vars["bucket"]
    object := vars["object"]

    if !api.Anonymous {
        if isRequestRequiresACLCheck(req) {
            writeErrorResponse(w, req, AccessDenied, req.URL.Path)
            return
        }
    }

    objectResourcesMetadata := getObjectResources(req.URL.Query())

    var signature *fs.Signature
    if !api.Anonymous {
        if isRequestSignatureV4(req) {
            // Init signature V4 verification
            var err *probe.Error
            signature, err = initSignatureV4(req)
            if err != nil {
                switch err.ToGoError() {
                case errInvalidRegion:
                    errorIf(err.Trace(), "Unknown region in authorization header.", nil)
                    writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
                    return
                case errAccessKeyIDInvalid:
                    errorIf(err.Trace(), "Invalid access key id.", nil)
                    writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
                    return
                default:
                    errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                    writeErrorResponse(w, req, InternalError, req.URL.Path)
                    return
                }
            }
        }
    }

    metadata, err := api.Filesystem.CompleteMultipartUpload(bucket, object, objectResourcesMetadata.UploadID, req.Body, signature)
    if err != nil {
        errorIf(err.Trace(), "CompleteMultipartUpload failed.", nil)
        switch err.ToGoError().(type) {
        case fs.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
        case fs.BucketNotFound:
            writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
        case fs.ObjectNotFound:
            writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
        case fs.ObjectNameInvalid:
            writeErrorResponse(w, req, NoSuchKey, req.URL.Path)
        case fs.InvalidUploadID:
            writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
        case fs.InvalidPart:
            writeErrorResponse(w, req, InvalidPart, req.URL.Path)
        case fs.InvalidPartOrder:
            writeErrorResponse(w, req, InvalidPartOrder, req.URL.Path)
        case fs.SignatureDoesNotMatch:
            writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
        case fs.IncompleteBody:
            writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
        case fs.MalformedXML:
            writeErrorResponse(w, req, MalformedXML, req.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
        }
        return
    }
    response := generateCompleteMultpartUploadResponse(bucket, object, req.URL.String(), metadata.Md5)
    encodedSuccessResponse := encodeSuccessResponse(response)
    // write headers
    setCommonHeaders(w)
    // write success response.
    writeSuccessResponse(w, encodedSuccessResponse)
}
// PutObjectPartHandler - Upload part
func (api CloudStorageAPI) PutObjectPartHandler(w http.ResponseWriter, req *http.Request) {
    vars := mux.Vars(req)
    bucket := vars["bucket"]
    object := vars["object"]

    if !api.Anonymous {
        if isRequestRequiresACLCheck(req) {
            writeErrorResponse(w, req, AccessDenied, req.URL.Path)
            return
        }
    }

    // get Content-MD5 sent by client and verify if valid
    md5 := req.Header.Get("Content-MD5")
    if !isValidMD5(md5) {
        writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
        return
    }

    /// if Content-Length is unknown/missing, deny the request
    size := req.ContentLength
    if size == -1 {
        writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
        return
    }

    /// maximum upload size for multipart objects in a single operation
    if isMaxObjectSize(size) {
        writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
        return
    }

    uploadID := req.URL.Query().Get("uploadId")
    partIDString := req.URL.Query().Get("partNumber")

    var partID int
    {
        var err error
        partID, err = strconv.Atoi(partIDString)
        if err != nil {
            writeErrorResponse(w, req, InvalidPart, req.URL.Path)
            return
        }
    }

    var signature *fs.Signature
    if !api.Anonymous {
        if isRequestSignatureV4(req) {
            // Init signature V4 verification
            var err *probe.Error
            signature, err = initSignatureV4(req)
            if err != nil {
                switch err.ToGoError() {
                case errInvalidRegion:
                    errorIf(err.Trace(), "Unknown region in authorization header.", nil)
                    writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
                    return
                case errAccessKeyIDInvalid:
                    errorIf(err.Trace(), "Invalid access key id.", nil)
                    writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
                    return
                default:
                    errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                    writeErrorResponse(w, req, InternalError, req.URL.Path)
                    return
                }
            }
        }
    }

    calculatedMD5, err := api.Filesystem.CreateObjectPart(bucket, object, uploadID, md5, partID, size, req.Body, signature)
    if err != nil {
        errorIf(err.Trace(), "CreateObjectPart failed.", nil)
        switch err.ToGoError().(type) {
        case fs.RootPathFull:
            writeErrorResponse(w, req, RootPathFull, req.URL.Path)
        case fs.InvalidUploadID:
            writeErrorResponse(w, req, NoSuchUpload, req.URL.Path)
        case fs.BadDigest:
            writeErrorResponse(w, req, BadDigest, req.URL.Path)
        case fs.SignatureDoesNotMatch:
            writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
        case fs.IncompleteBody:
            writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
        case fs.EntityTooLarge:
            writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
        case fs.InvalidDigest:
            writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
        }
        return
    }
    w.Header().Set("ETag", "\""+calculatedMD5+"\"")
    writeSuccessResponse(w, nil)
}
// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
func (api CloudStorageAPI) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
    var object, bucket string
    vars := mux.Vars(req)
    bucket = vars["bucket"]
    object = vars["object"]

    if !api.Anonymous {
        if isRequestRequiresACLCheck(req) {
            if api.Filesystem.IsPrivateBucket(bucket) || api.Filesystem.IsReadOnlyBucket(bucket) {
                writeErrorResponse(w, req, AccessDenied, req.URL.Path)
                return
            }
        }
    }

    // get Content-MD5 sent by client and verify if valid
    md5 := req.Header.Get("Content-MD5")
    if !isValidMD5(md5) {
        writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
        return
    }

    /// if Content-Length is unknown/missing, deny the request
    size := req.ContentLength
    if size == -1 {
        writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
        return
    }

    /// maximum upload size for objects in a single operation
    if isMaxObjectSize(size) {
        writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
        return
    }

    var signature *fs.Signature
    if !api.Anonymous {
        if isRequestSignatureV4(req) {
            // Init signature V4 verification
            var err *probe.Error
            signature, err = initSignatureV4(req)
            if err != nil {
                switch err.ToGoError() {
                case errInvalidRegion:
                    errorIf(err.Trace(), "Unknown region in authorization header.", nil)
                    writeErrorResponse(w, req, AuthorizationHeaderMalformed, req.URL.Path)
                    return
                case errAccessKeyIDInvalid:
                    errorIf(err.Trace(), "Invalid access key id.", nil)
                    writeErrorResponse(w, req, InvalidAccessKeyID, req.URL.Path)
                    return
                default:
                    errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                    writeErrorResponse(w, req, InternalError, req.URL.Path)
                    return
                }
            }
        }
    }

    metadata, err := api.Filesystem.CreateObject(bucket, object, md5, size, req.Body, signature)
    if err != nil {
        errorIf(err.Trace(), "CreateObject failed.", nil)
        switch err.ToGoError().(type) {
        case fs.RootPathFull:
            writeErrorResponse(w, req, RootPathFull, req.URL.Path)
        case fs.BucketNotFound:
            writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
        case fs.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
        case fs.BadDigest:
            writeErrorResponse(w, req, BadDigest, req.URL.Path)
        case fs.MissingDateHeader:
            writeErrorResponse(w, req, RequestTimeTooSkewed, req.URL.Path)
        case fs.SignatureDoesNotMatch:
            writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
        case fs.IncompleteBody:
            writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
        case fs.EntityTooLarge:
            writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
        case fs.InvalidDigest:
            writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
        }
        return
    }
    w.Header().Set("ETag", "\""+metadata.Md5+"\"")
    writeSuccessResponse(w, nil)
}
// PutBucketHandler - PUT Bucket
// ----------
// This implementation of the PUT operation creates a new bucket for an authenticated request.
func (api API) PutBucketHandler(w http.ResponseWriter, req *http.Request) {
    // Ticket master block
    {
        op := APIOperation{}
        op.ProceedCh = make(chan struct{})
        api.OP <- op
        // block until Ticket master gives us a go
        <-op.ProceedCh
    }

    if _, err := stripAccessKeyID(req.Header.Get("Authorization")); err != nil {
        writeErrorResponse(w, req, AccessDenied, req.URL.Path)
        return
    }

    // read from 'x-amz-acl'
    aclType := getACLType(req)
    if aclType == unsupportedACLType {
        writeErrorResponse(w, req, NotImplemented, req.URL.Path)
        return
    }

    vars := mux.Vars(req)
    bucket := vars["bucket"]

    var signature *signv4.Signature
    if !api.Anonymous {
        if _, ok := req.Header["Authorization"]; ok {
            // Init signature V4 verification
            var err *probe.Error
            signature, err = initSignatureV4(req)
            if err != nil {
                errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                writeErrorResponse(w, req, InternalError, req.URL.Path)
                return
            }
        }
    }

    // if body of request is non-nil then check for validity of Content-Length
    if req.Body != nil {
        /// if Content-Length is missing, deny the request
        size := req.Header.Get("Content-Length")
        if size == "" {
            writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
            return
        }
    }

    err := api.Donut.MakeBucket(bucket, getACLTypeString(aclType), req.Body, signature)
    if err != nil {
        errorIf(err.Trace(), "MakeBucket failed.", nil)
        switch err.ToGoError().(type) {
        case signv4.DoesNotMatch:
            writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
        case donut.TooManyBuckets:
            writeErrorResponse(w, req, TooManyBuckets, req.URL.Path)
        case donut.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
        case donut.BucketExists:
            writeErrorResponse(w, req, BucketAlreadyExists, req.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
        }
        return
    }
    // Make sure to add Location information here only for bucket
    w.Header().Set("Location", "/"+bucket)
    writeSuccessResponse(w)
}
// ListObjects - GET bucket (list objects)
func (fs Filesystem) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
    fs.lock.Lock()
    defer fs.lock.Unlock()
    if !IsValidBucketName(bucket) {
        return nil, resources, probe.NewError(BucketNameInvalid{Bucket: bucket})
    }
    if resources.Prefix != "" && !IsValidObjectName(resources.Prefix) {
        return nil, resources, probe.NewError(ObjectNameInvalid{Bucket: bucket, Object: resources.Prefix})
    }

    p := bucketDir{}
    rootPrefix := filepath.Join(fs.path, bucket)
    // check bucket exists
    if _, err := os.Stat(rootPrefix); os.IsNotExist(err) {
        return nil, resources, probe.NewError(BucketNotFound{Bucket: bucket})
    }

    p.root = rootPrefix
    /// automatically treat incoming "/" as "\\" on windows due to its path constraints.
    if runtime.GOOS == "windows" {
        if resources.Prefix != "" {
            resources.Prefix = strings.Replace(resources.Prefix, "/", string(os.PathSeparator), -1)
        }
        if resources.Delimiter != "" {
            resources.Delimiter = strings.Replace(resources.Delimiter, "/", string(os.PathSeparator), -1)
        }
        if resources.Marker != "" {
            resources.Marker = strings.Replace(resources.Marker, "/", string(os.PathSeparator), -1)
        }
    }

    // If a delimiter is supplied without a prefix, we are at the very top level: list everything and move on.
    if resources.Delimiter != "" && resources.Prefix == "" {
        files, err := ioutil.ReadDir(rootPrefix)
        if err != nil {
            if os.IsNotExist(err) {
                return nil, resources, probe.NewError(BucketNotFound{Bucket: bucket})
            }
            return nil, resources, probe.NewError(err)
        }
        for _, fl := range files {
            if strings.HasSuffix(fl.Name(), "$multiparts") {
                continue
            }
            p.files = append(p.files, contentInfo{
                Prefix:   fl.Name(),
                Size:     fl.Size(),
                Mode:     fl.Mode(),
                ModTime:  fl.ModTime(),
                FileInfo: fl,
            })
        }
    }

    // If both delimiter and prefix are supplied, make sure that paging doesn't go deep; treat it as a simple directory listing.
    if resources.Delimiter != "" && resources.Prefix != "" {
        if !strings.HasSuffix(resources.Prefix, resources.Delimiter) {
            fl, err := os.Stat(filepath.Join(rootPrefix, resources.Prefix))
            if err != nil {
                if os.IsNotExist(err) {
                    return nil, resources, probe.NewError(ObjectNotFound{Bucket: bucket, Object: resources.Prefix})
                }
                return nil, resources, probe.NewError(err)
            }
            p.files = append(p.files, contentInfo{
                Prefix:   resources.Prefix,
                Size:     fl.Size(),
                Mode:     os.ModeDir,
                ModTime:  fl.ModTime(),
                FileInfo: fl,
            })
        } else {
            // both branches of the original windows/non-windows split built the same path.
            prefixPath := rootPrefix + string(os.PathSeparator) + resources.Prefix
            files, err := ioutil.ReadDir(prefixPath)
            if err != nil {
                switch err := err.(type) {
                case *os.PathError:
                    if err.Op == "open" {
                        return nil, resources, probe.NewError(ObjectNotFound{Bucket: bucket, Object: resources.Prefix})
                    }
                }
                return nil, resources, probe.NewError(err)
            }
            for _, fl := range files {
                if strings.HasSuffix(fl.Name(), "$multiparts") {
                    continue
                }
                prefix := fl.Name()
                if resources.Prefix != "" {
                    prefix = filepath.Join(resources.Prefix, fl.Name())
                }
                p.files = append(p.files, contentInfo{
                    Prefix:   prefix,
                    Size:     fl.Size(),
                    Mode:     fl.Mode(),
                    ModTime:  fl.ModTime(),
                    FileInfo: fl,
                })
            }
        }
    }
    if resources.Delimiter == "" {
        var files []contentInfo
        getAllFiles := func(fp string, fl os.FileInfo, err error) error {
            // If any error, return back quickly.
            if err != nil {
                return err
            }
            if strings.HasSuffix(fp, "$multiparts") {
                return nil
            }
            // if the file pointer equals rootPrefix - discard it.
            if fp == p.root {
                return nil
            }
            if len(files) > resources.Maxkeys {
                return ErrSkipFile
            }
            // Split the root prefix from the incoming file pointer.
            // (the original windows/non-windows branches were identical.)
            realFp := ""
            if splits := strings.Split(fp, p.root+string(os.PathSeparator)); len(splits) > 1 {
                realFp = splits[1]
            }
            // If the path is a directory and a prefix is set, verify that the file
            // pointer carries the prefix; if it does not, skip the directory.
            if fl.Mode().IsDir() {
                if resources.Prefix != "" {
                    // Skip the directory in the following situations:
                    // - when the prefix is part of the file pointer along with the root path
                    // - when the file pointer is part of the prefix along with the root path
                    if !strings.HasPrefix(fp, filepath.Join(p.root, resources.Prefix)) &&
                        !strings.HasPrefix(filepath.Join(p.root, resources.Prefix), fp) {
                        return ErrSkipDir
                    }
                }
            }
            // If the path is a directory and a marker is set, verify whether the split file
            // pointer is lexically lower than the Marker's top level directory; if so, skip it.
            if fl.Mode().IsDir() {
                if resources.Marker != "" {
                    if realFp != "" {
                        // both branches of the original windows/non-windows split
                        // cut the Marker on os.PathSeparator.
                        if realFp < strings.Split(resources.Marker, string(os.PathSeparator))[0] {
                            return ErrSkipDir
                        }
                    }
                }
            }
            // If it is a regular file:
            if fl.Mode().IsRegular() {
                // If a marker is present, it is used to check whether the file
                // pointer is lexically higher than the Marker.
                if realFp != "" {
                    if resources.Marker != "" {
                        if realFp > resources.Marker {
                            files = append(files, contentInfo{
                                Prefix:   realFp,
                                Size:     fl.Size(),
                                Mode:     fl.Mode(),
                                ModTime:  fl.ModTime(),
                                FileInfo: fl,
                            })
                        }
                    } else {
                        files = append(files, contentInfo{
                            Prefix:   realFp,
                            Size:     fl.Size(),
                            Mode:     fl.Mode(),
                            ModTime:  fl.ModTime(),
                            FileInfo: fl,
                        })
                    }
                }
            }
            // If the file is a symlink, follow it and populate values from the target.
            if fl.Mode()&os.ModeSymlink == os.ModeSymlink {
                st, err := os.Stat(fp)
                if err != nil {
                    return nil
                }
                // If a marker is present, it is used to check whether the file
                // pointer is lexically higher than the Marker.
                if realFp != "" {
                    if resources.Marker != "" {
                        if realFp > resources.Marker {
                            files = append(files, contentInfo{
                                Prefix:   realFp,
                                Size:     st.Size(),
                                Mode:     st.Mode(),
                                ModTime:  st.ModTime(),
                                FileInfo: st,
                            })
                        }
                    } else {
                        files = append(files, contentInfo{
                            Prefix:   realFp,
                            Size:     st.Size(),
                            Mode:     st.Mode(),
                            ModTime:  st.ModTime(),
                            FileInfo: st,
                        })
                    }
                }
            }
            p.files = files
            return nil
        }
        // If no delimiter is specified, crawl through everything.
        err := Walk(rootPrefix, getAllFiles)
        if err != nil {
            if os.IsNotExist(err) {
                return nil, resources, probe.NewError(ObjectNotFound{Bucket: bucket, Object: resources.Prefix})
            }
            return nil, resources, probe.NewError(err)
        }
    }

    var metadataList []ObjectMetadata
    var metadata ObjectMetadata

    // Filter objects
    for _, content := range p.files {
        if len(metadataList) == resources.Maxkeys {
            resources.IsTruncated = true
            if resources.IsTruncated && resources.Delimiter != "" {
                resources.NextMarker = metadataList[len(metadataList)-1].Object
            }
            break
        }
        if content.Prefix > resources.Marker {
            var err *probe.Error
            metadata, resources, err = fs.filterObjects(bucket, content, resources)
            if err != nil {
                return nil, resources, err.Trace()
            }
            // If on windows, replace all the incoming paths with API compatible paths.
            if runtime.GOOS == "windows" {
                metadata.Object = sanitizeWindowsPath(metadata.Object)
            }
            if metadata.Bucket != "" {
                metadataList = append(metadataList, metadata)
            }
        }
    }
    // Sanitize common prefixes back into API compatible paths.
    if runtime.GOOS == "windows" {
        resources.CommonPrefixes = sanitizeWindowsPaths(resources.CommonPrefixes...)
    }
    return metadataList, resources, nil
}
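A minimal usage sketch for ListObjects above, emulating one page of an S3-style delimited listing; the bucket name, prefix, and the exampleListObjects wrapper are placeholders, while the field names mirror those used in the function body.

// exampleListObjects - hypothetical usage sketch: fetch one page of a
// delimited listing and print objects and rolled-up common prefixes.
func exampleListObjects(fs Filesystem) *probe.Error {
    resources := BucketResourcesMetadata{
        Prefix:    "photos/",
        Delimiter: "/",
        Maxkeys:   1000,
    }
    objects, resources, err := fs.ListObjects("mybucket", resources)
    if err != nil {
        return err.Trace()
    }
    for _, metadata := range objects {
        fmt.Println(metadata.Object) // object keys under the prefix
    }
    for _, prefix := range resources.CommonPrefixes {
        fmt.Println(prefix) // "sub-directories" rolled up by the delimiter
    }
    if resources.IsTruncated {
        // continue paging from resources.NextMarker
    }
    return nil
}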
func (fs Filesystem) filterObjects(bucket string, content contentInfo, resources BucketResourcesMetadata) (ObjectMetadata, BucketResourcesMetadata, *probe.Error) {
    var err *probe.Error
    var metadata ObjectMetadata

    name := content.Prefix
    switch {
    // Both delimiter and prefix are present.
    case resources.Delimiter != "" && resources.Prefix != "":
        if strings.HasPrefix(name, resources.Prefix) {
            trimmedName := strings.TrimPrefix(name, resources.Prefix)
            delimitedName := delimiter(trimmedName, resources.Delimiter)
            switch {
            case name == resources.Prefix:
                // Use resources.Prefix to filter out the delimited file.
                metadata, err = getMetadata(fs.path, bucket, name)
                if err != nil {
                    return ObjectMetadata{}, resources, err.Trace()
                }
                if metadata.Mode.IsDir() {
                    resources.CommonPrefixes = append(resources.CommonPrefixes, name+resources.Delimiter)
                    return ObjectMetadata{}, resources, nil
                }
            case delimitedName == content.FileInfo.Name():
                // Use resources.Prefix to filter out delimited files.
                metadata, err = getMetadata(fs.path, bucket, name)
                if err != nil {
                    return ObjectMetadata{}, resources, err.Trace()
                }
                if metadata.Mode.IsDir() {
                    resources.CommonPrefixes = append(resources.CommonPrefixes, name+resources.Delimiter)
                    return ObjectMetadata{}, resources, nil
                }
            case delimitedName != "":
                resources.CommonPrefixes = append(resources.CommonPrefixes, resources.Prefix+delimitedName)
            }
        }
    // Delimiter is present and prefix is absent.
    case resources.Delimiter != "" && resources.Prefix == "":
        delimitedName := delimiter(name, resources.Delimiter)
        switch {
        case delimitedName == "":
            metadata, err = getMetadata(fs.path, bucket, name)
            if err != nil {
                return ObjectMetadata{}, resources, err.Trace()
            }
            if metadata.Mode.IsDir() {
                resources.CommonPrefixes = append(resources.CommonPrefixes, name+resources.Delimiter)
                return ObjectMetadata{}, resources, nil
            }
        case delimitedName == content.FileInfo.Name():
            metadata, err = getMetadata(fs.path, bucket, name)
            if err != nil {
                return ObjectMetadata{}, resources, err.Trace()
            }
            if metadata.Mode.IsDir() {
                resources.CommonPrefixes = append(resources.CommonPrefixes, name+resources.Delimiter)
                return ObjectMetadata{}, resources, nil
            }
        case delimitedName != "":
            resources.CommonPrefixes = append(resources.CommonPrefixes, delimitedName)
        }
    // Delimiter is absent and only prefix is present.
    case resources.Delimiter == "" && resources.Prefix != "":
        if strings.HasPrefix(name, resources.Prefix) {
            // Do not strip the prefix from object output.
            metadata, err = getMetadata(fs.path, bucket, name)
            if err != nil {
                return ObjectMetadata{}, resources, err.Trace()
            }
        }
    default:
        metadata, err = getMetadata(fs.path, bucket, name)
        if err != nil {
            return ObjectMetadata{}, resources, err.Trace()
        }
    }
    sortUnique(sort.StringSlice(resources.CommonPrefixes))
    return metadata, resources, nil
}
// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
func (api API) PutObjectHandler(w http.ResponseWriter, req *http.Request) {
    // Ticket master block
    {
        op := APIOperation{}
        op.ProceedCh = make(chan struct{})
        api.OP <- op
        // block until Ticket master gives us a go
        <-op.ProceedCh
    }

    var object, bucket string
    vars := mux.Vars(req)
    bucket = vars["bucket"]
    object = vars["object"]

    // get Content-MD5 sent by client and verify if valid
    md5 := req.Header.Get("Content-MD5")
    if !isValidMD5(md5) {
        writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
        return
    }

    /// if Content-Length is missing, deny the request
    size := req.Header.Get("Content-Length")
    if size == "" {
        writeErrorResponse(w, req, MissingContentLength, req.URL.Path)
        return
    }

    /// maximum upload size for objects in a single operation
    if isMaxObjectSize(size) {
        writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
        return
    }

    /// minimum upload size for objects in a single operation
    //
    // Surprisingly, while Amazon's documentation states that S3 objects have
    // 1 byte as the minimum limit, they do not seem to enforce it: one can
    // successfully create a 0-byte object using a regular putObject() operation.
    //
    // if isMinObjectSize(size) {
    //     writeErrorResponse(w, req, EntityTooSmall, req.URL.Path)
    //     return
    // }
    var sizeInt64 int64
    {
        var err error
        sizeInt64, err = strconv.ParseInt(size, 10, 64)
        if err != nil {
            writeErrorResponse(w, req, InvalidRequest, req.URL.Path)
            return
        }
    }

    var signature *signv4.Signature
    if !api.Anonymous {
        if _, ok := req.Header["Authorization"]; ok {
            // Init signature V4 verification
            var err *probe.Error
            signature, err = initSignatureV4(req)
            if err != nil {
                errorIf(err.Trace(), "Initializing signature v4 failed.", nil)
                writeErrorResponse(w, req, InternalError, req.URL.Path)
                return
            }
        }
    }

    metadata, err := api.XL.CreateObject(bucket, object, md5, sizeInt64, req.Body, nil, signature)
    if err != nil {
        errorIf(err.Trace(), "CreateObject failed.", nil)
        switch err.ToGoError().(type) {
        case xl.BucketNotFound:
            writeErrorResponse(w, req, NoSuchBucket, req.URL.Path)
        case xl.BucketNameInvalid:
            writeErrorResponse(w, req, InvalidBucketName, req.URL.Path)
        case xl.ObjectExists:
            writeErrorResponse(w, req, MutableWriteNotAllowed, req.URL.Path)
        case xl.BadDigest:
            writeErrorResponse(w, req, BadDigest, req.URL.Path)
        case signv4.MissingDateHeader:
            writeErrorResponse(w, req, RequestTimeTooSkewed, req.URL.Path)
        case signv4.DoesNotMatch:
            writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
        case xl.IncompleteBody:
            writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
        case xl.EntityTooLarge:
            writeErrorResponse(w, req, EntityTooLarge, req.URL.Path)
        case xl.InvalidDigest:
            writeErrorResponse(w, req, InvalidDigest, req.URL.Path)
        default:
            writeErrorResponse(w, req, InternalError, req.URL.Path)
        }
        return
    }
    w.Header().Set("ETag", metadata.MD5Sum)
    writeSuccessResponse(w)
}