// doDiffMain runs the diff.
func doDiffMain(firstURL, secondURL string) {
	// Source and targets are always directories.
	sourceSeparator := string(client.NewURL(firstURL).Separator)
	if !strings.HasSuffix(firstURL, sourceSeparator) {
		firstURL = firstURL + sourceSeparator
	}
	targetSeparator := string(client.NewURL(secondURL).Separator)
	if !strings.HasSuffix(secondURL, targetSeparator) {
		secondURL = secondURL + targetSeparator
	}

	// Expand aliased urls.
	firstAlias, firstURL, _ := mustExpandAlias(firstURL)
	secondAlias, secondURL, _ := mustExpandAlias(secondURL)

	firstClient, err := newClientFromAlias(firstAlias, firstURL)
	if err != nil {
		fatalIf(err.Trace(firstAlias, firstURL, secondAlias, secondURL),
			fmt.Sprintf("Failed to diff '%s' and '%s'", firstURL, secondURL))
	}
	difference, err := objectDifferenceFactory(secondAlias, secondURL)
	if err != nil {
		fatalIf(err.Trace(firstAlias, firstURL, secondAlias, secondURL),
			fmt.Sprintf("Failed to diff '%s' and '%s'", firstURL, secondURL))
	}

	isRecursive := true
	isIncomplete := false
	for sourceContent := range firstClient.List(isRecursive, isIncomplete) {
		if sourceContent.Err != nil {
			switch sourceContent.Err.ToGoError().(type) {
			// Handle filesystem related errors as non-fatal.
			case client.BrokenSymlink, client.TooManyLevelsSymlink, client.PathNotFound, client.PathInsufficientPermission:
				errorIf(sourceContent.Err.Trace(firstURL, secondURL), fmt.Sprintf("Failed on '%s'", firstURL))
			default:
				fatalIf(sourceContent.Err.Trace(firstURL, secondURL), fmt.Sprintf("Failed on '%s'", firstURL))
			}
			continue
		}
		if sourceContent.Type.IsDir() {
			continue
		}
		suffix := strings.TrimPrefix(sourceContent.URL.String(), firstURL)
		differ, err := difference(suffix, sourceContent.Type, sourceContent.Size)
		if err != nil {
			// Trace the comparison error itself; sourceContent.Err is nil at this point.
			errorIf(err.Trace(secondURL, suffix),
				fmt.Sprintf("Failed on '%s'", urlJoinPath(secondURL, suffix)))
			continue
		}
		if differ == differNone {
			continue
		}
		printMsg(diffMessage{
			FirstURL:  sourceContent.URL.String(),
			SecondURL: urlJoinPath(secondURL, suffix),
			Diff:      differ,
		})
	}
}
// urlJoinPath joins a path to an existing URL.
func urlJoinPath(url1, url2 string) string {
	u1 := client.NewURL(url1)
	u2 := client.NewURL(url2)
	if u1.Path != string(u1.Separator) {
		u1.Path = filepath.Join(u1.Path, u2.Path)
	} else {
		u1.Path = u2.Path
	}
	return u1.String()
}
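// Illustrative usage sketch (not part of the original source): how urlJoinPath
// composes a bucket URL and an object suffix. The host and object names are
// hypothetical, and the joined separator assumes a Unix-like system since
// filepath.Join is platform dependent.
func ExampleUrlJoinPath() {
	fmt.Println(urlJoinPath("https://s3.amazonaws.com/mybucket/", "path/to/object"))
	// Expected (Unix-like): https://s3.amazonaws.com/mybucket/path/to/object
}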
// SINGLE SOURCE - Type C: copy(d1..., d2) -> []copy(d1/f, d2/f) -> []A
// prepareCopyURLsTypeC - prepares target and source URLs for copying.
func prepareCopyURLsTypeC(sourceURL, targetURL string) <-chan copyURLs {
	copyURLsCh := make(chan copyURLs)
	go func(sourceURL, targetURL string, copyURLsCh chan copyURLs) {
		defer close(copyURLsCh)
		if !isURLRecursive(sourceURL) {
			// Source is not of recursive type.
			copyURLsCh <- copyURLs{Error: errSourceNotRecursive(sourceURL).Trace()}
			return
		}

		// Add `/` after trimming off `...` to emulate folders.
		sourceURL = stripRecursiveURL(sourceURL)
		sourceClient, sourceContent, err := url2Stat(sourceURL)
		if err != nil {
			// Source does not exist or insufficient privileges.
			copyURLsCh <- copyURLs{Error: err.Trace(sourceURL)}
			return
		}
		if !sourceContent.Type.IsDir() {
			// Source is not a dir.
			copyURLsCh <- copyURLs{Error: errSourceIsNotDir(sourceURL).Trace()}
			return
		}

		for sourceContent := range sourceClient.List(true) {
			if sourceContent.Err != nil {
				// Listing failed.
				copyURLsCh <- copyURLs{Error: sourceContent.Err.Trace()}
				continue
			}
			if !sourceContent.Content.Type.IsRegular() {
				// Source is not a regular file. Skip it for copy.
				continue
			}

			// All OK. Reduce this entry to Type A: construct the target path
			// from the recursive path of source without its prefix dir.
			sourceURLParse := client.NewURL(sourceURL)
			targetURLParse := client.NewURL(targetURL)
			sourceURLDelimited := sourceURLParse.String()[:strings.LastIndex(sourceURLParse.String(),
				string(sourceURLParse.Separator))+1]
			sourceContentName := sourceContent.Content.Name
			sourceContentURL := sourceURLDelimited + sourceContentName
			sourceContentParse := client.NewURL(sourceContentURL)

			newTargetURLParse := *targetURLParse
			newTargetURLParse.Path = filepath.Join(newTargetURLParse.Path, sourceContentName)
			copyURLsCh <- prepareCopyURLsTypeA(sourceContentParse.String(), newTargetURLParse.String())
		}
	}(sourceURL, targetURL, copyURLsCh)
	return copyURLsCh
}
// isObjectKeyPresent checks whether the URL carries an object key. This is
// necessary since share only operates on cloud storage URLs, not the filesystem.
func isObjectKeyPresent(url string) bool {
	u := client.NewURL(url)
	path := u.Path
	matchS3, _ := filepath.Match("*.s3*.amazonaws.com", u.Host)
	if matchS3 {
		hostSplits := strings.SplitN(u.Host, ".", 2)
		path = string(u.Separator) + hostSplits[0] + u.Path
	}
	matchGcs, _ := filepath.Match("*.storage.googleapis.com", u.Host)
	if matchGcs {
		hostSplits := strings.SplitN(u.Host, ".", 2)
		path = string(u.Separator) + hostSplits[0] + u.Path
	}
	pathSplits := strings.SplitN(path, "?", 2)
	splits := strings.SplitN(pathSplits[0], string(u.Separator), 3)
	switch len(splits) {
	case 0, 1:
		return false
	case 2:
		return false
	case 3:
		if splits[2] == "" {
			return false
		}
		return true
	}
	return false
}
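// Illustrative usage sketch (not part of the original source): which URL
// shapes isObjectKeyPresent accepts. Host and bucket names are hypothetical.
func ExampleIsObjectKeyPresent() {
	fmt.Println(isObjectKeyPresent("https://s3.amazonaws.com/mybucket/mykey")) // true: bucket and key.
	fmt.Println(isObjectKeyPresent("https://s3.amazonaws.com/mybucket"))       // false: bucket only, no key.
	fmt.Println(isObjectKeyPresent("https://mybucket.s3.amazonaws.com/mykey")) // true: virtual-host style bucket.
}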
// Validate command line arguments.
func checkRmSyntax(ctx *cli.Context) {
	args := ctx.Args()

	isHelp := ctx.GlobalBool("help")
	isForce := ctx.Bool("force")

	if !args.Present() || isHelp {
		exitCode := 1
		cli.ShowCommandHelpAndExit(ctx, "rm", exitCode)
	}

	URLs, err := args2URLs(args)
	fatalIf(err.Trace(ctx.Args()...), "Unable to parse arguments.")

	// If input validation fails then provide context sensitive help without
	// displaying the generic help message. The context sensitive help is shown
	// per argument instead of all arguments to keep the help display as well as
	// the code simple. Also, most of the time there will be just one arg.
	for _, url := range URLs {
		u := client.NewURL(url)
		if strings.HasSuffix(url, string(u.Separator)) {
			fatalIf(errDummy().Trace(),
				"‘"+url+"’ is a folder. To remove this folder recursively, please try ‘"+url+"...’ as argument.")
		}
		if isURLRecursive(url) && !isForce {
			fatalIf(errDummy().Trace(),
				"Recursive removal requires --force option. Please review carefully before performing this operation.")
		}
	}
}
// Remove all objects recursively.
func rmAll(url string, isIncomplete bool) {
	// Initialize new client.
	clnt, err := url2Client(url)
	if err != nil {
		errorIf(err.Trace(url), "Invalid URL ‘"+url+"’.")
		return // End of journey.
	}

	isRecursive := false // Disable recursion and only list this folder's contents.
	for entry := range clnt.List(isRecursive, isIncomplete) {
		if entry.Err != nil {
			errorIf(entry.Err.Trace(url), "Unable to list ‘"+url+"’.")
			return // End of journey.
		}
		if entry.Content.Type.IsDir() {
			// Add separator at the end to remove all its contents.
			url := entry.Content.URL.String()
			u := client.NewURL(url)
			url = url + string(u.Separator)
			// Recursively remove contents of this directory.
			rmAll(url, isIncomplete)
		}
		// Remove the entry itself; for directories, their contents are gone by now.
		rm(entry.Content.URL.String(), isIncomplete)
	}
}
// getHostConfig retrieves host specific configuration such as access keys, certs.
func getHostConfig(URL string) (hostConfig, *probe.Error) {
	config, err := getMcConfig()
	if err != nil {
		return hostConfig{}, err.Trace()
	}
	url := client.NewURL(URL)
	// No host matching or keys needed for filesystem requests.
	if url.Type == client.Filesystem {
		hostCfg := hostConfig{
			AccessKeyID:     "",
			SecretAccessKey: "",
			API:             "fs",
		}
		return hostCfg, nil
	}
	// Prefer an exact host match, then fall back to glob matching.
	if _, ok := config.Hosts[url.Host]; ok {
		return config.Hosts[url.Host], nil
	}
	for globURL, hostCfg := range config.Hosts {
		match, err := filepath.Match(globURL, url.Host)
		if err != nil {
			return hostConfig{}, errInvalidGlobURL(globURL, URL).Trace()
		}
		if match {
			return hostCfg, nil
		}
	}
	return hostConfig{}, errNoMatchingHost(URL).Trace()
}
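// Illustrative sketch (not part of the original source): the glob fallback in
// getHostConfig is plain filepath.Match against the host name, so a single
// config entry keyed "*.s3.amazonaws.com" covers all virtual-host style
// buckets. Hosts shown are hypothetical.
func ExampleHostGlobMatch() {
	match, _ := filepath.Match("*.s3.amazonaws.com", "mybucket.s3.amazonaws.com")
	fmt.Println(match) // true: the wildcard covers the bucket label.
	match, _ = filepath.Match("*.s3.amazonaws.com", "storage.googleapis.com")
	fmt.Println(match) // false: different host entirely.
}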
// New - instantiate a new fs client.
func New(path string) (client.Client, *probe.Error) {
	if strings.TrimSpace(path) == "" {
		return nil, probe.NewError(client.EmptyPath{})
	}
	return &fsClient{
		PathURL: client.NewURL(normalizePath(path)),
	}, nil
}
// guessURLContentType - guess content-type of the URL.
// On failure just return 'application/octet-stream'.
func guessURLContentType(urlStr string) string {
	url := client.NewURL(urlStr)
	extension := strings.TrimPrefix(filepath.Ext(url.Path), ".")
	contentType, e := contentdb.Lookup(extension)
	if e != nil {
		return "application/octet-stream"
	}
	return contentType
}
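// Illustrative usage sketch (not part of the original source): the guess is
// driven purely by the file extension in the URL path. The exact MIME string
// comes from the contentdb table, so "image/png" below is the conventional
// expectation rather than a guarantee. Names are hypothetical.
func ExampleGuessURLContentType() {
	fmt.Println(guessURLContentType("https://s3.amazonaws.com/mybucket/photo.png")) // Expected: image/png
	fmt.Println(guessURLContentType("https://s3.amazonaws.com/mybucket/blob"))      // application/octet-stream (no extension)
}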
// SINGLE SOURCE - Type B: copy(f, d) -> copy(f, d/f) -> A
// prepareCopyURLsTypeB - prepares target and source URLs for copying.
func prepareCopyURLsTypeB(sourceURL string, targetURL string) copyURLs {
	_, sourceContent, err := url2Stat(sourceURL)
	if err != nil {
		// Source does not exist or insufficient privileges.
		return copyURLs{Error: err.Trace(sourceURL)}
	}
	if !sourceContent.Type.IsRegular() {
		// Source is not a regular file.
		return copyURLs{Error: errInvalidSource(sourceURL).Trace()}
	}

	// All OK.. We can proceed. Type B: source is a file, target is a folder and exists.
	{
		sourceURLParse := client.NewURL(sourceURL)
		targetURLParse := client.NewURL(targetURL)
		targetURLParse.Path = filepath.Join(targetURLParse.Path, filepath.Base(sourceURLParse.Path))
		return prepareCopyURLsTypeA(sourceURL, targetURLParse.String())
	}
}
func checkRmSyntax(ctx *cli.Context) {
	args := ctx.Args()
	var force bool
	var incomplete bool

	if !args.Present() || args.First() == "help" {
		cli.ShowCommandHelpAndExit(ctx, "rm", 1) // Last argument is exit code.
	}
	if len(args) == 1 && args.Get(0) == "force" {
		return
	}
	if (len(args) == 2 && args.Get(0) == "force" && args.Get(1) == "incomplete") ||
		(len(args) == 2 && args.Get(1) == "force" && args.Get(0) == "incomplete") {
		return
	}
	if args.Last() == "force" {
		force = true
		args = args[:len(args)-1]
	}
	if args.Last() == "incomplete" {
		incomplete = true
		args = args[:len(args)-1]
	}

	// By this time we have sanitized the input args; only the URLs remain, so
	// parse and validate them properly.
	URLs, err := args2URLs(args)
	fatalIf(err.Trace(ctx.Args()...), "Unable to parse arguments.")

	// If input validation fails then provide context sensitive help without
	// displaying the generic help message. The context sensitive help is shown
	// per argument instead of all arguments to keep the help display as well as
	// the code simple. Also, most of the time there will be just one arg.
	for _, url := range URLs {
		u := client.NewURL(url)
		var helpStr string
		if strings.HasSuffix(url, string(u.Separator)) {
			if incomplete {
				helpStr = "Usage : mc rm " + url + recursiveSeparator + " incomplete force"
			} else {
				helpStr = "Usage : mc rm " + url + recursiveSeparator + " force"
			}
			fatalIf(errDummy().Trace(), helpStr)
		}
		if isURLRecursive(url) && !force {
			if incomplete {
				helpStr = "Usage : mc rm " + url + " incomplete force"
			} else {
				helpStr = "Usage : mc rm " + url + " force"
			}
			fatalIf(errDummy().Trace(), helpStr)
		}
	}
}
// SINGLE SOURCE - Type C: copy(d1..., d2) -> []copy(d1/f, d2/d1/f) -> []A
// prepareCopyURLsTypeC - prepares target and source URLs for copying.
func prepareCopyURLsTypeC(sourceURL, targetURL string) <-chan copyURLs {
	copyURLsCh := make(chan copyURLs)
	go func(sourceURL, targetURL string, copyURLsCh chan copyURLs) {
		defer close(copyURLsCh)
		if !isURLRecursive(sourceURL) {
			// Source is not of recursive type.
			copyURLsCh <- copyURLs{Error: errSourceNotRecursive(sourceURL).Trace()}
			return
		}

		// Add `/` after trimming off `...` to emulate folders.
		sourceURL = stripRecursiveURL(sourceURL)
		sourceClient, sourceContent, err := url2Stat(sourceURL)
		if err != nil {
			// Source does not exist or insufficient privileges.
			copyURLsCh <- copyURLs{Error: err.Trace(sourceURL)}
			return
		}
		if !sourceContent.Type.IsDir() {
			// Source is not a dir.
			copyURLsCh <- copyURLs{Error: errSourceIsNotDir(sourceURL).Trace()}
			return
		}

		for sourceContent := range sourceClient.List(true, false) {
			if sourceContent.Err != nil {
				// Listing failed.
				copyURLsCh <- copyURLs{Error: sourceContent.Err.Trace()}
				continue
			}
			if !sourceContent.Content.Type.IsRegular() {
				// Source is not a regular file. Skip it for copy.
				continue
			}

			// All OK. Reduce this entry to Type A: the target keeps the source's
			// path relative to the source folder's parent.
			newTargetURL := client.NewURL(targetURL)
			newTargetURL.Path = filepath.Join(newTargetURL.Path,
				strings.TrimPrefix(sourceContent.Content.URL.Path, url2Dir(sourceURL)))

			// Verify if destination exists; if cpForceFlag is not set, do not proceed.
			_, _, err := url2Stat(newTargetURL.String())
			if err == nil && !cpForceFlag {
				copyURLsCh <- copyURLs{Error: errOverWriteNotAllowed(newTargetURL.String()).Trace()}
				return
			}
			copyURLsCh <- prepareCopyURLsTypeA(sourceContent.Content.URL.String(), newTargetURL.String())
		}
	}(sourceURL, targetURL, copyURLsCh)
	return copyURLsCh
}
// url2Dir is just like filepath.Dir but always has a trailing url.Separator.
func url2Dir(urlStr string) string {
	url := client.NewURL(urlStr)
	if strings.HasSuffix(urlStr, string(url.Separator)) {
		return urlStr
	}
	lastIndex := strings.LastIndex(urlStr, string(url.Separator))
	dirname := urlStr[:lastIndex+1]
	if dirname == "" {
		return "."
	}
	return dirname
}
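// Illustrative usage sketch (not part of the original source): url2Dir keeps
// the trailing separator, unlike filepath.Dir. Paths are hypothetical and a
// Unix-like separator is assumed.
func ExampleURL2Dir() {
	fmt.Println(url2Dir("mybucket/dir/object")) // mybucket/dir/
	fmt.Println(url2Dir("mybucket/dir/"))       // mybucket/dir/ (already a dir, returned unchanged)
}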
// isValidHostURL - validate input host url.
func isValidHostURL(hostURL string) bool {
	if strings.TrimSpace(hostURL) == "" {
		return false
	}
	url := client.NewURL(hostURL)
	if url.Scheme != "https" && url.Scheme != "http" {
		return false
	}
	if url.Path != "" && url.Path != "/" {
		return false
	}
	return true
}
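// Illustrative usage sketch (not part of the original source): only bare
// http(s) endpoints qualify as host URLs; any path component or other scheme
// disqualifies them. Hosts are hypothetical.
func ExampleIsValidHostURL() {
	fmt.Println(isValidHostURL("https://s3.amazonaws.com"))          // true
	fmt.Println(isValidHostURL("https://s3.amazonaws.com/mybucket")) // false: carries a path.
	fmt.Println(isValidHostURL("ftp://example.com"))                 // false: not http(s).
}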
// JSON json message for share command.
func (s shareMessage) JSON() string {
	var shareMessageBytes []byte
	var err error
	if len(s.DownloadURL) > 0 {
		shareMessageBytes, err = json.Marshal(struct {
			Expiry      humanizedTime `json:"expiry"`
			DownloadURL string        `json:"downloadUrl"`
			Key         string        `json:"keyName"`
		}{
			Expiry:      timeDurationToHumanizedTime(s.Expiry),
			DownloadURL: s.DownloadURL,
			Key:         s.Key,
		})
	} else {
		var key string
		URL := client.NewURL(s.Key)
		postURL := URL.Scheme + URL.SchemeSeparator + URL.Host + string(URL.Separator)
		if !isBucketVirtualStyle(URL.Host) {
			postURL = postURL + s.UploadInfo["bucket"]
		}
		postURL = postURL + " "
		curlCommand := "curl " + postURL
		for k, v := range s.UploadInfo {
			if k == "key" {
				key = v
				continue
			}
			curlCommand = curlCommand + fmt.Sprintf("-F %s=%s ", k, v)
		}
		curlCommand = curlCommand + fmt.Sprintf("-F key=%s ", key) + "-F file=@<FILE> "
		shareMessageBytes, err = json.Marshal(struct {
			Expiry        humanizedTime `json:"expiry"`
			UploadCommand string        `json:"uploadCommand"`
			Key           string        `json:"keyName"`
		}{
			Expiry:        timeDurationToHumanizedTime(s.Expiry),
			UploadCommand: curlCommand,
			Key:           s.Key,
		})
	}
	fatalIf(probe.NewError(err), "Failed to marshal into JSON.")

	// JSON encoding escapes these characters into their unicode code points,
	// which is not directly usable for share and fails with cloud storage.
	// Convert them back so that they are usable.
	shareMessageBytes = bytes.Replace(shareMessageBytes, []byte("\\u0026"), []byte("&"), -1)
	shareMessageBytes = bytes.Replace(shareMessageBytes, []byte("\\u003c"), []byte("<"), -1)
	shareMessageBytes = bytes.Replace(shareMessageBytes, []byte("\\u003e"), []byte(">"), -1)
	return string(shareMessageBytes)
}
// checkMirrorSyntax(URLs []string)
func checkMirrorSyntax(ctx *cli.Context) {
	if len(ctx.Args()) < 2 || ctx.Args().First() == "help" {
		cli.ShowCommandHelpAndExit(ctx, "mirror", 1) // Last argument is exit code.
	}

	// Extract URLs.
	URLs, err := args2URLs(ctx.Args())
	fatalIf(err.Trace(ctx.Args()...), "Unable to parse arguments.")

	srcURL := URLs[0]
	tgtURLs := URLs[1:]

	/****** Generic rules *******/
	// Recursive source URL.
	newSrcURL := stripRecursiveURL(srcURL)
	_, srcContent, err := url2Stat(newSrcURL)
	fatalIf(err.Trace(srcURL), "Unable to stat source ‘"+newSrcURL+"’.")

	if !srcContent.Type.IsDir() {
		fatalIf(errInvalidArgument().Trace(srcContent.URL.String(), srcContent.Type.String()),
			fmt.Sprintf("Source ‘%s’ is not a folder. Only folders are supported by mirror.", srcURL))
	}

	if len(tgtURLs) == 0 {
		fatalIf(errInvalidArgument().Trace(), "Invalid number of target arguments to mirror command.")
	}

	for _, tgtURL := range tgtURLs {
		// Recursive URLs are not allowed in target.
		if isURLRecursive(tgtURL) {
			fatalIf(errDummy().Trace(), fmt.Sprintf("Recursive option is not supported for target ‘%s’ argument.", tgtURL))
		}

		url := client.NewURL(tgtURL)
		if url.Host != "" {
			if url.Path == string(url.Separator) {
				fatalIf(errInvalidArgument().Trace(), fmt.Sprintf("Target ‘%s’ does not contain bucket name.", tgtURL))
			}
		}

		_, content, err := url2Stat(tgtURL)
		fatalIf(err.Trace(tgtURL), "Unable to stat target ‘"+tgtURL+"’.")
		if content != nil {
			if !content.Type.IsDir() {
				fatalIf(errInvalidArgument().Trace(), "Target ‘"+tgtURL+"’ is not a folder.")
			}
		}
	}
}
// main for rm command.
func mainRm(ctx *cli.Context) {
	checkRmSyntax(ctx)

	// rm specific flags.
	isForce := ctx.Bool("force")
	isIncomplete := ctx.Bool("incomplete")

	// Set color.
	console.SetColor("Remove", color.New(color.FgGreen, color.Bold))

	// Parse args.
	URLs, err := args2URLs(ctx.Args())
	fatalIf(err.Trace(ctx.Args()...), "Unable to parse arguments.")

	// Support multiple targets.
	for _, url := range URLs {
		if isURLRecursive(url) && isForce {
			url := stripRecursiveURL(url)
			removeTopFolder := false

			// Find if the URL is a dir or not.
			_, content, err := url2Stat(url)
			fatalIf(err.Trace(url), "Unable to stat ‘"+url+"’.")

			if content.Type.IsDir() {
				// Determine whether to remove the top folder or only its
				// contents. If the URL does not end with a separator, then
				// include the top folder as well, otherwise not.
				u := client.NewURL(url)
				if !strings.HasSuffix(url, string(u.Separator)) {
					// Add separator at the end to remove all its contents.
					url = url + string(u.Separator)
					// Remember to remove the top most folder.
					removeTopFolder = true
				}
			}
			// Remove contents of this folder.
			rmAll(url, isIncomplete)
			if removeTopFolder {
				// Remove top folder as well.
				rm(url, isIncomplete)
			}
		} else {
			rm(url, isIncomplete)
		}
	}
}
// Check if the target URL represents a folder. It may or may not exist yet.
func isTargetURLDir(targetURL string) bool {
	targetURLParse := client.NewURL(targetURL)
	_, targetContent, perr := url2Stat(targetURL)
	if perr != nil {
		// Target does not exist: fall back to inspecting the URL shape.
		if targetURLParse.Path == string(targetURLParse.Separator) && targetURLParse.Scheme != "" {
			return false
		}
		if strings.HasSuffix(targetURLParse.Path, string(targetURLParse.Separator)) {
			return true
		}
		return false
	}
	if !targetContent.Type.IsDir() { // Target is not a dir.
		return false
	}
	return true
}
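// Decision table for isTargetURLDir (illustrative, not part of the original
// source). Whether stat succeeds depends on the live target, so the cases are
// summarized here instead of as runnable assertions:
//
//	target exists and stats as a folder            -> true
//	target exists and stats as a file              -> false
//	target missing, URL ends with a separator      -> true  (folder-to-be)
//	target missing, URL has no trailing separator  -> false
//	target missing, bare "scheme://host/" root URL -> false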
func checkCopySyntax(ctx *cli.Context) {
	if len(ctx.Args()) < 2 {
		cli.ShowCommandHelpAndExit(ctx, "cp", 1) // Last argument is exit code.
	}

	// Extract URLs.
	URLs := ctx.Args()
	srcURLs := URLs[:len(URLs)-1]
	tgtURL := URLs[len(URLs)-1]
	isRecursive := ctx.Bool("recursive")

	/****** Generic Invalid Rules *******/
	// Check if bucket name is passed for URL type arguments.
	url := client.NewURL(tgtURL)
	if url.Host != "" {
		// This check is for type URL.
		if !isURLVirtualHostStyle(url.Host) {
			if url.Path == string(url.Separator) {
				fatalIf(errInvalidArgument().Trace(), fmt.Sprintf("Target ‘%s’ does not contain bucket name.", tgtURL))
			}
		}
	}

	// Guess CopyURLsType based on source and target URLs.
	copyURLsType, err := guessCopyURLType(srcURLs, tgtURL, isRecursive)
	if err != nil {
		fatalIf(errInvalidArgument().Trace(), "Unable to guess the type of copy operation.")
	}

	switch copyURLsType {
	case copyURLsTypeA: // File -> File.
		checkCopySyntaxTypeA(srcURLs, tgtURL)
	case copyURLsTypeB: // File -> Folder.
		checkCopySyntaxTypeB(srcURLs, tgtURL)
	case copyURLsTypeC: // Folder... -> Folder.
		checkCopySyntaxTypeC(srcURLs, tgtURL, isRecursive)
	case copyURLsTypeD: // File1...FileN -> Folder.
		checkCopySyntaxTypeD(srcURLs, tgtURL)
	default:
		fatalIf(errInvalidArgument().Trace(), "Unable to guess the type of copy operation.")
	}
}
func rmAll(url string) {
	urlDir := url2Dir(url)
	for rmListCh := range rmList(url) {
		if rmListCh.err != nil {
			// If rmList throws an error, die here.
			fatalIf(rmListCh.err.Trace(), "Unable to list ‘"+url+"’.")
		}
		newURL := client.NewURL(urlDir)
		newURL.Path = filepath.Join(newURL.Path, rmListCh.keyName)
		newClnt, err := url2Client(newURL.String())
		if err != nil {
			errorIf(err.Trace(newURL.String()), "Unable to create client object ‘"+newURL.String()+"’.")
			continue
		}
		err = newClnt.Remove(false)
		errorIf(err.Trace(newURL.String()), "Unable to remove ‘"+newURL.String()+"’.")
	}
}
// NOTE: All the parse rules should be reduced to A: Copy(Source, Target).
//
// * VALID RULES
// =======================
// A: copy(f, f) -> copy(f, f)
// B: copy(f, d) -> copy(f, d/f) -> A
// C: copy(d1..., d2) -> []copy(d1/f, d2/d1/f) -> []A
// D: copy([]{d1... | f}, d2) -> []{copy(d1/f, d2/d1/f) | copy(f, d2/f)} -> []A
//
// * INVALID RULES
// =========================
// A: copy(d, *)
// B: copy(d..., f)
// C: copy(*, d...)
//
func checkCopySyntax(ctx *cli.Context) {
	if len(ctx.Args()) < 2 || ctx.Args().First() == "help" {
		cli.ShowCommandHelpAndExit(ctx, "cp", 1) // Last argument is exit code.
	}

	// Extract URLs.
	URLs, err := args2URLs(ctx.Args())
	fatalIf(err.Trace(ctx.Args()...), "One or more unknown URL types passed.")

	srcURLs := URLs[:len(URLs)-1]
	tgtURL := URLs[len(URLs)-1]

	/****** Generic rules *******/
	// Recursive URLs are not allowed in target.
	if isURLRecursive(tgtURL) {
		fatalIf(errDummy().Trace(), fmt.Sprintf("Recursive option is not supported for target ‘%s’ argument.", tgtURL))
	}

	// Scope locally.
	{
		url := client.NewURL(tgtURL)
		if url.Host != "" {
			// This check is for type URL.
			if url.Path == string(url.Separator) {
				fatalIf(errInvalidArgument().Trace(), fmt.Sprintf("Target ‘%s’ does not contain bucket name.", tgtURL))
			}
			if strings.Count(url.Path, "/") < 2 {
				if err := bucketExists(tgtURL); err != nil {
					fatalIf(err.Trace(), fmt.Sprintf("Unable to stat target ‘%s’.", tgtURL))
				}
			}
		}
	}

	switch guessCopyURLType(srcURLs, tgtURL) {
	case copyURLsTypeA: // File -> File.
		checkCopySyntaxTypeA(srcURLs, tgtURL)
	case copyURLsTypeB: // File -> Folder.
		checkCopySyntaxTypeB(srcURLs, tgtURL)
	case copyURLsTypeC: // Folder... -> Folder.
		checkCopySyntaxTypeC(srcURLs, tgtURL)
	case copyURLsTypeD: // File | Folder... -> Folder.
		checkCopySyntaxTypeD(srcURLs, tgtURL)
	default:
		fatalIf(errInvalidArgument().Trace(), "Invalid arguments to copy command.")
	}
}
func rmAllIncompleteUploads(url string) {
	clnt, err := url2Client(url)
	if err != nil {
		errorIf(err.Trace(url), "Unable to get client object for ‘"+url+"’.")
		return
	}
	urlDir := url2Dir(url)
	for entry := range clnt.List(true, true) {
		newURL := client.NewURL(urlDir)
		newURL.Path = filepath.Join(newURL.Path, entry.Content.Name)
		newClnt, err := url2Client(newURL.String())
		if err != nil {
			errorIf(err.Trace(newURL.String()), "Unable to create client object ‘"+newURL.String()+"’.")
			continue
		}
		err = newClnt.Remove(true)
		errorIf(err.Trace(newURL.String()), "Unable to remove ‘"+newURL.String()+"’.")
	}
}
// getNewClient gives a new client interface.
func getNewClient(urlStr string, auth hostConfig) (client.Client, *probe.Error) {
	url := client.NewURL(urlStr)
	switch url.Type {
	case client.Object: // Minio and S3 compatible cloud storage.
		s3Config := new(client.Config)
		s3Config.AccessKeyID = func() string {
			if auth.AccessKeyID == globalAccessKeyID {
				return ""
			}
			return auth.AccessKeyID
		}()
		s3Config.SecretAccessKey = func() string {
			if auth.SecretAccessKey == globalSecretAccessKey {
				return ""
			}
			return auth.SecretAccessKey
		}()
		s3Config.AppName = "Minio"
		s3Config.AppVersion = globalMCVersion
		s3Config.AppComments = []string{os.Args[0], runtime.GOOS, runtime.GOARCH}
		s3Config.HostURL = urlStr
		s3Config.Debug = globalDebugFlag

		var s3Client client.Client
		var err *probe.Error
		if auth.API == "S3v2" {
			s3Client, err = s3v2.New(s3Config)
		} else {
			s3Client, err = s3v4.New(s3Config)
		}
		if err != nil {
			return nil, err.Trace()
		}
		return s3Client, nil
	case client.Filesystem:
		fsClient, err := fs.New(urlStr)
		if err != nil {
			return nil, err.Trace()
		}
		return fsClient, nil
	}
	return nil, errInitClient(urlStr).Trace()
}
// String - regular colorized message.
func (s ShareMessage) String() string {
	if len(s.DownloadURL) > 0 {
		return console.Colorize("Share", s.DownloadURL)
	}

	var key string
	URL := client.NewURL(s.Key)
	postURL := URL.Scheme + URL.SchemeSeparator + URL.Host + string(URL.Separator) + s.UploadInfo["bucket"] + " "
	curlCommand := "curl " + postURL
	for k, v := range s.UploadInfo {
		if k == "key" {
			key = v
			continue
		}
		curlCommand = curlCommand + fmt.Sprintf("-F %s=%s ", k, v)
	}
	curlCommand = curlCommand + fmt.Sprintf("-F key=%s ", key) + "-F file=@<FILE> "

	emphasize := console.Colorize("File", "<FILE>")
	curlCommand = strings.Replace(curlCommand, "<FILE>", emphasize, -1)
	return console.Colorize("Share", curlCommand)
}
// checkMirrorSyntax(URLs []string)
func checkMirrorSyntax(ctx *cli.Context) {
	if len(ctx.Args()) != 2 {
		cli.ShowCommandHelpAndExit(ctx, "mirror", 1) // Last argument is exit code.
	}

	// Extract URLs.
	URLs := ctx.Args()
	srcURL := URLs[0]
	tgtURL := URLs[1]

	/****** Generic rules *******/
	_, srcContent, err := url2Stat(srcURL)
	// Incomplete uploads are not necessary for copy operation, no need to verify for them.
	isIncomplete := false
	if err != nil && !isURLPrefixExists(srcURL, isIncomplete) {
		fatalIf(err.Trace(srcURL), "Unable to stat source ‘"+srcURL+"’.")
	}
	if err == nil && !srcContent.Type.IsDir() {
		fatalIf(errInvalidArgument().Trace(srcContent.URL.String(), srcContent.Type.String()),
			fmt.Sprintf("Source ‘%s’ is not a folder. Only folders are supported by mirror command.", srcURL))
	}

	if tgtURL == "" {
		fatalIf(errInvalidArgument().Trace(), "Invalid target arguments to mirror command.")
	}

	url := client.NewURL(tgtURL)
	if url.Host != "" {
		if !isURLVirtualHostStyle(url.Host) {
			if url.Path == string(url.Separator) {
				fatalIf(errInvalidArgument().Trace(tgtURL),
					fmt.Sprintf("Target ‘%s’ does not contain bucket name.", tgtURL))
			}
		}
	}

	_, _, err = url2Stat(tgtURL)
	// Die on any error other than client.PathNotFound - destination directory need not exist.
	// Note: err is nil when the target already exists, so guard before inspecting it.
	if err != nil {
		if _, ok := err.ToGoError().(client.PathNotFound); !ok {
			fatalIf(err.Trace(tgtURL), fmt.Sprintf("Unable to stat %s", tgtURL))
		}
	}
}
// New returns an initialized s3Client structure. If debug is enabled,
// use an internal trace transport.
func New(config *client.Config) (client.Client, *probe.Error) {
	u := client.NewURL(config.HostURL)
	transport := http.DefaultTransport
	if config.Debug {
		transport = httptracer.GetNewTraceTransport(NewTrace(), http.DefaultTransport)
	}
	s3Conf := minio.Config{
		AccessKeyID:     config.AccessKeyID,
		SecretAccessKey: config.SecretAccessKey,
		Transport:       transport,
		Endpoint:        u.Scheme + u.SchemeSeparator + u.Host,
	}
	s3Conf.SetUserAgent(config.AppName, config.AppVersion, config.AppComments...)
	api, err := minio.New(s3Conf)
	if err != nil {
		return nil, probe.NewError(err)
	}
	return &s3Client{api: api, hostURL: u}, nil
}
// checkShareUploadSyntax - validate command-line args.
func checkShareUploadSyntax(ctx *cli.Context) {
	args := ctx.Args()
	if !args.Present() {
		cli.ShowCommandHelpAndExit(ctx, "upload", 1) // Last argument is exit code.
	}

	// Set command flags from context.
	isRecursive := ctx.Bool("recursive")
	expireArg := ctx.String("expire")

	// Parse expiry.
	expiry := shareDefaultExpiry
	if expireArg != "" {
		var e error
		expiry, e = time.ParseDuration(expireArg)
		fatalIf(probe.NewError(e), "Unable to parse expire=‘"+expireArg+"’.")
	}

	// Validate expiry.
	if expiry.Seconds() < 1 {
		fatalIf(errDummy().Trace(expiry.String()), "Expiry cannot be less than 1 second.")
	}
	if expiry.Seconds() > 604800 { // 7 days in seconds.
		fatalIf(errDummy().Trace(expiry.String()), "Expiry cannot be larger than 7 days.")
	}

	for _, targetURL := range ctx.Args() {
		url := client.NewURL(targetURL)
		if strings.HasSuffix(targetURL, string(url.Separator)) && !isRecursive {
			fatalIf(errInvalidArgument().Trace(targetURL),
				"Use --recursive option to generate curl command for prefixes.")
		}
	}
}
// makeCurlCmd constructs a curl command-line for the upload.
func makeCurlCmd(key string, isRecursive bool, uploadInfo map[string]string) string {
	URL := client.NewURL(key)
	postURL := URL.Scheme + URL.SchemeSeparator + URL.Host + string(URL.Separator)
	if !isBucketVirtualStyle(URL.Host) {
		postURL = postURL + uploadInfo["bucket"]
	}
	postURL += " "
	curlCommand := "curl " + postURL
	for k, v := range uploadInfo {
		if k == "key" {
			key = v
			continue
		}
		curlCommand += fmt.Sprintf("-F %s=%s ", k, v)
	}
	// If recursive is enabled, append a <NAME> placeholder so each uploaded
	// file is keyed under the shared prefix.
	if isRecursive {
		curlCommand += fmt.Sprintf("-F key=%s<NAME> ", key) // Object name.
	} else {
		curlCommand += fmt.Sprintf("-F key=%s ", key) // Object name.
	}
	curlCommand += "-F file=@<FILE>" // File to upload.
	return curlCommand
}
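// Illustrative usage sketch (not part of the original source): the shape of
// the generated command for a hypothetical presigned POST. uploadInfo below
// stands in for the policy fields a real presign call would return, and Go's
// map iteration order makes the -F field order nondeterministic.
func ExampleMakeCurlCmd() {
	uploadInfo := map[string]string{
		"bucket": "mybucket",
		"key":    "mykey",
		"policy": "<BASE64-POLICY>",
	}
	fmt.Println(makeCurlCmd("https://s3.amazonaws.com/mybucket/mykey", false, uploadInfo))
	// Expected shape (field order may vary):
	// curl https://s3.amazonaws.com/mybucket -F bucket=mybucket -F policy=<BASE64-POLICY> -F key=mykey -F file=@<FILE>
}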
// New returns an initialized s3Client structure. If debug is enabled,
// use an internal trace transport.
func New(config *client.Config) (client.Client, *probe.Error) {
	u := client.NewURL(config.HostURL)
	transport := http.DefaultTransport
	if config.Debug {
		if config.Signature == "S3v4" {
			transport = httptracer.GetNewTraceTransport(NewTraceV4(), http.DefaultTransport)
		}
		if config.Signature == "S3v2" {
			transport = httptracer.GetNewTraceTransport(NewTraceV2(), http.DefaultTransport)
		}
	}
	s3Conf := minio.Config{
		AccessKeyID:     config.AccessKeyID,
		SecretAccessKey: config.SecretAccessKey,
		Transport:       transport,
		Endpoint:        u.Scheme + u.SchemeSeparator + u.Host,
		Signature: func() minio.SignatureType {
			if config.Signature == "S3v2" {
				return minio.SignatureV2
			}
			return minio.SignatureV4
		}(),
	}
	s3Conf.SetUserAgent(config.AppName, config.AppVersion, config.AppComments...)
	api, err := minio.New(s3Conf)
	if err != nil {
		return nil, probe.NewError(err)
	}
	s3Clnt := &s3Client{
		mu:           new(sync.Mutex),
		api:          api,
		hostURL:      u,
		virtualStyle: isVirtualHostStyle(u.Host),
	}
	return s3Clnt, nil
}
// makeCopyContentTypeB - CopyURLs content for copying.
func makeCopyContentTypeB(sourceAlias string, sourceContent *client.Content, targetAlias string, targetURL string) copyURLs {
	// All OK.. We can proceed. Type B: source is a file, target is a folder and exists.
	targetURLParse := client.NewURL(targetURL)
	targetURLParse.Path = filepath.Join(targetURLParse.Path, filepath.Base(sourceContent.URL.Path))
	return makeCopyContentTypeA(sourceAlias, sourceContent, targetAlias, targetURLParse.String())
}
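// Illustrative sketch (not part of the original source): the Type B path
// arithmetic in isolation; the target keeps its own path and gains the
// source's base name. Names are hypothetical, and filepath.Join assumes a
// Unix-like separator.
func ExampleTypeBPathJoin() {
	targetURLParse := client.NewURL("https://s3.amazonaws.com/mybucket")
	targetURLParse.Path = filepath.Join(targetURLParse.Path, filepath.Base("/tmp/dir/file.txt"))
	fmt.Println(targetURLParse.String())
	// Expected: https://s3.amazonaws.com/mybucket/file.txt
}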