// lookup prints all filenames matching the pattern func lookup(data []*DirEntry, query string) { query = strings.ToLower(query) elapsedTime := time.Now() results := 0 checked := 0 for _, dir := range data { match, err := filepath.Match(query, strings.ToLower(filepath.Base(dir.Path))) fatal(err) if match { fmt.Println(blue + dir.Path + string(filepath.Separator) + reset) results++ } checked++ for _, file := range dir.Files { match, err := filepath.Match(query, strings.ToLower(file.Name)) fatal(err) if match { fmt.Println(filepath.Join(dir.Path, red+file.Name+reset)) results++ } checked++ } } fmt.Println("Results:", results, "/", checked, "Time:", time.Since(elapsedTime)) }
func (p *Configvars) ProcessData(src []byte, relPath string, fileName string) ([]byte, error) { shouldProcess := true if len(p.include) > 0 { shouldProcess = false for _, match := range p.include { if matched, _ := filepath.Match(match, fileName); matched { shouldProcess = true break } } } else { for _, match := range p.exclude { if matched, _ := filepath.Match(match, fileName); matched { shouldProcess = false break } } } if shouldProcess { data := string(src) for name, value := range p.vars { data = strings.Replace(data, "{{"+name+"}}", value, -1) } return []byte(data), nil } return src, nil }
// New - instantiate minio client API with your input Config{}. func New(config Config) (CloudStorageAPI, error) { if strings.TrimSpace(config.Region) == "" || len(config.Region) == 0 { u, err := url.Parse(config.Endpoint) if err != nil { return API{}, err } match, _ := filepath.Match("*.s3*.amazonaws.com", u.Host) if match { config.isVirtualStyle = true hostSplits := strings.SplitN(u.Host, ".", 2) u.Host = hostSplits[1] } matchGoogle, _ := filepath.Match("*.storage.googleapis.com", u.Host) if matchGoogle { config.isVirtualStyle = true hostSplits := strings.SplitN(u.Host, ".", 2) u.Host = hostSplits[1] } config.Region = getRegion(u.Host) if config.Region == "google" { // Google cloud storage is signature V2 config.Signature = SignatureV2 } } config.SetUserAgent(LibraryName, LibraryVersion, runtime.GOOS, runtime.GOARCH) config.isUserAgentSet = false // default return API{apiCore{&config}}, nil }
func (rules RuleMap) MatchedRule(path string) (string, *Rule) { if rules[path] != nil { return path, rules[path] } _, name := filepath.Split(path) if rules[name] != nil { return name, rules[name] } for pat, rule := range rules { matched, err := filepath.Match(pat, path) errhandle(err) if matched { return pat, rule } } for pat, rule := range rules { matched, err := filepath.Match(pat, name) errhandle(err) if matched { return pat, rule } } return "", nil }
// deleteOldLogs removes files matching log*, statelog* or testlog* from the
// current working directory. Failures to open or read the directory are
// fatal; individual Remove failures are silently ignored (best effort).
func deleteOldLogs() {
	dirname := "." + string(filepath.Separator)
	d, err := os.Open(dirname)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	defer d.Close()
	files, err := d.Readdir(-1)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	patterns := []string{"log*", "statelog*", "testlog*"}
	for _, file := range files {
		for _, pattern := range patterns {
			// BUG FIX: the original called os.Remove("file.Name()") with a
			// literal string, so no log file was ever actually deleted.
			if matched, err := filepath.Match(pattern, file.Name()); err == nil && matched {
				os.Remove(file.Name())
				break
			}
		}
	}
}
func (dsns *DataSourceNames) fsFind(pattern string) []*FsFindNode { dsns.RLock() defer dsns.RUnlock() dots := strings.Count(pattern, ".") set := make(map[string]*FsFindNode) for k, dsId := range dsns.names { if yes, _ := filepath.Match(pattern, k); yes && dots == strings.Count(k, ".") { set[k] = &FsFindNode{Name: k, Leaf: true, dsId: dsId} } } for k, _ := range dsns.prefixes { if yes, _ := filepath.Match(pattern, k); yes && dots == strings.Count(k, ".") { set[k] = &FsFindNode{Name: k, Leaf: false} } } // convert to array result := make(fsNodes, 0) for _, v := range set { result = append(result, v) } // so that results are consistently ordered, or Grafanas get confused sort.Sort(result) return result }
func (p pathPattern) Matches(path string, fi os.FileInfo) bool { if p.matchDirOnly && !fi.IsDir() { return false } if runtime.GOOS == "windows" { path = filepath.ToSlash(path) } if p.leadingSlash { res, err := filepath.Match(p.content, path) if err != nil { return false } return res } else { slashes := 0 pos := 0 for pos = len(path) - 1; pos >= 0; pos-- { if path[pos:pos+1] == "/" { slashes++ if slashes > p.depth { break } } } if slashes < p.depth { return false } checkpath := path[pos+1:] res, err := filepath.Match(p.content, checkpath) if err != nil { return false } return res } }
// isIgnoredFile returns true if 'filename' is on the exclude list. func isIgnoredFile(filename string) bool { matchFile := path.Base(filename) // OS specific ignore list. for _, ignoredFile := range ignoreFiles[runtime.GOOS] { matched, err := filepath.Match(ignoredFile, matchFile) if err != nil { panic(err) } if matched { return true } } // Default ignore list for all OSes. for _, ignoredFile := range ignoreFiles["default"] { matched, err := filepath.Match(ignoredFile, matchFile) if err != nil { panic(err) } if matched { return true } } return false }
func listFiles(path string, f os.FileInfo, err error) error { if f == nil { return err } if f.IsDir() { return nil } matched, _ := filepath.Match("disposition*.csv", filepath.Base(path)) if matched { e := handleDisposition(*output_path, path) if e != nil { fmt.Println("failed to deal with ", path) } } matched, _ = filepath.Match("lease*.csv", filepath.Base(path)) if matched { e := handleLease(*output_path, path) if e != nil { fmt.Println("failed to deal with ", path) } } matched, _ = filepath.Match("production*.csv", filepath.Base(path)) if matched { e := handleProduction(*output_path, path) if e != nil { fmt.Println("failed to deal with ", path) } } return nil }
func (f *filelist) Contains(pathname string) bool { // Ignore dot files _, filename := filepath.Split(pathname) if strings.HasPrefix(filename, ".") { return true } cwd, _ := os.Getwd() abs, _ := filepath.Abs(pathname) for _, pattern := range *f { // Also check working directory rel := path.Join(cwd, pattern) // Match pattern directly if matched, _ := filepath.Match(pattern, pathname); matched { return true } // Also check pattern relative to working directory if matched, _ := filepath.Match(rel, pathname); matched { return true } // Finally try absolute path st, e := os.Stat(rel) if os.IsExist(e) && st.IsDir() && strings.HasPrefix(abs, rel) { return true } } return false }
func shouldAnalyse(p string) (b bool) { for e := IgnoredFiles.Front(); e != nil; e = e.Next() { pattern, ok := e.Value.(string) if strings.HasSuffix(pattern, "/") && (strings.Index(pattern, "*") < 0) { if strings.HasPrefix(p, pattern) { return false } } else { if !ok { return false } matched, error := filepath.Match(pattern, p) //fmt.Println(pattern+": "+p) //fmt.Println(matched) if error != nil { return false } if matched { return false } _, file := filepath.Split(p) matched, error = filepath.Match(file, p) if matched { return false } } } return true }
func (i *GitIgnorer) Ignore(fn string, isdir bool) bool { fullpath := filepath.Join(i.basepath, i.prefix, fn) prefpath := filepath.Join(i.prefix, fn) base := filepath.Base(prefpath) dirpath := prefpath[:len(prefpath)-len(base)] if isdir && base == ".git" { return true } for _, pat := range i.globs { if strings.Index(pat, "/") != -1 { if m, _ := filepath.Match(pat, fullpath); m { return true } } else if m, _ := filepath.Match(pat, fn); m { return true } } for _, pat := range i.res { if pat.Match([]byte(fn)) { return true } } for _, dir := range i.dirs { if strings.Contains(dirpath, dir) { return true } } return false }
func loadDispoFiles(path string, f os.FileInfo, err error) error { if f == nil { return err } if f.IsDir() { return nil } file, err := os.Open(path) if err != nil { fmt.Printf("failed to open file %s", path) return nil } defer file.Close() matched, _ := filepath.Match("disposition*.gas.json", filepath.Base(path)) if matched { var detail []DisposGas jsonParser := json.NewDecoder(file) if err = jsonParser.Decode(&detail); err != nil { fmt.Printf("Fail to parsing file %s : %s", f.Name, err.Error()) } else { disposGasDetail = append(disposGasDetail, detail...) } } matched, _ = filepath.Match("disposition*.oil.json", filepath.Base(path)) if matched { var detail []DisposOil jsonParser := json.NewDecoder(file) if err = jsonParser.Decode(&detail); err != nil { fmt.Printf("Fail to parsing file %s : %s", f.Name, err.Error()) } else { disposOilDetail = append(disposOilDetail, detail...) } } return nil }
// Matches returns true if the branch matches the include patterns and // does not match any of the exclude patterns. func (b *Branch) Matches(branch string) bool { // when no includes or excludes automatically match if len(b.Include) == 0 && len(b.Exclude) == 0 { return true } // exclusions are processed first. So we can include everything and // then selectively exclude certain sub-patterns. for _, pattern := range b.Exclude { if pattern == branch { return false } if ok, _ := filepath.Match(pattern, branch); ok { return false } } for _, pattern := range b.Include { if pattern == branch { return true } if ok, _ := filepath.Match(pattern, branch); ok { return true } } return false }
// this code is necessary since, share only operates on cloud storage URLs not filesystem func isObjectKeyPresent(url string) bool { u := client.NewURL(url) path := u.Path matchS3, _ := filepath.Match("*.s3*.amazonaws.com", u.Host) if matchS3 { hostSplits := strings.SplitN(u.Host, ".", 2) path = string(u.Separator) + hostSplits[0] + u.Path } matchGcs, _ := filepath.Match("*.storage.googleapis.com", u.Host) if matchGcs { hostSplits := strings.SplitN(u.Host, ".", 2) path = string(u.Separator) + hostSplits[0] + u.Path } pathSplits := strings.SplitN(path, "?", 2) splits := strings.SplitN(pathSplits[0], string(u.Separator), 3) switch len(splits) { case 0, 1: return false case 2: return false case 3: if splits[2] == "" { return false } return true } return false }
// Return whether a given filename passes the include / exclude path filters // Only paths that are in includePaths and outside excludePaths are passed // If includePaths is empty that filter always passes and the same with excludePaths // Both path lists support wildcard matches func FilenamePassesIncludeExcludeFilter(filename string, includePaths, excludePaths []string) bool { if len(includePaths) == 0 && len(excludePaths) == 0 { return true } // For Win32, because git reports files with / separators cleanfilename := filepath.Clean(filename) if len(includePaths) > 0 { matched := false for _, inc := range includePaths { // Special case local dir, matches all (inc subpaths) if _, local := localDirSet[inc]; local { matched = true break } matched, _ = filepath.Match(inc, filename) if !matched && IsWindows() { // Also Win32 match matched, _ = filepath.Match(inc, cleanfilename) } if !matched { // Also support matching a parent directory without a wildcard if strings.HasPrefix(cleanfilename, inc+string(filepath.Separator)) { matched = true } } if matched { break } } if !matched { return false } } if len(excludePaths) > 0 { for _, ex := range excludePaths { // Special case local dir, matches all (inc subpaths) if _, local := localDirSet[ex]; local { return false } matched, _ := filepath.Match(ex, filename) if !matched && IsWindows() { // Also Win32 match matched, _ = filepath.Match(ex, cleanfilename) } if matched { return false } // Also support matching a parent directory without a wildcard if strings.HasPrefix(cleanfilename, ex+string(filepath.Separator)) { return false } } } return true }
// Run this in a go routine to listen for IR Key Press Events func (l *Router) Run() { var rb remoteButton for { event := <-l.receive match := 0 // Check for exact match rb.remote = event.Remote rb.button = event.Button if h, ok := l.handlers[rb]; ok { h(event) continue } // Check for pattern matches for k, h := range l.handlers { remoteMatched, _ := filepath.Match(k.remote, event.Remote) buttonMatched, _ := filepath.Match(k.button, event.Button) if remoteMatched && buttonMatched { h(event) match = 1 } } if match == 0 { log.Println("No match for ", event) } } }
// RebuildAccountFiles regenerates each account's dataset .db files from the
// transaction (.trn) files on disk, after clearing cached dataset info, and
// returns the root block references collected from all regenerated states.
// Any filesystem or decoding failure panics via PanicOn.
func (handler *AccountHandler) RebuildAccountFiles() (rootBlocks []BlockSource) {
	// Open each dataset and check the chains
	dir, err := os.Open(filepath.Join(datDirectory, "account"))
	PanicOn(err)
	defer dir.Close()
	dirlist, err := dir.Readdir(-1)
	PanicOn(err)

	for _, info := range dirlist {
		// Clear all cached dataset information from the info files
		name := info.Name()
		// Info files are named <22-char base64 account hash>.info
		if m, _ := filepath.Match("??????????????????????.info", name); m {
			// Read the accountNameH from the filename
			var accountNameH core.Byte128
			{
				decoded, err := base64.RawURLEncoding.DecodeString(name[:22])
				PanicOn(err)
				accountNameH.Set(decoded)
			}
			info := readInfoFile(accountNameH)
			if info != nil {
				info.Datasets = nil
				writeInfoFile(accountNameH, *info)
			}
		}
	}
	for _, info := range dirlist {
		name := info.Name()
		// Transaction files: <account hash>.<dataset hash>.trn
		if m, _ := filepath.Match("??????????????????????.??????????????????????.trn", name); m {
			// Read the accountNameH from the filename
			var accountName string
			var accountNameH core.Byte128
			{
				decoded, err := base64.RawURLEncoding.DecodeString(name[:22])
				PanicOn(err)
				accountNameH.Set(decoded)
				info := readInfoFile(accountNameH)
				if info != nil {
					accountName = string(info.AccountName)
				}
			}
			datasetName := getDatasetNameFromFile(name)
			core.Log(core.LogDebug, "Regenerating file %s.db (%s.%s)", name[:45], accountName, datasetName)

			// Generate the DB file from transactions
			states := stateArrayFromTransactions(accountNameH, datasetName)
			sort.Sort(states)
			writeDBFile(accountNameH, datasetName, &dbStateCollection{States: states})
			for _, e := range states {
				rootBlocks = append(rootBlocks, BlockSource{BlockID: e.State.BlockID, StateID: e.State.StateID, DatasetName: datasetName, AccountNameH: accountNameH, AccountName: string(accountName)})
			}
		}
	}
	return rootBlocks
}
// setBucketRegion fetches the region and updates config, // additionally it also constructs a proper endpoint based on that region. func (c *Config) setBucketRegion() error { u, err := url.Parse(c.Endpoint) if err != nil { return err } if !c.isVirtualHostedStyle { c.Region = getRegion(u.Host) return nil } var bucket, host string hostIndex := strings.Index(u.Host, "s3") if hostIndex == -1 { hostIndex = strings.Index(u.Host, "storage.googleapis.com") } if hostIndex > 0 { host = u.Host[hostIndex:] bucket = u.Host[:hostIndex-1] } genericGoogle, _ := filepath.Match("*.storage.googleapis.com", u.Host) if genericGoogle { // returning standard region for google for now, can be changed in future // to query for region in case it is useful c.Region = getRegion(host) return nil } genericS3, _ := filepath.Match("*.s3.amazonaws.com", u.Host) if !genericS3 { c.Region = getRegion(host) return nil } // query aws s3 for the region for case of bucketName.s3.amazonaws.com u.Host = host tempConfig := Config{} tempConfig.AccessKeyID = c.AccessKeyID tempConfig.SecretAccessKey = c.SecretAccessKey tempConfig.Endpoint = u.String() tempConfig.Region = getRegion(u.Host) tempConfig.isVirtualHostedStyle = false s3API := API{s3API{&tempConfig}} region, err := s3API.getBucketLocation(bucket) if err != nil { return err } // if region returned from getBucketLocation is null // and if genericS3 is enabled - set back to 'us-east-1'. if region == "" { if genericS3 { region = "us-east-1" } } c.Region = region c.setEndpoint(region, bucket, u.Scheme) return nil }
// match_file reports whether fname matches any entry in files. Each entry is
// tried as a direct glob; entries ending in "/" additionally match anything
// inside that directory tree (up to max_depth levels deep).
func match_file(fname string, files []string) (bool, error) {
	var matched bool
	var err error
	for _, file := range files {
		/* Direct match */
		matched, err = filepath.Match(file, fname)
		if err != nil {
			return false, err
		}
		if matched {
			return true, nil
		}
		/* There are three cases that match_file can handle:
		 *
		 *  dirname/filename
		 *  dirname/*
		 *  dirname/
		 *
		 * The first case is an exact match, the second case is a
		 * direct match of everything in that directory, and the third
		 * is a direct match of everything in that directory and its
		 * subdirectories.
		 *
		 * The first two cases are handled above, the code below is
		 * only for that latter case, so if file doesn't end in /,
		 * skip to the next file.
		 */
		// BUG FIX: guard against an empty pattern string, which made the
		// original index expression file[len(file)-1] panic.
		if len(file) == 0 || file[len(file)-1] != '/' {
			continue
		}
		/* Remove / because we add it again below */
		file = file[:len(file)-1]
		/* Maximum tree depth, as calculated by
		 * $(( `git ls-files | tr -d "[a-z][A-Z][0-9]\-\_\." | \
		 *   sort -u | tail -1 | wc -c` - 1 ))
		 */
		max_depth := 11
		for i := 0; i < max_depth; i++ {
			/* Subdirectory match */
			file += "/*"
			if matched, err = filepath.Match(file, fname); err != nil {
				return false, err
			}
			if matched {
				return true, nil
			}
		}
	}
	return false, nil
}
func (s *WatchStep) watch(root string) (*fsnotify.Watcher, error) { // Set up the filesystem watcher watcher, err := fsnotify.NewWatcher() if err != nil { return nil, err } filters := []string{ fmt.Sprintf("%s*", s.options.StepPath()), fmt.Sprintf("%s*", s.options.ProjectDownloadPath()), fmt.Sprintf("%s*", s.options.BuildPath()), ".*", "_*", } watchCount := 0 // import a .gitignore if it exists filters = append(filters, s.filterGitignore(root)...) err = filepath.Walk(root, func(path string, info os.FileInfo, err error) error { if info.IsDir() { if err != nil { return err } partialPath := filepath.Base(path) s.logger.Debugln("check path", path, partialPath) for _, pattern := range filters { matchFull, err := filepath.Match(pattern, path) if err != nil { s.logger.Warnln("Bad exclusion pattern: %s", pattern) } if matchFull { s.logger.Debugf("exclude (%s): %s", pattern, path) return filepath.SkipDir } matchPartial, _ := filepath.Match(pattern, partialPath) if matchPartial { s.logger.Debugf("exclude (%s): %s", pattern, partialPath) return filepath.SkipDir } } s.logger.Debugln("Watching:", path) watchCount = watchCount + 1 if err := watcher.Add(path); err != nil { return err } } return nil }) if err != nil { return nil, err } s.logger.Debugf("Watching %d directories", watchCount) return watcher, nil }
// EnsureDirState ensures that directory content matches expectations.
//
// EnsureDirState enumerates all the files in the specified directory that
// match the provided pattern (glob). Each enumerated file is checked to ensure
// that the contents, permissions are what is desired. Unexpected files are
// removed. Missing files are created and differing files are corrected. Files
// not matching the pattern are ignored.
//
// Note that EnsureDirState only checks for permissions and content. Other
// security mechanisms, including file ownership and extended attributes are
// *not* supported.
//
// The content map describes each of the files that are intended to exist in
// the directory. Map keys must be file names relative to the directory.
// Sub-directories in the name are not allowed.
//
// If writing any of the files fails, EnsureDirState switches to erase mode
// where *all* of the files managed by the glob pattern are removed (including
// those that may have been already written). The return value is an empty list
// of changed files, the real list of removed files and the first error.
//
// If an error happens while removing files then such a file is not removed but
// the removal continues until the set of managed files matching the glob is
// exhausted.
//
// In all cases, the function returns the first error it has encountered.
func EnsureDirState(dir, glob string, content map[string]*FileState) (changed, removed []string, err error) {
	// Validate the glob and the content keys up front; both are programmer
	// errors, hence panic rather than a returned error.
	if _, err := filepath.Match(glob, "foo"); err != nil {
		panic(fmt.Sprintf("EnsureDirState got invalid pattern %q: %s", glob, err))
	}
	for baseName := range content {
		if filepath.Base(baseName) != baseName {
			panic(fmt.Sprintf("EnsureDirState got filename %q which has a path component", baseName))
		}
		if ok, _ := filepath.Match(glob, baseName); !ok {
			panic(fmt.Sprintf("EnsureDirState got filename %q which doesn't match the glob pattern %q", baseName, glob))
		}
	}
	// Change phase (create/change files described by content)
	var firstErr error
	for baseName, fileState := range content {
		filePath := filepath.Join(dir, baseName)
		err := EnsureFileState(filePath, fileState)
		if err == ErrSameState {
			continue
		}
		if err != nil {
			// On write failure, switch to erase mode. Desired content is set
			// to nothing (no content) changed files are forgotten and the
			// writing loop stops. The subsequent erase loop will remove all
			// the managed content.
			firstErr = err
			content = nil
			changed = nil
			break
		}
		changed = append(changed, baseName)
	}
	// Delete phase (remove files matching the glob that are not in content)
	matches, err := filepath.Glob(filepath.Join(dir, glob))
	if err != nil {
		sort.Strings(changed)
		return changed, nil, err
	}
	for _, filePath := range matches {
		baseName := filepath.Base(filePath)
		// In erase mode content is nil, so every match is removed here.
		if content[baseName] != nil {
			continue
		}
		err := os.Remove(filePath)
		if err != nil {
			// Record the first removal error but keep removing the rest.
			if firstErr == nil {
				firstErr = err
			}
			continue
		}
		removed = append(removed, baseName)
	}
	sort.Strings(changed)
	sort.Strings(removed)
	return changed, removed, firstErr
}
// validFileType reports whether path looks like a supported archive, i.e.
// its base name matches *.tar.gz or *.tar.
func validFileType(path string) bool {
	// Compute the base name once; the original matched against it twice and
	// returned boolean literals from an if/else, which is non-idiomatic.
	base := filepath.Base(path)
	gz, _ := filepath.Match("*.tar.gz", base)
	tar, _ := filepath.Match("*.tar", base)
	return gz || tar
}
// Figure out if the URL is of 'virtual host' style.
// Currently only supported hosts with virtual style are Amazon S3 and Google Cloud Storage.
func isVirtualHostStyle(hostURL string) bool {
	s3Style, _ := filepath.Match("*.s3*.amazonaws.com", hostURL)
	gcsStyle, _ := filepath.Match("*.storage.googleapis.com", hostURL)
	return s3Style || gcsStyle
}
// Test if a metric hits this rule.
//
// 1. For trend related conditions, index.Score will be used.
// 2. For value related conditions, metric.Value will be used.
//
func (rule *Rule) Test(m *Metric, idx *Index, cfg *config.Config) bool {
	// RLock if shared.
	rule.RLock()
	defer rule.RUnlock()
	// Default thresholds. A zero threshold is treated as "unset" and falls
	// back to the first configured default whose name pattern matches.
	thresholdMax := rule.ThresholdMax
	thresholdMin := rule.ThresholdMin
	if thresholdMax == 0 && cfg != nil {
		// Check defaults
		// NOTE(review): map iteration order is random, so with several
		// matching default patterns the threshold picked is nondeterministic.
		for p, v := range cfg.Detector.DefaultThresholdMaxs {
			if ok, _ := filepath.Match(p, m.Name); ok && v != 0 {
				thresholdMax = v
				break
			}
		}
	}
	if thresholdMin == 0 && cfg != nil {
		// Check defaults
		for p, v := range cfg.Detector.DefaultThresholdMins {
			if ok, _ := filepath.Match(p, m.Name); ok && v != 0 {
				thresholdMin = v
				break
			}
		}
	}
	// Conditions: the rule fires if any enabled combination below holds;
	// each test only runs while ok is still false.
	ok := false
	if !ok && rule.TrendUp && thresholdMax == 0 {
		// TrendUp
		ok = idx.Score > 1
	}
	if !ok && rule.TrendUp && thresholdMax != 0 {
		// TrendUp And Value >= X
		ok = idx.Score > 1 && m.Value >= thresholdMax
	}
	if !ok && !rule.TrendUp && thresholdMax != 0 {
		// Value >= X
		ok = m.Value >= thresholdMax
	}
	if !ok && rule.TrendDown && thresholdMin == 0 {
		// TrendDown
		ok = idx.Score < -1
	}
	if !ok && rule.TrendDown && thresholdMin != 0 {
		// TrendDown And Value <= X
		ok = idx.Score < -1 && m.Value <= thresholdMin
	}
	if !ok && !rule.TrendDown && thresholdMin != 0 {
		// Value <= X
		ok = m.Value <= thresholdMin
	}
	return ok
}
// isShellPatternMatch returns whether fullpath matches the shell pattern, as defined by http://golang.org/pkg/path/filepath/#Match. As an additional special case, when the pattern looks like a basename, the last path element of fullpath is also checked against it. func isShellPatternMatch(shellPattern, fullpath string) bool { match, _ := filepath.Match(shellPattern, fullpath) if match { return true } if !strings.Contains(shellPattern, filepathSeparatorString) { match, _ := filepath.Match(shellPattern, filepath.Base(fullpath)) if match { return true } } return false }
// pathType will check the name of the of the file and based on this // decide what type of distribution it is. func pathType(path string) DistType { base := filepath.Base(path) if isTgz, _ := filepath.Match("*.tar.gz", base); isTgz { return TGZ_PATH } else if isTar, _ := filepath.Match("*.tar", base); isTar { return TAR_PATH } else if isZip, _ := filepath.Match("*.zip", base); isZip { return ZIP_PATH } else if finfo, err := os.Stat(path); err == nil && finfo.IsDir() { return DIR_PATH } return UNKNOWN_PATH }
// collectTests traverses the input directory and finds all
// unit test files.
// The returned channel is fed from a goroutine and closed when the
// traversal finishes.
func collectTests() <-chan string {
	c := make(chan string)
	go func() {
		defer close(c)
		// If input is a single file, emit it only when it is named like a
		// test file (*_test.dasm).
		stat, _ := os.Lstat(input)
		if !stat.IsDir() {
			_, name := filepath.Split(input)
			ok, err := filepath.Match("*_test.dasm", name)
			if !ok || err != nil {
				return
			}
			c <- input
			return
		}
		filepath.Walk(input, func(file string, info os.FileInfo, err error) error {
			if info.IsDir() {
				return nil
			}
			_, name := filepath.Split(file)
			ok, err := filepath.Match("*_test.dasm", name)
			if !ok || err != nil {
				return err
			}
			// Skip any file whose path contains a component starting
			// with '_'.
			parts := strings.Split(file, string(filepath.Separator))
			for i := range parts {
				if len(parts[i]) == 0 {
					continue
				}
				if parts[i][0] == '_' {
					return nil
				}
			}
			c <- file
			return nil
		})
	}()
	return c
}
func KeySender(cmd Cmd, key_chan chan s3.Key) { limit := 1000 marker := "" path := strings.Replace(cmd.url.Path, "/", "", 1) list, err := cmd.bucket.List("", "/", marker, limit) if err != nil { panic(err) } for _, item := range list.Contents { marker = item.Key if path != "" { matched, err := filepath.Match(path, item.Key) if err != nil { panic(err) } if !matched && path != "" { continue } } key_chan <- item } for list.IsTruncated == true { if marker == list.Marker { break } list, err = cmd.bucket.List("", "/", marker, limit) for _, item := range list.Contents { marker = item.Key matched, err := filepath.Match(path, item.Key) if err != nil { panic(err) } if !matched && path != "" { continue } key_chan <- item } } }
// check whether the given target url matches the glob url, which may have // glob wild cards in the host name. // // Examples: // globUrl=*.docker.io, targetUrl=blah.docker.io => match // globUrl=*.docker.io, targetUrl=not.right.io => no match // // Note that we don't support wildcards in ports and paths yet. func urlsMatch(globUrl *url.URL, targetUrl *url.URL) (bool, error) { globUrlParts, globPort := splitUrl(globUrl) targetUrlParts, targetPort := splitUrl(targetUrl) if globPort != targetPort { // port doesn't match return false, nil } if len(globUrlParts) != len(targetUrlParts) { // host name does not have the same number of parts return false, nil } if !strings.HasPrefix(targetUrl.Path, globUrl.Path) { // the path of the credential must be a prefix return false, nil } for k, globUrlPart := range globUrlParts { targetUrlPart := targetUrlParts[k] matched, err := filepath.Match(globUrlPart, targetUrlPart) if err != nil { return false, err } if !matched { // glob mismatch for some part return false, nil } } // everything matches return true, nil }