// NewTemp creates a new Temp structure
func NewTemp(args ...string) (*Temp, error) {
	tempDir := path.Clean(Dir)

	if len(args) != 0 {
		tempDir = path.Clean(args[0])
	}

	if !fsutil.IsExist(tempDir) {
		return nil, fmt.Errorf("Directory %s does not exist", tempDir)
	}

	if !fsutil.IsDir(tempDir) {
		return nil, fmt.Errorf("%s is not a directory", tempDir)
	}

	if !fsutil.IsWritable(tempDir) {
		return nil, fmt.Errorf("Directory %s is not writable", tempDir)
	}

	return &Temp{
		Dir:       tempDir,
		DirPerms:  DefaultDirPerms,
		FilePerms: DefaultFilePerms,
	}, nil
}
func arrangeFilename(filename string) string {
	if len(filename) == 0 {
		return filename
	}

	gopath := path.Join(os.Getenv("GOPATH"), "/src/")
	pwd, _ := os.Getwd()

	mayBeRelPath := path.Clean(path.Join(pwd, filename))
	if strings.HasPrefix(mayBeRelPath, gopath) {
		s := mayBeRelPath[len(gopath):]
		if s[0:1] == "/" {
			filename = s[1:]
		}
	}

	mayBeAbsPath := path.Clean(filename)
	if strings.HasPrefix(mayBeAbsPath, gopath) {
		s := mayBeAbsPath[len(gopath):]
		if s[0:1] == "/" {
			filename = s[1:]
		}
	}

	filename = strings.TrimPrefix(filename, sourceDir)

	return filename
}
/*
	Returns absolute path of executing file.
	WARNING: this must be called before changing the current directory
*/
func discoverExecName() string {
	if DEBUG {
		log.Print("Debug: discoverExecName\n")
	}

	f := os.Args[0]
	if path.IsAbs(f) {
		return f
	}

	wd, err := os.Getwd()
	if err != nil {
		panic(fmt.Sprintf("Getwd failed: %s", err))
	}

	_, err = os.Stat(f)
	if err == nil {
		// relative file exists
		return path.Clean(path.Join(wd, f))
	}

	// does not exist? look it up in PATH
	f2, err := exec.LookPath(f)
	if err != nil {
		panic(fmt.Sprintf("lookpath failed: %s", err))
	}

	if path.IsAbs(f2) {
		return f2
	}

	return path.Clean(path.Join(wd, f2))
}
func (b *BlobStore) listBlobFiles(baseURL *url.URL) ([]blob.Blob, error) {
	listResp, err := b.doListRequest(baseURL)
	if err != nil {
		return nil, err
	}

	var blobFiles []blob.Blob
	for _, resp := range listResp.Responses {
		u, err := url.Parse(resp.HREF)
		if err != nil {
			return nil, err
		}

		if path.Clean(u.Path) == path.Clean(baseURL.Path) {
			continue
		}

		blobFiles = append(blobFiles, blob.Blob{
			Path:    strings.Replace(path.Clean(u.Path), "/blobs/", "", 1),
			Created: time.Time(resp.LastModified),
			Size:    resp.ContentLength,
		})
	}

	return blobFiles, nil
}
func (v *VolumeOptions) setVolumeMount(spec *kapi.PodSpec, info *resource.Info) error {
	opts := v.AddOpts
	containers, _ := selectContainers(spec.Containers, v.Containers)
	if len(containers) == 0 && v.Containers != "*" {
		fmt.Fprintf(v.Err, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, v.Containers)
		return nil
	}

	for _, c := range containers {
		for _, m := range c.VolumeMounts {
			if path.Clean(m.MountPath) == path.Clean(opts.MountPath) && m.Name != v.Name {
				return fmt.Errorf("volume mount '%s' already exists for container '%s'", opts.MountPath, c.Name)
			}
		}
		for i, m := range c.VolumeMounts {
			if m.Name == v.Name {
				c.VolumeMounts = append(c.VolumeMounts[:i], c.VolumeMounts[i+1:]...)
				break
			}
		}
		volumeMount := &kapi.VolumeMount{
			Name:      v.Name,
			MountPath: path.Clean(opts.MountPath),
		}
		c.VolumeMounts = append(c.VolumeMounts, *volumeMount)
	}
	return nil
}
// posixRel returns a relative path that is lexically equivalent to targpath when
// joined to basepath with an intervening separator.
//
// That is, Join(basepath, Rel(basepath, targpath)) is equivalent to targpath itself.
// On success, the returned path will always be relative to basepath,
// even if basepath and targpath share no elements.
// An error is returned if targpath can't be made relative to basepath or if
// knowing the current working directory would be necessary to compute it.
//
// Copy-pasted & slightly edited from Go's lib path/filepath/path.go.
//
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
func posixRel(basepath, targpath string) (string, error) {
	base := path.Clean(basepath)
	targ := path.Clean(targpath)
	if targ == base {
		return ".", nil
	}
	if base == "." {
		base = ""
	}
	if path.IsAbs(base) != path.IsAbs(targ) {
		return "", errors.New("Rel: can't make " + targ + " relative to " + base)
	}
	// Position base[b0:bi] and targ[t0:ti] at the first differing elements.
	bl := len(base)
	tl := len(targ)
	var b0, bi, t0, ti int
	for {
		for bi < bl && base[bi] != '/' {
			bi++
		}
		for ti < tl && targ[ti] != '/' {
			ti++
		}
		if targ[t0:ti] != base[b0:bi] {
			break
		}
		if bi < bl {
			bi++
		}
		if ti < tl {
			ti++
		}
		b0 = bi
		t0 = ti
	}
	if base[b0:bi] == ".." {
		return "", errors.New("Rel: can't make " + targ + " relative to " + base)
	}
	if b0 != bl {
		// Base elements left. Must go up before going down.
		seps := strings.Count(base[b0:bl], string('/'))
		size := 2 + seps*3
		if tl != t0 {
			size += 1 + tl - t0
		}
		buf := make([]byte, size)
		n := copy(buf, "..")
		for i := 0; i < seps; i++ {
			buf[n] = '/'
			copy(buf[n+1:], "..")
			n += 3
		}
		if t0 != tl {
			buf[n] = '/'
			copy(buf[n+1:], targ[t0:])
		}
		return string(buf), nil
	}
	return targ[t0:], nil
}
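// examplePosixRel is a minimal usage sketch, not part of the original source: it
// assumes posixRel and the fmt package are in scope, and exercises posixRel the
// same way path/filepath.Rel is typically used.
func examplePosixRel() {
	rel, err := posixRel("/a/b", "/a/b/c/d") // "c/d"
	if err != nil {
		panic(err)
	}
	fmt.Println(rel)

	rel, _ = posixRel("/a/b", "/a/c") // "../c": must go up before going down
	fmt.Println(rel)
}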
// Uglify does the opposite of PrettifyURLPath().
// /section/name/index.html becomes /section/name.html
// /section/name/ becomes /section/name.html
// /section/name.html becomes /section/name.html
func Uglify(in string) string {
	if path.Ext(in) == "" {
		if len(in) < 2 {
			return "/"
		}
		// /section/name/ -> /section/name.html
		return path.Clean(in) + ".html"
	}

	name, ext := fileAndExt(in, pb)
	if name == "index" {
		// /section/name/index.html -> /section/name.html
		d := path.Dir(in)
		if len(d) > 1 {
			return d + ext
		}
		return in
	}
	// /.xml -> /index.xml
	if name == "" {
		return path.Dir(in) + "index" + ext
	}
	// /section/name.html -> /section/name.html
	return path.Clean(in)
}
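// exampleUglify is an assumed helper, not part of the original source; it simply
// shows the rewrites described in Uglify's doc comment (fmt assumed imported).
func exampleUglify() {
	fmt.Println(Uglify("/section/name/index.html")) // /section/name.html
	fmt.Println(Uglify("/section/name/"))           // /section/name.html
	fmt.Println(Uglify("/section/name.html"))       // /section/name.html
}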
// Suffix returns the suffix of p relative to pref.
// Both paths must be absolute or both relative.
// pref can be empty.
// If there's no such suffix, the empty string is returned.
// The suffix starts with '/' and is "/" if pref == p.
func Suffix(p, pref string) string {
	if len(p) == 0 {
		return ""
	}
	p = path.Clean(p)
	if pref == "" {
		return p
	}
	pref = path.Clean(pref)
	if (pref[0] == '/') != (p[0] == '/') {
		return ""
	}
	if pref == "." || pref == "/" {
		return p
	}
	np := len(p)
	npref := len(pref)
	if np < npref {
		return ""
	}
	switch {
	case !strings.HasPrefix(p, pref):
		return ""
	case np == npref:
		return "/"
	case p[npref] != '/':
		return ""
	default:
		return p[npref:]
	}
}
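// exampleSuffix is an assumed helper, not part of the original source,
// illustrating the behaviour documented for Suffix (fmt assumed imported).
func exampleSuffix() {
	fmt.Println(Suffix("/usr/local/bin", "/usr/local")) // "/bin"
	fmt.Println(Suffix("/usr/local", "/usr/local"))     // "/"
	fmt.Println(Suffix("/usr/local", "/var"))           // "" (no such suffix)
}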
func CommonPrefix(sep string, paths ...string) string {
	// Handle special cases.
	switch len(paths) {
	case 0:
		return ""
	case 1:
		return path.Clean(paths[0])
	}

	c := []byte(path.Clean(paths[0]))

	// Ignore the first path since it's already in c.
	for _, v := range paths[1:] {
		// Clean up each path before testing it.
		v = path.Clean(v)

		// Get the length of the shorter slice.
		shorter := len(v)
		if len(v) > len(c) {
			shorter = len(c)
		}

		// Find the first non-common character and copy up to it into c.
		for i := 0; i < shorter; i++ {
			if v[i] != c[i] {
				c = c[0:i]
				break
			}
		}
	}

	// Correct for problem caused by prepending the actual common path to the
	// list of paths searched through.
	for _, v := range paths {
		if len(v) > len(c) {
			if strings.HasPrefix(v, string(c)) {
				if len(v) > len(c)+len(sep) {
					if v[len(c):len(c)+len(sep)] == sep {
						c = append(c, []byte(sep)...)
						break
					}
				}
			}
		}
	}

	// Remove trailing non-separator characters.
	for i := len(c) - 1; i >= 0; i-- {
		if i+len(sep) > len(c) {
			continue
		}
		if string(c[i:i+len(sep)]) == sep {
			c = c[0:i]
			break
		}
	}

	return string(c)
}
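// exampleCommonPrefix is an assumed helper, not part of the original source,
// showing CommonPrefix trimming back to the last full path element.
func exampleCommonPrefix() {
	fmt.Println(CommonPrefix("/", "/home/user1/docs", "/home/user1/music")) // "/home/user1"
	fmt.Println(CommonPrefix("/", "/home/user1", "/home/user2"))            // "/home"
}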
// TODO(phase1+) Move these parameters to the API group
// we need some params for testing etc, let's keep these hidden for now
func SetEnvParams() *EnvParams {
	envParams := map[string]string{
		"kubernetes_dir":  "/etc/kubernetes",
		"host_pki_path":   "/etc/kubernetes/pki",
		"host_etcd_path":  "/var/lib/etcd",
		"hyperkube_image": "",
		"repo_prefix":     "gcr.io/google_containers",
		"discovery_image": fmt.Sprintf("gcr.io/google_containers/kube-discovery-%s:%s", runtime.GOARCH, "1.0"),
		"etcd_image":      "",
	}

	for k := range envParams {
		if v := strings.TrimSpace(os.Getenv(fmt.Sprintf("KUBE_%s", strings.ToUpper(k)))); v != "" {
			envParams[k] = v
		}
	}

	return &EnvParams{
		KubernetesDir:    path.Clean(envParams["kubernetes_dir"]),
		HostPKIPath:      path.Clean(envParams["host_pki_path"]),
		HostEtcdPath:     path.Clean(envParams["host_etcd_path"]),
		HyperkubeImage:   envParams["hyperkube_image"],
		RepositoryPrefix: envParams["repo_prefix"],
		DiscoveryImage:   envParams["discovery_image"],
		EtcdImage:        envParams["etcd_image"],
	}
}
func (ks *KeySuite) SubtestKey(s string, c *C) {
	fixed := path.Clean("/" + s)
	namespaces := strings.Split(fixed, "/")[1:]
	lastNamespace := namespaces[len(namespaces)-1]
	lnparts := strings.Split(lastNamespace, ":")
	ktype := ""
	if len(lnparts) > 1 {
		ktype = strings.Join(lnparts[:len(lnparts)-1], ":")
	}
	kname := lnparts[len(lnparts)-1]

	kchild := path.Clean(fixed + "/cchildd")
	kparent := "/" + strings.Join(namespaces[:len(namespaces)-1], "/")
	kpath := path.Clean(kparent + "/" + ktype)
	kinstance := fixed + ":" + "inst"

	c.Log("Testing: ", NewKey(s))

	c.Check(NewKey(s).String(), Equals, fixed)
	c.Check(NewKey(s), Equals, NewKey(s))
	c.Check(NewKey(s).String(), Equals, NewKey(s).String())
	c.Check(NewKey(s).Name(), Equals, kname)
	c.Check(NewKey(s).Type(), Equals, ktype)
	c.Check(NewKey(s).Path().String(), Equals, kpath)
	c.Check(NewKey(s).Instance("inst").String(), Equals, kinstance)
	c.Check(NewKey(s).Child("cchildd").String(), Equals, kchild)
	c.Check(NewKey(s).Child("cchildd").Parent().String(), Equals, fixed)
	c.Check(NewKey(s).Parent().String(), Equals, kparent)
	c.Check(len(NewKey(s).List()), Equals, len(namespaces))
	c.Check(len(NewKey(s).Namespaces()), Equals, len(namespaces))
	for i, e := range NewKey(s).List() {
		c.Check(namespaces[i], Equals, e)
	}
}
// url returns a parsed url to the given path. c must not be nil
func (b *Bucket) url(bPath string, c *Config) (*url.URL, error) {
	// parse versionID parameter from path, if included
	// See https://github.com/rlmcpherson/s3gof3r/issues/84 for rationale
	purl, err := url.Parse(bPath)
	if err != nil {
		return nil, err
	}
	var vals url.Values
	if v := purl.Query().Get(versionParam); v != "" {
		vals = make(url.Values)
		vals.Add(versionParam, v)
		bPath = strings.Split(bPath, "?")[0] // remove versionID from path
	}

	// handling for bucket names containing periods / explicit PathStyle addressing
	// http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html for details
	if strings.Contains(b.Name, ".") || c.PathStyle {
		return &url.URL{
			Host:     b.S3.Domain,
			Scheme:   c.Scheme,
			Path:     path.Clean(fmt.Sprintf("/%s/%s", b.Name, bPath)),
			RawQuery: vals.Encode(),
		}, nil
	} else {
		return &url.URL{
			Scheme:   c.Scheme,
			Path:     path.Clean(fmt.Sprintf("/%s", bPath)),
			Host:     path.Clean(fmt.Sprintf("%s.%s", b.Name, b.S3.Domain)),
			RawQuery: vals.Encode(),
		}, nil
	}
}
func NewCfIgnore(text string) CfIgnore {
	patterns := []ignorePattern{}
	inclusions := []glob.Glob{}
	exclusions := []glob.Glob{}

	lines := strings.Split(text, "\n")
	lines = append(DefaultIgnoreFiles, lines...)

	for _, pattern := range lines {
		pattern = strings.TrimSpace(pattern)
		if pattern == "" {
			continue
		}

		if strings.HasPrefix(pattern, "!") {
			pattern := pattern[1:]
			pattern = path.Clean(pattern)
			inclusions = append(inclusions, globsForPattern(pattern)...)
		} else {
			pattern = path.Clean(pattern)
			exclusions = append(exclusions, globsForPattern(pattern)...)
		}
	}

	for _, glob := range exclusions {
		patterns = append(patterns, ignorePattern{true, glob})
	}
	for _, glob := range inclusions {
		patterns = append(patterns, ignorePattern{false, glob})
	}

	return cfIgnore(patterns)
}
func main() {
	hostPtr := flag.String("host", "localhost", "hostname of the server")
	portPtr := flag.Int("port", 30000, "port that the server runs on")
	keyPtr := flag.String("key", "client.private", "private key for authentication")
	servKeyPtr := flag.String("servkey", "server.public", "public key of the server")
	flag.Parse()

	sock, err := zmq.NewSocket(zmq.REQ)
	condlog.Fatal(err, "Unable to create socket")
	defer sock.Close()

	initSecurity(path.Clean(*keyPtr), path.Clean(*servKeyPtr), sock)

	err = sock.Connect(fmt.Sprintf("tcp://%s:%d", *hostPtr, *portPtr))
	condlog.Fatal(err, "Unable to connect")

	in := bufio.NewScanner(os.Stdin)
	fmt.Print("> ")
	for in.Scan() {
		hand := in.Text()
		results := solve(hand, sock)
		for _, r := range results {
			fmt.Println(r)
		}
		fmt.Print("> ")
	}
	condlog.Fatal(in.Err(), "Unable to read from stdin")
}
func expandOutputs(output string, dirDst, usePipe bool, tasks *[]task) bool {
	if output != "" {
		output = path.Clean(filepath.ToSlash(output))
		if output[len(output)-1] != '/' {
			info, err := os.Stat(output)
			if err == nil && info.Mode().IsDir() {
				output += "/"
			}
		}
		if dirDst {
			if output[len(output)-1] != '/' {
				output += "/"
			}
			if err := os.MkdirAll(output, 0777); err != nil {
				fmt.Fprintln(os.Stderr, "ERROR: "+err.Error())
				os.Exit(1)
			}
		} else if output[len(output)-1] == '/' {
			output += "out"
		}
	}

	if verbose {
		if output == "" {
			if usePipe {
				fmt.Fprintln(os.Stderr, "INFO: minify to stdout")
			} else {
				fmt.Fprintln(os.Stderr, "INFO: minify to overwrite itself")
			}
		} else if output[len(output)-1] != '/' {
			fmt.Fprintf(os.Stderr, "INFO: minify to output file %v\n", output)
		} else if output == "./" {
			fmt.Fprintf(os.Stderr, "INFO: minify to current working directory\n")
		} else {
			fmt.Fprintf(os.Stderr, "INFO: minify to output directory %v\n", output)
		}
	}

	ok := true
	for i, t := range *tasks {
		if !usePipe && output == "" {
			(*tasks)[i].dst = (*tasks)[i].src
		} else {
			(*tasks)[i].dst = output
		}
		if len(output) > 0 && output[len(output)-1] == '/' {
			rel, err := filepath.Rel(t.srcDir, t.src)
			if err != nil {
				fmt.Fprintln(os.Stderr, "ERROR: "+err.Error())
				ok = false
			}
			(*tasks)[i].dst = path.Clean(filepath.ToSlash(path.Join(output, rel)))
		}
	}
	if usePipe && len(*tasks) == 0 {
		*tasks = append(*tasks, task{"", "", output})
	}
	return ok
}
func newFS(r *zip.Reader) http.FileSystem {
	files := map[string]*zip.File{}
	dirs := map[string]map[string]bool{}
	for _, f := range r.File {
		name := path.Clean(f.Name)
		if !f.Mode().IsDir() {
			files[name] = f
		}
		var filename string
		for name != "." {
			name, filename = path.Split(name)
			name = path.Clean(name)
			if filename == "" {
				continue
			}
			if _, ok := dirs[name]; !ok {
				dirs[name] = map[string]bool{}
			}
			if dirs[name][filename] {
				break
			}
			dirs[name][filename] = true
		}
	}
	return fileSystem{files, dirMapsToSlices(dirs)}
}
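// serveZip is a hedged usage sketch, not from the original source: it serves the
// contents of a zip archive over HTTP using the http.FileSystem returned by newFS.
// The archive name "assets.zip" and the listen address are illustrative; archive/zip
// and net/http are assumed to be imported.
func serveZip() error {
	zr, err := zip.OpenReader("assets.zip")
	if err != nil {
		return err
	}
	defer zr.Close()

	http.Handle("/", http.FileServer(newFS(&zr.Reader)))
	return http.ListenAndServe(":8080", nil)
}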
func (s *Sync) Sync(targetPath string, convert bool) error {
	logger.Log.Println("getting data...")
	tracks, err := s.db.ListAll()
	if err != nil {
		return err
	}
	logger.Log.Println(len(tracks), "tracks found.")

	logger.Log.Println("indexing target...")
	cleanPath := path.Clean(targetPath)
	if cleanPath != "" {
		cleanPath += "/"
	}
	var targetFiles []file
	filepath.Walk(targetPath, func(filepath string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		filepath = path.Clean(filepath)
		if !strings.HasPrefix(filepath, cleanPath) {
			return errrs.New("file " + filepath + " is not within " + cleanPath)
		}
		filepath = strings.TrimPrefix(filepath, cleanPath)
		targetFiles = append(targetFiles, file{filepath, info})
		return nil
	})
	logger.Log.Println(len(targetFiles), "files in target")

	return core.ErrorNotImplemented
}
// Rename renames (moves) a file.
// Handles to the oldpath persist but might return oldpath if Name() is called.
func (fs *MemFS) Rename(oldpath, newpath string) error {
	fs.lock.Lock()
	defer fs.lock.Unlock()

	// OldPath
	oldpath = filepath.Clean(oldpath)
	fiOldParent, fiOld, err := fs.fileInfo(oldpath)
	if err != nil {
		return &os.PathError{"rename", oldpath, err}
	}
	if fiOld == nil {
		return &os.PathError{"rename", oldpath, os.ErrNotExist}
	}

	newpath = filepath.Clean(newpath)
	fiNewParent, fiNew, err := fs.fileInfo(newpath)
	if err != nil {
		return &os.PathError{"rename", newpath, err}
	}
	if fiNew != nil {
		return &os.PathError{"rename", newpath, os.ErrExist}
	}

	newBase := filepath.Base(newpath)

	// Relink
	delete(fiOldParent.childs, fiOld.name)
	fiOld.parent = fiNewParent
	fiOld.name = newBase
	fiOld.modTime = time.Now()
	fiNewParent.childs[fiOld.name] = fiOld

	return nil
}
func handleReq(w http.ResponseWriter, r *http.Request) {
	//Is_Ajax := strings.Contains(r.Header.Get("Accept"), "application/json")
	if r.Method == "PUT" {
		AjaxUpload(w, r)
		return
	}
	if r.Method == "POST" {
		WebCommandHandler(w, r)
		return
	}

	log.Print("Request: ", r.RequestURI)
	// See bug #9. For some reason index.html does not arrive when it is requested directly.
	if strings.HasSuffix(r.URL.Path, "/") && r.FormValue("get_file") != "true" {
		log.Printf("Index dir %s", r.URL.Path)
		handleDir(w, r)
	} else {
		log.Printf("downloading file %s", path.Clean(dir+r.URL.Path))
		r.Header.Del("If-Modified-Since")
		http.ServeFile(w, r, path.Clean(dir+r.URL.Path))
		//http.ServeContent(w, r, r.URL.Path)
		//w.Write([]byte("this is a test inside file handler"))
	}
}
func S(key string, f cacheSetFunc, w http.ResponseWriter, r *http.Request, timeout int) {
	if base == "" {
		Lcw(key, f, w, r, timeout)
		return
	}

	fb, fe := f()
	if fe != nil {
		http.NotFound(w, r)
		return
	}
	w.Write(fb)

	p := path.Clean(base + "/" + key + ".html")
	pb := path.Clean(base + "/" + key + ".tmp")

	e := ioutil.WriteFile(pb, fb, 0644)
	if e != nil {
		os.MkdirAll(path.Dir(pb), os.ModeDir|0755)
		e = ioutil.WriteFile(pb, fb, 0644)
		if e != nil {
			printError(e)
		}
	}
	if e == nil {
		os.Rename(pb, p)
		go func() {
			<-time.After(time.Duration(int64(time.Second) * int64(timeout)))
			os.Remove(p)
		}()
	}
}
// Check if endpoint is in expected syntax by valid scheme/path across all platforms.
func checkEndpointURL(endpointURL *url.URL) (err error) {
	// applicable to all OS.
	if endpointURL.Scheme == "" || endpointURL.Scheme == "http" || endpointURL.Scheme == "https" {
		urlPath := path.Clean(endpointURL.Path)
		if urlPath == "" || urlPath == "." || urlPath == "/" || urlPath == `\` {
			err = fmt.Errorf("Empty or root path is not allowed")
		}

		return err
	}

	// Applicable to Windows only.
	if runtime.GOOS == "windows" {
		// On Windows, endpoint can be a path with drive eg. C:\Export and its URL.Scheme is 'C'.
		// Check if URL.Scheme is a single letter alphabet to represent a drive.
		// Note: URL.Parse() converts scheme into lower case always.
		if len(endpointURL.Scheme) == 1 && endpointURL.Scheme[0] >= 'a' && endpointURL.Scheme[0] <= 'z' {
			// If endpoint is C:\ or C:\export, URL.Path does not have path information like \ or \export
			// hence we directly work with endpoint.
			urlPath := strings.SplitN(path.Clean(endpointURL.String()), ":", 2)[1]
			if urlPath == "" || urlPath == "." || urlPath == "/" || urlPath == `\` {
				err = fmt.Errorf("Empty or root path is not allowed")
			}

			return err
		}
	}

	return fmt.Errorf("Invalid scheme")
}
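// exampleCheckEndpoint is an assumed helper, not part of the original source,
// showing how checkEndpointURL rejects an endpoint whose path is empty or root
// (net/url and fmt assumed imported).
func exampleCheckEndpoint() {
	u, err := url.Parse("https://s3.amazonaws.com/")
	if err != nil {
		panic(err)
	}
	// The path cleans to "/", so an error is expected here.
	if err := checkEndpointURL(u); err != nil {
		fmt.Println("invalid endpoint:", err)
	}
}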
func parseSite(line string) *get3w.Site {
	arrOuter := regexOuter.FindStringSubmatch(line)
	if len(arrOuter) != 3 || arrOuter[0] == "" || arrOuter[1] == "" || arrOuter[2] == "" {
		return nil
	}

	arrInner := regexInner.FindStringSubmatch(arrOuter[2])
	if len(arrInner) != 4 || arrInner[0] == "" {
		return nil
	}

	name, p, url := arrOuter[1], "", ""
	if arrInner[3] == "" {
		p, url = path.Clean(strings.TrimSpace(arrInner[1])), strings.TrimSpace(arrInner[2])
	} else {
		p = path.Clean(strings.TrimSpace(arrInner[3]))
	}
	if p == "." {
		p = ""
	}
	if url == "" {
		url = getSiteURL(p)
	}

	if name == "" {
		return nil
	}

	return &get3w.Site{
		Name: name,
		Path: p,
		URL:  url,
	}
}
/*
	Returns absolute path of executing file.
	WARNING: this must be called before changing the current directory
*/
func (be *BdsExec) discoverExecName() string {
	if DEBUG {
		log.Printf("Debug: discoverExecName (%s)\n", be.args[0])
	}

	f := be.args[0]
	if path.IsAbs(f) {
		return f
	}

	wd, err := os.Getwd()
	if err != nil {
		panic(fmt.Sprintf("discoverExecName: Getwd failed '%s'", err))
	}

	_, err = os.Stat(f)
	if err == nil {
		// Relative file exists
		return path.Clean(path.Join(wd, f))
	}

	f2, err := exec.LookPath(f)
	if err != nil {
		panic(fmt.Sprintf("discoverExecName: Lookpath failed '%s'", err))
	}

	if path.IsAbs(f2) {
		return f2
	}

	return path.Clean(path.Join(wd, f2))
}
func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		tarReader := tar.NewReader(in)
		tarWriter := tar.NewWriter(pw)
		defer in.Close()
		hasRootFS := false
		for {
			hdr, err := tarReader.Next()
			if err == io.EOF {
				if !hasRootFS {
					pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
					return
				}
				// Signals end of archive.
				tarWriter.Close()
				pw.Close()
				return
			}
			if err != nil {
				pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
				return
			}

			content := io.Reader(tarReader)
			name := path.Clean(hdr.Name)
			if path.IsAbs(name) {
				name = name[1:]
			}
			if name == configFileName {
				dt, err := ioutil.ReadAll(content)
				if err != nil {
					pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
					return
				}
				*config = dt
			}
			if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
				hdr.Name = path.Clean(path.Join(parts[1:]...))
				if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
					hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
				}
				if err := tarWriter.WriteHeader(hdr); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
					return
				}
				if _, err := pools.Copy(tarWriter, content); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
					return
				}
				hasRootFS = true
			} else {
				io.Copy(ioutil.Discard, content)
			}
		}
	}()
	return pr
}
// Setup configures a *build.Context to use the given VFS
// as its filesystem.
func Setup(ctx *build.Context, fs vfs.VFS) {
	ctx.JoinPath = path.Join
	ctx.SplitPathList = filepath.SplitList
	ctx.IsAbsPath = func(p string) bool {
		return p != "" && p[0] == '/'
	}
	ctx.IsDir = func(p string) bool {
		stat, err := fs.Stat(p)
		return err == nil && stat.IsDir()
	}
	ctx.HasSubdir = func(root, dir string) (string, bool) {
		root = path.Clean(root)
		if !strings.HasSuffix(root, separator) {
			root += separator
		}
		dir = path.Clean(dir)
		if !strings.HasPrefix(dir, root) {
			return "", false
		}
		return dir[len(root):], true
	}
	ctx.ReadDir = fs.ReadDir
	ctx.OpenFile = func(p string) (io.ReadCloser, error) {
		return fs.Open(p)
	}
}
func expandRoot(root string) string {
	if rroot := []rune(root); rroot[0] == '.' {
		// Get the current directory
		cd, err := os.Getwd()

		// Check for an error
		if err != nil {
			panic(err)
		} //if

		// Check if the path is simple
		switch root {
		case ".":
			return cd
		case "..":
			return path.Dir(cd)
		} //switch

		// Check if the second is also a '.'
		if rroot[1] == '.' {
			return path.Clean(path.Join(path.Dir(cd), root[2:]))
		} //if

		// Return the current directory and everything after the first '.'
		return path.Clean(path.Join(cd, root[1:]))
	} //if

	return path.Clean(root)
} //expandRoot
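// exampleExpandRoot is an assumed helper, not part of the original source; the
// expected results assume the process working directory is /home/user/project
// (fmt assumed imported).
func exampleExpandRoot() {
	fmt.Println(expandRoot("."))        // /home/user/project
	fmt.Println(expandRoot(".."))       // /home/user
	fmt.Println(expandRoot("./assets")) // /home/user/project/assets
	fmt.Println(expandRoot("/tmp/x"))   // /tmp/x
}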
// collectData collects the requested downwardAPI values into a data map.
// The map's key is the requested name of the file to dump;
// the map's value is the (sorted) content of the field to be dumped into the file.
func (d *downwardAPIVolume) collectData() (map[string][]byte, error) {
	errlist := []error{}
	data := make(map[string][]byte)
	for _, fileInfo := range d.items {
		if fileInfo.FieldRef != nil {
			if values, err := fieldpath.ExtractFieldPathAsString(d.pod, fileInfo.FieldRef.FieldPath); err != nil {
				glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error())
				errlist = append(errlist, err)
			} else {
				data[path.Clean(fileInfo.Path)] = []byte(sortLines(values))
			}
		} else if fileInfo.ResourceFieldRef != nil {
			containerName := fileInfo.ResourceFieldRef.ContainerName
			nodeAllocatable, err := d.plugin.host.GetNodeAllocatable()
			if err != nil {
				errlist = append(errlist, err)
			} else if values, err := fieldpath.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, d.pod, containerName, nodeAllocatable); err != nil {
				glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error())
				errlist = append(errlist, err)
			} else {
				data[path.Clean(fileInfo.Path)] = []byte(sortLines(values))
			}
		}
	}
	return data, utilerrors.NewAggregate(errlist)
}
// Prepare configures the output directory or returns an error if it already exists.
func (c *OutputConfig) Prepare(ctx *interpolate.Context, pc *common.PackerConfig) []error {
	if c.OutputDir == "" {
		c.OutputDir = fmt.Sprintf("output-%s", pc.PackerBuildName)
	}

	var errs []error
	if path.IsAbs(c.OutputDir) {
		c.OutputDir = path.Clean(c.OutputDir)
	} else {
		wd, err := os.Getwd()
		if err != nil {
			errs = append(errs, err)
		}
		c.OutputDir = path.Clean(path.Join(wd, c.OutputDir))
	}

	if !pc.PackerForce {
		if _, err := os.Stat(c.OutputDir); err == nil {
			errs = append(errs, fmt.Errorf(
				"Output directory '%s' already exists. It must not exist.", c.OutputDir))
		}
	}

	return errs
}
// importPathsNoDotExpansion returns the import paths to use for the given
// command line, but it does no ... expansion.
// $GOROOT/src/cmd/main.go:332
func importPathsNoDotExpansion(args []string) []string {
	if len(args) == 0 {
		return []string{"."}
	}
	var out []string
	for _, a := range args {
		// Arguments are supposed to be import paths, but
		// as a courtesy to Windows developers, rewrite \ to /
		// in command-line arguments. Handles .\... and so on.
		if filepath.Separator == '\\' {
			a = strings.Replace(a, `\`, `/`, -1)
		}

		// Put argument in canonical form, but preserve leading ./.
		if strings.HasPrefix(a, "./") {
			a = "./" + pathpkg.Clean(a)
			if a == "./." {
				a = "."
			}
		} else {
			a = pathpkg.Clean(a)
		}
		if a == "all" || a == "std" || a == "cmd" {
			out = append(out, allPackages(a)...)
			continue
		}
		out = append(out, a)
	}
	return out
}
// Pattern is a string like "/story/:storyId/:partId"
func (r request) matchPath(pattern string) *map[string]string {
	pattern = path.Clean(pattern)
	url := path.Clean(r.req.URL.String())

	patternSplit := strings.Split(pattern, "/")
	if patternSplit[0] != "" {
		panic(errors.New("Bad pattern"))
	}

	urlSplit := strings.Split(url, "/")
	// Process differences between dev_appserver and live code:
	// 1. prod servers include the http://foo.appspot.com prefix
	if strings.HasPrefix(urlSplit[0], "http") {
		urlSplit = urlSplit[1:]
	}
	// 2. prod servers don't include the trailing slash
	if len(urlSplit) == 1 {
		urlSplit = append(urlSplit, "")
	}

	if len(urlSplit) < len(patternSplit) ||
		(len(urlSplit) > len(patternSplit) && !strings.HasSuffix(url, "/*")) {
		return nil
	}

	result := make(map[string]string)
	for i := 1; i < len(patternSplit); i++ { // ignore i=0 (i.e. host/domain)
		if patternSplit[i] == "*" {
			if i != len(patternSplit)-1 {
				panic(errors.New("Bad pattern"))
			}
			result["*"] = strings.Join(urlSplit[i:], "/")
		} else if strings.HasPrefix(patternSplit[i], ":") {
			result[patternSplit[i][1:]] = urlSplit[i]
		} else if patternSplit[i] != urlSplit[i] {
			return nil
		}
	}
	return &result
}