func generateJSON(path string, distPath string) {
	var obj map[string]interface{}
	baseURL := fmt.Sprintf("%v://%v%v", *protocol, *host, path)
	fileFmt := "%v/%v"

	time.Sleep(100 * time.Millisecond)
	_, jsonD, _ := cr.HTTPDataMethod(cr.HTTPGetStr, baseURL, "")
	err := json.Unmarshal([]byte(jsonD), &obj)
	if err != nil {
		log.Fatal(err)
	}

	var prefix, p1, file string
	for i, v := range obj["apis"].([]interface{}) {
		casted := v.(map[string]interface{})
		url1 := fmt.Sprintf("%v%v", baseURL, casted["path"])
		_, u, _ := cr.HTTPDataMethod(cr.HTTPGetStr, url1, "")
		p1, file = filepath.Split(fmt.Sprintf("%v", casted["path"]))
		if i == 0 {
			prefix = strings.Replace(p1, "/", distPath, 1)
			err := os.MkdirAll(prefix, 0777)
			if err != nil {
				log.Fatalf("Fatal error while generating static JSON path: %v", err)
			}
		}
		ioutil.WriteFile(fmt.Sprintf(fileFmt, prefix, file), []byte(u), 0777)
	}

	_, file = filepath.Split(path)
	prefix1 := strings.Replace(p1, "/", "/../", 1)
	obj["apiVersion"] = "2.02"
	a := obj["info"].(map[string]interface{})
	a["title"] = "Libsecurity API"
	j, _ := json.Marshal(obj)
	newS := strings.Replace(string(j), p1, prefix1, -1)
	ioutil.WriteFile(fmt.Sprintf(fileFmt, distPath, file), []byte(newS), 0777)
}
func (l *Location) Navigate(path string) *Location {
	r := New()
	if is_remote(path) {
		u, _ := url.Parse(path)
		dir, filename := filepath.Split(u.Path)
		u.Path = dir
		r.Dir = u.String()
		r.Filename = filename
	} else if is_relative(path) {
		dir, filename := filepath.Split(path)
		if is_remote(l.Dir) {
			u, _ := url.Parse(l.Dir)
			u.Path = filepath.Join(u.Path, dir) + "/"
			r.Dir = u.String()
		} else {
			r.Dir = filepath.Join(l.Dir, dir) + "/"
		}
		r.Filename = filename
	} else {
		dir, filename := filepath.Split(path)
		r.Dir = dir
		r.Filename = filename
	}
	return r
}
// NewTagEntry creates a new TagEntry for the given values.
func NewTagEntry(file, title string) *TagEntry {
	url := file + ".html"

	// Not all pages have a metadata title defined.
	// Use the page url instead, after we prettify it a bit.
	if len(title) == 0 {
		title = file
		if strings.HasSuffix(title, "/index") {
			title, _ = filepath.Split(title)
		}
		if title == "/" {
			title = "Home"
		}
	}

	// If the url ends with /index.html, we strip off the index part.
	// It just takes up unnecessary bytes in the output and
	// `foo/bar/` looks better than `foo/bar/index.html`.
	if strings.HasSuffix(url, "/index.html") {
		url, _ = filepath.Split(url)
	}

	te := new(TagEntry)
	te.Url = template.HTMLAttr(url)
	te.Title = template.HTMLAttr(title)
	return te
}
// Link creates the ChildDirs and ChildFiles links in all EmbeddedDir's
func (e *EmbeddedBox) Link() {
	for path, ed := range e.Dirs {
		fmt.Println(path)
		ed.ChildDirs = make([]*EmbeddedDir, 0)
		ed.ChildFiles = make([]*EmbeddedFile, 0)
	}
	for path, ed := range e.Dirs {
		parentDirpath, _ := filepath.Split(path)
		if strings.HasSuffix(parentDirpath, "/") {
			parentDirpath = parentDirpath[:len(parentDirpath)-1]
		}
		parentDir := e.Dirs[parentDirpath]
		if parentDir == nil {
			panic("parentDir `" + parentDirpath + "` is missing in embedded box")
		}
		parentDir.ChildDirs = append(parentDir.ChildDirs, ed)
	}
	for path, ef := range e.Files {
		dirpath, _ := filepath.Split(path)
		if strings.HasSuffix(dirpath, "/") {
			dirpath = dirpath[:len(dirpath)-1]
		}
		dir := e.Dirs[dirpath]
		if dir == nil {
			panic("dir `" + dirpath + "` is missing in embedded box")
		}
		dir.ChildFiles = append(dir.ChildFiles, ef)
	}
}
// genCertPair generates a key/cert pair to the paths provided.
func genCertPair(certFile, keyFile string) error {
	log.Infof("Generating TLS certificates...")

	// Create directories for cert and key files if they do not yet exist.
	certDir, _ := filepath.Split(certFile)
	keyDir, _ := filepath.Split(keyFile)
	if err := os.MkdirAll(certDir, 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(keyDir, 0700); err != nil {
		return err
	}

	// Generate cert pair.
	org := "btcwallet autogenerated cert"
	validUntil := time.Now().Add(10 * 365 * 24 * time.Hour)
	cert, key, err := btcutil.NewTLSCertPair(org, validUntil, nil)
	if err != nil {
		return err
	}

	// Write cert and key files.
	if err = ioutil.WriteFile(certFile, cert, 0666); err != nil {
		return err
	}
	if err = ioutil.WriteFile(keyFile, key, 0600); err != nil {
		os.Remove(certFile)
		return err
	}

	log.Infof("Done generating TLS certificates")
	return nil
}
func downloadFile(furl string, ignoreTLS bool, cRoot string) ([]byte, error) {
	parsedURL, err := url.Parse(furl)
	if err != nil {
		return nil, err
	}

	// explicit file scheme ?
	if parsedURL.Scheme == "file" {
		return ioutil.ReadFile(parsedURL.RequestURI())
	} else if cRoot != "" {
		// check cache on local file system
		path, name := filepath.Split(parsedURL.RequestURI())
		fname := filepath.Join(cRoot+path, name)
		log.Println("attempting to load from cache: ", fname)
		b, err := ioutil.ReadFile(fname)
		if err == nil {
			log.Println("file loaded from cache as: ", fname)
			return b, nil
		} else if os.IsNotExist(err) {
			log.Println("not in cache: ", fname)
		} else {
			log.Println("cache load failed: ", err)
		}
	}

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: ignoreTLS,
		},
		Dial: dialTimeout,
	}
	client := &http.Client{Transport: tr}
	resp, err := client.Get(furl)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// cache on local file system
	if cRoot != "" {
		path, name := filepath.Split(parsedURL.RequestURI())
		fp := cRoot + path
		err := os.MkdirAll(fp, 0777)
		if err != nil {
			return nil, err
		}
		fname := filepath.Join(fp, name)
		err = ioutil.WriteFile(fname, data, 0644)
		if err != nil {
			return nil, err
		}
		log.Println("file saved in cache as: ", fname)
	}
	return data, nil
}
// MoveTo moves named file or directory to trash.
func MoveTo(name string) error {
	name = filepath.Clean(name)
	home := os.Getenv("HOME")
	dir, file := filepath.Split(name)
	target := filepath.Join(home, ".Trash", file)
	// TODO: If target name exists in Trash, come up with a unique one (perhaps append a timestamp) instead of overwriting.
	// TODO: Support OS X "Put Back". Figure out how it's done and do it.
	err := os.Rename(name, target)
	if err != nil {
		return err
	}

	// If directory became empty, remove it (recursively up).
	for {
		// Ensure it's an empty directory.
		if dirEntries, err := ioutil.ReadDir(dir); err != nil || len(dirEntries) != 0 {
			break
		}

		// Remove directory if it's (now) empty.
		err := os.Remove(dir)
		if err != nil {
			break
		}

		// Drop the trailing separator before splitting, so we actually ascend to the parent.
		dir, _ = filepath.Split(filepath.Clean(dir))
	}

	return nil
}
// NewArchiveEntry creates a new ArchiveEntry for the given values.
func NewArchiveEntry(file, title, desc string, stamp time.Time) *ArchiveEntry {
	url := file + ".html"

	// Not all pages have a metadata title defined.
	// Use the page url instead, after we prettify it a bit.
	if len(title) == 0 {
		title = file
		if strings.HasSuffix(title, "/index") {
			title, _ = filepath.Split(title)
		}
		if title == "/" {
			title = "Home"
		}
	}

	// If the url ends with /index.html, we strip off the index part.
	// It just takes up unnecessary bytes in the output and
	// `foo/bar/` looks better than `foo/bar/index.html`.
	if strings.HasSuffix(url, "/index.html") {
		url, _ = filepath.Split(url)
	}

	ae := new(ArchiveEntry)
	ae.Url = template.HTMLAttr(url)
	ae.Title = template.HTMLAttr(title)
	ae.Description = template.HTMLAttr(desc)
	ae.Stamp = stamp
	return ae
}
// GetMp3Tags returns a FileTags struct with
// all the information obtained from the tags in the
// MP3 file.
// Includes the Artist, Album and Song and defines
// default values if the values are missing.
// If the tags are missing, the default values will
// be stored on the file.
// If the tags are obtained correctly the first
// return value will be nil.
func GetMp3Tags(path string) (error, FileTags) {
	mp3File, err := id3.Open(path)
	if err != nil {
		_, file := filepath.Split(path)
		extension := filepath.Ext(file)
		songTitle := file[0 : len(file)-len(extension)]
		return err, FileTags{songTitle, "unknown", "unknown"}
	}
	defer mp3File.Close()

	title := mp3File.Title()
	if title == "" || title == "unknown" {
		_, file := filepath.Split(path)
		extension := filepath.Ext(file)
		title = file[0 : len(file)-len(extension)]
		mp3File.SetTitle(title)
	}

	artist := mp3File.Artist()
	if artist == "" {
		artist = "unknown"
		mp3File.SetArtist(artist)
	}

	album := mp3File.Album()
	if album == "" {
		album = "unknown"
		mp3File.SetAlbum(album)
	}

	ft := FileTags{title, artist, album}
	return nil, ft
}
func BenchmarkCFuseThreadedStat(b *testing.B) {
	b.StopTimer()

	wd, _ := os.Getwd()
	fileList := wd + "/testpaths.txt"
	lines := ReadLines(fileList)
	unique := map[string]int{}
	for _, l := range lines {
		unique[l] = 1
		dir, _ := filepath.Split(l)
		for dir != "/" && dir != "" {
			unique[dir] = 1
			dir = filepath.Clean(dir)
			dir, _ = filepath.Split(dir)
		}
	}

	out := []string{}
	for k := range unique {
		out = append(out, k)
	}

	f, err := ioutil.TempFile("", "")
	if err != nil {
		b.Fatalf("failed: %v", err)
	}
	sort.Strings(out)
	for _, k := range out {
		f.Write([]byte(fmt.Sprintf("/%s\n", k)))
	}
	f.Close()

	mountPoint := testutil.TempDir()
	cmd := exec.Command(wd+"/cstatfs",
		"-o",
		"entry_timeout=0.0,attr_timeout=0.0,ac_attr_timeout=0.0,negative_timeout=0.0",
		mountPoint)
	cmd.Env = append(os.Environ(), fmt.Sprintf("STATFS_INPUT=%s", f.Name()))
	cmd.Start()

	bin, err := exec.LookPath("fusermount")
	if err != nil {
		b.Fatalf("failed: %v", err)
	}
	stop := exec.Command(bin, "-u", mountPoint)
	if err != nil {
		b.Fatalf("failed: %v", err)
	}
	defer stop.Run()

	time.Sleep(100 * time.Millisecond)
	os.Lstat(mountPoint)
	threads := runtime.GOMAXPROCS(0)
	if err := TestingBOnePass(b, threads, fileList, mountPoint); err != nil {
		log.Fatalf("TestingBOnePass %v", err)
	}
}
func trimWebPath(p string) string {
	d, f := filepath.Split(p)
	clean := strings.TrimSuffix(d, string(filepath.Separator))
	_, f1 := filepath.Split(clean)
	if f == strings.TrimSuffix(f1, filepath.Ext(clean)) {
		return clean
	}
	return p
}
func (fc *FileChooser) up() {
	path := fc.filename.GetText()
	dir, file := filepath.Split(path)
	if file == "" {
		dir, file = filepath.Split(path[0 : len(path)-1])
	}
	fc.filename.SetText(dir)
	fc.setList()
}
func (ftp *FTP) uploadDirTree(localDir string, excludedDirs sort.StringSlice, callback Callback, n *int) (err error) {
	_, dir := filepath.Split(localDir)
	ftp.writeInfo("The directory where to upload is:", dir)
	if _, err = ftp.Mkd(dir); err != nil {
		return
	}
	_, err = ftp.Cwd(dir)
	if err != nil {
		ftp.writeInfo(fmt.Sprintf("An error occurred while CWD, err: %s.", err))
		return
	}
	defer ftp.Cwd("..")

	globSearch := filepath.Join(localDir, "*")
	ftp.writeInfo("Looking up files in", globSearch)
	var files []string
	files, err = filepath.Glob(globSearch) // find all files in folder
	if err != nil {
		return
	}
	ftp.writeInfo("Found", len(files), "files")
	sort.Strings(files) // sort by name

	for _, s := range files {
		_, fname := filepath.Split(s) // find file name
		localPath := filepath.Join(localDir, fname)
		ftp.writeInfo("Uploading file or dir:", localPath)

		var f os.FileInfo
		if f, err = os.Stat(localPath); err != nil {
			return
		}

		if !f.IsDir() {
			err = ftp.UploadFile(fname, localPath, false, callback) // always binary upload
			if err != nil {
				return
			}
			*n += 1 // increment
		} else {
			if len(excludedDirs) > 0 {
				ftp.writeInfo("Checking folder name:", fname)
				lfname := strings.ToLower(fname)
				idx := sort.SearchStrings(excludedDirs, lfname)
				if idx < len(excludedDirs) && excludedDirs[idx] == lfname {
					ftp.writeInfo("Excluding folder:", s)
					continue
				}
			}
			if err = ftp.uploadDirTree(localPath, excludedDirs, callback, n); err != nil {
				return
			}
		}
	}
	return
}
func BenchmarkCFuseThreadedStat(b *testing.B) {
	b.StopTimer()

	lines := GetTestLines()
	unique := map[string]int{}
	for _, l := range lines {
		unique[l] = 1
		dir, _ := filepath.Split(l)
		for dir != "/" && dir != "" {
			unique[dir] = 1
			dir = filepath.Clean(dir)
			dir, _ = filepath.Split(dir)
		}
	}

	out := []string{}
	for k := range unique {
		out = append(out, k)
	}

	f, err := ioutil.TempFile("", "")
	CheckSuccess(err)
	sort.Strings(out)
	for _, k := range out {
		f.Write([]byte(fmt.Sprintf("/%s\n", k)))
	}
	f.Close()

	mountPoint, _ := ioutil.TempDir("", "stat_test")
	wd, _ := os.Getwd()
	cmd := exec.Command(wd+"/cstatfs",
		"-o",
		"entry_timeout=0.0,attr_timeout=0.0,ac_attr_timeout=0.0,negative_timeout=0.0",
		mountPoint)
	cmd.Env = append(os.Environ(),
		fmt.Sprintf("STATFS_INPUT=%s", f.Name()),
		fmt.Sprintf("STATFS_DELAY_USEC=%d", delay/time.Microsecond))
	cmd.Start()

	bin, err := exec.LookPath("fusermount")
	CheckSuccess(err)
	stop := exec.Command(bin, "-u", mountPoint)
	CheckSuccess(err)
	defer stop.Run()

	for i, l := range lines {
		lines[i] = filepath.Join(mountPoint, l)
	}

	// Wait for the daemon to mount.
	time.Sleep(200 * time.Millisecond)
	ttl := time.Millisecond * 100
	threads := runtime.GOMAXPROCS(0)
	results := TestingBOnePass(b, threads, time.Duration((ttl*12)/10), lines)
	AnalyzeBenchmarkRuns("CFuse", results)
}
func (d *Driver) extractKernelImages() error {
	log.Debugf("Mounting %s", isoFilename)

	volumeRootDir := d.ResolveStorePath(isoMountPath)
	err := hdiutil("attach", d.ResolveStorePath(isoFilename), "-mountpoint", volumeRootDir)
	if err != nil {
		return err
	}

	log.Debugf("Extracting Kernel Options...")
	if err := d.extractKernelOptions(); err != nil {
		return err
	}

	defer func() error {
		log.Debugf("Unmounting %s", isoFilename)
		return hdiutil("detach", volumeRootDir)
	}()

	if d.BootKernel == "" && d.BootInitrd == "" {
		err = filepath.Walk(volumeRootDir, func(path string, f os.FileInfo, err error) error {
			if kernelRegexp.MatchString(path) {
				d.BootKernel = path
				_, d.Vmlinuz = filepath.Split(path)
			}
			if strings.Contains(path, "initrd") {
				d.BootInitrd = path
				_, d.Initrd = filepath.Split(path)
			}
			return nil
		})
	}

	if err != nil {
		return fmt.Errorf("==== Can't extract Kernel and Ramdisk file ====")
	}

	dest := d.ResolveStorePath(d.Vmlinuz)
	log.Debugf("Extracting %s into %s", d.BootKernel, dest)
	if err := mcnutils.CopyFile(d.BootKernel, dest); err != nil {
		return err
	}

	dest = d.ResolveStorePath(d.Initrd)
	log.Debugf("Extracting %s into %s", d.BootInitrd, dest)
	if err := mcnutils.CopyFile(d.BootInitrd, dest); err != nil {
		return err
	}

	return nil
}
// DecodeStorePath extracts the database and retention policy names
// from a given shard or WAL path.
func DecodeStorePath(shardOrWALPath string) (database, retentionPolicy string) {
	// shardOrWALPath format: /maybe/absolute/base/then/:database/:retentionPolicy/:nameOfShardOrWAL

	// Discard the last part of the path (the shard name or the wal name).
	path, _ := filepath.Split(filepath.Clean(shardOrWALPath))

	// Extract the database and retention policy.
	path, rp := filepath.Split(filepath.Clean(path))
	_, db := filepath.Split(filepath.Clean(path))
	return db, rp
}
func (f *p4Folder) fetch() bool {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.files != nil {
		return true
	}

	var err error
	path := "//" + f.path
	if !strings.HasSuffix(path, "/") {
		path += "/"
	}
	path += fmt.Sprintf("*@%d", f.change)

	folders, err := f.fs.p4.Dirs([]string{path})
	if err != nil {
		log.Printf("fetch: %v", err)
		return false
	}

	files, err := f.fs.p4.Fstat([]string{path})
	if err != nil {
		log.Printf("fetch: %v", err)
		return false
	}

	f.files = map[string]*p4.Stat{}
	done := map[string]bool{}
	for _, r := range files {
		stat, ok := r.(*p4.Stat)
		if !ok {
			continue
		}
		_, base := filepath.Split(stat.DepotFile)
		if done[base] {
			continue
		}
		done[base] = true
		if stat.HeadAction != "delete" {
			f.files[base] = stat
		}
	}

	f.folders = map[string]bool{}
	for _, r := range folders {
		if dir, ok := r.(*p4.Dir); ok {
			_, base := filepath.Split(dir.Dir)
			f.folders[base] = true
		}
	}
	return true
}
func BenchmarkCFuseThreadedStat(b *testing.B) {
	log.Println("benchmarking CFuse")

	lines := GetTestLines()
	unique := map[string]int{}
	for _, l := range lines {
		unique[l] = 1
		dir, _ := filepath.Split(l)
		for dir != "/" && dir != "" {
			unique[dir] = 1
			dir = filepath.Clean(dir)
			dir, _ = filepath.Split(dir)
		}
	}

	out := []string{}
	for k := range unique {
		out = append(out, k)
	}

	f, err := ioutil.TempFile("", "")
	CheckSuccess(err)
	sort.Strings(out)
	for _, k := range out {
		f.Write([]byte(fmt.Sprintf("/%s\n", k)))
	}
	f.Close()

	log.Println("Written:", f.Name())

	mountPoint, _ := ioutil.TempDir("", "stat_test")
	wd, _ := os.Getwd()
	cmd := exec.Command(wd+"/cstatfs", mountPoint)
	cmd.Env = append(os.Environ(), fmt.Sprintf("STATFS_INPUT=%s", f.Name()))
	cmd.Start()

	bin, err := exec.LookPath("fusermount")
	CheckSuccess(err)
	stop := exec.Command(bin, "-u", mountPoint)
	CheckSuccess(err)
	defer stop.Run()

	for i, l := range lines {
		lines[i] = filepath.Join(mountPoint, l)
	}

	// Wait for the daemon to mount.
	time.Sleep(0.2e9)
	ttl := 1.0
	log.Println("N = ", b.N)
	threads := runtime.GOMAXPROCS(0)
	results := TestingBOnePass(b, threads, ttl*1.2, lines)
	AnalyzeBenchmarkRuns(results)
}
func main() {
	flag.Usage = func() {
		_, program := filepath.Split(os.Args[0])
		fmt.Fprintf(os.Stderr, "usage: %s [OPTIONS] [FILE]\n", program)
		flag.PrintDefaults()
	}
	shouldCompress := flag.Bool("compress", false, "compress output using gzip")
	flag.Parse()
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(2)
	}

	root, err := filepath.Abs(flag.Arg(0))
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
		os.Exit(-1)
	}
	f, err := os.OpenFile(root, os.O_RDONLY, os.ModeDir)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
		os.Exit(-1)
	}

	output := io.WriteCloser(os.Stdout)
	if *shouldCompress {
		output = gzip.NewWriter(output)
	}

	_, rootArchivePath := filepath.Split(root)
	ctx := &creationContext{rootArchivePath, tar.NewWriter(output), make(map[string]bool)}
	err = ctx.addDir(root, rootArchivePath, f, true)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
		os.Exit(-1)
	}
	err = ctx.archive.Close()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
		os.Exit(-1)
	}
	err = output.Close()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
		os.Exit(-1)
	}
}
func (d *DB) createFileTree() error {
	if _, err := d.Exec(createFilesTable); err != nil {
		return fmt.Errorf("error creating files table: %v", err)
	}

	rs, err := d.Query("SELECT ticket FROM Nodes WHERE node_kind = 'file';")
	if err != nil {
		return fmt.Errorf("error creating files query: %v", err)
	}

	insert, err := d.Prepare(`INSERT INTO Files (corpus, root, path, ticket, file) VALUES ($1, $2, $3, $4, $5);`)
	if err != nil {
		return fmt.Errorf("error preparing statement: %v", err)
	}

	for rs.Next() {
		var ticket string
		if err := rs.Scan(&ticket); err != nil {
			return fmt.Errorf("scan error: %v", err)
		}
		uri, err := kytheuri.Parse(ticket)
		if err != nil {
			return fmt.Errorf("error parsing node ticket %q: %v", ticket, err)
		}
		path, _ := filepath.Split(filepath.Join("/", uri.Path))
		if _, err := insert.Exec(uri.Corpus, uri.Root, path, ticket, true); err != nil {
			return fmt.Errorf("error inserting file: %v", err)
		}

		uri.Signature, uri.Language = "", ""
		for {
			uri.Path = path
			path, _ = filepath.Split(strings.TrimSuffix(path, "/"))
			if path == "" {
				break
			}
			if _, err := insert.Exec(uri.Corpus, uri.Root, path, uri.String(), false); err != nil {
				if err, ok := err.(*pq.Error); ok && err.Code == pqUniqueViolationErrorCode {
					// Since we've found the current directory, we can stop recursively
					// adding parent directories now
					break
				}
				return fmt.Errorf("error inserting directory: %v", err)
			}
		}
	}
	return nil
}
func NewFile(relpath string) *File {
	f := &File{
		relpath: relpath,
	}

	f.dir, _ = filepath.Split(f.relpath)
	_, f.logicalName = filepath.Split(f.relpath)
	f.ext = strings.TrimPrefix(filepath.Ext(f.LogicalName()), ".")
	f.section = helpers.GuessSection(f.Dir())
	f.uniqueID = helpers.Md5String(f.LogicalName())

	return f
}
func reloader(reload chan int) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		die("failed creating watcher (%s)", err.Error())
	}
	defer watcher.Close()

	bindir, err := Arg0Dir()
	if err != nil {
		die("failed getting path to binary (%s)", err.Error())
	}
	pprint("monitoring %s", bindir)
	me := Arg0Base()
	err = watcher.Add(bindir)
	if err != nil {
		die("failed watching binary (%s)", err.Error())
	}

	watchcfg := false
	cfgfile, cfgdir := "", ""
	if len(os.Args) == 2 {
		cfgpath, err := CfgPath()
		if err != nil {
			die("error getting configuration path (%s)", err.Error())
		}
		cfgdir, cfgfile = filepath.Split(cfgpath)
		err = watcher.Add(cfgdir)
		if err != nil {
			die("failed watching configuration (%s)", err.Error())
		}
		cfgdir = cfgdir[:len(cfgdir)-1]
		pprint("monitoring %s", cfgdir)
		watchcfg = true
	}

	for {
		select {
		case event := <-watcher.Events:
			if isReloadEvent(event) {
				dir, base := filepath.Split(event.Name)
				dir = dir[:len(dir)-1]
				if dir == bindir && base == me {
					reload <- BinReload
				} else if watchcfg && dir == cfgdir && base == cfgfile {
					reload <- ConfigReload
				}
			}
		case err := <-watcher.Errors:
			die("failed getting events (%s)", err.Error())
		}
	}
}
// collectTests traverses the input directory and finds all
// unit test files.
func collectTests() <-chan string {
	c := make(chan string)

	go func() {
		defer close(c)

		stat, _ := os.Lstat(input)
		if !stat.IsDir() {
			_, name := filepath.Split(input)
			ok, err := filepath.Match("*_test.dasm", name)
			if !ok || err != nil {
				return
			}
			c <- input
			return
		}

		filepath.Walk(input, func(file string, info os.FileInfo, err error) error {
			if info.IsDir() {
				return nil
			}

			_, name := filepath.Split(file)
			ok, err := filepath.Match("*_test.dasm", name)
			if !ok || err != nil {
				return err
			}

			parts := strings.Split(file, string(filepath.Separator))
			for i := range parts {
				if len(parts[i]) == 0 {
					continue
				}
				if parts[i][0] == '_' {
					return nil
				}
			}

			c <- file
			return nil
		})
	}()

	return c
}
func osFileCompletions(line []rune, pos int, parts []string) (head string, completions []string, tail string) {
	dir, err := os.Getwd()
	if err != nil {
		fmt.Printf("Error getting current working dir - %v", err)
		return
	}
	file := ""
	// if this is not a new file we are starting with
	if !unicode.IsSpace(line[pos-1]) || strings.HasSuffix(string(line), "\\ ") {
		lineCopy := strings.Replace(string(line[:pos]), "\\ ", " ", -1)
		index := strings.Index(lineCopy, " ")
		for index != -1 {
			if line[index-1] != '\\' {
				lineCopy = lineCopy[index+1:]
				index = strings.Index(lineCopy, " ")
			} else {
				index = strings.Index(lineCopy[index+1:], " ")
			}
		}
		if lineCopy[0] == os.PathSeparator || len(lineCopy) > 2 && lineCopy[1] == ':' {
			dir, file = filepath.Split(lineCopy)
		} else {
			dir, file = filepath.Split(fmt.Sprintf("%s%c%s", dir, os.PathSeparator, lineCopy))
		}
	}

	dirFile, err := os.Open(dir)
	if err != nil {
		return
	}
	fi, err := dirFile.Readdir(-1)
	if err != nil {
		return
	}

	for i := range fi {
		if strings.HasPrefix(fi[i].Name(), file) {
			name := strings.Replace(fi[i].Name(), " ", "\\ ", -1)
			if fi[i].IsDir() {
				name = fmt.Sprintf("%s%c", name, os.PathSeparator)
			}
			completions = append(completions, name)
		}
	}

	if pos < len(line) {
		tail = string(line[pos:])
	}
	head = string(line[:pos-len([]rune(file))])
	return
}
func (sb *sandbox) buildHostsFile() error {
	if sb.config.hostsPath == "" {
		sb.config.hostsPath = defaultPrefix + "/" + sb.id + "/hosts"
	}

	dir, _ := filepath.Split(sb.config.hostsPath)
	if err := createBasePath(dir); err != nil {
		return err
	}

	// This is for the host mode networking
	if sb.config.originHostsPath != "" {
		if err := copyFile(sb.config.originHostsPath, sb.config.hostsPath); err != nil && !os.IsNotExist(err) {
			return types.InternalErrorf("could not copy source hosts file %s to %s: %v", sb.config.originHostsPath, sb.config.hostsPath, err)
		}
		return nil
	}

	extraContent := make([]etchosts.Record, 0, len(sb.config.extraHosts))
	for _, extraHost := range sb.config.extraHosts {
		extraContent = append(extraContent, etchosts.Record{Hosts: extraHost.name, IP: extraHost.IP})
	}

	return etchosts.Build(sb.config.hostsPath, "", sb.config.hostName, sb.config.domainName, extraContent)
}
func SafeWriteToDisk(inpath string, r io.Reader) (err error) {
	dir, _ := filepath.Split(inpath)
	ospath := filepath.FromSlash(dir)

	if ospath != "" {
		err = os.MkdirAll(ospath, 0777) // rwx, rw, r
		if err != nil {
			return
		}
	}

	exists, err := Exists(inpath)
	if err != nil {
		return
	}
	if exists {
		return fmt.Errorf("%v already exists", inpath)
	}

	file, err := os.Create(inpath)
	if err != nil {
		return
	}
	defer file.Close()

	_, err = io.Copy(file, r)
	return
}
// MountAgent bind mounts a SSH or GnuPG agent socket into the chroot
func (e *enter) MountAgent(env string) error {
	origPath := os.Getenv(env)
	if origPath == "" {
		return nil
	}

	origDir, origFile := filepath.Split(origPath)
	if _, err := os.Stat(origDir); err != nil {
		// Just skip if the agent has gone missing.
		return nil
	}

	newDir, err := ioutil.TempDir(e.UserRunDir, "agent-")
	if err != nil {
		return err
	}

	if err := system.Bind(origDir, newDir); err != nil {
		return err
	}

	newPath := filepath.Join(newDir, origFile)
	chrootPath := strings.TrimPrefix(newPath, e.Chroot)
	return os.Setenv(env, chrootPath)
}
// importLayer adds a new layer to the tag and graph store based on the given data.
func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) {
	layerFolder := d.dir(id)

	tempFolder := layerFolder + "-" + strconv.FormatUint(uint64(random.Rand.Uint32()), 10)
	if err = os.MkdirAll(tempFolder, 0755); err != nil {
		logrus.Errorf("Could not create %s %s", tempFolder, err)
		return
	}
	defer func() {
		_, folderName := filepath.Split(tempFolder)
		if err2 := hcsshim.DestroyLayer(d.info, folderName); err2 != nil {
			logrus.Warnf("Couldn't clean-up tempFolder: %s %s", tempFolder, err2)
		}
	}()

	start := time.Now().UTC()
	logrus.Debugf("Start untar layer")
	if size, err = chrootarchive.ApplyLayer(tempFolder, layerData); err != nil {
		return
	}
	logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())

	if err = hcsshim.ImportLayer(d.info, id, tempFolder, parentLayerPaths); err != nil {
		return
	}

	return
}
func prepareRequest(postBody interface{}, headerParams map[string]string, queryParams url.Values, formParams map[string]string, fileName string, fileBytes []byte) *resty.Request {
	request := resty.R()
	request.SetBody(postBody)

	// add header parameter, if any
	if len(headerParams) > 0 {
		request.SetHeaders(headerParams)
	}

	// add query parameter, if any
	if len(queryParams) > 0 {
		request.SetMultiValueQueryParams(queryParams)
	}

	// add form parameter, if any
	if len(formParams) > 0 {
		request.SetFormData(formParams)
	}

	if len(fileBytes) > 0 && fileName != "" {
		_, fileNm := filepath.Split(fileName)
		request.SetFileReader("file", fileNm, bytes.NewReader(fileBytes))
	}

	return request
}
// chooseGOPATH selects the gopath component that has the longest prefix in common with dest.
// It breaks ties by preferring earlier components.
func chooseGOPATH(gopaths []string, dest string) string {
	for {
		dest = strings.TrimSuffix(dest, "/")
		chosen := ""
		for i := len(gopaths) - 1; i >= 0; i-- {
			dir := filepath.Join(gopaths[i], "src", dest)
			if _, err := os.Stat(dir); err == nil {
				chosen = gopaths[i]
			}
		}
		if chosen != "" {
			return chosen
		}
		dest, _ = filepath.Split(dest)
		if dest == "" {
			break
		}
	}

	// None of the gopaths contain any prefix of dest.
	// Pick the first gopath that exists.
	for _, dir := range gopaths {
		if _, err := os.Stat(dir); err == nil {
			return dir
		}
	}
	return gopaths[0]
}