func TestNewResourceId(t *testing.T) { r, err := NewRepository(testBaseDir) // something wrong with constructor if err != nil { t.Errorf("could not create the directories") } id, fp := r.NewResourceIdWithSubdir(SubdirDefault) if id == "" || filepath.Dir(fp) != filepath.Join(r.basedir, SubdirDefault) || filepath.Base(fp) != id { t.Errorf("something went wrong with id generation") } // test cached id id, fp = r.NewResourceIdWithSubdir(SubdirCache) if id == "" || filepath.Dir(fp) != filepath.Join(r.basedir, SubdirCache) || filepath.Base(fp) != id { t.Errorf("something went wrong with id generation") } // test a local id id, fp = r.NewResourceId() if id == "" || filepath.Dir(fp) != filepath.Join(r.basedir, SubdirDefault) || filepath.Base(fp) != id { t.Errorf("something went wrong with id generation") } // test invalid dir AssertPanic(t, func() { r.NewResourceIdWithSubdir("nonsense") }) }
// Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = UntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination if a file") } }
// TestCommandRelativeName re-executes the test binary via a path relative
// to its parent directory and verifies the helper process echoes correctly.
func TestCommandRelativeName(t *testing.T) {
	if runtime.GOOS == "darwin" && runtime.GOARCH == "arm" {
		t.Skip("skipping on darwin/arm")
	}

	// Run our own binary as a relative path
	// (e.g. "_test/exec.test") our parent directory.
	base := filepath.Base(os.Args[0]) // "exec.test"
	dir := filepath.Dir(os.Args[0])   // "/tmp/go-buildNNNN/os/exec/_test"
	if dir == "." {
		t.Skip("skipping; running test at root somehow")
	}
	parentDir := filepath.Dir(dir) // "/tmp/go-buildNNNN/os/exec"
	dirBase := filepath.Base(dir)  // "_test"
	if dirBase == "." {
		t.Skipf("skipping; unexpected shallow dir of %q", dir)
	}

	cmd := exec.Command(filepath.Join(dirBase, base), "-test.run=TestHelperProcess", "--", "echo", "foo")
	cmd.Dir = parentDir
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}

	out, err := cmd.Output()
	if err != nil {
		t.Errorf("echo: %v", err)
	}
	want, got := "foo\n", string(out)
	if got != want {
		t.Errorf("echo: want %q, got %q", want, got)
	}
}
func ProcessRename(page *Page, args []string) { if len(args) < 1 { errhandle(fmt.Errorf("'rename' rule needs an argument")) } dest := args[0] if strings.Contains(dest, "*") { if !strings.Contains(page.Pattern, "*") { errhandle(fmt.Errorf( "'rename' rule cannot rename '%s' to '%s'", page.Pattern, dest)) } group := fmt.Sprintf("([^%c]*)", filepath.Separator) base := filepath.Base(page.Pattern) pat := strings.Replace(regexp.QuoteMeta(base), "\\*", group, 1) re, err := regexp.Compile(pat) errhandle(err) m := re.FindStringSubmatch(filepath.Base(page.Path)) dest = strings.Replace(dest, "*", m[1], 1) } page.Path = filepath.Join(filepath.Dir(page.Path), dest) }
// Gets meta type and name based on a path func getMetaForPath(path string) (metaName string, objectName string) { parentDir := filepath.Dir(path) parentName := filepath.Base(parentDir) grandparentName := filepath.Base(filepath.Dir(parentDir)) fileName := filepath.Base(path) for _, mp := range metapaths { if mp.hasFolder && grandparentName == mp.path { metaName = mp.name if mp.onlyFolder { objectName = parentName } else { objectName = parentName + "/" + fileName } return } if mp.path == parentName { metaName = mp.name objectName = fileName return } } // Unknown, so use path metaName = parentName objectName = fileName return }
// StartSnapServices starts service units for the applications from the snap which are services. func StartSnapServices(s *snap.Info, inter interacter) error { for _, app := range s.Apps { if app.Daemon == "" { continue } // daemon-reload and enable plus start serviceName := filepath.Base(app.ServiceFile()) sysd := systemd.New(dirs.GlobalRootDir, inter) if err := sysd.DaemonReload(); err != nil { return err } if err := sysd.Enable(serviceName); err != nil { return err } if err := sysd.Start(serviceName); err != nil { return err } if app.Socket { socketName := filepath.Base(app.ServiceSocketFile()) // enable the socket if err := sysd.Enable(socketName); err != nil { return err } if err := sysd.Start(socketName); err != nil { return err } } } return nil }
func (c *config) discoverSingle(glob string, m *map[string]string) error { matches, err := filepath.Glob(glob) if err != nil { return err } if *m == nil { *m = make(map[string]string) } prefix := filepath.Base(glob) prefix = prefix[:strings.Index(prefix, "*")] for _, match := range matches { file := filepath.Base(match) // If the filename has a ".", trim up to there if idx := strings.Index(file, "."); idx >= 0 { file = file[:idx] } // Look for foo-bar-baz. The plugin name is "baz" plugin := file[len(prefix):] log.Printf("[DEBUG] Discovered plugin: %s = %s", plugin, match) (*m)[plugin] = match } return nil }
// NewPackageBuilder creates a new Builder based on the go package in dir. func NewPackageBuilder(dir string, target io.Writer, options *Options) (Builder, error) { abs, err := filepath.Abs(dir) if err != nil { return nil, err } b := NewBuilder(target, options).(*builder) b.pkgName = abs files, err := filepath.Glob(filepath.Join(dir, "*.go")) if err != nil { return nil, err } var f *os.File for _, file := range files { f, err = os.Open(filepath.Join(dir, filepath.Base(file))) if err != nil { return nil, err } // make a copy in order to be able to close the file var buf bytes.Buffer _, err = io.Copy(&buf, f) if err != nil { f.Close() return nil, err } b.Add(filepath.Base(file), &buf) f.Close() } return b, nil }
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string) (CopyInfo, error) {
	// Split the given path into its Directory and Base components. We will
	// evaluate symlinks in the directory component then append the base.
	dirPath, basePath := filepath.Split(path)

	resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
	if err != nil {
		return CopyInfo{}, err
	}

	// resolvedDirPath will have been cleaned (no trailing path separators) so
	// we can manually join it with the base path element.
	resolvedPath := resolvedDirPath + string(filepath.Separator) + basePath

	var rebaseName string
	if HasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
		// In the case where the path had a trailing separator and a symlink
		// evaluation has changed the last path component, we will need to
		// rebase the name in the archive that is being copied to match the
		// originally requested name.
		rebaseName = filepath.Base(path)
	}

	// Lstat (not Stat) so that a symlink as the final path component is
	// reported as the link itself rather than its target.
	stat, err := os.Lstat(resolvedPath)
	if err != nil {
		return CopyInfo{}, err
	}

	return CopyInfo{
		Path:       resolvedPath,
		Exists:     true,
		IsDir:      stat.IsDir(),
		RebaseName: rebaseName,
	}, nil
}
// CreateSnapshot will create hardlinks for all tsm and tombstone files // in the path provided func (f *FileStore) CreateSnapshot() (string, error) { f.traceLogger.Printf("Creating snapshot in %s", f.dir) files := f.Files() f.mu.Lock() f.currentTempDirID += 1 f.mu.Unlock() f.mu.RLock() defer f.mu.RUnlock() // get a tmp directory name tmpPath := fmt.Sprintf("%s/%d.tmp", f.dir, f.currentTempDirID) err := os.Mkdir(tmpPath, 0777) if err != nil { return "", err } for _, tsmf := range files { newpath := filepath.Join(tmpPath, filepath.Base(tsmf.Path())) if err := os.Link(tsmf.Path(), newpath); err != nil { return "", fmt.Errorf("error creating tsm hard link: %q", err) } // Check for tombstones and link those as well for _, tf := range tsmf.TombstoneFiles() { newpath := filepath.Join(tmpPath, filepath.Base(tf.Path)) if err := os.Link(tf.Path, newpath); err != nil { return "", fmt.Errorf("error creating tombstone hard link: %q", err) } } } return tmpPath, nil }
// API function to delete an image by its filename. func deleteApiImageHandler(w http.ResponseWriter, r *http.Request, _ map[string]string) { userName := authentication.GetUserName(r) if userName != "" { // TODO: Check if the user has permissions to delete the image // Get the file name from the json data decoder := json.NewDecoder(r.Body) var json JsonImage err := decoder.Decode(&json) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } err = filepath.Walk(filenames.ImagesFilepath, func(filePath string, info os.FileInfo, err error) error { if !info.IsDir() && filepath.Base(filePath) == filepath.Base(json.Filename) { err := os.Remove(filePath) if err != nil { return err } } return nil }) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.WriteHeader(http.StatusOK) w.Write([]byte("Image deleted!")) return } else { http.Error(w, "Not logged in!", http.StatusInternalServerError) return } }
// createDummyPackage is deprecated.
//
// It builds an empty-but-valid archive at name: a placeholder "dummy" file
// is created next to the target, packed with `jar cf`, removed from disk,
// and finally stripped back out of the archive with `zip -qd`, leaving an
// empty archive. Returns false if any file operation or external command
// fails.
func createDummyPackage(name string) bool {
	// Create the placeholder file in the target's directory.
	if f, e := os.Create(filepath.Join(filepath.Dir(name), "dummy")); e == nil {
		f.Close()
	} else {
		return false
	}
	// Pack the placeholder into the archive.
	c := NewExcmd("jar")
	c.SetDir(filepath.Dir(name))
	if !c.Run("DummyPackage", "cf", filepath.Base(name), "dummy") {
		return false
	}
	// Remove the placeholder from disk; failure here is fatal.
	if e := os.Remove(filepath.Join(filepath.Dir(name), "dummy")); e != nil {
		Fatal("remove: %v (%v)\n", "dummy", e)
	}
	// Delete the "dummy" entry from the archive again, leaving it empty.
	c = NewExcmd("zip")
	c.SetDir(filepath.Dir(name))
	if !c.Run("DummyPackage", "-qd", filepath.Base(name), "dummy") {
		return false
	}
	return true
}
// ResolveHostSourcePath decides real path need to be copied with parameters such as
// whether to follow symbol link or not, if followLink is true, resolvedPath will return
// link target of any symbol link file, else it will only resolve symlink of directory
// but return symbol link file itself without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
	if followLink {
		// Fully resolve the path, including a symlink as the final component.
		resolvedPath, err = filepath.EvalSymlinks(path)
		if err != nil {
			return
		}
		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
	} else {
		dirPath, basePath := filepath.Split(path)

		// if not follow symbol link, then resolve symbol link of parent dir
		var resolvedDirPath string
		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
		if err != nil {
			return
		}
		// resolvedDirPath will have been cleaned (no trailing path separators) so
		// we can manually join it with the base path element.
		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
		// If the original path had a trailing separator and symlink resolution
		// changed the last component, the archive entry must be renamed back
		// to the originally requested name.
		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
			rebaseName = filepath.Base(path)
		}
	}
	return resolvedPath, rebaseName, nil
}
// NewPageSet builds a PageSet from the contents of dir: every ".md" file is
// parsed into a Page and registered under its front-matter slug, and a
// "penny.yml" file (if present) configures the set. The finished set is
// linked and cached in the package-level PageSets map, keyed by dir.
func NewPageSet(dir string) *PageSet {
	p := &PageSet{Sitemap: true}
	p.Path = filepath.Base(dir)
	p.pages = make(map[string]*Page)
	filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err == nil {
			if !info.IsDir() {
				switch {
				case filepath.Ext(path) == ".md":
					// A parse failure aborts the whole walk via panic.
					if page, err := Parse(path, false); err != nil {
						panic(err)
					} else {
						p.Add(page.FrontMatter.Slug, page)
					}
				case filepath.Base(path) == "penny.yml":
					p.Configure(path)
				}
			}
		}
		// Returning a non-nil err propagates the walk error and stops the
		// traversal; on success err is nil here.
		return err
	})
	p.Link()
	PageSets[dir] = p
	return p
}
func archiveLegacyConfig() { pat := filepath.Join(confDir, "*.idx.gz*") idxs, err := filepath.Glob(pat) if err == nil && len(idxs) > 0 { // There are legacy indexes. This is probably the first time we run as v0.9. backupDir := filepath.Join(confDir, "backup-of-v0.8") err = os.MkdirAll(backupDir, 0700) if err != nil { l.Warnln("Cannot archive config/indexes:", err) return } for _, idx := range idxs { l.Infof("Archiving %s", filepath.Base(idx)) os.Rename(idx, filepath.Join(backupDir, filepath.Base(idx))) } src, err := os.Open(filepath.Join(confDir, "config.xml")) if err != nil { l.Warnf("Cannot archive config:", err) return } defer src.Close() dst, err := os.Create(filepath.Join(backupDir, "config.xml")) if err != nil { l.Warnf("Cannot archive config:", err) return } defer src.Close() l.Infoln("Archiving config.xml") io.Copy(dst, src) } }
// getKeyRole finds the role for the given keyID. It attempts to look // both in the newer format PEM headers, and also in the legacy filename // format. It returns: the role, whether it was found in the legacy format // (true == legacy), and an error func getKeyRole(s Storage, keyID string) (string, bool, error) { name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID))) for _, file := range s.ListFiles() { filename := filepath.Base(file) if strings.HasPrefix(filename, name) { d, err := s.Get(file) if err != nil { return "", false, err } block, _ := pem.Decode(d) if block != nil { if role, ok := block.Headers["role"]; ok { return role, false, nil } } role := strings.TrimPrefix(filename, name+"_") return role, true, nil } } return "", false, ErrKeyNotFound{KeyID: keyID} }
func TestStatJunctionLink(t *testing.T) { if !supportJunctionLinks { t.Skip("skipping because junction links are not supported") } dir, err := ioutil.TempDir("", "go-build") if err != nil { t.Fatalf("failed to create temp directory: %v", err) } defer os.RemoveAll(dir) link := filepath.Join(filepath.Dir(dir), filepath.Base(dir)+"-link") output, err := osexec.Command("cmd", "/c", "mklink", "/J", link, dir).CombinedOutput() if err != nil { t.Fatalf("failed to run mklink %v %v: %v %q", link, dir, err, output) } defer os.Remove(link) fi, err := os.Stat(link) if err != nil { t.Fatalf("failed to stat link %v: %v", link, err) } expected := filepath.Base(dir) got := fi.Name() if !fi.IsDir() || expected != got { t.Fatalf("link should point to %v but points to %v instead", expected, got) } }
// NewLDBDatabase returns a LevelDB wrapped object. func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) { // Calculate the cache and file descriptor allowance for this particular database cache = int(float64(cache) * cacheRatio[filepath.Base(file)]) if cache < 16 { cache = 16 } handles = int(float64(handles) * handleRatio[filepath.Base(file)]) if handles < 16 { handles = 16 } glog.V(logger.Info).Infof("Alloted %dMB cache and %d file handles to %s", cache, handles, file) // Open the db and recover any potential corruptions db, err := leveldb.OpenFile(file, &opt.Options{ OpenFilesCacheCapacity: handles, BlockCacheCapacity: cache / 2 * opt.MiB, WriteBuffer: cache / 4 * opt.MiB, // Two of these are used internally Filter: filter.NewBloomFilter(10), }) if _, corrupted := err.(*errors.ErrCorrupted); corrupted { db, err = leveldb.RecoverFile(file, nil) } // (Re)check for errors and abort if opening of the db failed if err != nil { return nil, err } return &LDBDatabase{ fn: file, db: db, }, nil }
// assemblyPerSourceLine disassembles the binary containing a symbol // and classifies the assembly instructions according to its // corresponding source line, annotating them with a set of samples. func assemblyPerSourceLine(objSyms []*objSymbol, rs nodes, src string, obj plugin.ObjTool) map[int]nodes { assembly := make(map[int]nodes) // Identify symbol to use for this collection of samples. o := findMatchingSymbol(objSyms, rs) if o == nil { return assembly } // Extract assembly for matched symbol insns, err := obj.Disasm(o.sym.File, o.sym.Start, o.sym.End) if err != nil { return assembly } srcBase := filepath.Base(src) anodes := annotateAssembly(insns, rs, o.base) var lineno = 0 for _, an := range anodes { if filepath.Base(an.info.file) == srcBase { lineno = an.info.lineno } if lineno != 0 { assembly[lineno] = append(assembly[lineno], an) } } return assembly }
// pattern: in repo/android/build/core/base_rules.mk
// java_resource_file_groups+= ... // cd ${TOP_DIR}${LOCAL_PATH}/${dir} && find . -type d -a -name ".svn" -prune \
// -o -type f -a \! -name "*.java" -a \! -name "package.html" -a \! \
// -name "overview.html" -a \! -name ".*.swp" -a \! -name ".DS_Store" \
// -a \! -name "*~" -print )
//
// findJavaResourceFileGroup emulates the above `find` invocation against
// the cached directory walk, writing each matching path (relative to dir,
// prefixed with "./") to w.
func (c *androidFindCacheT) findJavaResourceFileGroup(w evalWriter, dir string) {
	glog.V(1).Infof("android find java resource in dir cache: %s", dir)
	c.walk(filepath.Clean(dir), func(_ int, fi fileInfo) error {
		// -type d -a -name ".svn" -prune
		if fi.mode.IsDir() && filepath.Base(fi.path) == ".svn" {
			return errSkipDir
		}
		// -type f
		if !fi.mode.IsRegular() {
			return nil
		}
		// ! -name "*.java" -a ! -name "package.html" -a
		// ! -name "overview.html" -a ! -name ".*.swp" -a
		// ! -name ".DS_Store" -a ! -name "*~"
		base := filepath.Base(fi.path)
		if filepath.Ext(base) == ".java" || base == "package.html" || base == "overview.html" || (strings.HasPrefix(base, ".") && strings.HasSuffix(base, ".swp")) || base == ".DS_Store" || strings.HasSuffix(base, "~") {
			return nil
		}
		// Emit paths relative to dir, in find's "./name" form.
		name := strings.TrimPrefix(fi.path, dir+"/")
		name = "./" + name
		w.writeWordString(name)
		glog.V(1).Infof("android find java resource in dir cache: %s=> %s", dir, name)
		return nil
	})
}
// Refresh reloads the Network from the data store.
func (n *Network) Refresh() error {
	// Fetch the etcd subtree for this network. NOTE(review): the boolean
	// args are presumably (sort=false, recursive=true) — confirm against
	// the etcd client API in use.
	resp, err := n.context.etcd.Get(filepath.Join(NetworkPath, n.ID), false, true)
	if err != nil {
		return err
	}

	for _, node := range resp.Node.Nodes {
		key := filepath.Base(node.Key)
		switch key {
		case "metadata":
			// Unmarshal directly into the receiver, overwriting its fields
			// with the stored JSON, and record the node's modified index.
			if err := json.Unmarshal([]byte(node.Value), &n); err != nil {
				return err
			}
			n.modifiedIndex = node.ModifiedIndex
		case "subnets":
			// Each child key's base name identifies one subnet.
			for _, x := range node.Nodes {
				n.subnets = append(n.subnets, filepath.Base(x.Key))
			}
		}
	}
	return nil
}
func TestListLogFiles(t *testing.T) { setFlags() Info(context.Background(), "x") // Be sure we have a file. Warning(context.Background(), "x") // Be sure we have a file. var info, warn *syncBuffer var ok bool info, ok = logging.file[InfoLog].(*syncBuffer) if !ok { t.Fatal("info wasn't created") } infoName := filepath.Base(info.file.Name()) warn, ok = logging.file[WarningLog].(*syncBuffer) if !ok { t.Fatal("warning wasn't created") } warnName := filepath.Base(warn.file.Name()) results, err := ListLogFiles() if err != nil { t.Fatal(err) } var foundInfo, foundWarn bool for _, r := range results { fmt.Printf("Results: Name:%v\n", r.Name) if r.Name == infoName { foundInfo = true } if r.Name == warnName { foundWarn = true } } if !foundInfo || !foundWarn { t.Errorf("expected to find %s, %s; got %d results", infoName, warnName, len(results)) } }
func main() { files, err := filepath.Glob("pub/c/*.jpg") if err != nil { panic(err) } j := []*img{} for _, file := range files { lg, err := decode(file) if err != nil { panic(err) } sm, err := decode(filepath.Join("pub/b", filepath.Base(file))) if err != nil { panic(err) } j = append(j, &img{ File: filepath.Base(file), LgSize: lg.Bounds().Size(), SmSize: sm.Bounds().Size(), }) } o, err := json.MarshalIndent(j, "", " ") if err != nil { panic(err) } fmt.Printf("%s\n", o) }
// cgocc compiles all .c files.
// TODO(dfc) cxx not done
//
// For every C and C++ source it builds an Action whose Run closure invokes
// the appropriate compiler, and returns the actions plus the .o paths they
// will produce in the package's cgo work directory.
func cgocc(pkg *Package, cflags, cxxflags, cfiles, cxxfiles []string, deps ...*Action) ([]*Action, []string) {
	workdir := cgoworkdir(pkg)
	var cc []*Action
	var ofiles []string
	for _, cfile := range cfiles {
		// Shadow the loop variable so each closure captures its own copy
		// (required before Go 1.22 per-iteration loop variables).
		cfile := cfile
		ofile := filepath.Join(workdir, stripext(filepath.Base(cfile))+".o")
		ofiles = append(ofiles, ofile)
		cc = append(cc, &Action{
			Name: "rungcc1: " + pkg.ImportPath + ": " + cfile,
			Deps: deps,
			Run:  func() error { return rungcc1(pkg, cflags, ofile, cfile) },
		})
	}
	for _, cxxfile := range cxxfiles {
		// Shadow the loop variable for the closure, as above.
		cxxfile := cxxfile
		ofile := filepath.Join(workdir, stripext(filepath.Base(cxxfile))+".o")
		ofiles = append(ofiles, ofile)
		cc = append(cc, &Action{
			Name: "rung++1: " + pkg.ImportPath + ": " + cxxfile,
			Deps: deps,
			Run:  func() error { return rungpp1(pkg, cxxflags, ofile, cxxfile) },
		})
	}
	return cc, ofiles
}
func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } err = UntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } }
// Run a migration specified in raw SQL. // // Sections of the script can be annotated with a special comment, // starting with "-- +goose" to specify whether the section should // be applied during an Up or Down migration // // All statements following an Up or Down directive are grouped together // until another direction directive is found. func runSQLMigration(conf *DBConf, db *sql.DB, scriptFile string, v int64, direction bool) error { txn, err := db.Begin() if err != nil { log.Fatal("db.Begin:", err) } f, err := os.Open(scriptFile) if err != nil { log.Fatal(err) } // find each statement, checking annotations for up/down direction // and execute each of them in the current transaction. // Commits the transaction if successfully applied each statement and // records the version into the version table or returns an error and // rolls back the transaction. for _, query := range splitSQLStatements(f, direction) { if _, err = txn.Exec(query); err != nil { txn.Rollback() log.Fatalf("FAIL %s (%v), quitting migration.", filepath.Base(scriptFile), err) return err } } if err = FinalizeMigration(conf, txn, direction, v); err != nil { log.Fatalf("error finalizing migration %s, quitting. (%v)", filepath.Base(scriptFile), err) } return nil }
func (d *MemoryDriver) Rename(from_path string, to_path string) bool { if f, from_path_exists := d.Files[from_path]; from_path_exists { if _, to_path_exists := d.Files[to_path]; !to_path_exists { if _, to_path_parent_exists := d.Files[filepath.Dir(to_path)]; to_path_parent_exists { if f.File.IsDir() { delete(d.Files, from_path) d.Files[to_path] = &MemoryFile{graval.NewDirItem(filepath.Base(to_path)), nil} torename := make([]string, 0) for p, _ := range d.Files { if strings.HasPrefix(p, from_path+"/") { torename = append(torename, p) } } for _, p := range torename { sf := d.Files[p] delete(d.Files, p) np := to_path + p[len(from_path):] d.Files[np] = sf } } else { delete(d.Files, from_path) d.Files[to_path] = &MemoryFile{graval.NewFileItem(filepath.Base(to_path), f.File.Size(), f.File.ModTime()), f.Content} } return true } else { return false } } else { return false } } else { return false } }
// directory listing checks for existing index file
// and if exists processes like any other markdown file
// otherwise gets directory listing of html and md files
// and creates a "category" page using the category.html
// template file with array of .Pages
func getDirectoryListing(dir string) (html string, err error) {
	// check for index.md
	indexfile := dir + "/index.md"
	if _, err := os.Stat(indexfile); err == nil {
		return getMarkdownFile(indexfile)
	}

	// No index: synthesize a category page named after the directory.
	page := Page{}
	page.Title = filepath.Base(dir)
	page.Layout = "category"
	page.Category = filepath.Base(dir)

	// Collect the .html and .md entries of the directory.
	// NOTE(review): the ReadDir error is silently discarded; an unreadable
	// directory yields an empty listing.
	var files []string
	dirlist, _ := ioutil.ReadDir(dir)
	for _, fi := range dirlist {
		f := dir + "/" + fi.Name()
		ext := filepath.Ext(f)
		if ext == ".html" || ext == ".md" {
			files = append(files, f)
		}
	}

	// read markdown files to get title, date
	for _, f := range files {
		pg := readParseFile(f)
		filename := strings.Replace(f, ".md", ".html", 1)
		pg.Url = "/" + strings.Replace(filename, config.PublicDir, "", 1)
		page.Pages = append(page.Pages, pg)
	}
	page.Pages.Sort()
	html = applyTemplates(page)
	return html, err
}
// Read is like (*os.File).Read() // Visit http://golang.org/pkg/os/#File.Read for more information func (f *File) Read(bts []byte) (int, error) { if f.appendedF != nil { if f.appendedFileReader == nil { return 0, &os.PathError{ Op: "read", Path: filepath.Base(f.appendedF.zipFile.Name), Err: errors.New("file is closed"), } } if f.appendedF.dir { return 0, &os.PathError{ Op: "read", Path: filepath.Base(f.appendedF.zipFile.Name), Err: errors.New("is a directory"), } } return f.appendedFileReader.Read(bts) } if f.virtualF != nil { return f.virtualF.read(bts) } if f.virtualD != nil { return f.virtualD.read(bts) } return f.realF.Read(bts) }
// _tar_compress is the JS binding that tar-archives the path in argument 0
// into "<target>/<basename>.tar", where target is argument 1. Errors are
// surfaced to the otto VM via jsThrow.
func _tar_compress(call otto.FunctionCall) otto.Value {
	var (
		baseDir string
	)
	source, _ := call.Argument(0).ToString()
	target, _ := call.Argument(1).ToString()
	filename := filepath.Base(source)
	target = filepath.Join(target, fmt.Sprintf("%s.tar", filename))

	tarfile, err := os.Create(target)
	if err != nil {
		// NOTE(review): assumes jsThrow aborts execution (panics into the
		// VM); otherwise tarfile would be nil below — confirm.
		jsThrow(call, err)
	}
	defer tarfile.Close()

	tarball := tar.NewWriter(tarfile)
	defer tarball.Close()

	info, err := os.Stat(source)
	if err != nil {
		jsThrow(call, err)
	}
	// When the source is a directory, entries are prefixed with its base
	// name inside the archive.
	if info.IsDir() {
		baseDir = filepath.Base(source)
	}

	// NOTE(review): the error returned by filepath.Walk is assigned but
	// never checked — a partial archive is reported as success.
	err = filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		header, err := tar.FileInfoHeader(info, info.Name())
		if err != nil {
			return err
		}
		if baseDir != "" {
			header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source))
		}
		if err := tarball.WriteHeader(header); err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		file, err := os.Open(path)
		if err != nil {
			return err
		}
		defer file.Close()
		_, err = io.Copy(tarball, file)
		return err
	})
	return otto.Value{}
}