/* *copy 缺少json文件的绘本到目标目录 */ func supplyFile(jsonPath string, forDir string) { var fileMap map[string]string = make(map[string]string) _ = filepath.Walk(jsonPath, func(filename string, info os.FileInfo, err error) error { if !info.IsDir() { if _, ok := fileMap[info.Name()]; !ok { var value = jsonPath + "\\" + info.Name() fileMap[info.Name()] = value } } return nil }) _ = filepath.Walk(forDir, func(path string, info os.FileInfo, err error) error { if info.IsDir() { var fileName = path + "\\" + info.Name() + ".json" _, err := os.Stat(fileName) if err == nil || os.IsExist(err) { fmt.Println(fileName + " is exist") } else { if v, ok := fileMap[info.Name()+".json"]; ok { // fmt.Println("############# " + v) if _, err := CopyFile(fileName, v); err != nil { panic(err) } } else { fmt.Println("~~~~~~~~~~~~~ " + fileName + " is not exist") } } } return nil }) //fmt.Printf("%s\n", fileMap) }
func TagDirParallel(start_dir string) { //Setup fingerprint worker for w := 0; w < 3; w++ { go FingerprintWorker(w) } //Setup acousticid worker for w := 0; w < 3; w++ { go AcousticidWorker(w) } //Setup ID3 worker for w := 0; w < 3; w++ { go ID3Worker(w) } //Count the number of files to tag: filepath.Walk(start_dir, countMp3Files) log.Info("Need to tag %d mp3 files..", mp3FileCount) //Send the files to be fingerprinted filepath.Walk(start_dir, tagFileParallel) //Ensure all files have been processed for i := 0; i < mp3FileCount; i++ { <-processedChan } }
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (w *Walker) Walk() (chan protocol.FileInfo, error) {
	if debug {
		l.Debugln("Walk", w.Dir, w.Sub, w.BlockSize, w.IgnoreFile)
	}

	// Fail fast if the repository root is unusable.
	err := checkDir(w.Dir)
	if err != nil {
		return nil, err
	}

	// files carries discovered entries into the hasher pool; hashedFiles is
	// handed back to the caller and receives the hashed results.
	files := make(chan protocol.FileInfo)
	hashedFiles := make(chan protocol.FileInfo)
	newParallelHasher(w.Dir, w.BlockSize, runtime.NumCPU(), hashedFiles, files)

	var ignores []*regexp.Regexp
	go func() {
		// Collect ignore patterns first so the scan below can honor them.
		// Walk errors are ignored here — presumably a best-effort scan;
		// TODO confirm.
		filepath.Walk(w.Dir, w.loadIgnoreFiles(w.Dir, &ignores))
		hashFiles := w.walkAndHashFiles(files, ignores)
		// Only the configured subfolder (w.Sub, possibly empty) is scanned.
		filepath.Walk(filepath.Join(w.Dir, w.Sub), hashFiles)
		// Closing files signals the hasher that no more entries are coming.
		close(files)
	}()

	return hashedFiles, nil
}
func main() { var nogo = flag.Bool("nogo", false, "prevent running the 'go build' command") var f = flag.Bool("f", false, "force compilation of all haml files") var v = flag.Bool("v", false, "prints the name of the files as they are compiled") var clean = flag.Bool("clean", false, "cleans generated *.go files") flag.Parse() cfg := &ghamlConfig{ goBuildAfter: *nogo == false, forceCompile: *f, verbose: *v, clean: *clean, } wdStr, err := os.Getwd() if err != nil { panic("Can't get working directory") } if cfg.clean { filepath.Walk(wdStr, makeWalkFunc(checkFileForDeletion, cfg)) return } // create closure to pass our config into a WalkFunc filepath.Walk(wdStr, makeWalkFunc(checkFileForCompilation, cfg)) if cfg.goBuildAfter { runGoBuild(cfg) } }
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (w *Walker) Walk() (files []File, ignore map[string][]string, err error) {
	if debug {
		l.Debugln("Walk", w.Dir, w.BlockSize, w.IgnoreFile)
	}

	// Bail out early if the repository root is missing or unusable.
	err = checkDir(w.Dir)
	if err != nil {
		return
	}

	t0 := time.Now()

	ignore = make(map[string][]string)
	hashFiles := w.walkAndHashFiles(&files, ignore)
	// First pass collects ignore patterns; second pass hashes everything
	// not excluded by them. Walk errors are ignored — presumably a
	// best-effort scan; TODO confirm.
	filepath.Walk(w.Dir, w.loadIgnoreFiles(w.Dir, ignore))
	filepath.Walk(w.Dir, hashFiles)

	if debug {
		t1 := time.Now()
		d := t1.Sub(t0).Seconds()
		l.Debugf("Walk in %.02f ms, %.0f files/s", d*1000, float64(len(files))/d)
	}

	// Re-check the directory so a repository that vanished mid-scan is
	// reported as an error rather than as an empty file list.
	err = checkDir(w.Dir)
	return
}
// GodepWorkspace removes any Godeps/_workspace directories and makes sure
// any rewrites are undone.
// Note, this is not concuccency safe.
func GodepWorkspace(v string) error {
	// Record the vendor path in the package-level vPath used by the walk
	// handlers below.
	vPath = v
	if _, err := os.Stat(vPath); err != nil {
		if os.IsNotExist(err) {
			msg.Debug("Vendor directory does not exist.")
		}
		// NOTE(review): the stat error is returned even in the IsNotExist
		// case — confirm callers expect an error for a missing vendor
		// directory rather than a clean no-op.
		return err
	}

	// Strip _workspace directories; the handler presumably records affected
	// projects in godepMark — TODO confirm.
	err := filepath.Walk(vPath, stripGodepWorkspaceHandler)
	if err != nil {
		return err
	}

	// Walk the marked projects to make sure rewrites are undone.
	for k := range godepMark {
		msg.Info("Removing Godep rewrites for %s", k)
		err := filepath.Walk(k, rewriteGodepfilesHandler)
		if err != nil {
			return err
		}
	}

	return nil
}
// stdlib builds documentation for the standard library rooted at os.Args[2]
// plus any extra package roots given in the middle arguments, writing the
// output (pages, index, shared data) to the final argument.
func stdlib() {
	// Usage requires at least: <cmd> <sub> <goroot> <outdir>.
	if len(os.Args) < 4 {
		printHelpToStderr()
		os.Exit(1)
	}

	// Packages live under src/pkg in pre-Go 1 source trees.
	goroot := filepath.Join(os.Args[2], "src", "pkg")
	// Last argument is the output directory; everything between os.Args[3]
	// and it is an additional package root.
	outdir := os.Args[len(os.Args)-1]

	fmt.Printf("Building standard library documentation from '%s' to '%s'\n", goroot, outdir)
	// NOTE(review): this is the old three-argument filepath.Walk visitor
	// API (pre-Go 1); it will not compile against the modern standard
	// library.
	filepath.Walk(goroot, dirVisitor(outdir), nil)

	for _, pkgroot := range os.Args[3 : len(os.Args)-1] {
		fmt.Printf("Building documentation from '%s' to '%s'\n", pkgroot, outdir)
		filepath.Walk(pkgroot, dirVisitor(outdir), nil)
	}

	fmt.Println("Writing shared data...")
	run("gortfm", "-outdir", outdir)

	fmt.Println("Writing index page...")
	writeIndexPage(outdir)

	fmt.Println("Writing index page data...")
	writeIndexPageData(outdir)
}
// ProcessZip walks the extracted zip contents at outputZipPath twice: first
// treating every entry whose name lacks ".txt" as an image, then parsing
// every ".txt" entry as an image table. The path is removed when done.
func ProcessZip(currentUser m.User, outputZipPath string) {
	var imagesFound = 0
	var tablesFound = 0

	// First pass: images (anything without ".txt" in the name).
	var WalkImageCallback = func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return nil
		}
		if fi.IsDir() {
			return nil
		}
		if !strings.Contains(fi.Name(), ".txt") {
			imagesFound++
			log.Info(strconv.Itoa(imagesFound))
			file, err := os.Open(path)
			if err != nil {
				// log.Error("Failed to read image file from zip: " + err.Error())
				return nil
			}
			// NOTE(review): file is never closed here, unlike the table
			// callback below — confirm ProcessImage takes ownership of the
			// handle and closes it.
			ProcessImage(currentUser, file, nil, true)
		}
		return nil
	}

	// Second pass: image tables (".txt" files).
	var WalkITableCallback = func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return nil
		}
		if fi.IsDir() {
			return nil
		}
		if strings.Contains(fi.Name(), ".txt") {
			tablesFound++
			// log.Info(strconv.Itoa(tablesFound))
			file, err := os.Open(path)
			if err != nil {
				log.Error("Failed to read txt file from zip: " + err.Error())
				return nil
			}
			defer file.Close()
			ParseImageTable(currentUser, file)
		}
		return nil
	}

	log.Info("Received zip file for processing")
	filepath.Walk(outputZipPath, WalkImageCallback)
	log.Info("Images found in zip: " + strconv.Itoa(imagesFound))
	log.Info("Starting processing of image tables.")
	filepath.Walk(outputZipPath, WalkITableCallback)
	log.Info("Tables found in zip: " + strconv.Itoa(tablesFound))
	// Clean up the extracted contents once both passes are done.
	os.Remove(outputZipPath)
}
func UndoCmd(args []string) (err error) { if len(args) != 0 { return errors.New("Usage: gorf [flags] undo") } lastChangePath := filepath.Join(LocalRoot, ".change.0.gorfc") var srcFile *os.File srcFile, err = os.Open(lastChangePath) if err != nil { return } buf := make([]byte, 1024) var n int n, err = srcFile.Read(buf) fmt.Printf("Undoing \"%s\"\n", strings.TrimSpace(string(buf[:n]))) filepath.Walk(LocalRoot, undoscanner(0).Walk) ur := UndoRoller{incr: -1} filepath.Walk(LocalRoot, ur.Walk) return ur.err return }
// GenErrorInSrc regenerates error files under src_path: it first deletes all
// previously generated files (those ending in errgen_sfx), then parses every
// remaining .go file that is not inside a skipped directory.
func GenErrorInSrc(src_path string) {
	log.Println(src_path)
	//base_dir, _ := filepath.Abs(filepath.Dir(os.Args[0]))
	//base_dir = base_dir + src_path
	//src_path, _ = filepath.Abs(base_dir)
	filepath.Walk(src_path, func(path string, info os.FileInfo, err error) error {
		// Remove previously generated files (those carrying the errgen
		// suffix). (Comment translated from Russian.)
		if strings.HasSuffix(path, errgen_sfx) {
			println("remove: ", path)
			os.Remove(path)
		}
		return nil
	})

	filepath.Walk(src_path, func(path string, info os.FileInfo, err error) error {
		// Look at all *.go files, but skip the generated _err.gen.go ones.
		// (Comment translated from Russian.)
		if !strings.HasSuffix(path, ".go") || strings.HasSuffix(path, errgen_sfx) {
			return nil
		}
		// Skip excluded directories. (Comment translated from Russian.)
		for _, skip_path := range skip_paths {
			if strings.Contains(path, skip_path) {
				return nil
			}
		}
		parse_file(path)
		return nil
	})
}
// watch recursively watches changes in root and reports the filenames to names.
// It sends an error on the done chan.
// As an optimization, any dirs we encounter that meet the ExcludePrefix criteria of all reflexes can be
// ignored.
func watch(root string, watcher *fsnotify.Watcher, names chan<- string, done chan<- error, reflexes []*Reflex) {
	// Register watches for the whole tree up front; a failure is logged but
	// not fatal.
	if err := filepath.Walk(root, walker(watcher, reflexes)); err != nil {
		infoPrintf(-1, "Error while walking path %s: %s", root, err)
	}

	for {
		select {
		case e := <-watcher.Events:
			if verbose {
				infoPrintln(-1, "fsnotify event:", e)
			}
			stat, err := os.Stat(e.Name)
			if err != nil {
				// The file may already be gone (e.g. on remove events).
				continue
			}
			path := normalize(e.Name, stat.IsDir())
			// Skip events whose op bits fall entirely outside chmodMask —
			// presumably filtering chmod-only noise; chmodMask is defined
			// elsewhere, TODO confirm.
			if e.Op&chmodMask == 0 {
				continue
			}
			names <- path
			if e.Op&fsnotify.Create > 0 && stat.IsDir() {
				// A newly created directory needs its own recursive watches.
				if err := filepath.Walk(path, walker(watcher, reflexes)); err != nil {
					infoPrintf(-1, "Error while walking path %s: %s", path, err)
				}
			}
			// TODO: Cannot currently remove fsnotify watches recursively, or for deleted files. See:
			// https://github.com/cespare/reflex/issues/13
			// https://github.com/go-fsnotify/fsnotify/issues/40
			// https://github.com/go-fsnotify/fsnotify/issues/41
		case err := <-watcher.Errors:
			done <- err
			return
		}
	}
}
func (w *Watcher) ObserveDir() { error := filepath.Walk(w.Dir, w.walkPopulate) mirror := new(Watcher) if error != nil { log.Println("ON DAEMON: ", error) return } mirror.ModTimes = make(map[string]time.Time, 0) error = filepath.Walk(w.Dir, mirror.walkPopulate) if error != nil { log.Println("ON DAEMON: ", error) return } for { err := filepath.Walk(w.Dir, w.walkCheck) if err != nil { log.Println("ON DAEMON: ", err) return } time.Sleep(time.Second * 2) } }
// DiskSize returns the size on disk of this shard func (s *Shard) DiskSize() (int64, error) { var size int64 err := filepath.Walk(s.path, func(_ string, fi os.FileInfo, err error) error { if err != nil { return err } if !fi.IsDir() { size += fi.Size() } return err }) if err != nil { return 0, err } err = filepath.Walk(s.walPath, func(_ string, fi os.FileInfo, err error) error { if err != nil { return err } if !fi.IsDir() { size += fi.Size() } return err }) return size, err }
// main generates build metadata for a pre-Go 1 source tree rooted at the
// working directory: it scans src/pkg and src/cmd and emits make rules,
// dependency files and related artifacts for each discovered target.
func main() {
	wd, err := os.Getwd()
	if err != nil {
		panic(err)
	}

	// Library packages live under src/pkg.
	pkgDir := filepath.Clean(filepath.Join(wd, "src", "pkg"))
	pkgWalker := &srcWalker{srcDir: pkgDir, pkg: true}
	// NOTE(review): the three-argument filepath.Walk is the pre-Go 1
	// visitor API; this file predates the modern WalkFunc signature.
	filepath.Walk(pkgDir, pkgWalker, nil)
	pkgTargets := pkgWalker.finish()
	for pkg, target := range pkgTargets {
		makePkg(pkg, target, pkgTargets)
	}
	makeDirs(pkgDir, pkgTargets)
	makeDeps(pkgDir, pkgTargets)

	// Commands live under src/cmd; they may depend on the packages above.
	cmdDir := filepath.Clean(filepath.Join(wd, "src", "cmd"))
	cmdWalker := &srcWalker{srcDir: cmdDir, pkg: false}
	filepath.Walk(cmdDir, cmdWalker, nil)
	cmdTargets := cmdWalker.finish()
	for cmd, target := range cmdTargets {
		makeCmd(cmd, target, pkgTargets)
	}
	makeDirs(cmdDir, cmdTargets)
	makeGitIgnore(cmdDir, cmdTargets)
}
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]string) {
	ignore = make(map[string][]string)
	hashFiles := m.walkAndHashFiles(&files, ignore)

	// Load ignore patterns first so the hashing pass can honor them.
	filepath.Walk(m.dir, m.loadIgnoreFiles(ignore))
	filepath.Walk(m.dir, hashFiles)

	if followSymlinks {
		d, err := os.Open(m.dir)
		if err != nil {
			// Errors here silently return whatever was gathered so far.
			return
		}
		defer d.Close()

		fis, err := d.Readdir(-1)
		if err != nil {
			return
		}

		// Only top-level symlinks are handled here; filepath.Walk itself
		// does not descend into symlinked directories.
		for _, info := range fis {
			if info.Mode()&os.ModeSymlink != 0 {
				// The trailing "/" makes Walk treat the link target as a
				// directory to descend into.
				dir := path.Join(m.dir, info.Name()) + "/"
				filepath.Walk(dir, m.loadIgnoreFiles(ignore))
				filepath.Walk(dir, hashFiles)
			}
		}
	}

	return
}
// Walk scans dir with the generated walker and returns the files found.
// Unless disabled via opts.NoSymlinks, top-level symlinked directories are
// scanned as well (filepath.Walk does not follow them on its own).
func Walk(dir string, model *Model, followSymlinks bool) []File {
	var files []File
	fn := genWalker(dir, &files, model)
	err := filepath.Walk(dir, fn)
	if err != nil {
		warnln(err)
	}

	// NOTE(review): the followSymlinks parameter is never read; the global
	// opts.NoSymlinks flag is what controls symlink handling — confirm
	// which one is meant to be authoritative.
	if !opts.NoSymlinks {
		d, err := os.Open(dir)
		if err != nil {
			warnln(err)
			return files
		}
		defer d.Close()

		fis, err := d.Readdir(-1)
		if err != nil {
			warnln(err)
			return files
		}

		for _, fi := range fis {
			if fi.Mode()&os.ModeSymlink != 0 {
				// The trailing "/" forces Walk to descend into the link
				// target as a directory.
				err := filepath.Walk(path.Join(dir, fi.Name())+"/", fn)
				if err != nil {
					warnln(err)
				}
			}
		}
	}

	return files
}
// Scan scans all the plugin paths and returns all the names it found func Scan() ([]string, error) { var names []string if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { if err != nil { return nil } if fi.Mode()&os.ModeSocket != 0 { name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) names = append(names, name) } return nil }); err != nil { return nil, err } for _, path := range specsPaths { if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { if err != nil || fi.IsDir() { return nil } name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) names = append(names, name) return nil }); err != nil { return nil, err } } return names, nil }
// Walk returns the list of files found in the local folder by scanning the
// file system. Files are blockwise hashed.
func (w *Walker) Walk() (chan protocol.FileInfo, error) {
	if debug {
		l.Debugln("Walk", w.Dir, w.Subs, w.BlockSize, w.Matcher)
	}

	// Fail fast if the folder root is unusable.
	err := checkDir(w.Dir)
	if err != nil {
		return nil, err
	}

	// files feeds discovered entries into the hasher pool; hashedFiles is
	// handed back to the caller and receives the hashed results.
	files := make(chan protocol.FileInfo)
	hashedFiles := make(chan protocol.FileInfo)
	newParallelHasher(w.Dir, w.BlockSize, w.Hashers, hashedFiles, files)

	go func() {
		hashFiles := w.walkAndHashFiles(files, hashedFiles)
		if len(w.Subs) == 0 {
			// No subfolder restriction: scan the entire folder.
			filepath.Walk(w.Dir, hashFiles)
		} else {
			for _, sub := range w.Subs {
				filepath.Walk(filepath.Join(w.Dir, sub), hashFiles)
			}
		}
		// Closing files signals the hasher that no more entries are coming.
		close(files)
	}()

	return hashedFiles, nil
}
// getDirList returns every directory under the content, layout and static
// trees (plus the active theme's tree when one is set). Symbolic links are
// skipped with an error message.
func getDirList() []string {
	var a []string
	walker := func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			// Log and keep walking; one bad entry should not stop the scan.
			jww.ERROR.Println("Walker: ", err)
			return nil
		}

		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			jww.ERROR.Printf("Symbolic links not supported, skipping '%s'", path)
			return nil
		}

		if fi.IsDir() {
			a = append(a, path)
		}
		return nil
	}

	filepath.Walk(helpers.AbsPathify(viper.GetString("ContentDir")), walker)
	filepath.Walk(helpers.AbsPathify(viper.GetString("LayoutDir")), walker)
	filepath.Walk(helpers.AbsPathify(viper.GetString("StaticDir")), walker)
	if themeSet() {
		filepath.Walk(helpers.AbsPathify("themes/"+viper.GetString("theme")), walker)
	}

	return a
}
// Creates the FSChanges between sourceDir and destDir.
// To detect if a file was changed it checks the file's size and mtime (like
// rsync does by default if no --checksum options is used)
func (s *SimpleFSDiffer) Diff() (FSChanges, error) {
	changes := FSChanges{}
	sourceFileInfos := make(map[string]fileInfo)
	destFileInfos := make(map[string]fileInfo)

	// Index both trees by absolute path.
	err := filepath.Walk(s.sourceDir, fsWalker(sourceFileInfos))
	if err != nil {
		return nil, err
	}
	err = filepath.Walk(s.destDir, fsWalker(destFileInfos))
	if err != nil {
		return nil, err
	}

	// Entries present only in dest were added; size or mtime differences
	// mark modifications.
	for _, destInfo := range destFileInfos {
		relpath, _ := filepath.Rel(s.destDir, destInfo.Path)
		sourceInfo, ok := sourceFileInfos[filepath.Join(s.sourceDir, relpath)]
		if !ok {
			changes = append(changes, &FSChange{Path: relpath, ChangeType: Added})
		} else {
			// NOTE(review): only a strictly older source mtime counts as
			// modified here; rsync's default treats any mtime difference
			// as a change — confirm Before (rather than !Equal) is the
			// intended semantics.
			if sourceInfo.Size() != destInfo.Size() || sourceInfo.ModTime().Before(destInfo.ModTime()) {
				changes = append(changes, &FSChange{Path: relpath, ChangeType: Modified})
			}
		}
	}

	// Entries present only in source were deleted.
	for _, infoA := range sourceFileInfos {
		relpath, _ := filepath.Rel(s.sourceDir, infoA.Path)
		_, ok := destFileInfos[filepath.Join(s.destDir, relpath)]
		if !ok {
			changes = append(changes, &FSChange{Path: relpath, ChangeType: Deleted})
		}
	}

	return changes, nil
}
func watchFilesDarwin(top string, c chan fncMsg) { modTimes := make(map[string]time.Time) filepath.Walk(top, func(path string, info os.FileInfo, err error) error { if skipPath(path, info) { return nil } modTimes[path] = info.ModTime() return nil }) for { filepath.Walk(top, func(path string, info os.FileInfo, err error) error { if skipPath(path, info) { return nil } if ts, exists := modTimes[path]; exists { if info.ModTime().After(ts) { fmt.Println("changed", path, info) modTimes[path] = info.ModTime() c <- fncMsg{cmd: fncCmdFileChanged, path: path} return nil } } else { // new file fmt.Println("new file", path, info) modTimes[path] = info.ModTime() c <- fncMsg{cmd: fncCmdFileChanged, path: path} return nil } return nil }) time.Sleep(time.Second) } }
// watch registers fsnotify watches for the tree rooted at root, forwards
// event file names to names, adds watches for newly created entries and
// removes them for deleted ones. Watcher errors are sent on done, ending
// the loop.
func watch(root string, watcher *fsnotify.Watcher, names chan<- string, done chan<- error) {
	if err := filepath.Walk(root, walker(watcher)); err != nil {
		// TODO: handle this somehow?
		infoPrintf(-1, "Error while walking path %s: %s\n", root, err)
	}

	for {
		select {
		case e := <-watcher.Event:
			// Normalize "./foo" to "foo" before reporting.
			path := strings.TrimPrefix(e.Name, "./")
			names <- path
			if e.IsCreate() {
				// Newly created entries (possibly directories) need their
				// own watches.
				if err := filepath.Walk(path, walker(watcher)); err != nil {
					// TODO: handle this somehow?
					infoPrintf(-1, "Error while walking path %s: %s\n", path, err)
				}
			}
			if e.IsDelete() {
				watcher.RemoveWatch(path)
			}
		case err := <-watcher.Error:
			done <- err
			return
		}
	}
}
//comp file func backUp() { backUpFile := beego.AppName + "_" + time.Now().Format("060102") + ".zip" fw, err := os.Create(backUpFile) if err != nil { beego.Error(err) return } defer fw.Close() zw := zip.NewWriter(fw) defer zw.Close() walk := func(path string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() { return nil } src, _ := os.Open(path) defer src.Close() h := &zip.FileHeader{Name: path, Method: zip.Deflate, Flags: 0x800} fileName, _ := zw.CreateHeader(h) io.Copy(fileName, src) zw.Flush() return nil } if err := filepath.Walk(enum.CONST.DBNAME, walk); err != nil { beego.Error(err) return } if err := filepath.Walk(enum.CONST.UPLOADPATH, walk); err != nil { beego.Error(err) return } //file upload req := httplib.Post(url) req.Header("Authorization", dropboxAuthorization) req.Header("Dropbox-API-Arg", fmt.Sprintf(argF, beego.AppName, backUpFile)) req.Header("Content-Type", "application/octet-stream") data, err := ioutil.ReadFile(backUpFile) if err != nil { beego.Error(err) return } req.Body(data) resp, err := req.SendOut() if err != nil || resp.StatusCode != http.StatusOK { beego.Error(err, resp) } //file delete os.Remove(backUpFile) }
func main() { m := map[string]interface{}{} s := NewStack() filepath.Walk(*sourceDir, GatherJSON(s)) filepath.Walk(*sourceDir, GatherSource(s, m)) s.Add("", map[string]interface{}{*globalKey: m}) filepath.Walk(*sourceDir, Transform(s)) }
// Search the filesystem. func search() (err error) { if doMatchPath { err = filepath.Walk(root, walkFnPath) } else { err = filepath.Walk(root, walkFnName) } return }
/*
 * Export the container to a unshifted tarfile containing:
 * dir/
 *     metadata.yaml
 *     rootfs/
 */
func (c *containerLXD) ExportToTar(snap string, w io.Writer) error {
	// A running container can only be exported when exporting a snapshot
	// (snap non-empty).
	if snap == "" && c.IsRunning() {
		return fmt.Errorf("Cannot export a running container as image")
	}

	idmap, err := c.LastIdmapSetGet()
	if err != nil {
		return err
	}

	if idmap != nil {
		// Unshift the rootfs ownership for the export, and re-shift it when
		// we're done.
		if err := idmap.UnshiftRootfs(c.RootfsPathGet()); err != nil {
			return err
		}
		defer idmap.ShiftRootfs(c.RootfsPathGet())
	}

	tw := tar.NewWriter(w)

	// keep track of the first path we saw for each path with nlink>1
	linkmap := map[uint64]string{}

	cDir := c.PathGet("")

	// Path inside the tar image is the pathname starting after cDir
	offset := len(cDir) + 1

	writeToTar := func(path string, fi os.FileInfo, err error) error {
		if err := c.tarStoreFile(linkmap, offset, tw, path, fi); err != nil {
			shared.Debugf("Error tarring up %s: %s", path, err)
			return err
		}
		return nil
	}

	// metadata.yaml is optional; store it first when present. On failure
	// the tar writer is closed before returning so the output is flushed.
	fnam := filepath.Join(cDir, "metadata.yaml")
	if shared.PathExists(fnam) {
		fi, err := os.Lstat(fnam)
		if err != nil {
			shared.Debugf("Error statting %s during exportToTar", fnam)
			tw.Close()
			return err
		}
		if err := c.tarStoreFile(linkmap, offset, tw, fnam, fi); err != nil {
			shared.Debugf("Error writing to tarfile: %s", err)
			tw.Close()
			return err
		}
	}

	// The rootfs is always included; templates only when present. Walk
	// errors here are ignored — presumably best-effort; TODO confirm.
	fnam = filepath.Join(cDir, "rootfs")
	filepath.Walk(fnam, writeToTar)
	fnam = filepath.Join(cDir, "templates")
	if shared.PathExists(fnam) {
		filepath.Walk(fnam, writeToTar)
	}

	// Close writes the tar footer; its error is the function's result.
	return tw.Close()
}
func (bsState *BsState) processReload() { bsState.GoodWords = map[string]int{} bsState.BadWords = map[string]int{} bsState.BsProba = map[string]float64{} filepath.Walk(bsState.getStorage(true), bsState.walker(true)) filepath.Walk(bsState.getStorage(false), bsState.walker(false)) bsState.trainWithTextFile(bsState.getPhraseStorage(true), true) bsState.trainWithTextFile(bsState.getPhraseStorage(false), false) }
// RSyncWithDelete syncs srcDir to destDir
// Pass one deletes everything under destDirName that no longer exists in
// srcDirName; pass two recreates directories (preserving mode and times)
// and copies files whose contents differ.
func RSyncWithDelete(srcDirName, destDirName string) error {
	// first remove everything thats not in srcdir
	err := filepath.Walk(destDirName, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// relative to the root "destDirName"
		// NOTE(review): slicing leaves a leading separator in relPath when
		// destDirName has no trailing slash; filepath.Join tolerates that,
		// but filepath.Rel would be more robust — confirm inputs.
		relPath := path[len(destDirName):]
		if !FileExists(filepath.Join(srcDirName, relPath)) {
			if err := os.RemoveAll(path); err != nil {
				return err
			}
			if info.IsDir() {
				// The whole subtree is gone; no need to visit its children.
				return filepath.SkipDir
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	// then copy or update the data from srcdir to destdir
	err = filepath.Walk(srcDirName, func(src string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// relative to the root "srcDirName"
		relPath := src[len(srcDirName):]
		dst := filepath.Join(destDirName, relPath)
		if info.IsDir() {
			if err := os.MkdirAll(dst, info.Mode()); err != nil {
				return err
			}
			// this can panic. The alternative would be to use the "st, ok" pattern, and then if !ok... panic?
			st := info.Sys().(*syscall.Stat_t)
			ts := []syscall.Timespec{st.Atim, st.Mtim}
			// Preserve the directory's atime/mtime, rsync-style.
			return syscall.UtimesNano(dst, ts)
		}
		if !FilesAreEqual(src, dst) {
			// XXX: we should (eventually) use CopyFile here,
			// but we need to teach it about preserving
			// of atime/mtime and permissions
			output, err := exec.Command("cp", "-va", src, dst).CombinedOutput()
			if err != nil {
				return fmt.Errorf("Failed to copy %s to %s (%s)", src, dst, output)
			}
		}
		return nil
	})
	return err
}
// getDirList provides NewWatcher() with a list of directories to watch for changes.
func getDirList() []string {
	var a []string
	dataDir := helpers.AbsPathify(viper.GetString("DataDir"))
	layoutDir := helpers.AbsPathify(viper.GetString("LayoutDir"))
	walker := func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			// Missing data/layout directories are expected and only warned
			// about; any other walk error is logged and skipped.
			if path == dataDir && os.IsNotExist(err) {
				jww.WARN.Println("Skip DataDir:", err)
				return nil
			}
			if path == layoutDir && os.IsNotExist(err) {
				jww.WARN.Println("Skip LayoutDir:", err)
				return nil
			}
			jww.ERROR.Println("Walker: ", err)
			return nil
		}

		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			// Symlinks to regular files are tolerated (but not added);
			// symlinked directories are reported as unsupported.
			link, err := filepath.EvalSymlinks(path)
			if err != nil {
				jww.ERROR.Printf("Cannot read symbolic link '%s', error was: %s", path, err)
				return nil
			}
			linkfi, err := os.Stat(link)
			if err != nil {
				jww.ERROR.Printf("Cannot stat '%s', error was: %s", link, err)
				return nil
			}
			if !linkfi.Mode().IsRegular() {
				jww.ERROR.Printf("Symbolic links for directories not supported, skipping '%s'", path)
			}
			return nil
		}

		if fi.IsDir() {
			// Never watch VCS metadata or package-manager trees.
			if fi.Name() == ".git" || fi.Name() == "node_modules" || fi.Name() == "bower_components" {
				return filepath.SkipDir
			}
			a = append(a, path)
		}
		return nil
	}

	filepath.Walk(dataDir, walker)
	filepath.Walk(helpers.AbsPathify(viper.GetString("ContentDir")), walker)
	filepath.Walk(helpers.AbsPathify(viper.GetString("LayoutDir")), walker)
	filepath.Walk(helpers.AbsPathify(viper.GetString("StaticDir")), walker)
	if helpers.ThemeSet() {
		filepath.Walk(helpers.AbsPathify(viper.GetString("themesDir")+"/"+viper.GetString("theme")), walker)
	}

	return a
}
// serve renders the tutorial content from cdir into a temp working directory
// and serves it on :8000, with a /query reverse proxy to localhost:8093 and
// a JSON index at /tutorial/index.json. The content tree is re-rendered
// every two seconds; the temp directory is cleaned up on exit or SIGINT.
func serve(cdir string, tld string) {
	tempDir, _ := ioutil.TempDir("", "tut")
	tempDir += string(os.PathSeparator)
	defer os.RemoveAll(tempDir)
	clog.Log("Workdir %s", tempDir)

	// Remove the temp directory even when interrupted with Ctrl-C.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		<-c
		os.RemoveAll(tempDir)
		clog.Fatal("Stopped")
	}()

	// idx maps tutorial entries to an index, filled in by setup().
	var idx Index
	idx = make(map[string]int)
	walker := func(src string, f os.FileInfo, err error) error {
		return setup(tempDir, src, f, idx, err)
	}
	if err := filepath.Walk(cdir, walker); err != nil {
		clog.Fatalf("Filewalk %v", err)
	}

	// Serve the index as JSON.
	getindex := func(w http.ResponseWriter, r *http.Request) {
		json, err := json.Marshal(idx)
		if err != nil {
			clog.Fatalf("During Index JSON Marshal %v", err)
		}
		w.Header().Set("Content-Type", "application/json")
		w.Write(json)
	}
	http.HandleFunc("/tutorial/index.json", getindex)

	// Proxy /query through to the local backend on :8093.
	url, _ := url.Parse("http://localhost:8093")
	rp := httputil.NewSingleHostReverseProxy(url)
	http.Handle("/query", rp)

	// Static tutorial pages come from the rendered temp directory.
	fs := http.FileServer(http.Dir(tempDir + "/" + tld + "/"))
	http.Handle("/tutorial/", http.StripPrefix("/tutorial/", fs))
	http.Handle("/", http.RedirectHandler("/tutorial/index.html#1", 302))
	clog.Log("Running at http://localhost:8000/")

	// Re-render the content tree every two seconds so edits show up live.
	go func() {
		for {
			filepath.Walk(cdir, walker)
			time.Sleep(2 * time.Second)
		}
	}()

	// last step
	if err := http.ListenAndServe(":8000", nil); err != nil {
		clog.Fatalf("ListenAndServe %v", err)
	}
}