func init() {
	defer func() {
		if err := recover(); err != nil {
			// Ensure that the supported flag is disabled when we hit an
			// error, even though it should already be. Also, silently swallow
			// the error since it's fine for a system not to support symlinks.
			Supported = false
		}
	}()

	// Needs administrator privileges.
	// Let's check that everything works.
	// This could be done more officially:
	// http://stackoverflow.com/questions/2094663/determine-if-windows-process-has-privilege-to-create-symbolic-link
	// But I don't want to define 10 more structs just to look this up.

	base := os.TempDir()
	path := filepath.Join(base, "symlinktest")
	defer os.Remove(path)

	err := Create(path, base, protocol.FlagDirectory)
	if err != nil {
		return
	}

	stat, err := osutil.Lstat(path)
	if err != nil || stat.Mode()&os.ModeSymlink == 0 {
		return
	}

	target, flags, err := Read(path)
	if err != nil || osutil.NativeFilename(target) != base || flags&protocol.FlagDirectory == 0 {
		return
	}

	Supported = true
}
func checkDir(dir string) error {
	if info, err := osutil.Lstat(dir); err != nil {
		return err
	} else if !info.IsDir() {
		return errors.New(dir + ": not a directory")
	} else {
		l.Debugln("checkDir", dir, info)
	}
	return nil
}
// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (v External) Archive(filePath string) error {
	_, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		l.Debugln("not archiving nonexistent file", filePath)
		return nil
	} else if err != nil {
		return err
	}

	l.Debugln("archiving", filePath)

	inFolderPath, err := filepath.Rel(v.folderPath, filePath)
	if err != nil {
		return err
	}

	if v.command == "" {
		return errors.New("Versioner: command is empty, please enter a valid command")
	}

	cmd := exec.Command(v.command, v.folderPath, inFolderPath)
	env := os.Environ()
	// Filter STGUIAUTH and STGUIAPIKEY from the environment variables.
	filteredEnv := []string{}
	for _, x := range env {
		if !strings.HasPrefix(x, "STGUIAUTH=") && !strings.HasPrefix(x, "STGUIAPIKEY=") {
			filteredEnv = append(filteredEnv, x)
		}
	}
	cmd.Env = filteredEnv
	err = cmd.Run()
	if err != nil {
		return err
	}

	// Return an error if the external script did not remove the file.
	if _, err = osutil.Lstat(filePath); os.IsNotExist(err) {
		return nil
	}
	return errors.New("Versioner: file was not removed by external script")
}
func (t *Trashcan) cleanoutArchive() error {
	versionsDir := filepath.Join(t.folderPath, ".stversions")
	if _, err := osutil.Lstat(versionsDir); os.IsNotExist(err) {
		return nil
	}

	cutoff := time.Now().Add(time.Duration(-24*t.cleanoutDays) * time.Hour)
	currentDir := ""
	filesInDir := 0

	walkFn := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() {
			// We have entered a new directory. Let's check if the previous
			// directory was empty and try to remove it. We ignore failure for
			// the time being.
			if currentDir != "" && filesInDir == 0 {
				osutil.Remove(currentDir)
			}
			currentDir = path
			filesInDir = 0
			return nil
		}

		if info.ModTime().Before(cutoff) {
			// The file is too old; remove it.
			osutil.Remove(path)
		} else {
			// Keep this file, and remember it so we don't unnecessarily try
			// to remove this directory.
			filesInDir++
		}
		return nil
	}

	if err := filepath.Walk(versionsDir, walkFn); err != nil {
		return err
	}

	// The last directory seen by the walkFn may not have been removed as it
	// should be.
	if currentDir != "" && filesInDir == 0 {
		osutil.Remove(currentDir)
	}
	return nil
}
// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (t *Trashcan) Archive(filePath string) error {
	_, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		l.Debugln("not archiving nonexistent file", filePath)
		return nil
	} else if err != nil {
		return err
	}

	versionsDir := filepath.Join(t.folderPath, ".stversions")
	if _, err := os.Stat(versionsDir); err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		l.Debugln("creating versions dir", versionsDir)
		if err := osutil.MkdirAll(versionsDir, 0777); err != nil {
			return err
		}
		osutil.HideFile(versionsDir)
	}

	l.Debugln("archiving", filePath)

	relativePath, err := filepath.Rel(t.folderPath, filePath)
	if err != nil {
		return err
	}

	archivedPath := filepath.Join(versionsDir, relativePath)
	if err := osutil.MkdirAll(filepath.Dir(archivedPath), 0777); err != nil && !os.IsExist(err) {
		return err
	}

	l.Debugln("moving to", archivedPath)
	if err := osutil.Rename(filePath, archivedPath); err != nil {
		return err
	}

	// Set the mtime to the time the file was deleted. This is used by the
	// cleanout routine. If this fails things won't work optimally but there's
	// not much we can do about it so we ignore the error.
	os.Chtimes(archivedPath, time.Now(), time.Now())

	return nil
}
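// The snippet below is a hypothetical illustration, not part of the Trashcan
// type: it shows the path mapping Archive performs. A file keeps its position
// relative to the folder root, but is moved under .stversions. The concrete
// paths are made up for the example, and fmt/filepath are assumed imports.
func exampleTrashcanPath() {
	folder := "/data/folder"
	filePath := "/data/folder/docs/report.txt"

	relativePath, _ := filepath.Rel(folder, filePath)                  // "docs/report.txt"
	archivedPath := filepath.Join(folder, ".stversions", relativePath) // "/data/folder/.stversions/docs/report.txt"
	fmt.Println(archivedPath)
}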
// normalizePath returns the normalized relative path (possibly after fixing
// it on disk), or sets skip to true if the file could not be normalized and
// should be skipped.
func (w *Walker) normalizePath(absPath, relPath string) (normPath string, skip bool) {
	if runtime.GOOS == "darwin" {
		// Mac OS X file names should always be NFD normalized.
		normPath = norm.NFD.String(relPath)
	} else {
		// Every other OS in the known universe uses NFC or just plain
		// doesn't bother to define an encoding. In our case *we* do care,
		// so we enforce NFC regardless.
		normPath = norm.NFC.String(relPath)
	}

	if relPath != normPath {
		// The file name was not normalized.

		if !w.AutoNormalize {
			// We're not authorized to do anything about it, so complain and skip.
			l.Warnf("File name %q is not in the correct UTF8 normalization form; skipping.", relPath)
			return "", true
		}

		// We will attempt to normalize it.
		normalizedPath := filepath.Join(w.Dir, normPath)
		if _, err := osutil.Lstat(normalizedPath); os.IsNotExist(err) {
			// Nothing exists with the normalized filename. Good.
			if err = os.Rename(absPath, normalizedPath); err != nil {
				l.Infof(`Error normalizing UTF8 encoding of file "%s": %v`, relPath, err)
				return "", true
			}
			l.Infof(`Normalized UTF8 encoding of file name "%s".`, relPath)
		} else {
			// There is something already in the way at the normalized
			// file name.
			l.Infof(`File "%s" has UTF8 encoding conflict with another file; ignoring.`, relPath)
			return "", true
		}

		relPath = normPath
	}

	return normPath, false
}
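// The example below is a hypothetical illustration, not part of the walker:
// it shows why normalization matters. The same visible file name can be
// encoded precomposed (NFC) or decomposed (NFD), so a plain byte comparison
// fails unless one form is enforced. It assumes fmt and
// golang.org/x/text/unicode/norm (imported as norm, as above) are available.
func exampleNormalizationForms() {
	decomposed := "e\u0301xample.txt" // 'e' followed by a combining acute accent
	precomposed := "\u00e9xample.txt" // a single precomposed 'é'

	fmt.Println(decomposed == precomposed)                  // false: the bytes differ
	fmt.Println(norm.NFC.String(decomposed) == precomposed) // true after NFC normalization
	fmt.Println(norm.NFD.String(precomposed) == decomposed) // true after NFD normalization
}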
// cleanConfigDirectory removes old, unused configuration and index formats, a
// suitable time after they have gone out of fashion.
func cleanConfigDirectory() {
	patterns := map[string]time.Duration{
		"panic-*.log":      7 * 24 * time.Hour,  // keep panic logs for a week
		"audit-*.log":      7 * 24 * time.Hour,  // keep audit logs for a week
		"index":            14 * 24 * time.Hour, // keep old index format for two weeks
		"index*.converted": 14 * 24 * time.Hour, // keep old converted indexes for two weeks
		"config.xml.v*":    30 * 24 * time.Hour, // old config versions for a month
		"*.idx.gz":         30 * 24 * time.Hour, // these should for sure no longer exist
		"backup-of-v0.8":   30 * 24 * time.Hour, // these neither
	}

	for pat, dur := range patterns {
		pat = filepath.Join(baseDirs["config"], pat)
		files, err := osutil.Glob(pat)
		if err != nil {
			l.Infoln("Cleaning:", err)
			continue
		}

		for _, file := range files {
			info, err := osutil.Lstat(file)
			if err != nil {
				l.Infoln("Cleaning:", err)
				continue
			}

			if time.Since(info.ModTime()) > dur {
				if err = os.RemoveAll(file); err != nil {
					l.Infoln("Cleaning:", err)
				} else {
					l.Infoln("Cleaned away old file", filepath.Base(file))
				}
			}
		}
	}
}
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile, hasCurFile := p.model.CurrentFolderFile(p.folder, file.Name)

	if hasCurFile && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		l.Debugln(p, "taking shortcut on", file.Name)

		events.Default.Log(events.ItemStarted, map[string]string{
			"folder": p.folder,
			"item":   file.Name,
			"type":   "file",
			"action": "metadata",
		})

		p.queue.Done(file.Name)

		var err error
		if file.IsSymlink() {
			err = p.shortcutSymlink(file)
		} else {
			err = p.shortcutFile(file)
		}

		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "metadata",
		})

		if err != nil {
			l.Infoln("Puller: shortcut:", err)
			p.newError(file.Name, err)
		} else {
			p.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
		}

		return
	}

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(p.dir, file.Name)

	if hasCurFile && !curFile.IsDirectory() && !curFile.IsSymlink() {
		// Check that the file on disk is what we expect it to be according to
		// the database. If there's a mismatch here, there might be local
		// changes that we don't know about yet and we should scan before
		// touching the file. If we can't stat the file we'll just pull it.
		if info, err := osutil.Lstat(realName); err == nil {
			mtime := p.virtualMtimeRepo.GetMtime(file.Name, info.ModTime())
			if mtime.Unix() != curFile.Modified || info.Size() != curFile.Size() {
				l.Debugln("file modified but not rescanned; not pulling:", realName)
				// Scan() is synchronous (i.e. blocks until the scan is
				// completed and returns an error), but a scan can't happen
				// while we're in the puller routine. Request the scan in the
				// background and it'll be handled when the current pulling
				// sweep is complete. As we do retries, we'll queue the scan
				// for this file up to ten times, but the last nine of those
				// scans will be cheap...
				go p.Scan([]string{file.Name})
				return
			}
		}
	}

	scanner.PopulateOffsets(file.Blocks)

	reused := 0
	var blocks []protocol.BlockInfo
	var blocksSize int64

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize, 0, nil)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]struct{}, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = struct{}{}
		}

		// Since the blocks are already there, we don't need to get them.
		for _, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
				blocksSize += int64(block.Size)
			}
		}

		// The sharedpullerstate will know which flags to use when opening the
		// temp file, depending on whether we are reusing any blocks or not.
		reused = len(file.Blocks) - len(blocks)
		if reused == 0 {
			// Nothing is reusable, so discard the temp file ourselves in
			// order for the sharedpuller not to panic when it fails to
			// exclusively create a file which already exists.
			osutil.InWritableDir(osutil.Remove, tempName)
		}
	} else {
		blocks = file.Blocks
		blocksSize = file.Size()
	}

	if p.checkFreeSpace {
		if free, err := osutil.DiskFreeBytes(p.dir); err == nil && free < blocksSize {
			l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, p.folder, p.dir, file.Name, float64(free)/1024/1024, float64(blocksSize)/1024/1024)
			p.newError(file.Name, errors.New("insufficient space"))
			return
		}
	}

	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "file",
		"action": "update",
	})

	s := sharedPullerState{
		file:        file,
		folder:      p.folder,
		tempName:    tempName,
		realName:    realName,
		copyTotal:   len(blocks),
		copyNeeded:  len(blocks),
		reused:      reused,
		ignorePerms: p.ignorePermissions(file),
		version:     curFile.Version,
		mut:         sync.NewMutex(),
		sparse:      p.allowSparse,
	}

	l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}
// handleDir creates or updates the given directory
func (p *rwFolder) handleDir(file protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "dir",
		"action": "update",
	})

	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "dir",
			"action": "update",
		})
	}()

	realName := filepath.Join(p.dir, file.Name)
	mode := os.FileMode(file.Flags & 0777)
	if p.ignorePermissions(file) {
		mode = 0777
	}

	if shouldDebug() {
		curFile, _ := p.model.CurrentFolderFile(p.folder, file.Name)
		l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
	}

	info, err := osutil.Lstat(realName)
	switch {
	// There is already something under that name, but it's a file/link.
	// Most likely a file/link is getting replaced with a directory.
	// Remove the file/link and fall through to directory creation.
	case err == nil && (!info.IsDir() || info.Mode()&os.ModeSymlink != 0):
		err = osutil.InWritableDir(osutil.Remove, realName)
		if err != nil {
			l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
			p.newError(file.Name, err)
			return
		}
		fallthrough
	// The directory doesn't exist, so we create it with the right
	// mode bits from the start.
	case err != nil && os.IsNotExist(err):
		// We declare a function that acts on only the path name, so
		// we can pass it to InWritableDir. We use a regular Mkdir and
		// not MkdirAll because the parent should already exist.
		mkdir := func(path string) error {
			err = os.Mkdir(path, mode)
			if err != nil || p.ignorePermissions(file) {
				return err
			}

			// Stat the directory so we can check its permissions.
			info, err := osutil.Lstat(path)
			if err != nil {
				return err
			}

			// Mask for the bits we want to preserve and add them in to the
			// directory's permissions.
			return os.Chmod(path, mode|(info.Mode()&retainBits))
		}

		if err = osutil.InWritableDir(mkdir, realName); err == nil {
			p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
		} else {
			l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
			p.newError(file.Name, err)
		}
		return
	// Weird error when stat()'ing the dir. Probably won't work to do
	// anything else with it if we can't even stat() it.
	case err != nil:
		l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
		p.newError(file.Name, err)
		return
	}

	// The directory already exists, so we just correct the mode bits. (We
	// don't handle modification times on directories, because that sucks...)
	// It's OK to change mode bits on stuff within non-writable directories.
	if p.ignorePermissions(file) {
		p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
	} else if err := os.Chmod(realName, mode|(info.Mode()&retainBits)); err == nil {
		p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
	} else {
		l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
		p.newError(file.Name, err)
	}
}
func (p *rwFolder) performFinish(state *sharedPullerState) error {
	// Set the correct permission bits on the new file
	if !p.ignorePermissions(state.file) {
		if err := os.Chmod(state.tempName, os.FileMode(state.file.Flags&0777)); err != nil {
			return err
		}
	}

	// Set the correct timestamp on the new file
	t := time.Unix(state.file.Modified, 0)
	if err := os.Chtimes(state.tempName, t, t); err != nil {
		// Try using virtual mtimes instead
		info, err := os.Stat(state.tempName)
		if err != nil {
			return err
		}
		p.virtualMtimeRepo.UpdateMtime(state.file.Name, info.ModTime(), t)
	}

	if stat, err := osutil.Lstat(state.realName); err == nil {
		// There is an old file or directory already in place. We need to
		// handle that.

		switch {
		case stat.IsDir() || stat.Mode()&os.ModeSymlink != 0:
			// It's a directory or a symlink. These are not versioned or
			// archived for conflicts, only removed (which of course fails for
			// non-empty directories).

			// TODO: This is the place where we want to remove temporary files
			// and future hard ignores before attempting a directory delete.
			// Should share code with p.deleteDir().

			if err = osutil.InWritableDir(osutil.Remove, state.realName); err != nil {
				return err
			}

		case p.inConflict(state.version, state.file.Version):
			// The new file has been changed in conflict with the existing one. We
			// should file it away as a conflict instead of just removing or
			// archiving. Also merge with the version vector we had, to indicate
			// we have resolved the conflict.

			state.file.Version = state.file.Version.Merge(state.version)
			if err = osutil.InWritableDir(p.moveForConflict, state.realName); err != nil {
				return err
			}

		case p.versioner != nil:
			// If we should use versioning, let the versioner archive the old
			// file before we replace it. Archiving a non-existent file is not
			// an error.

			if err = p.versioner.Archive(state.realName); err != nil {
				return err
			}
		}
	}

	// Replace the original content with the new one
	if err := osutil.Rename(state.tempName, state.realName); err != nil {
		return err
	}

	// If it's a symlink, the target of the symlink is inside the file.
	if state.file.IsSymlink() {
		content, err := ioutil.ReadFile(state.realName)
		if err != nil {
			return err
		}

		// Remove the file, and replace it with a symlink.
		err = osutil.InWritableDir(func(path string) error {
			os.Remove(path)
			tt := symlinks.TargetFile
			if state.file.IsDirectory() {
				tt = symlinks.TargetDirectory
			}
			return symlinks.Create(path, string(content), tt)
		}, state.realName)
		if err != nil {
			return err
		}
	}

	// Record the updated file in the index
	p.dbUpdates <- dbUpdateJob{state.file, dbUpdateHandleFile}
	return nil
}
// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (v Simple) Archive(filePath string) error {
	fileInfo, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		l.Debugln("not archiving nonexistent file", filePath)
		return nil
	} else if err != nil {
		return err
	}

	versionsDir := filepath.Join(v.folderPath, ".stversions")
	_, err = os.Stat(versionsDir)
	if err != nil {
		if os.IsNotExist(err) {
			l.Debugln("creating versions dir", versionsDir)
			osutil.MkdirAll(versionsDir, 0755)
			osutil.HideFile(versionsDir)
		} else {
			return err
		}
	}

	l.Debugln("archiving", filePath)

	file := filepath.Base(filePath)
	inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
	if err != nil {
		return err
	}

	dir := filepath.Join(versionsDir, inFolderPath)
	err = osutil.MkdirAll(dir, 0755)
	if err != nil && !os.IsExist(err) {
		return err
	}

	ver := taggedFilename(file, fileInfo.ModTime().Format(TimeFormat))
	dst := filepath.Join(dir, ver)
	l.Debugln("moving to", dst)
	err = osutil.Rename(filePath, dst)
	if err != nil {
		return err
	}

	// Glob according to the new file~timestamp.ext pattern.
	pattern := filepath.Join(dir, taggedFilename(file, TimeGlob))
	newVersions, err := osutil.Glob(pattern)
	if err != nil {
		l.Warnln("globbing:", err, "for", pattern)
		return nil
	}

	// Also according to the old file.ext~timestamp pattern.
	pattern = filepath.Join(dir, file+"~"+TimeGlob)
	oldVersions, err := osutil.Glob(pattern)
	if err != nil {
		l.Warnln("globbing:", err, "for", pattern)
		return nil
	}

	// Use all the found filenames. "~" sorts after "." so all old pattern
	// files will be deleted before any new, which is as it should be.
	versions := uniqueSortedStrings(append(oldVersions, newVersions...))
	if len(versions) > v.keep {
		for _, toRemove := range versions[:len(versions)-v.keep] {
			l.Debugln("cleaning out", toRemove)
			err = os.Remove(toRemove)
			if err != nil {
				l.Warnln("removing old version:", err)
			}
		}
	}

	return nil
}
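// The sketch below illustrates the version-naming scheme referenced in the
// globbing comments above ("file~timestamp.ext" for new versions,
// "file.ext~timestamp" for old ones). It is a hypothetical stand-in for the
// package's taggedFilename helper, not its actual implementation, and the
// yyyymmdd-hhmmss tag layout shown in the comments is an assumption. Assumes
// path/filepath and strings are imported.
func exampleTaggedFilename(name, tag string) string {
	ext := filepath.Ext(name)             // ".txt" for "report.txt"
	base := strings.TrimSuffix(name, ext) // "report"
	return base + "~" + tag + ext         // e.g. "report~20150101-120000.txt"
}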
// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (v Staggered) Archive(filePath string) error {
	l.Debugln("Waiting for lock on ", v.versionsPath)
	v.mutex.Lock()
	defer v.mutex.Unlock()

	_, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		l.Debugln("not archiving nonexistent file", filePath)
		return nil
	} else if err != nil {
		return err
	}

	if _, err := os.Stat(v.versionsPath); err != nil {
		if os.IsNotExist(err) {
			l.Debugln("creating versions dir", v.versionsPath)
			osutil.MkdirAll(v.versionsPath, 0755)
			osutil.HideFile(v.versionsPath)
		} else {
			return err
		}
	}

	l.Debugln("archiving", filePath)

	file := filepath.Base(filePath)
	inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
	if err != nil {
		return err
	}

	dir := filepath.Join(v.versionsPath, inFolderPath)
	err = osutil.MkdirAll(dir, 0755)
	if err != nil && !os.IsExist(err) {
		return err
	}

	ver := taggedFilename(file, time.Now().Format(TimeFormat))
	dst := filepath.Join(dir, ver)
	l.Debugln("moving to", dst)
	err = osutil.Rename(filePath, dst)
	if err != nil {
		return err
	}

	// Glob according to the new file~timestamp.ext pattern.
	pattern := filepath.Join(dir, taggedFilename(file, TimeGlob))
	newVersions, err := osutil.Glob(pattern)
	if err != nil {
		l.Warnln("globbing:", err, "for", pattern)
		return nil
	}

	// Also according to the old file.ext~timestamp pattern.
	pattern = filepath.Join(dir, file+"~"+TimeGlob)
	oldVersions, err := osutil.Glob(pattern)
	if err != nil {
		l.Warnln("globbing:", err, "for", pattern)
		return nil
	}

	// Use all the found filenames.
	versions := append(oldVersions, newVersions...)
	v.expire(uniqueSortedStrings(versions))

	return nil
}
func (v Staggered) expire(versions []string) {
	l.Debugln("Versioner: Expiring versions", versions)

	var prevAge int64
	firstFile := true
	for _, file := range versions {
		fi, err := osutil.Lstat(file)
		if err != nil {
			l.Warnln("versioner:", err)
			continue
		}

		if fi.IsDir() {
			l.Infof("non-file %q is named like a file version", file)
			continue
		}

		loc, _ := time.LoadLocation("Local")
		versionTime, err := time.ParseInLocation(TimeFormat, filenameTag(file), loc)
		if err != nil {
			l.Debugf("Versioner: file name %q is invalid: %v", file, err)
			continue
		}
		age := int64(time.Since(versionTime).Seconds())

		// If the file is older than the max age of the last interval, remove it
		if lastIntv := v.interval[len(v.interval)-1]; lastIntv.end > 0 && age > lastIntv.end {
			l.Debugln("Versioner: File over maximum age -> delete ", file)
			err = os.Remove(file)
			if err != nil {
				l.Warnf("Versioner: can't remove %q: %v", file, err)
			}
			continue
		}

		// If it's the first (oldest) file in the list we can skip the interval checks
		if firstFile {
			prevAge = age
			firstFile = false
			continue
		}

		// Find the interval the file fits in
		var usedInterval Interval
		for _, usedInterval = range v.interval {
			if age < usedInterval.end {
				break
			}
		}

		if prevAge-age < usedInterval.step {
			l.Debugln("too many files in step -> delete", file)
			err = os.Remove(file)
			if err != nil {
				l.Warnf("Versioner: can't remove %q: %v", file, err)
			}
			continue
		}

		prevAge = age
	}
}
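// Hypothetical illustration of the interval table that expire walks. The
// concrete numbers are assumptions for this sketch, not the versioner's
// actual defaults. Each Interval keeps at most one version per step seconds
// for files younger than end seconds; a file older than the last interval's
// end (when that end is > 0) is deleted outright, matching the lastIntv.end
// check above, while an end of 0 means no maximum age.
var exampleIntervals = []Interval{
	{step: 30, end: 3600},       // up to one version per 30 s for the first hour
	{step: 3600, end: 86400},    // hourly for the first day
	{step: 86400, end: 2592000}, // daily for the first 30 days
	{step: 604800, end: 0},      // weekly thereafter, with no maximum age
}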