func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.WalkFunc {
	now := time.Now()
	return func(p string, info os.FileInfo, err error) error {
		// Return value used when we are returning early and don't want to
		// process the item. For directories, this means do-not-descend.
		var skip error // nil
		// info nil when error is not nil
		if info != nil && info.IsDir() {
			skip = filepath.SkipDir
		}

		if err != nil {
			if debug {
				l.Debugln("error:", p, info, err)
			}
			return skip
		}

		rn, err := filepath.Rel(w.Dir, p)
		if err != nil {
			if debug {
				l.Debugln("rel error:", p, err)
			}
			return skip
		}

		if rn == "." {
			return nil
		}

		mtime := info.ModTime()
		if w.MtimeRepo != nil {
			mtime = w.MtimeRepo.GetMtime(rn, mtime)
		}

		if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) {
			// A temporary file
			if debug {
				l.Debugln("temporary:", rn)
			}
			if info.Mode().IsRegular() && mtime.Add(w.TempLifetime).Before(now) {
				os.Remove(p)
				if debug {
					l.Debugln("removing temporary:", rn, mtime)
				}
			}
			return nil
		}

		if sn := filepath.Base(rn); sn == ".stignore" || sn == ".stfolder" ||
			strings.HasPrefix(rn, ".stversions") || (w.Matcher != nil && w.Matcher.Match(rn)) {
			// An ignored file
			if debug {
				l.Debugln("ignored:", rn)
			}
			return skip
		}

		if !utf8.ValidString(rn) {
			l.Warnf("File name %q is not in UTF8 encoding; skipping.", rn)
			return skip
		}

		var normalizedRn string
		if runtime.GOOS == "darwin" {
			// Mac OS X file names should always be NFD normalized.
			normalizedRn = norm.NFD.String(rn)
		} else {
			// Every other OS in the known universe uses NFC or just plain
			// doesn't bother to define an encoding. In our case *we* do care,
			// so we enforce NFC regardless.
			normalizedRn = norm.NFC.String(rn)
		}

		if rn != normalizedRn {
			// The file name was not normalized.

			if !w.AutoNormalize {
				// We're not authorized to do anything about it, so complain and skip.
				l.Warnf("File name %q is not in the correct UTF8 normalization form; skipping.", rn)
				return skip
			}

			// We will attempt to normalize it.
			normalizedPath := filepath.Join(w.Dir, normalizedRn)
			if _, err := osutil.Lstat(normalizedPath); os.IsNotExist(err) {
				// Nothing exists with the normalized filename. Good.
				if err = os.Rename(p, normalizedPath); err != nil {
					l.Infof(`Error normalizing UTF8 encoding of file "%s": %v`, rn, err)
					return skip
				}
				l.Infof(`Normalized UTF8 encoding of file name "%s".`, rn)
			} else {
				// There is something already in the way at the normalized
				// file name.
				l.Infof(`File "%s" has UTF8 encoding conflict with another file; ignoring.`, rn)
				return skip
			}

			rn = normalizedRn
		}

		var cf protocol.FileInfo
		var ok bool

		// Index-wise, symlinks are always files, regardless of what the target
		// is, because symlinks carry their target path as their content.
		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
			// If the target is a directory, do NOT descend down there. This
			// will cause files to get tracked, and removing the symlink will
			// as a result remove files in their real location.
			if !symlinks.Supported {
				return skip
			}

			// We always rehash symlinks as they have no modtime or
			// permissions. We check if they point to the old target by
			// checking that their existing blocks match with the blocks in
			// the index.
			target, targetType, err := symlinks.Read(p)
			if err != nil {
				if debug {
					l.Debugln("readlink error:", p, err)
				}
				return skip
			}

			blocks, err := Blocks(strings.NewReader(target), w.BlockSize, 0, nil)
			if err != nil {
				if debug {
					l.Debugln("hash link error:", p, err)
				}
				return skip
			}

			if w.CurrentFiler != nil {
				// A symlink is "unchanged", if
				//  - it exists
				//  - it wasn't deleted (because it isn't now)
				//  - it was a symlink
				//  - it wasn't invalid
				//  - the symlink type (file/dir) was the same
				//  - the block list (i.e. hash of target) was the same
				cf, ok = w.CurrentFiler.CurrentFile(rn)
				if ok && !cf.IsDeleted() && cf.IsSymlink() && !cf.IsInvalid() &&
					SymlinkTypeEqual(targetType, cf) && BlocksEqual(cf.Blocks, blocks) {
					return skip
				}
			}

			f := protocol.FileInfo{
				Name:     rn,
				Version:  cf.Version.Update(w.ShortID),
				Flags:    uint32(protocol.FlagSymlink | protocol.FlagNoPermBits | 0666 | SymlinkFlags(targetType)),
				Modified: 0,
				Blocks:   blocks,
			}

			if debug {
				l.Debugln("symlink changedb:", p, f)
			}

			dchan <- f

			return skip
		}

		if info.Mode().IsDir() {
			if w.CurrentFiler != nil {
				// A directory is "unchanged", if it
				//  - exists
				//  - has the same permissions as previously, unless we are ignoring permissions
				//  - was not marked deleted (since it apparently exists now)
				//  - was a directory previously (not a file or something else)
				//  - was not a symlink (since it's a directory now)
				//  - was not invalid (since it looks valid now)
				cf, ok = w.CurrentFiler.CurrentFile(rn)
				permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, uint32(info.Mode()))
				if ok && permUnchanged && !cf.IsDeleted() && cf.IsDirectory() && !cf.IsSymlink() && !cf.IsInvalid() {
					return nil
				}
			}

			flags := uint32(protocol.FlagDirectory)
			if w.IgnorePerms {
				flags |= protocol.FlagNoPermBits | 0777
			} else {
				flags |= uint32(info.Mode() & maskModePerm)
			}
			f := protocol.FileInfo{
				Name:     rn,
				Version:  cf.Version.Update(w.ShortID),
				Flags:    flags,
				Modified: mtime.Unix(),
			}
			if debug {
				l.Debugln("dir:", p, f)
			}
			dchan <- f
			return nil
		}

		if info.Mode().IsRegular() {
			curMode := uint32(info.Mode())
			if runtime.GOOS == "windows" && osutil.IsWindowsExecutable(rn) {
				curMode |= 0111
			}

			if w.CurrentFiler != nil {
				// A file is "unchanged", if it
				//  - exists
				//  - has the same permissions as previously, unless we are ignoring permissions
				//  - was not marked deleted (since it apparently exists now)
				//  - had the same modification time as it has now
				//  - was not a directory previously (since it's a file now)
				//  - was not a symlink (since it's a file now)
				//  - was not invalid (since it looks valid now)
				//  - has the same size as previously
				cf, ok = w.CurrentFiler.CurrentFile(rn)
				permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, curMode)
				if ok && permUnchanged && !cf.IsDeleted() && cf.Modified == mtime.Unix() &&
					!cf.IsDirectory() && !cf.IsSymlink() && !cf.IsInvalid() && cf.Size() == info.Size() {
					return nil
				}

				if debug {
					l.Debugln("rescan:", cf, mtime.Unix(), info.Mode()&os.ModePerm)
				}
			}

			var flags = curMode & uint32(maskModePerm)
			if w.IgnorePerms {
				flags = protocol.FlagNoPermBits | 0666
			}

			f := protocol.FileInfo{
				Name:       rn,
				Version:    cf.Version.Update(w.ShortID),
				Flags:      flags,
				Modified:   mtime.Unix(),
				CachedSize: info.Size(),
			}
			if debug {
				l.Debugln("to hash:", p, f)
			}
			fchan <- f
		}

		return nil
	}
}
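// The following is a hypothetical usage sketch, not part of the original
// source: it assumes the Walker type and the channel contract above and shows
// one plausible way the returned WalkFunc could be wired up. The walker sends
// complete entries (directories, symlinks) on dchan and regular files that
// still need hashing on fchan, so both channels must be drained concurrently
// while filepath.Walk runs. The method name walkSketch is an assumption for
// illustration only.
func (w *Walker) walkSketch() {
	fchan := make(chan protocol.FileInfo) // regular files that still need their blocks hashed
	dchan := make(chan protocol.FileInfo) // directories and symlinks, complete as produced

	go func() {
		// Drive the WalkFunc over the folder root; close both channels when done.
		filepath.Walk(w.Dir, w.walkAndHashFiles(fchan, dchan))
		close(fchan)
		close(dchan)
	}()

	go func() {
		for f := range fchan {
			// A hashing stage would compute f.Blocks here before the entry
			// is announced to the index.
			_ = f
		}
	}()

	for f := range dchan {
		// Directories and symlinks already carry everything the index needs.
		_ = f
	}
}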
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)

	if ok && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		if debug {
			l.Debugln(p, "taking shortcut on", file.Name)
		}

		events.Default.Log(events.ItemStarted, map[string]string{
			"folder": p.folder,
			"item":   file.Name,
			"type":   "file",
			"action": "metadata",
		})

		p.queue.Done(file.Name)

		var err error
		if file.IsSymlink() {
			err = p.shortcutSymlink(file)
		} else {
			err = p.shortcutFile(file)
		}

		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "metadata",
		})

		if err != nil {
			l.Infoln("Puller: shortcut:", err)
			p.newError(file.Name, err)
		} else {
			p.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
		}

		return
	}

	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "file",
		"action": "update",
	})

	scanner.PopulateOffsets(file.Blocks)

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(p.dir, file.Name)

	reused := 0
	var blocks []protocol.BlockInfo

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]struct{}, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = struct{}{}
		}

		// Since the blocks are already there, we don't need to get them.
		for _, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
			}
		}

		// The sharedpullerstate will know which flags to use when opening the
		// temp file, depending on whether we are reusing any blocks or not.
		reused = len(file.Blocks) - len(blocks)
		if reused == 0 {
			// Otherwise, discard the file ourselves in order for the
			// sharedpuller not to panic when it fails to exclusively create a
			// file which already exists
			os.Remove(tempName)
		}
	} else {
		blocks = file.Blocks
	}

	s := sharedPullerState{
		file:        file,
		folder:      p.folder,
		tempName:    tempName,
		realName:    realName,
		copyTotal:   len(blocks),
		copyNeeded:  len(blocks),
		reused:      reused,
		ignorePerms: p.ignorePermissions(file),
		version:     curFile.Version,
		mut:         sync.NewMutex(),
	}

	if debug {
		l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)
	}

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}
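// Hypothetical helper, not part of the original source: it restates the
// block-reuse computation in handleFile above as a standalone function, for
// clarity. Given the blocks found in a leftover temp file ("have") and the
// blocks the target file should consist of ("want"), it returns only the
// blocks that still need to be copied or pulled; everything else can be
// reused in place.
func blocksStillNeeded(have, want []protocol.BlockInfo) []protocol.BlockInfo {
	// block.String() returns a string unique to the block, so it can serve
	// as a set key.
	existing := make(map[string]struct{}, len(have))
	for _, b := range have {
		existing[b.String()] = struct{}{}
	}

	var need []protocol.BlockInfo
	for _, b := range want {
		if _, ok := existing[b.String()]; !ok {
			need = append(need, b)
		}
	}
	return need
}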
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)

	if ok && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		if debug {
			l.Debugln(p, "taking shortcut on", file.Name)
		}
		p.queue.Done(file.Name)
		if file.IsSymlink() {
			p.shortcutSymlink(file)
		} else {
			p.shortcutFile(file)
		}
		return
	}

	scanner.PopulateOffsets(file.Blocks)

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(p.dir, file.Name)

	reused := 0
	var blocks []protocol.BlockInfo

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]bool, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = true
		}

		// Since the blocks are already there, we don't need to get them.
		for _, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
			}
		}

		// The sharedpullerstate will know which flags to use when opening the
		// temp file, depending on whether we are reusing any blocks or not.
		reused = len(file.Blocks) - len(blocks)
		if reused == 0 {
			// Otherwise, discard the file ourselves in order for the
			// sharedpuller not to panic when it fails to exclusively create a
			// file which already exists
			os.Remove(tempName)
		}
	} else {
		blocks = file.Blocks
	}

	s := sharedPullerState{
		file:       file,
		folder:     p.folder,
		tempName:   tempName,
		realName:   realName,
		copyTotal:  len(blocks),
		copyNeeded: len(blocks),
		reused:     reused,
	}

	if debug {
		l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)
	}

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}
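// Hypothetical consumer sketch, not the original copier routine: it only
// illustrates the channel contract set up by handleFile. Each copyBlocksState
// carries a shared puller state (temp file bookkeeping) plus the blocks that
// still have to be obtained; a real copier would try to source each block
// from local files before handing the remainder to a network pull stage. The
// function name drainCopyChan is an assumption for illustration only.
func drainCopyChan(copyChan <-chan copyBlocksState) {
	for cs := range copyChan {
		for _, block := range cs.blocks {
			// Attempt a local copy of this block into the temp file here;
			// blocks that cannot be found locally would be forwarded for
			// pulling from other devices.
			_ = block
		}
	}
}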