// finisherRoutine processes fully completed sharedPullerStates: it marks the
// item done in the queue, runs performFinish, records any error, emits an
// ItemFinished event and deregisters the state from the progress emitter.
func (p *rwFolder) finisherRoutine(in <-chan *sharedPullerState) {
	for state := range in {
		if closed, err := state.finalClose(); closed {
			if debug {
				l.Debugln(p, "closing", state.file.Name)
			}

			p.queue.Done(state.file.Name)

			if err == nil {
				err = p.performFinish(state)
			}
			if err != nil {
				l.Infoln("Puller: final:", err)
				p.newError(state.file.Name, err)
			}
			events.Default.Log(events.ItemFinished, map[string]interface{}{
				"folder": p.folder,
				"item":   state.file.Name,
				"error":  events.Error(err),
				"type":   "file",
				"action": "update",
			})

			if p.progressEmitter != nil {
				p.progressEmitter.Deregister(state)
			}
		}
	}
}
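
// Note: the loop above relies on finalClose (defined on sharedPullerState
// elsewhere in this file) returning closed == true exactly once per item,
// after the last copier/puller routine has released the state. A much
// simplified sketch of that contract, reusing the copyNeeded counter from
// the struct literal in handleFile and hypothetical closed/err fields:
//
//	func (s *sharedPullerState) finalClose() (bool, error) {
//		s.mut.Lock()
//		defer s.mut.Unlock()
//		if s.closed || s.copyNeeded > 0 {
//			// Still in use, or already reported as closed once before.
//			return false, s.err
//		}
//		s.closed = true
//		return true, s.err
//	}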
// deleteFile attempts to delete the given file
func (p *rwFolder) deleteFile(file protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "file",
		"action": "delete",
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "delete",
		})
	}()

	realName := filepath.Join(p.dir, file.Name)

	cur, ok := p.model.CurrentFolderFile(p.folder, file.Name)
	if ok && p.inConflict(cur.Version, file.Version) {
		// There is a conflict here. Move the file to a conflict copy instead
		// of deleting. Also merge with the version vector we had, to indicate
		// we have resolved the conflict.
		file.Version = file.Version.Merge(cur.Version)
		err = osutil.InWritableDir(moveForConflict, realName)
	} else if p.versioner != nil {
		err = osutil.InWritableDir(p.versioner.Archive, realName)
	} else {
		err = osutil.InWritableDir(osutil.Remove, realName)
	}

	if err == nil || os.IsNotExist(err) {
		// It was removed or it doesn't exist to start with
		p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
	} else if _, serr := os.Lstat(realName); serr != nil && !os.IsPermission(serr) {
		// We get an error just looking at the file, and it's not a permission
		// problem. Let's assume the error is in fact some variant of "file
		// does not exist" (possibly expressed as some parent being a file and
		// not a directory etc) and that the delete is handled.
		p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
	} else {
		l.Infof("Puller (folder %q, file %q): delete: %v", p.folder, file.Name, err)
		p.newError(file.Name, err)
	}
}
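
// moveForConflict, passed to InWritableDir above, is defined elsewhere in
// this file. As an illustration only, a minimal sketch of such a helper,
// assuming the usual "<name>.sync-conflict-<date>-<time><ext>" naming, could
// look like:
//
//	func moveForConflict(name string) error {
//		ext := filepath.Ext(name)
//		withoutExt := name[:len(name)-len(ext)]
//		newName := withoutExt + time.Now().Format(".sync-conflict-20060102-150405") + ext
//		err := os.Rename(name, newName)
//		if os.IsNotExist(err) {
//			// Nothing to move away; treat the conflict as already handled.
//			return nil
//		}
//		return err
//	}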
// deleteDir attempts to delete the given directory
func (p *rwFolder) deleteDir(file protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "dir",
		"action": "delete",
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "dir",
			"action": "delete",
		})
	}()

	realName := filepath.Join(p.dir, file.Name)

	// Delete any temporary files lying around in the directory
	dir, _ := os.Open(realName)
	if dir != nil {
		files, _ := dir.Readdirnames(-1)
		for _, file := range files {
			if defTempNamer.IsTemporary(file) {
				osutil.InWritableDir(osutil.Remove, filepath.Join(realName, file))
			}
		}
		dir.Close()
	}

	err = osutil.InWritableDir(osutil.Remove, realName)
	if err == nil || os.IsNotExist(err) {
		// It was removed or it doesn't exist to start with
		p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
	} else if _, serr := os.Lstat(realName); serr != nil && !os.IsPermission(serr) {
		// We get an error just looking at the directory, and it's not a
		// permission problem. Let's assume the error is in fact some variant
		// of "file does not exist" (possibly expressed as some parent being a
		// file and not a directory etc) and that the delete is handled.
		p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteDir}
	} else {
		l.Infof("Puller (folder %q, dir %q): delete: %v", p.folder, file.Name, err)
		p.newError(file.Name, err)
	}
}
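
// osutil.InWritableDir, used throughout the delete paths above, runs the
// given operation on a path after making sure its parent directory is
// writable. The real helper lives in the osutil package; a minimal sketch,
// assuming POSIX permission semantics:
//
//	func InWritableDir(fn func(string) error, path string) error {
//		dir := filepath.Dir(path)
//		info, err := os.Stat(dir)
//		if err != nil {
//			return err
//		}
//		if info.Mode()&0200 == 0 {
//			// The parent is read-only: temporarily grant owner write access,
//			// restoring the original mode when the operation is done.
//			if err := os.Chmod(dir, info.Mode()|0700); err != nil {
//				return err
//			}
//			defer os.Chmod(dir, info.Mode())
//		}
//		return fn(path)
//	}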
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)

	if ok && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		if debug {
			l.Debugln(p, "taking shortcut on", file.Name)
		}

		events.Default.Log(events.ItemStarted, map[string]string{
			"folder": p.folder,
			"item":   file.Name,
			"type":   "file",
			"action": "metadata",
		})

		p.queue.Done(file.Name)

		var err error
		if file.IsSymlink() {
			err = p.shortcutSymlink(file)
		} else {
			err = p.shortcutFile(file)
		}

		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "metadata",
		})

		if err != nil {
			l.Infoln("Puller: shortcut:", err)
			p.newError(file.Name, err)
		} else {
			p.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
		}

		return
	}

	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "file",
		"action": "update",
	})

	scanner.PopulateOffsets(file.Blocks)

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(p.dir, file.Name)

	reused := 0
	var blocks []protocol.BlockInfo

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]struct{}, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = struct{}{}
		}

		// Since the blocks are already there, we don't need to get them.
		for _, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
			}
		}

		// The sharedpullerstate will know which flags to use when opening the
		// temp file, depending on whether we are reusing any blocks or not.
		reused = len(file.Blocks) - len(blocks)
		if reused == 0 {
			// Otherwise, discard the file ourselves in order for the
			// sharedpuller not to panic when it fails to exclusively create a
			// file which already exists
			os.Remove(tempName)
		}
	} else {
		blocks = file.Blocks
	}

	s := sharedPullerState{
		file:        file,
		folder:      p.folder,
		tempName:    tempName,
		realName:    realName,
		copyTotal:   len(blocks),
		copyNeeded:  len(blocks),
		reused:      reused,
		ignorePerms: p.ignorePermissions(file),
		version:     curFile.Version,
		mut:         sync.NewMutex(),
	}

	if debug {
		l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)
	}

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}
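
// scanner.PopulateOffsets, called above, fills in the byte offset of each
// block, since offsets are derivable from the block sizes and are not part
// of the index data. A minimal sketch, assuming blocks are laid out
// back-to-back in file order:
//
//	func PopulateOffsets(blocks []protocol.BlockInfo) {
//		var offset int64
//		for i := range blocks {
//			blocks[i].Offset = offset
//			offset += int64(blocks[i].Size)
//		}
//	}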
// renameFile attempts to rename an existing file to a destination
// and set the right attributes on it.
func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   source.Name,
		"type":   "file",
		"action": "delete",
	})
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   target.Name,
		"type":   "file",
		"action": "update",
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   source.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "delete",
		})
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   target.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "update",
		})
	}()

	if debug {
		l.Debugln(p, "taking rename shortcut", source.Name, "->", target.Name)
	}

	from := filepath.Join(p.dir, source.Name)
	to := filepath.Join(p.dir, target.Name)

	if p.versioner != nil {
		err = osutil.Copy(from, to)
		if err == nil {
			err = osutil.InWritableDir(p.versioner.Archive, from)
		}
	} else {
		err = osutil.TryRename(from, to)
	}

	if err == nil {
		// The file was renamed, so we have handled both the necessary delete
		// of the source and the creation of the target. Fix-up the metadata,
		// and update the local index of the target file.

		p.dbUpdates <- dbUpdateJob{source, dbUpdateDeleteFile}

		err = p.shortcutFile(target)
		if err != nil {
			l.Infof("Puller (folder %q, file %q): rename from %q metadata: %v", p.folder, target.Name, source.Name, err)
			p.newError(target.Name, err)
			return
		}

		p.dbUpdates <- dbUpdateJob{target, dbUpdateHandleFile}
	} else {
		// We failed the rename so we have a source file that we still need to
		// get rid of. Attempt to delete it instead so that we make *some*
		// progress. The target is unhandled.

		err = osutil.InWritableDir(osutil.Remove, from)
		if err != nil {
			l.Infof("Puller (folder %q, file %q): delete %q after failed rename: %v", p.folder, target.Name, source.Name, err)
			p.newError(target.Name, err)
			return
		}

		p.dbUpdates <- dbUpdateJob{source, dbUpdateDeleteFile}
	}
}
// handleDir creates or updates the given directory
func (p *rwFolder) handleDir(file protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "dir",
		"action": "update",
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "dir",
			"action": "update",
		})
	}()

	realName := filepath.Join(p.dir, file.Name)
	mode := os.FileMode(file.Flags & 0777)
	if p.ignorePermissions(file) {
		mode = 0777
	}

	if debug {
		curFile, _ := p.model.CurrentFolderFile(p.folder, file.Name)
		l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
	}

	info, err := osutil.Lstat(realName)
	switch {
	// There is already something under that name, but it's a file/link.
	// Most likely a file/link is getting replaced with a directory.
	// Remove the file/link and fall through to directory creation.
	case err == nil && (!info.IsDir() || info.Mode()&os.ModeSymlink != 0):
		err = osutil.InWritableDir(osutil.Remove, realName)
		if err != nil {
			l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
			p.newError(file.Name, err)
			return
		}
		fallthrough
	// The directory doesn't exist, so we create it with the right
	// mode bits from the start.
	case err != nil && os.IsNotExist(err):
		// We declare a function that acts on only the path name, so
		// we can pass it to InWritableDir. We use a regular Mkdir and
		// not MkdirAll because the parent should already exist.
		mkdir := func(path string) error {
			err = os.Mkdir(path, mode)
			if err != nil || p.ignorePermissions(file) {
				return err
			}

			// Stat the directory so we can check its permissions.
			info, err := osutil.Lstat(path)
			if err != nil {
				return err
			}

			// Mask for the bits we want to preserve and add them in to the
			// directory's permissions.
			return os.Chmod(path, mode|(info.Mode()&retainBits))
		}

		if err = osutil.InWritableDir(mkdir, realName); err == nil {
			p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
		} else {
			l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
			p.newError(file.Name, err)
		}
		return
	// Weird error when stat()'ing the dir. Probably won't work to do
	// anything else with it if we can't even stat() it.
	case err != nil:
		l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
		p.newError(file.Name, err)
		return
	}

	// The directory already exists, so we just correct the mode bits. (We
	// don't handle modification times on directories, because that sucks...)
	// It's OK to change mode bits on stuff within non-writable directories.
	if p.ignorePermissions(file) {
		p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
	} else if err := os.Chmod(realName, mode|(info.Mode()&retainBits)); err == nil {
		p.dbUpdates <- dbUpdateJob{file, dbUpdateHandleDir}
	} else {
		l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
		p.newError(file.Name, err)
	}
}
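
// retainBits, referenced above, is a constant defined elsewhere in this
// file. It is assumed here to name the special permission bits that should
// survive a chmod of the directory, roughly:
//
//	const retainBits = os.ModeSetgid | os.ModeSetuid | os.ModeSticky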