func (p *Puller) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
	for state := range in {
		if state.failed() != nil {
			continue
		}

		// Get an fd to the temporary file. Technically we don't need it until
		// after fetching the block, but if we run into an error here there is
		// no point in issuing the request to the network.
		fd, err := state.tempFile()
		if err != nil {
			continue
		}

		var lastError error
		potentialDevices := p.model.availability(p.folder, state.file.Name)
		for {
			// Select the least busy device to pull the block from. If we found no
			// feasible device at all, fail the block (and in the long run, the
			// file).
			selected := activity.leastBusy(potentialDevices)
			if selected == (protocol.DeviceID{}) {
				if lastError != nil {
					state.fail("pull", lastError)
				} else {
					state.fail("pull", errNoDevice)
				}
				break
			}

			potentialDevices = removeDevice(potentialDevices, selected)

			// Fetch the block, while marking the selected device as in use so that
			// leastBusy can select another device when someone else asks.
			activity.using(selected)
			var buf []byte
			buf, lastError = p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
			activity.done(selected)
			if lastError != nil {
				continue
			}

			// Verify that the received block matches the desired hash, if not
			// try pulling it from another device.
			_, lastError = scanner.VerifyBuffer(buf, state.block)
			if lastError != nil {
				continue
			}

			// Save the block data we got from the cluster
			_, err = fd.WriteAt(buf, state.block.Offset)
			if err != nil {
				state.fail("save", err)
			} else {
				state.pullDone()
			}

			break
		}
		out <- state.sharedPullerState
	}
}
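// The pullerRoutine above relies on a shared "activity" tracker with
// leastBusy/using/done methods to spread requests across devices. That type
// is not part of this excerpt; the following is only a minimal sketch of what
// such a tracker could look like, assuming the protocol package's DeviceID
// type and the standard library's sync package.
type deviceActivitySketch struct {
	mut sync.Mutex
	act map[protocol.DeviceID]int
}

func newDeviceActivitySketch() *deviceActivitySketch {
	return &deviceActivitySketch{act: make(map[protocol.DeviceID]int)}
}

// leastBusy returns the candidate device with the fewest outstanding
// requests, or the zero DeviceID when the candidate list is empty.
func (a *deviceActivitySketch) leastBusy(availability []protocol.DeviceID) protocol.DeviceID {
	a.mut.Lock()
	defer a.mut.Unlock()
	low := int(^uint(0) >> 1) // max int
	var selected protocol.DeviceID
	for _, device := range availability {
		if usage := a.act[device]; usage < low {
			low = usage
			selected = device
		}
	}
	return selected
}

// using marks a request to the device as in flight.
func (a *deviceActivitySketch) using(device protocol.DeviceID) {
	a.mut.Lock()
	defer a.mut.Unlock()
	a.act[device]++
}

// done marks a request to the device as completed.
func (a *deviceActivitySketch) done(device protocol.DeviceID) {
	a.mut.Lock()
	defer a.mut.Unlock()
	a.act[device]--
}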
func (p *rwFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
	for state := range in {
		if state.failed() != nil {
			out <- state.sharedPullerState
			continue
		}

		// Get an fd to the temporary file. Technically we don't need it until
		// after fetching the block, but if we run into an error here there is
		// no point in issuing the request to the network.
		fd, err := state.tempFile()
		if err != nil {
			out <- state.sharedPullerState
			continue
		}

		var lastError error
		potentialDevices := p.model.Availability(p.folder, state.file.Name)
		for {
			// Select the least busy device to pull the block from. If we found no
			// feasible device at all, fail the block (and in the long run, the
			// file).
			selected := activity.leastBusy(potentialDevices)
			if selected == (protocol.DeviceID{}) {
				if lastError != nil {
					state.fail("pull", lastError)
				} else {
					state.fail("pull", errNoDevice)
				}
				break
			}

			potentialDevices = removeDevice(potentialDevices, selected)

			// Fetch the block, while marking the selected device as in use so that
			// leastBusy can select another device when someone else asks.
			activity.using(selected)
			var buf []byte
			buf, lastError = p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash, 0, nil)
			activity.done(selected)
			if lastError != nil {
				if debug {
					l.Debugln("request:", p.folder, state.file.Name, state.block.Offset, state.block.Size, "returned error:", lastError)
				}
				continue
			}

			// If this folder is set for encryption, ignore the hash (since the
			// encrypted block will have a different hash) and go ahead and
			// decrypt it.
			if p.encrypt {
				if debug {
					l.Debugf("Decrypting %s/%s (S=%d o=%d)", p.folder, state.file.Name, state.block.Size, state.block.Offset)
				}
				dbuf, err := protocol.Decrypt(buf, p.key, state.block.Hash)
				if err != nil {
					l.Debugf("Error decrypting %q: %q", state.file.Name, err.Error())
				} else {
					buf = dbuf
				}
			}

			// Verify that the received block matches the desired hash, if not
			// try pulling it from another device.
			_, lastError = scanner.VerifyBuffer(buf, state.block)
			if lastError != nil {
				if debug {
					l.Debugln("request:", p.folder, state.file.Name, state.block.Offset, state.block.Size, "hash mismatch")
				}
				continue
			}

			// Save the block data we got from the cluster
			_, err = fd.WriteAt(buf, state.block.Offset)
			if err != nil {
				state.fail("save", err)
			} else {
				state.pullDone()
			}

			break
		}
		out <- state.sharedPullerState
	}
}
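// For context, a hypothetical wiring of the copier and puller routines: the
// copier feeds blocks it cannot satisfy locally into pullChan, and both
// routines report completed sharedPullerStates on a common finisher channel.
// Function name, channel names and worker counts below are illustrative, not
// taken from this excerpt.
func (p *rwFolder) startWorkersSketch(copiers, pullers int) (chan<- copyBlocksState, <-chan *sharedPullerState) {
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherChan := make(chan *sharedPullerState)

	for i := 0; i < copiers; i++ {
		go p.copierRoutine(copyChan, pullChan, finisherChan)
	}
	for i := 0; i < pullers; i++ {
		go p.pullerRoutine(pullChan, finisherChan)
	}

	return copyChan, finisherChan
}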
// copierRoutine reads copierStates until the in channel closes and performs
// the relevant copies when possible, or passes it to the puller routine.
func (p *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
	buf := make([]byte, protocol.BlockSize)

	for state := range in {
		dstFd, err := state.tempFile()
		if err != nil {
			// Nothing more to do for this failed file, since we couldn't
			// create a temporary for it.
			out <- state.sharedPullerState
			continue
		}

		if p.progressEmitter != nil {
			p.progressEmitter.Register(state.sharedPullerState)
		}

		folderRoots := make(map[string]string)
		p.model.fmut.RLock()
		for folder, cfg := range p.model.folderCfgs {
			folderRoots[folder] = cfg.Path()
		}
		p.model.fmut.RUnlock()

		for _, block := range state.blocks {
			found := p.model.finder.Iterate(block.Hash, func(folder, file string, index int32) bool {
				buf = buf[:int(block.Size)]
				fd, err := os.Open(filepath.Join(folderRoots[folder], file))
				if err != nil {
					return false
				}

				_, err = fd.ReadAt(buf, protocol.BlockSize*int64(index))
				fd.Close()
				if err != nil {
					return false
				}

				hash, err := scanner.VerifyBuffer(buf, block)
				if err != nil {
					if hash != nil {
						if debug {
							l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
						}
						err = p.model.finder.Fix(folder, file, index, block.Hash, hash)
						if err != nil {
							l.Warnln("finder fix:", err)
						}
					} else if debug {
						l.Debugln("Finder failed to verify buffer", err)
					}
					return false
				}

				_, err = dstFd.WriteAt(buf, block.Offset)
				if err != nil {
					state.fail("dst write", err)
				}

				if file == state.file.Name {
					state.copiedFromOrigin()
				}

				return true
			})

			if state.failed() != nil {
				break
			}

			if !found {
				state.pullStarted()
				ps := pullBlockState{
					sharedPullerState: state.sharedPullerState,
					block:             block,
				}
				pullChan <- ps
			} else {
				state.copyDone()
			}
		}
		out <- state.sharedPullerState
	}
}
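// The copierRoutine above depends on a block finder that maps block hashes to
// known (folder, file, index) locations so data can be copied from local files
// instead of being pulled over the network. Its concrete implementation is not
// shown here; this hypothetical interface only captures the contract the code
// above uses.
type blockFinderSketch interface {
	// Iterate calls fn for each known location of the block with the given
	// hash until fn returns true, and reports whether any call did.
	Iterate(hash []byte, fn func(folder, file string, index int32) bool) bool
	// Fix replaces a stale index entry whose on-disk data no longer hashes to
	// oldHash with the hash that was actually found there.
	Fix(folder, file string, index int32, oldHash, newHash []byte) error
}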
// copierRoutine reads copierStates until the in channel closes and performs
// the relevant copies when possible, or passes it to the puller routine.
func (p *Puller) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
	buf := make([]byte, protocol.BlockSize)

	for state := range in {
		if p.progressEmitter != nil {
			p.progressEmitter.Register(state.sharedPullerState)
		}

		dstFd, err := state.tempFile()
		if err != nil {
			// Nothing more to do for this failed file (the error was logged
			// when it happened)
			out <- state.sharedPullerState
			continue
		}

		evictionChan := make(chan lfu.Eviction)

		fdCache := lfu.New()
		fdCache.UpperBound = 50
		fdCache.LowerBound = 20
		fdCache.EvictionChannel = evictionChan

		go func() {
			for item := range evictionChan {
				item.Value.(*os.File).Close()
			}
		}()

		folderRoots := make(map[string]string)
		p.model.fmut.RLock()
		for folder, cfg := range p.model.folderCfgs {
			folderRoots[folder] = cfg.Path
		}
		p.model.fmut.RUnlock()

		for _, block := range state.blocks {
			buf = buf[:int(block.Size)]
			found := p.model.finder.Iterate(block.Hash, func(folder, file string, index int32) bool {
				path := filepath.Join(folderRoots[folder], file)

				var fd *os.File
				fdi := fdCache.Get(path)
				if fdi != nil {
					fd = fdi.(*os.File)
				} else {
					fd, err = os.Open(path)
					if err != nil {
						return false
					}
					fdCache.Set(path, fd)
				}

				_, err = fd.ReadAt(buf, protocol.BlockSize*int64(index))
				if err != nil {
					return false
				}

				hash, err := scanner.VerifyBuffer(buf, block)
				if err != nil {
					if hash != nil {
						if debug {
							l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
						}
						err = p.model.finder.Fix(folder, file, index, block.Hash, hash)
						if err != nil {
							l.Warnln("finder fix:", err)
						}
					} else if debug {
						l.Debugln("Finder failed to verify buffer", err)
					}
					return false
				}

				_, err = dstFd.WriteAt(buf, block.Offset)
				if err != nil {
					state.fail("dst write", err)
				}

				if file == state.file.Name {
					state.copiedFromOrigin()
				}

				return true
			})

			if state.failed() != nil {
				break
			}

			if !found {
				state.pullStarted()
				ps := pullBlockState{
					sharedPullerState: state.sharedPullerState,
					block:             block,
				}
				pullChan <- ps
			} else {
				state.copyDone()
			}
		}

		fdCache.Evict(fdCache.Len())
		close(evictionChan)

		out <- state.sharedPullerState
	}
}
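// Both copier variants call scanner.VerifyBuffer and rely on it returning the
// computed hash even on a mismatch, which is what makes the finder.Fix call
// possible. The helper below is a hypothetical stand-in that mirrors that
// contract, assuming SHA-256 block hashes and the standard bytes, crypto/sha256
// and fmt packages; it is not the scanner package's implementation.
func verifyBlockSketch(buf []byte, size int, wantHash []byte) ([]byte, error) {
	if len(buf) != size {
		return nil, fmt.Errorf("length mismatch %d != %d", len(buf), size)
	}
	hash := sha256.Sum256(buf)
	if !bytes.Equal(hash[:], wantHash) {
		// Return the actual hash so callers can correct stale index entries.
		return hash[:], fmt.Errorf("hash mismatch %x != %x", hash[:], wantHash)
	}
	return hash[:], nil
}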