// Make sure that the copier routine hashes the content when asked, and pulls
// if it fails to find the block.
func TestLastResortPulling(t *testing.T) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)

	// Add a file to the index (with an incorrect block list, as the on-disk
	// content doesn't actually match it)
	file := protocol.FileInfo{
		Name:     "empty",
		Flags:    0,
		Modified: 0,
		Blocks:   []protocol.BlockInfo{blocks[0]},
	}
	m.updateLocals("default", []protocol.FileInfo{file})

	// Pretend that we are handling a new file with the same content but
	// a different name (causing that particular block to be copied)
	file.Name = "newfile"

	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Check that that particular block is there
	if !m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	p := rwFolder{
		folder: "default",
		dir:    "testdata",
		model:  m,
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 1)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single copier routine
	go p.copierRoutine(copyChan, pullChan, finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// The copier should hash the empty file, realise that the region it read
	// doesn't match the hash advertised by the block map, fix the map, and
	// ask to pull the block.
	<-pullChan

	// Verify that it did fix the incorrect hash.
	if m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Found unexpected block")
	}

	if !m.finder.Iterate(scanner.SHA256OfNothing, iterFn) {
		t.Error("Expected block not found")
	}

	(<-finisherChan).fd.Close()
	os.Remove(filepath.Join("testdata", defTempNamer.TempName("newfile")))
}
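// The test above exercises m.finder.Iterate, whose callback receives one
// (folder, file, index) entry per occurrence of a hash; judging by the test,
// Iterate returns true when the callback accepted an entry. Below is a
// minimal sketch of using that callback to collect every known location of a
// block; the blockFinder interface and blockLocation type are illustrative,
// and only the callback signature is taken from the test.
type blockLocation struct {
	folder string
	file   string
	index  int32
}

type blockFinder interface {
	Iterate(hash []byte, iterFn func(folder, file string, index int32) bool) bool
}

func collectLocations(f blockFinder, hash []byte) []blockLocation {
	var locs []blockLocation
	f.Iterate(hash, func(folder, file string, index int32) bool {
		locs = append(locs, blockLocation{folder, file, index})
		return false // keep iterating over all entries for this hash
	})
	return locs
}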
Example #2
// ldbReplaceWithDelete replaces the given device's file list with fs; files
// already in the database but absent from fs are marked as deleted with a
// bumped version.
func ldbReplaceWithDelete(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) int64 {
	return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) int64 {
		var tf FileInfoTruncated
		err := tf.UnmarshalXDR(dbi.Value())
		if err != nil {
			panic(err)
		}
		if !tf.IsDeleted() {
			if debugDB {
				l.Debugf("mark deleted; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
			}
			ts := clock(tf.LocalVersion)
			f := protocol.FileInfo{
				Name:         tf.Name,
				Version:      lamport.Default.Tick(tf.Version),
				LocalVersion: ts,
				Flags:        tf.Flags | protocol.FlagDeleted,
				Modified:     tf.Modified,
			}
			bs, _ := f.MarshalXDR()
			if debugDB {
				l.Debugf("batch.Put %p %x", batch, dbi.Key())
			}
			batch.Put(dbi.Key(), bs)
			ldbUpdateGlobal(db, batch, folder, device, deviceKeyName(dbi.Key()), f.Version)
			return ts
		}
		return 0
	})
}
Example #3
// ReceivedFile records statistics about the most recently received file in
// the folder.
func (s *FolderStatisticsReference) ReceivedFile(file protocol.FileInfo) {
	if debug {
		l.Debugln("stats.FolderStatisticsReference.ReceivedFile:", s.folder, file)
	}
	s.ns.PutTime("lastFileAt", time.Now())
	s.ns.PutString("lastFileName", file.Name)
	s.ns.PutBool("lastFileDeleted", file.IsDeleted())
}
Example #4
func main() {
	log.SetFlags(0)
	log.SetOutput(os.Stdout)

	flag.Parse()

	ldb, err := leveldb.OpenFile(flag.Arg(0), &opt.Options{
		ErrorIfMissing:         true,
		Strict:                 opt.StrictAll,
		OpenFilesCacheCapacity: 100,
	})
	if err != nil {
		log.Fatal(err)
	}

	it := ldb.NewIterator(nil, nil)
	var dev protocol.DeviceID
	for it.Next() {
		key := it.Key()
		switch key[0] {
		case db.KeyTypeDevice:
			folder := nulString(key[1 : 1+64])
			devBytes := key[1+64 : 1+64+32]
			name := nulString(key[1+64+32:])
			copy(dev[:], devBytes)
			fmt.Printf("[device] F:%q N:%q D:%v\n", folder, name, dev)

			var f protocol.FileInfo
			err := f.UnmarshalXDR(it.Value())
			if err != nil {
				log.Fatal(err)
			}
			fmt.Printf("  N:%q\n  F:%#o\n  M:%d\n  V:%v\n  S:%d\n  B:%d\n", f.Name, f.Flags, f.Modified, f.Version, f.Size(), len(f.Blocks))

		case db.KeyTypeGlobal:
			folder := nulString(key[1 : 1+64])
			name := nulString(key[1+64:])
			fmt.Printf("[global] F:%q N:%q V:%x\n", folder, name, it.Value())

		case db.KeyTypeBlock:
			folder := nulString(key[1 : 1+64])
			hash := key[1+64 : 1+64+32]
			name := nulString(key[1+64+32:])
			fmt.Printf("[block] F:%q H:%x N:%q I:%d\n", folder, hash, name, binary.BigEndian.Uint32(it.Value()))

		case db.KeyTypeDeviceStatistic:
			fmt.Printf("[dstat]\n  %x\n  %x\n", it.Key(), it.Value())

		case db.KeyTypeFolderStatistic:
			fmt.Printf("[fstat]\n  %x\n  %x\n", it.Key(), it.Value())

		default:
			fmt.Printf("[???]\n  %x\n  %x\n", it.Key(), it.Value())
		}
	}
}
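// The dump tool above relies on a nulString helper (not shown) to decode the
// fixed-width, NUL-padded folder and name fields of each key. A minimal
// implementation consistent with that use could look like this:
func nulString(bs []byte) string {
	for i, b := range bs {
		if b == 0 {
			return string(bs[:i])
		}
	}
	return string(bs)
}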
Example #5
// unmarshalTrunc decodes bs into either a FileInfoTruncated or a full
// protocol.FileInfo, depending on the truncate flag.
func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
	if truncate {
		var tf FileInfoTruncated
		err := tf.UnmarshalXDR(bs)
		return tf, err
	}
	var tf protocol.FileInfo
	err := tf.UnmarshalXDR(bs)
	return tf, err
}
Example #6
// shortcutSymlink changes the symlink's type if necessary.
func (p *rwFolder) shortcutSymlink(file protocol.FileInfo) (err error) {
	tt := symlinks.TargetFile
	if file.IsDirectory() {
		tt = symlinks.TargetDirectory
	}
	err = symlinks.ChangeType(filepath.Join(p.dir, file.Name), tt)
	if err != nil {
		l.Infof("Puller (folder %q, file %q): symlink shortcut: %v", p.folder, file.Name, err)
		p.newError(file.Name, err)
	}
	return
}
Example #7
// Test that updating a file removes its old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	fcfg := config.FolderConfiguration{ID: "default", Path: "testdata"}
	cfg := config.Configuration{Folders: []config.FolderConfiguration{fcfg}}

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(config.Wrap("/tmp/test", cfg), "device", "syncthing", "dev", db)
	m.AddFolder(fcfg)

	// Create a file
	file := protocol.FileInfo{
		Name:     "test",
		Flags:    0,
		Modified: 0,
		Blocks:   []protocol.BlockInfo{blocks[0]},
	}

	// Add file to index
	m.updateLocal("default", file)

	if !m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version++
	// Update index (removing old blocks)
	m.updateLocal("default", file)

	if m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if !m.finder.Iterate(blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version++
	// Update index (removing old blocks)
	m.updateLocal("default", file)

	if !m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if m.finder.Iterate(blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}
}
Example #8
// updateLocal updates the local index entry for f in the given folder and
// emits a LocalIndexUpdated event.
func (m *Model) updateLocal(folder string, f protocol.FileInfo) {
	f.LocalVersion = 0
	m.fmut.RLock()
	m.folderFiles[folder].Update(protocol.LocalDeviceID, []protocol.FileInfo{f})
	m.fmut.RUnlock()
	events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
		"folder":   folder,
		"name":     f.Name,
		"modified": time.Unix(f.Modified, 0),
		"flags":    fmt.Sprintf("0%o", f.Flags),
		"size":     f.Size(),
	})
}
// Test that updating a file removes its old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)

	// Create a file
	file := protocol.FileInfo{
		Name:     "test",
		Flags:    0,
		Modified: 0,
		Blocks:   []protocol.BlockInfo{blocks[0]},
	}

	// Add file to index
	m.updateLocals("default", []protocol.FileInfo{file})

	if !m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocals("default", []protocol.FileInfo{file})

	if m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if !m.finder.Iterate(blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
	// Update index (removing old blocks)
	m.updateLocals("default", []protocol.FileInfo{file})

	if !m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if m.finder.Iterate(blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}
}
Example #10
// shortcutFile sets file mode and modification time, when that's the only
// thing that has changed.
func (p *rwFolder) shortcutFile(file protocol.FileInfo) error {
	realName := filepath.Join(p.dir, file.Name)
	if !p.ignorePermissions(file) {
		if err := os.Chmod(realName, os.FileMode(file.Flags&0777)); err != nil {
			l.Infof("Puller (folder %q, file %q): shortcut: chmod: %v", p.folder, file.Name, err)
			p.newError(file.Name, err)
			return err
		}
	}

	t := time.Unix(file.Modified, 0)
	if err := os.Chtimes(realName, t, t); err != nil {
		// Try using virtual mtimes
		info, err := os.Stat(realName)
		if err != nil {
			l.Infof("Puller (folder %q, file %q): shortcut: unable to stat file: %v", p.folder, file.Name, err)
			p.newError(file.Name, err)
			return err
		}

		p.virtualMtimeRepo.UpdateMtime(file.Name, info.ModTime(), t)
	}

	// This may have been a conflict. We should merge the version vectors so
	// that our clock doesn't move backwards.
	if cur, ok := p.model.CurrentFolderFile(p.folder, file.Name); ok {
		file.Version = file.Version.Merge(cur.Version)
	}

	return nil
}
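// The version merge at the end of shortcutFile follows the usual
// version-vector rule: take the element-wise maximum so neither side's
// counters move backwards. Below is a self-contained sketch using a toy
// map-based vector (an illustration only, not the protocol.Vector type used
// above).

// vclock is a toy version vector: device short ID -> counter.
type vclock map[uint64]uint64

// mergeClocks returns the element-wise maximum of a and b, so no counter
// ever moves backwards for either device.
func mergeClocks(a, b vclock) vclock {
	out := make(vclock, len(a))
	for id, c := range a {
		out[id] = c
	}
	for id, c := range b {
		if c > out[id] {
			out[id] = c
		}
	}
	return out
}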
Example #11
// ldbGet returns the FileInfo stored for the given folder, device and file
// name, and whether such an entry exists.
func ldbGet(db *leveldb.DB, folder, device, file []byte) (protocol.FileInfo, bool) {
	nk := deviceKey(folder, device, file)
	bs, err := db.Get(nk, nil)
	if err == leveldb.ErrNotFound {
		return protocol.FileInfo{}, false
	}
	if err != nil {
		panic(err)
	}

	var f protocol.FileInfo
	err = f.UnmarshalXDR(bs)
	if err != nil {
		panic(err)
	}
	return f, true
}
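// deviceKey is not included here; judging from how the dump tool above slices
// keys apart (one type byte, a 64-byte NUL-padded folder field, a 32-byte
// device ID, then the file name), a compatible construction might look
// roughly like the following. The field widths and the zero key-type value
// are assumptions based on that parsing, not the package's actual constants.
const (
	sketchKeyTypeDevice = 0  // assumed to correspond to db.KeyTypeDevice
	sketchKeyFolderLen  = 64 // NUL-padded folder ID field
	sketchKeyDeviceLen  = 32 // raw device ID bytes
)

func deviceKeySketch(folder, device, name []byte) []byte {
	k := make([]byte, 1+sketchKeyFolderLen+sketchKeyDeviceLen+len(name))
	k[0] = sketchKeyTypeDevice
	copy(k[1:], folder)
	copy(k[1+sketchKeyFolderLen:], device)
	copy(k[1+sketchKeyFolderLen+sketchKeyDeviceLen:], name)
	return k
}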
Example #12
// ldbInsert writes the file to the batch under its device key and returns the
// file's local version, assigning a fresh one from the local clock if it was
// unset.
func ldbInsert(batch dbWriter, folder, device []byte, file protocol.FileInfo) int64 {
	if debugDB {
		l.Debugf("insert; folder=%q device=%v %v", folder, protocol.DeviceIDFromBytes(device), file)
	}

	if file.LocalVersion == 0 {
		file.LocalVersion = clock(0)
	}

	name := []byte(file.Name)
	nk := deviceKey(folder, device, name)
	if debugDB {
		l.Debugf("batch.Put %p %x", batch, nk)
	}
	batch.Put(nk, file.MustMarshalXDR())

	return file.LocalVersion
}
Example #13
func SymlinkTypeEqual(disk symlinks.TargetType, f protocol.FileInfo) bool {
	// If the target is missing, Unix never knows what type of symlink it is
	// and Windows always knows even if there is no target. Which means that
	// without this special check a Unix node would be fighting with a Windows
	// node about whether or not the target is known. Basically, if you don't
	// know and someone else knows, just accept it. The fact that you don't
	// know means you are on Unix, and on Unix you don't really care what the
	// target type is. The moment you do know, and if something doesn't match,
	// that will propagate through the cluster.
	switch disk {
	case symlinks.TargetUnknown:
		return true
	case symlinks.TargetDirectory:
		return f.IsDirectory() && f.Flags&protocol.FlagSymlinkMissingTarget == 0
	case symlinks.TargetFile:
		return !f.IsDirectory() && f.Flags&protocol.FlagSymlinkMissingTarget == 0
	}
	panic("unknown symlink TargetType")
}
Example #14
// deleteFile attempts to delete the given file
func (p *rwFolder) deleteFile(file protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "file",
		"action": "delete",
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "delete",
		})
	}()

	realName := filepath.Join(p.dir, file.Name)

	cur, ok := p.model.CurrentFolderFile(p.folder, file.Name)
	if ok && p.inConflict(cur.Version, file.Version) {
		// There is a conflict here. Move the file to a conflict copy instead
		// of deleting. Also merge with the version vector we had, to indicate
		// we have resolved the conflict.
		file.Version = file.Version.Merge(cur.Version)
		err = osutil.InWritableDir(moveForConflict, realName)
	} else if p.versioner != nil {
		err = osutil.InWritableDir(p.versioner.Archive, realName)
	} else {
		err = osutil.InWritableDir(osutil.Remove, realName)
	}

	if err == nil || os.IsNotExist(err) {
		// It was removed or it doesn't exist to start with
		p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
	} else if _, serr := os.Lstat(realName); serr != nil && !os.IsPermission(serr) {
		// We get an error just looking at the file, and it's not a permission
		// problem. Let's assume the error is in fact some variant of "file
		// does not exist" (possibly expressed as some parent being a file and
		// not a directory, etc.) and that the delete is handled.
		p.dbUpdates <- dbUpdateJob{file, dbUpdateDeleteFile}
	} else {
		l.Infof("Puller (folder %q, file %q): delete: %v", p.folder, file.Name, err)
		p.newError(file.Name, err)
	}
}
Example #15
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)

	if ok && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		if debug {
			l.Debugln(p, "taking shortcut on", file.Name)
		}

		events.Default.Log(events.ItemStarted, map[string]string{
			"folder": p.folder,
			"item":   file.Name,
			"type":   "file",
			"action": "metadata",
		})

		p.queue.Done(file.Name)

		var err error
		if file.IsSymlink() {
			err = p.shortcutSymlink(file)
		} else {
			err = p.shortcutFile(file)
		}

		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "metadata",
		})

		if err != nil {
			l.Infoln("Puller: shortcut:", err)
			p.newError(file.Name, err)
		} else {
			p.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
		}

		return
	}

	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "file",
		"action": "update",
	})

	scanner.PopulateOffsets(file.Blocks)

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(p.dir, file.Name)

	reused := 0
	var blocks []protocol.BlockInfo

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]struct{}, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = struct{}{}
		}

		// Since the blocks are already there, we don't need to get them.
		for _, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
			}
		}

		// The sharedPullerState will know which flags to use when opening the
		// temp file, depending on whether we are reusing any blocks.
		reused = len(file.Blocks) - len(blocks)
		if reused == 0 {
			// No blocks were reused, so remove the temp file ourselves so that
			// the sharedPullerState doesn't panic when it fails to exclusively
			// create a file which already exists
			os.Remove(tempName)
		}
	} else {
		blocks = file.Blocks
	}

	s := sharedPullerState{
		file:        file,
		folder:      p.folder,
		tempName:    tempName,
		realName:    realName,
		copyTotal:   len(blocks),
		copyNeeded:  len(blocks),
		reused:      reused,
		ignorePerms: p.ignorePermissions(file),
		version:     curFile.Version,
		mut:         sync.NewMutex(),
	}

	if debug {
		l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)
	}

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}
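// scanner.BlockDiff is used above to find reusable blocks in the old temp
// file but is not shown. The sketch below is an assumption about its
// behaviour, consistent with how the result is used here (blocks already
// present at the same offset can be copied locally, the rest must be
// fetched); it is not the scanner package's actual code. It needs the bytes
// package and protocol.BlockInfo.
func blockDiffSketch(existing, target []protocol.BlockInfo) (have, need []protocol.BlockInfo) {
	for i, blk := range target {
		if i < len(existing) && bytes.Equal(existing[i].Hash, blk.Hash) {
			have = append(have, blk)
		} else {
			need = append(need, blk)
		}
	}
	return have, need
}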
Example #16
// walkAndHashFiles returns a filepath.WalkFunc that sends files needing
// hashing to fchan and changed directories and symlinks to dchan.
func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.WalkFunc {
	now := time.Now()
	return func(p string, info os.FileInfo, err error) error {
		// Return value used when we are returning early and don't want to
		// process the item. For directories, this means do-not-descend.
		var skip error // nil
		// info may be nil when err is non-nil
		if info != nil && info.IsDir() {
			skip = filepath.SkipDir
		}

		if err != nil {
			if debug {
				l.Debugln("error:", p, info, err)
			}
			return skip
		}

		rn, err := filepath.Rel(w.Dir, p)
		if err != nil {
			if debug {
				l.Debugln("rel error:", p, err)
			}
			return skip
		}

		if rn == "." {
			return nil
		}

		mtime := info.ModTime()
		if w.MtimeRepo != nil {
			mtime = w.MtimeRepo.GetMtime(rn, mtime)
		}

		if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) {
			// A temporary file
			if debug {
				l.Debugln("temporary:", rn)
			}
			if info.Mode().IsRegular() && mtime.Add(w.TempLifetime).Before(now) {
				os.Remove(p)
				if debug {
					l.Debugln("removing temporary:", rn, mtime)
				}
			}
			return nil
		}

		if sn := filepath.Base(rn); sn == ".stignore" || sn == ".stfolder" ||
			strings.HasPrefix(rn, ".stversions") || (w.Matcher != nil && w.Matcher.Match(rn)) {
			// An ignored file
			if debug {
				l.Debugln("ignored:", rn)
			}
			return skip
		}

		if !utf8.ValidString(rn) {
			l.Warnf("File name %q is not in UTF8 encoding; skipping.", rn)
			return skip
		}

		var normalizedRn string
		if runtime.GOOS == "darwin" {
			// Mac OS X file names should always be NFD normalized.
			normalizedRn = norm.NFD.String(rn)
		} else {
			// Every other OS in the known universe uses NFC or just plain
			// doesn't bother to define an encoding. In our case *we* do care,
			// so we enforce NFC regardless.
			normalizedRn = norm.NFC.String(rn)
		}

		if rn != normalizedRn {
			// The file name was not normalized.

			if !w.AutoNormalize {
				// We're not authorized to do anything about it, so complain and skip.

				l.Warnf("File name %q is not in the correct UTF8 normalization form; skipping.", rn)
				return skip
			}

			// We will attempt to normalize it.
			normalizedPath := filepath.Join(w.Dir, normalizedRn)
			if _, err := osutil.Lstat(normalizedPath); os.IsNotExist(err) {
				// Nothing exists with the normalized filename. Good.
				if err = os.Rename(p, normalizedPath); err != nil {
					l.Infof(`Error normalizing UTF8 encoding of file "%s": %v`, rn, err)
					return skip
				}
				l.Infof(`Normalized UTF8 encoding of file name "%s".`, rn)
			} else {
				// There is something already in the way at the normalized
				// file name.
				l.Infof(`File "%s" has UTF8 encoding conflict with another file; ignoring.`, rn)
				return skip
			}

			rn = normalizedRn
		}

		var cf protocol.FileInfo
		var ok bool

		// Index-wise, symlinks are always files, regardless of what the target
		// is, because symlinks carry their target path as their content.
		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
			// If the target is a directory, do NOT descend down there. This
			// will cause files to get tracked, and removing the symlink will
			// as a result remove files in their real location.
			if !symlinks.Supported {
				return skip
			}

			// We always rehash symlinks as they have no modtime or
			// permissions. We check if they point to the old target by
			// checking that their existing blocks match with the blocks in
			// the index.

			target, targetType, err := symlinks.Read(p)
			if err != nil {
				if debug {
					l.Debugln("readlink error:", p, err)
				}
				return skip
			}

			blocks, err := Blocks(strings.NewReader(target), w.BlockSize, 0, nil)
			if err != nil {
				if debug {
					l.Debugln("hash link error:", p, err)
				}
				return skip
			}

			if w.CurrentFiler != nil {
				// A symlink is "unchanged", if
				//  - it exists
				//  - it wasn't deleted (because it isn't now)
				//  - it was a symlink
				//  - it wasn't invalid
				//  - the symlink type (file/dir) was the same
				//  - the block list (i.e. hash of target) was the same
				cf, ok = w.CurrentFiler.CurrentFile(rn)
				if ok && !cf.IsDeleted() && cf.IsSymlink() && !cf.IsInvalid() && SymlinkTypeEqual(targetType, cf) && BlocksEqual(cf.Blocks, blocks) {
					return skip
				}
			}

			f := protocol.FileInfo{
				Name:     rn,
				Version:  cf.Version.Update(w.ShortID),
				Flags:    uint32(protocol.FlagSymlink | protocol.FlagNoPermBits | 0666 | SymlinkFlags(targetType)),
				Modified: 0,
				Blocks:   blocks,
			}

			if debug {
				l.Debugln("symlink changedb:", p, f)
			}

			dchan <- f

			return skip
		}

		if info.Mode().IsDir() {
			if w.CurrentFiler != nil {
				// A directory is "unchanged", if it
				//  - exists
				//  - has the same permissions as previously, unless we are ignoring permissions
				//  - was not marked deleted (since it apparently exists now)
				//  - was a directory previously (not a file or something else)
				//  - was not a symlink (since it's a directory now)
				//  - was not invalid (since it looks valid now)
				cf, ok = w.CurrentFiler.CurrentFile(rn)
				permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, uint32(info.Mode()))
				if ok && permUnchanged && !cf.IsDeleted() && cf.IsDirectory() && !cf.IsSymlink() && !cf.IsInvalid() {
					return nil
				}
			}

			flags := uint32(protocol.FlagDirectory)
			if w.IgnorePerms {
				flags |= protocol.FlagNoPermBits | 0777
			} else {
				flags |= uint32(info.Mode() & maskModePerm)
			}
			f := protocol.FileInfo{
				Name:     rn,
				Version:  cf.Version.Update(w.ShortID),
				Flags:    flags,
				Modified: mtime.Unix(),
			}
			if debug {
				l.Debugln("dir:", p, f)
			}
			dchan <- f
			return nil
		}

		if info.Mode().IsRegular() {
			curMode := uint32(info.Mode())
			if runtime.GOOS == "windows" && osutil.IsWindowsExecutable(rn) {
				curMode |= 0111
			}

			if w.CurrentFiler != nil {
				// A file is "unchanged", if it
				//  - exists
				//  - has the same permissions as previously, unless we are ignoring permissions
				//  - was not marked deleted (since it apparently exists now)
				//  - had the same modification time as it has now
				//  - was not a directory previously (since it's a file now)
				//  - was not a symlink (since it's a file now)
				//  - was not invalid (since it looks valid now)
				//  - has the same size as previously
				cf, ok = w.CurrentFiler.CurrentFile(rn)
				permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, curMode)
				if ok && permUnchanged && !cf.IsDeleted() && cf.Modified == mtime.Unix() && !cf.IsDirectory() &&
					!cf.IsSymlink() && !cf.IsInvalid() && cf.Size() == info.Size() {
					return nil
				}

				if debug {
					l.Debugln("rescan:", cf, mtime.Unix(), info.Mode()&os.ModePerm)
				}
			}

			var flags = curMode & uint32(maskModePerm)
			if w.IgnorePerms {
				flags = protocol.FlagNoPermBits | 0666
			}

			f := protocol.FileInfo{
				Name:       rn,
				Version:    cf.Version.Update(w.ShortID),
				Flags:      flags,
				Modified:   mtime.Unix(),
				CachedSize: info.Size(),
			}
			if debug {
				l.Debugln("to hash:", p, f)
			}
			fchan <- f
		}

		return nil
	}
}
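// PermsEqual is referenced above when deciding whether a directory or file is
// unchanged, but its body is not included. Below is a sketch consistent with
// that use, assuming only permission bits are compared and that Windows can
// only express read-only versus writable:
func permsEqualSketch(a, b uint32) bool {
	if runtime.GOOS == "windows" {
		// Only the owner read/write bits carry meaning on Windows.
		return a&0600 == b&0600
	}
	// Elsewhere all permission bits count.
	return a&0777 == b&0777
}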
Example #17
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)

	if ok && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		if debug {
			l.Debugln(p, "taking shortcut on", file.Name)
		}
		p.queue.Done(file.Name)
		if file.IsSymlink() {
			p.shortcutSymlink(file)
		} else {
			p.shortcutFile(file)
		}
		return
	}

	scanner.PopulateOffsets(file.Blocks)

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(p.dir, file.Name)

	reused := 0
	var blocks []protocol.BlockInfo

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]bool, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = true
		}

		// Since the blocks are already there, we don't need to get them.
		for _, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
			}
		}

		// The sharedPullerState will know which flags to use when opening the
		// temp file, depending on whether we are reusing any blocks.
		reused = len(file.Blocks) - len(blocks)
		if reused == 0 {
			// No blocks were reused, so remove the temp file ourselves so that
			// the sharedPullerState doesn't panic when it fails to exclusively
			// create a file which already exists
			os.Remove(tempName)
		}
	} else {
		blocks = file.Blocks
	}

	s := sharedPullerState{
		file:       file,
		folder:     p.folder,
		tempName:   tempName,
		realName:   realName,
		copyTotal:  len(blocks),
		copyNeeded: len(blocks),
		reused:     reused,
	}

	if debug {
		l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)
	}

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}
Example #18
// ldbUpdateGlobal adds this device+version to the version list for the given
// file. If the device is already present in the list, the version is updated.
// If the file does not have an entry in the global list, it is created.
func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device []byte, file protocol.FileInfo) bool {
	if debugDB {
		l.Debugf("update global; folder=%q device=%v file=%q version=%d", folder, protocol.DeviceIDFromBytes(device), file.Name, file.Version)
	}
	name := []byte(file.Name)
	gk := globalKey(folder, name)
	svl, err := db.Get(gk, nil)
	if err != nil && err != leveldb.ErrNotFound {
		panic(err)
	}

	var fl versionList

	// Remove the device from the current version list
	if svl != nil {
		err = fl.UnmarshalXDR(svl)
		if err != nil {
			panic(err)
		}

		for i := range fl.versions {
			if bytes.Equal(fl.versions[i].device, device) {
				if fl.versions[i].version.Equal(file.Version) {
					// No need to do anything
					return false
				}
				fl.versions = append(fl.versions[:i], fl.versions[i+1:]...)
				break
			}
		}
	}

	nv := fileVersion{
		device:  device,
		version: file.Version,
	}

	// Find a position in the list to insert this file. The file at the front
	// of the list is the newest, the "global" version.
	for i := range fl.versions {
		switch fl.versions[i].version.Compare(file.Version) {
		case protocol.Equal, protocol.Lesser:
			// The version at this point in the list is equal to or lesser
			// ("older") than us. We insert ourselves in front of it.
			fl.versions = insertVersion(fl.versions, i, nv)
			goto done

		case protocol.ConcurrentLesser, protocol.ConcurrentGreater:
			// The version at this point is in conflict with us. We must pull
			// the actual file metadata to determine who wins. If we win, we
			// insert ourselves in front of the loser here. (The "Lesser" and
			// "Greater" in the condition above is just based on the device
			// IDs in the version vector, which is not the only thing we use
			// to determine the winner.)
			of, ok := ldbGet(db, folder, fl.versions[i].device, name)
			if !ok {
				panic("file referenced in version list does not exist")
			}
			if file.WinsConflict(of) {
				fl.versions = insertVersion(fl.versions, i, nv)
				goto done
			}
		}
	}

	// We didn't find a position for an insert above, so append to the end.
	fl.versions = append(fl.versions, nv)

done:
	if debugDB {
		l.Debugf("batch.Put %p %x", batch, gk)
		l.Debugf("new global after update: %v", fl)
	}
	batch.Put(gk, fl.MustMarshalXDR())

	return true
}
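// insertVersion is used above to splice the new fileVersion into the list but
// is not shown; a standard slice-insert implementation consistent with that
// call might look like this:
func insertVersionSketch(vl []fileVersion, i int, v fileVersion) []fileVersion {
	vl = append(vl, fileVersion{})
	copy(vl[i+1:], vl[i:])
	vl[i] = v
	return vl
}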