Code example #1
File: manypeers_test.go  Project: wmwwmv/syncthing
func TestManyPeers(t *testing.T) {
	log.Println("Cleaning...")
	err := removeAll("s1", "s2", "h1/index*", "h2/index*")
	if err != nil {
		t.Fatal(err)
	}

	log.Println("Generating files...")
	err = generateFiles("s1", 200, 20, "../LICENSE")
	if err != nil {
		t.Fatal(err)
	}

	receiver := startInstance(t, 2)
	defer checkedStop(t, receiver)

	bs, err := receiver.Get("/rest/system/config")
	if err != nil {
		t.Fatal(err)
	}

	var cfg config.Configuration
	if err := json.Unmarshal(bs, &cfg); err != nil {
		t.Fatal(err)
	}

	for len(cfg.Devices) < 100 {
		bs := make([]byte, 16)
		ReadRand(bs)
		id := protocol.NewDeviceID(bs)
		cfg.Devices = append(cfg.Devices, config.DeviceConfiguration{DeviceID: id})
		cfg.Folders[0].Devices = append(cfg.Folders[0].Devices, config.FolderDeviceConfiguration{DeviceID: id})
	}

	osutil.Rename("h2/config.xml", "h2/config.xml.orig")
	defer osutil.Rename("h2/config.xml.orig", "h2/config.xml")

	var buf bytes.Buffer
	json.NewEncoder(&buf).Encode(cfg)
	_, err = receiver.Post("/rest/system/config", &buf)
	if err != nil {
		t.Fatal(err)
	}

	sender := startInstance(t, 1)
	defer checkedStop(t, sender)

	rc.AwaitSync("default", sender, receiver)

	log.Println("Comparing directories...")
	err = compareDirectories("s1", "s2")
	if err != nil {
		t.Fatal(err)
	}
}
Code example #2
File: trashcan.go  Project: rwx-zwx-awx/syncthing
// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (t *Trashcan) Archive(filePath string) error {
	_, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		if debug {
			l.Debugln("not archiving nonexistent file", filePath)
		}
		return nil
	} else if err != nil {
		return err
	}

	versionsDir := filepath.Join(t.folderPath, ".stversions")
	if _, err := os.Stat(versionsDir); err != nil {
		if !os.IsNotExist(err) {
			return err
		}

		if debug {
			l.Debugln("creating versions dir", versionsDir)
		}
		if err := osutil.MkdirAll(versionsDir, 0777); err != nil {
			return err
		}
		osutil.HideFile(versionsDir)
	}

	if debug {
		l.Debugln("archiving", filePath)
	}

	relativePath, err := filepath.Rel(t.folderPath, filePath)
	if err != nil {
		return err
	}

	archivedPath := filepath.Join(versionsDir, relativePath)
	if err := osutil.MkdirAll(filepath.Dir(archivedPath), 0777); err != nil && !os.IsExist(err) {
		return err
	}

	if debug {
		l.Debugln("moving to", archivedPath)
	}

	if err := osutil.Rename(filePath, archivedPath); err != nil {
		return err
	}

	// Set the mtime to the time the file was deleted. This is used by the
	// cleanout routine. If this fails things won't work optimally but there's
	// not much we can do about it so we ignore the error.
	os.Chtimes(archivedPath, time.Now(), time.Now())

	return nil
}
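
The function above boils down to "move the file under .stversions, creating directories as needed, and stamp it with the deletion time". Below is a condensed, standard-library-only sketch of that pattern; the name archiveToTrash and the simplified error handling are assumptions for illustration, with the osutil helpers replaced by their os counterparts, not Syncthing's actual API.

package trash

import (
	"os"
	"path/filepath"
	"time"
)

func archiveToTrash(folderPath, filePath string) error {
	if _, err := os.Lstat(filePath); os.IsNotExist(err) {
		// Nothing to archive; treated as success, as in Trashcan.Archive.
		return nil
	} else if err != nil {
		return err
	}

	rel, err := filepath.Rel(folderPath, filePath)
	if err != nil {
		return err
	}

	archivedPath := filepath.Join(folderPath, ".stversions", rel)
	if err := os.MkdirAll(filepath.Dir(archivedPath), 0777); err != nil {
		return err
	}
	if err := os.Rename(filePath, archivedPath); err != nil {
		return err
	}

	// Record the deletion time for a later cleanout pass; failure here is
	// deliberately ignored, mirroring the original.
	now := time.Now()
	_ = os.Chtimes(archivedPath, now, now)
	return nil
}
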
Code example #3
File: osutil_test.go  Project: WeavingCode/syncthing
func TestInWritableDirWindowsRename(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.Skipf("Tests not required")
		return
	}

	err := os.RemoveAll("testdata")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Chmod("testdata/windows/ro/readonlynew", 0700)
	defer os.RemoveAll("testdata")

	create := func(name string) error {
		fd, err := os.Create(name)
		if err != nil {
			return err
		}
		fd.Close()
		return nil
	}

	os.Mkdir("testdata", 0700)

	os.Mkdir("testdata/windows", 0500)
	os.Mkdir("testdata/windows/ro", 0500)
	create("testdata/windows/ro/readonly")
	os.Chmod("testdata/windows/ro/readonly", 0500)

	for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
		err := os.Rename(path, path+"new")
		if err == nil {
			t.Skipf("seem like this test doesn't work here")
			return
		}
	}

	rename := func(path string) error {
		return osutil.Rename(path, path+"new")
	}

	for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
		err := osutil.InWritableDir(rename, path)
		if err != nil {
			t.Errorf("Unexpected error %s: %s", path, err)
		}
		_, err = os.Stat(path + "new")
		if err != nil {
			t.Errorf("Unexpected error %s: %s", path, err)
		}
	}
}
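
The test exercises osutil.InWritableDir, which evidently makes a read-only parent directory writable long enough for the rename to succeed and then restores it. The following is a rough standard-library sketch of that idea under that assumption; it is an illustration, not the real implementation.

package fsutil

import (
	"os"
	"path/filepath"
)

// inWritableDir temporarily adds owner write/execute permission to the parent
// directory of path, runs fn on path, and restores the original permissions.
func inWritableDir(fn func(string) error, path string) error {
	dir := filepath.Dir(path)
	info, err := os.Stat(dir)
	if err != nil {
		return err
	}

	orig := info.Mode().Perm()
	if orig&0200 == 0 {
		if err := os.Chmod(dir, orig|0700); err != nil {
			return err
		}
		defer os.Chmod(dir, orig)
	}

	return fn(path)
}
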
Code example #4
func archiveAndSaveConfig(cfg *config.Wrapper) error {
	// To prevent previous config from being cleaned up, quickly touch it too
	now := time.Now()
	_ = os.Chtimes(cfg.ConfigPath(), now, now) // May return error on Android etc; no worries

	archivePath := cfg.ConfigPath() + fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion)
	l.Infoln("Archiving a copy of old config file format at:", archivePath)
	if err := osutil.Rename(cfg.ConfigPath(), archivePath); err != nil {
		return err
	}

	return cfg.Save()
}
Code example #5
// checkConvertDatabase tries to convert an existing old (v0.11) database to
// new (v0.13) format.
func checkConvertDatabase(dbFile string) error {
	oldLoc := filepath.Join(filepath.Dir(dbFile), "index-v0.11.0.db")
	if _, err := os.Stat(oldLoc); os.IsNotExist(err) {
		// The old database file does not exist; that's ok, continue as if
		// everything succeeded.
		return nil
	} else if err != nil {
		// Any other error is weird.
		return err
	}

	// There exists a database in the old format. We run a one time
	// conversion from old to new.

	fromDb, err := leveldb.OpenFile(oldLoc, nil)
	if err != nil {
		return err
	}

	toDb, err := leveldb.OpenFile(dbFile, nil)
	if err != nil {
		return err
	}

	err = convertKeyFormat(fromDb, toDb)
	if err != nil {
		return err
	}

	err = toDb.Close()
	if err != nil {
		return err
	}

	// We've done this one, we don't want to do it again (if the user runs
	// -reset or so). We don't care too much about errors any more at this stage.
	fromDb.Close()
	osutil.Rename(oldLoc, oldLoc+".converted")

	return nil
}
Code example #6
File: rwfolder.go  Project: jbfavre/syncthing
func (p *rwFolder) performFinish(state *sharedPullerState) error {
	// Set the correct permission bits on the new file
	if !p.ignorePermissions(state.file) {
		if err := os.Chmod(state.tempName, os.FileMode(state.file.Flags&0777)); err != nil {
			return err
		}
	}

	// Set the correct timestamp on the new file
	t := time.Unix(state.file.Modified, 0)
	if err := os.Chtimes(state.tempName, t, t); err != nil {
		// Try using virtual mtimes instead
		info, err := os.Stat(state.tempName)
		if err != nil {
			return err
		}
		p.virtualMtimeRepo.UpdateMtime(state.file.Name, info.ModTime(), t)
	}

	if stat, err := osutil.Lstat(state.realName); err == nil {
		// There is an old file or directory already in place. We need to
		// handle that.

		switch {
		case stat.IsDir() || stat.Mode()&os.ModeSymlink != 0:
			// It's a directory or a symlink. These are not versioned or
			// archived for conflicts, only removed (which of course fails for
			// non-empty directories).

			// TODO: This is the place where we want to remove temporary files
			// and future hard ignores before attempting a directory delete.
			// Should share code with p.deleteDir().

			if err = osutil.InWritableDir(osutil.Remove, state.realName); err != nil {
				return err
			}

		case p.inConflict(state.version, state.file.Version):
			// The new file has been changed in conflict with the existing one. We
			// should file it away as a conflict instead of just removing or
			// archiving. Also merge with the version vector we had, to indicate
			// we have resolved the conflict.

			state.file.Version = state.file.Version.Merge(state.version)
			if err = osutil.InWritableDir(moveForConflict, state.realName); err != nil {
				return err
			}

		case p.versioner != nil:
			// If we should use versioning, let the versioner archive the old
			// file before we replace it. Archiving a non-existent file is not
			// an error.

			if err = p.versioner.Archive(state.realName); err != nil {
				return err
			}
		}
	}

	// Replace the original content with the new one
	if err := osutil.Rename(state.tempName, state.realName); err != nil {
		return err
	}

	// If it's a symlink, the target of the symlink is inside the file.
	if state.file.IsSymlink() {
		content, err := ioutil.ReadFile(state.realName)
		if err != nil {
			return err
		}

		// Remove the file, and replace it with a symlink.
		err = osutil.InWritableDir(func(path string) error {
			os.Remove(path)
			return symlinks.Create(path, string(content), state.file.Flags)
		}, state.realName)
		if err != nil {
			return err
		}
	}

	// Record the updated file in the index
	p.dbUpdates <- dbUpdateJob{state.file, dbUpdateHandleFile}
	return nil
}
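
Stripped of the conflict and versioning branches, performFinish is the classic temp-file-then-rename pattern: permissions and timestamps are fixed up on the temporary file, and a single rename makes the new content visible in place of the old. Below is a minimal standard-library sketch of just that core step; the names are hypothetical, and osutil.Rename presumably also covers platforms where a plain rename cannot overwrite an existing target.

package replace

import (
	"os"
	"time"
)

func replaceAtomically(tempName, realName string, mode os.FileMode, modified time.Time) error {
	if err := os.Chmod(tempName, mode&0777); err != nil {
		return err
	}
	if err := os.Chtimes(tempName, modified, modified); err != nil {
		return err
	}
	// On POSIX systems this rename atomically replaces realName.
	return os.Rename(tempName, realName)
}
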
Code example #7
File: staggered.go  Project: kristallizer/syncthing
// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (v Staggered) Archive(filePath string) error {
	if debug {
		l.Debugln("Waiting for lock on ", v.versionsPath)
	}
	v.mutex.Lock()
	defer v.mutex.Unlock()

	_, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		if debug {
			l.Debugln("not archiving nonexistent file", filePath)
		}
		return nil
	} else if err != nil {
		return err
	}

	if _, err := os.Stat(v.versionsPath); err != nil {
		if os.IsNotExist(err) {
			if debug {
				l.Debugln("creating versions dir", v.versionsPath)
			}
			osutil.MkdirAll(v.versionsPath, 0755)
			osutil.HideFile(v.versionsPath)
		} else {
			return err
		}
	}

	if debug {
		l.Debugln("archiving", filePath)
	}

	file := filepath.Base(filePath)
	inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
	if err != nil {
		return err
	}

	dir := filepath.Join(v.versionsPath, inFolderPath)
	err = osutil.MkdirAll(dir, 0755)
	if err != nil && !os.IsExist(err) {
		return err
	}

	ver := taggedFilename(file, time.Now().Format(TimeFormat))
	dst := filepath.Join(dir, ver)
	if debug {
		l.Debugln("moving to", dst)
	}
	err = osutil.Rename(filePath, dst)
	if err != nil {
		return err
	}

	// Glob according to the new file~timestamp.ext pattern.
	newVersions, err := osutil.Glob(filepath.Join(dir, taggedFilename(file, TimeGlob)))
	if err != nil {
		l.Warnln("globbing:", err)
		return nil
	}

	// Also according to the old file.ext~timestamp pattern.
	oldVersions, err := osutil.Glob(filepath.Join(dir, file+"~"+TimeGlob))
	if err != nil {
		l.Warnln("globbing:", err)
		return nil
	}

	// Use all the found filenames.
	versions := append(oldVersions, newVersions...)
	v.expire(uniqueSortedStrings(versions))

	return nil
}
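
taggedFilename itself is not part of this excerpt. Judging from the comments above ("file~timestamp.ext" for the new pattern versus "file.ext~timestamp" for the old one), it inserts the tag before the extension; the following is a hypothetical version along those lines, which also fits the TimeGlob pattern used in the glob calls.

package versioner

import (
	"path/filepath"
	"strings"
)

// taggedFilename turns "notes.txt" and "20150101-120000" into
// "notes~20150101-120000.txt".
func taggedFilename(name, tag string) string {
	ext := filepath.Ext(name)
	base := strings.TrimSuffix(name, ext)
	return base + "~" + tag + ext
}
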
Code example #8
File: main.go  Project: JBTech/syncthing
func syncthingMain() {
	// Create a main service manager. We'll add things to this as we go along.
	// We want any logging it does to go through our log system.
	mainSvc := suture.New("main", suture.Spec{
		Log: func(line string) {
			l.Debugln(line)
		},
	})
	mainSvc.ServeBackground()

	// Set a log prefix similar to the ID we will have later on, or early log
	// lines look ugly.
	l.SetPrefix("[start] ")

	if auditEnabled {
		startAuditing(mainSvc)
	}

	if verbose {
		mainSvc.Add(newVerboseSvc())
	}

	errors := logger.NewRecorder(l, logger.LevelWarn, maxSystemErrors, 0)
	systemLog := logger.NewRecorder(l, logger.LevelDebug, maxSystemLog, initialSystemLog)

	// Event subscription for the API; must start early to catch the early events.
	apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	// Attempt to increase the limit on number of open files to the maximum
	// allowed, in case we have many peers. We don't really care enough to
	// report the error if there is one.
	osutil.MaximizeOpenFileLimit()

	// Ensure that we have a certificate and key.
	cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
	if err != nil {
		l.Infof("Generating RSA key and certificate for %s...", tlsDefaultCommonName)
		cert, err = tlsutil.NewCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName, tlsRSABits)
		if err != nil {
			l.Fatalln(err)
		}
	}

	// We reinitialize the predictable RNG with our device ID, to get a
	// sequence that is always the same but unique to this syncthing instance.
	predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))

	myID = protocol.NewDeviceID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Emit the Starting event, now that we know who we are.

	events.Default.Log(events.Starting, map[string]string{
		"home": baseDirs["config"],
		"myID": myID.String(),
	})

	// Prepare to be able to save configuration

	cfgFile := locations[locConfigFile]

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	cfg, myName, err := loadConfig(cfgFile)
	if err != nil {
		if os.IsNotExist(err) {
			l.Infoln("No config file; starting with empty defaults")
			myName, _ = os.Hostname()
			newCfg := defaultConfig(myName)
			cfg = config.Wrap(cfgFile, newCfg)
			cfg.Save()
			l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
		} else {
			l.Fatalln("Loading config:", err)
		}
	}

	if cfg.Raw().OriginalVersion != config.CurrentVersion {
		l.Infoln("Archiving a copy of old config file format")
		// Archive a copy
		osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion))
		// Save the new version
		cfg.Save()
	}

	if err := checkShortIDs(cfg); err != nil {
		l.Fatalln("Short device IDs are in conflict. Unlucky!\n  Regenerate the device ID of one if the following:\n  ", err)
	}

	if len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{bepProtocolName},
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		},
	}

	// If the read or write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.

	opts := cfg.Options()

	if !opts.SymlinksEnabled {
		symlinks.Supported = false
	}

	if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
		lans, _ = osutil.GetLans()
		networks := make([]string, 0, len(lans))
		for _, lan := range lans {
			networks = append(networks, lan.String())
		}
		for _, lan := range opts.AlwaysLocalNets {
			_, ipnet, err := net.ParseCIDR(lan)
			if err != nil {
				l.Infoln("Network", lan, "is malformed:", err)
				continue
			}
			networks = append(networks, ipnet.String())
		}
		l.Infoln("Local networks:", strings.Join(networks, ", "))
	}

	dbFile := locations[locDatabase]
	dbOpts := dbOpts(cfg)
	ldb, err := leveldb.OpenFile(dbFile, dbOpts)
	if leveldbIsCorrupted(err) {
		ldb, err = leveldb.RecoverFile(dbFile, dbOpts)
	}
	if leveldbIsCorrupted(err) {
		// The database is corrupted, and we've tried to recover it but it
		// didn't work. At this point there isn't much to do beyond dropping
		// the database and reindexing...
		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
		if err := resetDB(); err != nil {
			l.Fatalln("Remove database:", err)
		}
		ldb, err = leveldb.OpenFile(dbFile, dbOpts)
	}
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

	// Remove database entries for folders that no longer exist in the config
	folders := cfg.Folders()
	for _, folder := range db.ListFolders(ldb) {
		if _, ok := folders[folder]; !ok {
			l.Infof("Cleaning data for dropped folder %q", folder)
			db.DropFolder(ldb, folder)
		}
	}

	m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
	cfg.Subscribe(m)

	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
		if err == nil {
			m.StartDeadlockDetector(time.Duration(it) * time.Second)
		}
	} else if !IsRelease || IsBeta {
		m.StartDeadlockDetector(20 * time.Minute)
	}

	if paused {
		for device := range cfg.Devices() {
			m.PauseDevice(device)
		}
	}

	// Clear out old indexes for other devices. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, folderCfg := range cfg.Folders() {
		m.AddFolder(folderCfg)
		for _, device := range folderCfg.DeviceIDs() {
			if device == myID {
				continue
			}
			m.Index(device, folderCfg.ID, nil, 0, nil)
		}
		// Routine to pull blocks from other devices to synchronize the local
		// folder. Does not run when we are in read only (publish only) mode.
		if folderCfg.ReadOnly {
			m.StartFolderRO(folderCfg.ID)
		} else {
			m.StartFolderRW(folderCfg.ID)
		}
	}

	mainSvc.Add(m)

	// The default port we announce, possibly modified by setupUPnP next.

	uri, err := url.Parse(opts.ListenAddress[0])
	if err != nil {
		l.Fatalf("Failed to parse listen address %s: %v", opts.ListenAddress[0], err)
	}

	addr, err := net.ResolveTCPAddr("tcp", uri.Host)
	if err != nil {
		l.Fatalln("Bad listen address:", err)
	}

	// The externalAddr tracks our external addresses for discovery purposes.

	var addrList *addressLister

	// Start UPnP

	if opts.UPnPEnabled {
		upnpSvc := newUPnPSvc(cfg, addr.Port)
		mainSvc.Add(upnpSvc)

		// The external address tracker needs to know about the UPnP service
		// so it can check for an external mapped port.
		addrList = newAddressLister(upnpSvc, cfg)
	} else {
		addrList = newAddressLister(nil, cfg)
	}

	// Start relay management

	var relaySvc *relay.Svc
	if opts.RelaysEnabled && (opts.GlobalAnnEnabled || opts.RelayWithoutGlobalAnn) {
		relaySvc = relay.NewSvc(cfg, tlsCfg)
		mainSvc.Add(relaySvc)
	}

	// Start discovery

	cachedDiscovery := discover.NewCachingMux()
	mainSvc.Add(cachedDiscovery)

	if cfg.Options().GlobalAnnEnabled {
		for _, srv := range cfg.GlobalDiscoveryServers() {
			l.Infoln("Using discovery server", srv)
			gd, err := discover.NewGlobal(srv, cert, addrList, relaySvc)
			if err != nil {
				l.Warnln("Global discovery:", err)
				continue
			}

			// Each global discovery server gets its results cached for five
			// minutes, and is not asked again for a minute when it's returned
			// unsuccessfully.
			cachedDiscovery.Add(gd, 5*time.Minute, time.Minute, globalDiscoveryPriority)
		}
	}

	if cfg.Options().LocalAnnEnabled {
		// v4 broadcasts
		bcd, err := discover.NewLocal(myID, fmt.Sprintf(":%d", cfg.Options().LocalAnnPort), addrList, relaySvc)
		if err != nil {
			l.Warnln("IPv4 local discovery:", err)
		} else {
			cachedDiscovery.Add(bcd, 0, 0, ipv4LocalDiscoveryPriority)
		}
		// v6 multicasts
		mcd, err := discover.NewLocal(myID, cfg.Options().LocalAnnMCAddr, addrList, relaySvc)
		if err != nil {
			l.Warnln("IPv6 local discovery:", err)
		} else {
			cachedDiscovery.Add(mcd, 0, 0, ipv6LocalDiscoveryPriority)
		}
	}

	// GUI

	setupGUI(mainSvc, cfg, m, apiSub, cachedDiscovery, relaySvc, errors, systemLog)

	// Start connection management

	connectionSvc := connections.NewConnectionSvc(cfg, myID, m, tlsCfg, cachedDiscovery, relaySvc, bepProtocolName, tlsDefaultCommonName, lans)
	mainSvc.Add(connectionSvc)

	if cpuProfile {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
	}

	for _, device := range cfg.Devices() {
		if len(device.Name) > 0 {
			l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
		}
	}

	if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		opts.URAccepted = 0
		opts.URUniqueID = ""
		cfg.SetOptions(opts)
	}
	if opts.URAccepted >= usageReportVersion {
		if opts.URUniqueID == "" {
			// Previously the ID was generated from the node ID. We now need
			// to generate a new one.
			opts.URUniqueID = randomString(8)
			cfg.SetOptions(opts)
			cfg.Save()
		}
	}

	// The usageReportingManager registers itself to listen to configuration
	// changes, and there's nothing more we need to tell it from the outside.
	// Hence we don't keep the returned pointer.
	newUsageReportingManager(cfg, m)

	if opts.RestartOnWakeup {
		go standbyMonitor()
	}

	if opts.AutoUpgradeIntervalH > 0 {
		if noUpgrade {
			l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
		} else if IsRelease {
			go autoUpgrade(cfg)
		} else {
			l.Infof("No automatic upgrades; %s is not a release version.", Version)
		}
	}

	events.Default.Log(events.StartupComplete, map[string]string{
		"myID": myID.String(),
	})
	go generatePingEvents()

	cleanConfigDirectory()

	code := <-stop

	mainSvc.Stop()

	l.Okln("Exiting")

	if cpuProfile {
		pprof.StopCPUProfile()
	}

	os.Exit(code)
}
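
The database opening sequence in the middle of this function degrades in stages: open normally, attempt RecoverFile if the database is corrupted, and finally drop the database and reindex when recovery also fails. Below is a condensed sketch of that cascade, using goleveldb's errors.IsCorrupted in place of the broader leveldbIsCorrupted helper and os.RemoveAll in place of resetDB; both substitutions are assumptions for illustration.

package dbopen

import (
	"os"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func openDatabase(path string, opts *opt.Options) (*leveldb.DB, error) {
	db, err := leveldb.OpenFile(path, opts)
	if errors.IsCorrupted(err) {
		db, err = leveldb.RecoverFile(path, opts)
	}
	if errors.IsCorrupted(err) {
		// Recovery failed too: drop the database entirely and let the
		// caller reindex from scratch.
		if rmErr := os.RemoveAll(path); rmErr != nil {
			return nil, rmErr
		}
		db, err = leveldb.OpenFile(path, opts)
	}
	return db, err
}
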
Code example #9
func TestFolderWithoutRestart(t *testing.T) {
	log.Println("Cleaning...")
	err := removeAll("testfolder-p1", "testfolder-p4", "h1/index*", "h4/index*")
	if err != nil {
		t.Fatal(err)
	}
	defer removeAll("testfolder-p1", "testfolder-p4")

	if err := generateFiles("testfolder-p1", 50, 18, "../LICENSE"); err != nil {
		t.Fatal(err)
	}

	p1 := startInstance(t, 1)
	defer checkedStop(t, p1)

	p4 := startInstance(t, 4)
	defer checkedStop(t, p4)

	if ok, err := p1.ConfigInSync(); err != nil || !ok {
		t.Fatal("p1 should be in sync;", ok, err)
	}

	if ok, err := p4.ConfigInSync(); err != nil || !ok {
		t.Fatal("p4 should be in sync;", ok, err)
	}

	// Add a new folder to p1, shared with p4. Back up and restore the config
	// first.

	log.Println("Adding testfolder to p1...")

	os.Remove("h1/config.xml.orig")
	os.Rename("h1/config.xml", "h1/config.xml.orig")
	defer osutil.Rename("h1/config.xml.orig", "h1/config.xml")

	cfg, err := p1.GetConfig()
	if err != nil {
		t.Fatal(err)
	}

	newFolder := config.FolderConfiguration{
		ID:              "testfolder",
		RawPath:         "testfolder-p1",
		RescanIntervalS: 86400,
		Copiers:         1,
		Hashers:         1,
		Pullers:         1,
		Devices:         []config.FolderDeviceConfiguration{{DeviceID: p4.ID()}},
	}
	newDevice := config.DeviceConfiguration{
		DeviceID:    p4.ID(),
		Name:        "p4",
		Addresses:   []string{"dynamic"},
		Compression: protocol.CompressMetadata,
	}

	cfg.Folders = append(cfg.Folders, newFolder)
	cfg.Devices = append(cfg.Devices, newDevice)

	if err = p1.PostConfig(cfg); err != nil {
		t.Fatal(err)
	}

	// Add a new folder to p4, shared with p1. Back up and restore the config
	// first.

	log.Println("Adding testfolder to p4...")

	os.Remove("h4/config.xml.orig")
	os.Rename("h4/config.xml", "h4/config.xml.orig")
	defer osutil.Rename("h4/config.xml.orig", "h4/config.xml")

	cfg, err = p4.GetConfig()
	if err != nil {
		t.Fatal(err)
	}

	newFolder.RawPath = "testfolder-p4"
	newFolder.Devices = []config.FolderDeviceConfiguration{{DeviceID: p1.ID()}}
	newDevice.DeviceID = p1.ID()
	newDevice.Name = "p1"
	newDevice.Addresses = []string{"127.0.0.1:22001"}

	cfg.Folders = append(cfg.Folders, newFolder)
	cfg.Devices = append(cfg.Devices, newDevice)

	if err = p4.PostConfig(cfg); err != nil {
		t.Fatal(err)
	}

	// The change should not require a restart, so the config should be "in sync"

	if ok, err := p1.ConfigInSync(); err != nil || !ok {
		t.Fatal("p1 should be in sync;", ok, err)
	}
	if ok, err := p4.ConfigInSync(); err != nil || !ok {
		t.Fatal("p4 should be in sync;", ok, err)
	}

	// The folder should start and scan - wait for the event that signals this
	// has happened.

	log.Println("Waiting for testfolder to scan...")

	since := 0
outer:
	for {
		events, err := p4.Events(since)
		if err != nil {
			t.Fatal(err)
		}
		for _, event := range events {
			if event.Type == "StateChanged" {
				data := event.Data.(map[string]interface{})
				folder := data["folder"].(string)
				from := data["from"].(string)
				to := data["to"].(string)
				if folder == "testfolder" && from == "scanning" && to == "idle" {
					break outer
				}
			}
			since = event.ID
		}
	}

	// It should sync to the other side successfully

	log.Println("Waiting for p1 and p4 to connect and sync...")

	rc.AwaitSync("testfolder", p1, p4)
}
Code example #10
func TestAddDeviceWithoutRestart(t *testing.T) {
	log.Println("Cleaning...")
	err := removeAll("s1", "h1/index*", "s4", "h4/index*")
	if err != nil {
		t.Fatal(err)
	}

	log.Println("Generating files...")
	err = generateFiles("s1", 100, 18, "../LICENSE")
	if err != nil {
		t.Fatal(err)
	}

	p1 := startInstance(t, 1)
	defer checkedStop(t, p1)

	p4 := startInstance(t, 4)
	defer checkedStop(t, p4)

	if ok, err := p1.ConfigInSync(); err != nil || !ok {
		t.Fatal("p1 should be in sync;", ok, err)
	}
	if ok, err := p4.ConfigInSync(); err != nil || !ok {
		t.Fatal("p4 should be in sync;", ok, err)
	}

	// Add the p1 device to p4. Back up and restore p4's config first.

	log.Println("Adding p1 to p4...")

	os.Remove("h4/config.xml.orig")
	os.Rename("h4/config.xml", "h4/config.xml.orig")
	defer osutil.Rename("h4/config.xml.orig", "h4/config.xml")

	cfg, err := p4.GetConfig()
	if err != nil {
		t.Fatal(err)
	}

	devCfg := config.DeviceConfiguration{
		DeviceID:    p1.ID(),
		Name:        "s1",
		Addresses:   []string{"127.0.0.1:22001"},
		Compression: protocol.CompressMetadata,
	}
	cfg.Devices = append(cfg.Devices, devCfg)

	cfg.Folders[0].Devices = append(cfg.Folders[0].Devices, config.FolderDeviceConfiguration{DeviceID: p1.ID()})

	if err = p4.PostConfig(cfg); err != nil {
		t.Fatal(err)
	}

	// The change should not require a restart, so the config should be "in sync"

	if ok, err := p4.ConfigInSync(); err != nil || !ok {
		t.Fatal("p4 should be in sync;", ok, err)
	}

	// Wait for the devices to connect and sync.

	log.Println("Waiting for p1 and p4 to connect and sync...")

	rc.AwaitSync("default", p1, p4)
}
Code example #11
File: override_test.go  Project: nrm21/syncthing
func TestOverride(t *testing.T) {
	// Enable "Master" on s1/default
	id, _ := protocol.DeviceIDFromString(id1)
	cfg, _ := config.Load("h1/config.xml", id)
	fld := cfg.Folders()["default"]
	fld.Type = config.FolderTypeSendOnly
	cfg.SetFolder(fld)
	os.Rename("h1/config.xml", "h1/config.xml.orig")
	defer osutil.Rename("h1/config.xml.orig", "h1/config.xml")
	cfg.Save()

	log.Println("Cleaning...")
	err := removeAll("s1", "s2", "h1/index*", "h2/index*")
	if err != nil {
		t.Fatal(err)
	}

	log.Println("Generating files...")
	err = generateFiles("s1", 100, 20, "../LICENSE")
	if err != nil {
		t.Fatal(err)
	}

	fd, err := os.Create("s1/testfile.txt")
	if err != nil {
		t.Fatal(err)
	}
	_, err = fd.WriteString("hello\n")
	if err != nil {
		t.Fatal(err)
	}
	err = fd.Close()
	if err != nil {
		t.Fatal(err)
	}

	expected, err := directoryContents("s1")
	if err != nil {
		t.Fatal(err)
	}

	master := startInstance(t, 1)
	defer checkedStop(t, master)

	slave := startInstance(t, 2)
	defer checkedStop(t, slave)

	log.Println("Syncing...")

	rc.AwaitSync("default", master, slave)

	log.Println("Verifying...")

	actual, err := directoryContents("s2")
	if err != nil {
		t.Fatal(err)
	}
	err = compareDirectoryContents(actual, expected)
	if err != nil {
		t.Fatal(err)
	}

	log.Println("Changing file on slave side...")

	fd, err = os.OpenFile("s2/testfile.txt", os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		t.Fatal(err)
	}
	_, err = fd.WriteString("text added to s2\n")
	if err != nil {
		t.Fatal(err)
	}
	err = fd.Close()
	if err != nil {
		t.Fatal(err)
	}

	if err := slave.Rescan("default"); err != nil {
		t.Fatal(err)
	}

	log.Println("Waiting for index to send...")

	time.Sleep(10 * time.Second)

	log.Println("Hitting Override on master...")

	if _, err := master.Post("/rest/db/override?folder=default", nil); err != nil {
		t.Fatal(err)
	}

	log.Println("Syncing...")

	rc.AwaitSync("default", master, slave)

	// Verify that the override worked

	fd, err = os.Open("s1/testfile.txt")
	if err != nil {
		t.Fatal(err)
	}
	bs, err := ioutil.ReadAll(fd)
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()

	if strings.Contains(string(bs), "added to s2") {
		t.Error("Change should not have been synced to master")
	}

	fd, err = os.Open("s2/testfile.txt")
	if err != nil {
		t.Fatal(err)
	}
	bs, err = ioutil.ReadAll(fd)
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()

	if strings.Contains(string(bs), "added to s2") {
		t.Error("Change should have been overridden on slave")
	}
}
Code example #12
File: rwfolder.go  Project: arkhi/syncthing
func (p *rwFolder) performFinish(state *sharedPullerState) error {
	// Set the correct permission bits on the new file
	if !p.ignorePermissions(state.file) {
		if err := os.Chmod(state.tempName, os.FileMode(state.file.Flags&0777)); err != nil {
			return err
		}
	}

	// Set the correct timestamp on the new file
	t := time.Unix(state.file.Modified, 0)
	if err := os.Chtimes(state.tempName, t, t); err != nil {
		// Try using virtual mtimes instead
		info, err := os.Stat(state.tempName)
		if err != nil {
			return err
		}
		p.virtualMtimeRepo.UpdateMtime(state.file.Name, info.ModTime(), t)
	}

	var err error
	if p.inConflict(state.version, state.file.Version) {
		// The new file has been changed in conflict with the existing one. We
		// should file it away as a conflict instead of just removing or
		// archiving. Also merge with the version vector we had, to indicate
		// we have resolved the conflict.
		state.file.Version = state.file.Version.Merge(state.version)
		err = osutil.InWritableDir(moveForConflict, state.realName)
	} else if p.versioner != nil {
		// If we should use versioning, let the versioner archive the old
		// file before we replace it. Archiving a non-existent file is not
		// an error.
		err = p.versioner.Archive(state.realName)
	} else {
		err = nil
	}
	if err != nil {
		return err
	}

	// If the target path is a symlink or a directory, we cannot copy
	// over it, hence remove it before proceeding.
	stat, err := osutil.Lstat(state.realName)
	if err == nil && (stat.IsDir() || stat.Mode()&os.ModeSymlink != 0) {
		osutil.InWritableDir(osutil.Remove, state.realName)
	}
	// Replace the original content with the new one
	if err = osutil.Rename(state.tempName, state.realName); err != nil {
		return err
	}

	// If it's a symlink, the target of the symlink is inside the file.
	if state.file.IsSymlink() {
		content, err := ioutil.ReadFile(state.realName)
		if err != nil {
			return err
		}

		// Remove the file, and replace it with a symlink.
		err = osutil.InWritableDir(func(path string) error {
			os.Remove(path)
			return symlinks.Create(path, string(content), state.file.Flags)
		}, state.realName)
		if err != nil {
			return err
		}
	}

	// Record the updated file in the index
	p.dbUpdates <- dbUpdateJob{state.file, dbUpdateHandleFile}
	return nil
}
Code example #13
File: simple.go  Project: wmwwmv/syncthing
// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (v Simple) Archive(filePath string) error {
	fileInfo, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		l.Debugln("not archiving nonexistent file", filePath)
		return nil
	} else if err != nil {
		return err
	}

	versionsDir := filepath.Join(v.folderPath, ".stversions")
	_, err = os.Stat(versionsDir)
	if err != nil {
		if os.IsNotExist(err) {
			l.Debugln("creating versions dir", versionsDir)
			osutil.MkdirAll(versionsDir, 0755)
			osutil.HideFile(versionsDir)
		} else {
			return err
		}
	}

	l.Debugln("archiving", filePath)

	file := filepath.Base(filePath)
	inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
	if err != nil {
		return err
	}

	dir := filepath.Join(versionsDir, inFolderPath)
	err = osutil.MkdirAll(dir, 0755)
	if err != nil && !os.IsExist(err) {
		return err
	}

	ver := taggedFilename(file, fileInfo.ModTime().Format(TimeFormat))
	dst := filepath.Join(dir, ver)
	l.Debugln("moving to", dst)
	err = osutil.Rename(filePath, dst)
	if err != nil {
		return err
	}

	// Glob according to the new file~timestamp.ext pattern.
	pattern := filepath.Join(dir, taggedFilename(file, TimeGlob))
	newVersions, err := osutil.Glob(pattern)
	if err != nil {
		l.Warnln("globbing:", err, "for", pattern)
		return nil
	}

	// Also according to the old file.ext~timestamp pattern.
	pattern = filepath.Join(dir, file+"~"+TimeGlob)
	oldVersions, err := osutil.Glob(pattern)
	if err != nil {
		l.Warnln("globbing:", err, "for", pattern)
		return nil
	}

	// Use all the found filenames. "~" sorts after "." so all old pattern
	// files will be deleted before any new, which is as it should be.
	versions := uniqueSortedStrings(append(oldVersions, newVersions...))

	if len(versions) > v.keep {
		for _, toRemove := range versions[:len(versions)-v.keep] {
			l.Debugln("cleaning out", toRemove)
			err = os.Remove(toRemove)
			if err != nil {
				l.Warnln("removing old version:", err)
			}
		}
	}

	return nil
}
Code example #14
File: main.go  Project: kristallizer/syncthing
func syncthingMain() {
	// Create a main service manager. We'll add things to this as we go along.
	// We want any logging it does to go through our log system.
	mainSvc := suture.New("main", suture.Spec{
		Log: func(line string) {
			if debugSuture {
				l.Debugln(line)
			}
		},
	})
	mainSvc.ServeBackground()

	// Set a log prefix similar to the ID we will have later on, or early log
	// lines look ugly.
	l.SetPrefix("[start] ")

	if auditEnabled {
		startAuditing(mainSvc)
	}

	if verbose {
		mainSvc.Add(newVerboseSvc())
	}

	// Event subscription for the API; must start early to catch the early events.
	apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	// Ensure that we have a certificate and key.
	cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
	if err != nil {
		cert, err = newCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName)
		if err != nil {
			l.Fatalln("load cert:", err)
		}
	}

	// We reinitialize the predictable RNG with our device ID, to get a
	// sequence that is always the same but unique to this syncthing instance.
	predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))

	myID = protocol.NewDeviceID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Emit the Starting event, now that we know who we are.

	events.Default.Log(events.Starting, map[string]string{
		"home": baseDirs["config"],
		"myID": myID.String(),
	})

	// Prepare to be able to save configuration

	cfgFile := locations[locConfigFile]

	var myName string

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	if info, err := os.Stat(cfgFile); err == nil {
		if !info.Mode().IsRegular() {
			l.Fatalln("Config file is not a file?")
		}
		cfg, err = config.Load(cfgFile, myID)
		if err == nil {
			myCfg := cfg.Devices()[myID]
			if myCfg.Name == "" {
				myName, _ = os.Hostname()
			} else {
				myName = myCfg.Name
			}
		} else {
			l.Fatalln("Configuration:", err)
		}
	} else {
		l.Infoln("No config file; starting with empty defaults")
		myName, _ = os.Hostname()
		newCfg := defaultConfig(myName)
		cfg = config.Wrap(cfgFile, newCfg)
		cfg.Save()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if cfg.Raw().OriginalVersion != config.CurrentVersion {
		l.Infoln("Archiving a copy of old config file format")
		// Archive a copy
		osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion))
		// Save the new version
		cfg.Save()
	}

	if err := checkShortIDs(cfg); err != nil {
		l.Fatalln("Short device IDs are in conflict. Unlucky!\n  Regenerate the device ID of one if the following:\n  ", err)
	}

	if len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{bepProtocolName},
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		},
	}

	// If the read or write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.

	opts := cfg.Options()

	if !opts.SymlinksEnabled {
		symlinks.Supported = false
	}

	protocol.PingTimeout = time.Duration(opts.PingTimeoutS) * time.Second
	protocol.PingIdleTime = time.Duration(opts.PingIdleTimeS) * time.Second

	if opts.MaxSendKbps > 0 {
		writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps))
	}
	if opts.MaxRecvKbps > 0 {
		readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
	}

	if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
		lans, _ = osutil.GetLans()
		networks := make([]string, 0, len(lans))
		for _, lan := range lans {
			networks = append(networks, lan.String())
		}
		l.Infoln("Local networks:", strings.Join(networks, ", "))
	}

	dbFile := locations[locDatabase]
	ldb, err := leveldb.OpenFile(dbFile, dbOpts())
	if err != nil && errors.IsCorrupted(err) {
		ldb, err = leveldb.RecoverFile(dbFile, dbOpts())
	}
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

	// Remove database entries for folders that no longer exist in the config
	folders := cfg.Folders()
	for _, folder := range db.ListFolders(ldb) {
		if _, ok := folders[folder]; !ok {
			l.Infof("Cleaning data for dropped folder %q", folder)
			db.DropFolder(ldb, folder)
		}
	}

	m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
	cfg.Subscribe(m)

	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
		if err == nil {
			m.StartDeadlockDetector(time.Duration(it) * time.Second)
		}
	} else if !IsRelease || IsBeta {
		m.StartDeadlockDetector(20 * 60 * time.Second)
	}

	// Clear out old indexes for other devices. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, folderCfg := range cfg.Folders() {
		m.AddFolder(folderCfg)
		for _, device := range folderCfg.DeviceIDs() {
			if device == myID {
				continue
			}
			m.Index(device, folderCfg.ID, nil, 0, nil)
		}
		// Routine to pull blocks from other devices to synchronize the local
		// folder. Does not run when we are in read only (publish only) mode.
		if folderCfg.ReadOnly {
			m.StartFolderRO(folderCfg.ID)
		} else {
			m.StartFolderRW(folderCfg.ID)
		}
	}

	mainSvc.Add(m)

	// GUI

	setupGUI(mainSvc, cfg, m, apiSub)

	// The default port we announce, possibly modified by setupUPnP next.

	addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0])
	if err != nil {
		l.Fatalln("Bad listen address:", err)
	}

	// Start discovery

	localPort := addr.Port
	discoverer = discovery(localPort)

	// Start UPnP. The UPnP service will restart global discovery if the
	// external port changes.

	if opts.UPnPEnabled {
		upnpSvc := newUPnPSvc(cfg, localPort)
		mainSvc.Add(upnpSvc)
	}

	connectionSvc := newConnectionSvc(cfg, myID, m, tlsCfg)
	cfg.Subscribe(connectionSvc)
	mainSvc.Add(connectionSvc)

	if cpuProfile {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
	}

	for _, device := range cfg.Devices() {
		if len(device.Name) > 0 {
			l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
		}
	}

	if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		opts.URAccepted = 0
		opts.URUniqueID = ""
		cfg.SetOptions(opts)
	}
	if opts.URAccepted >= usageReportVersion {
		if opts.URUniqueID == "" {
			// Previously the ID was generated from the node ID. We now need
			// to generate a new one.
			opts.URUniqueID = randomString(8)
			cfg.SetOptions(opts)
			cfg.Save()
		}
	}

	// The usageReportingManager registers itself to listen to configuration
	// changes, and there's nothing more we need to tell it from the outside.
	// Hence we don't keep the returned pointer.
	newUsageReportingManager(m, cfg)

	if opts.RestartOnWakeup {
		go standbyMonitor()
	}

	if opts.AutoUpgradeIntervalH > 0 {
		if noUpgrade {
			l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
		} else if IsRelease {
			go autoUpgrade()
		} else {
			l.Infof("No automatic upgrades; %s is not a release version.", Version)
		}
	}

	events.Default.Log(events.StartupComplete, map[string]string{
		"myID": myID.String(),
	})
	go generatePingEvents()

	cleanConfigDirectory()

	code := <-stop

	mainSvc.Stop()

	l.Okln("Exiting")

	if cpuProfile {
		pprof.StopCPUProfile()
	}

	os.Exit(code)
}