// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (t *Trashcan) Archive(filePath string) error {
	_, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		if debug {
			l.Debugln("not archiving nonexistent file", filePath)
		}
		return nil
	} else if err != nil {
		return err
	}

	versionsDir := filepath.Join(t.folderPath, ".stversions")
	if _, err := os.Stat(versionsDir); err != nil {
		if !os.IsNotExist(err) {
			return err
		}

		if debug {
			l.Debugln("creating versions dir", versionsDir)
		}
		if err := osutil.MkdirAll(versionsDir, 0777); err != nil {
			return err
		}
		osutil.HideFile(versionsDir)
	}

	if debug {
		l.Debugln("archiving", filePath)
	}

	relativePath, err := filepath.Rel(t.folderPath, filePath)
	if err != nil {
		return err
	}

	archivedPath := filepath.Join(versionsDir, relativePath)
	if err := osutil.MkdirAll(filepath.Dir(archivedPath), 0777); err != nil && !os.IsExist(err) {
		return err
	}

	if debug {
		l.Debugln("moving to", archivedPath)
	}

	if err := osutil.Rename(filePath, archivedPath); err != nil {
		return err
	}

	// Set the mtime to the time the file was deleted. This is used by the
	// cleanout routine. If this fails things won't work optimally but there's
	// not much we can do about it so we ignore the error.
	os.Chtimes(archivedPath, time.Now(), time.Now())

	return nil
}
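// Illustrative only: a minimal, standard-library sketch (path/filepath) of the
// relative-path mapping Archive performs above. The folder root and file name
// are hypothetical examples, not values taken from the code.
func exampleTrashcanPath() (string, error) {
	folderPath := "/data/folder"               // hypothetical folder root
	filePath := "/data/folder/docs/report.txt" // hypothetical file to archive

	rel, err := filepath.Rel(folderPath, filePath) // "docs/report.txt"
	if err != nil {
		return "", err
	}

	// The archived copy keeps its relative position below .stversions:
	// /data/folder/.stversions/docs/report.txt
	return filepath.Join(folderPath, ".stversions", rel), nil
}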
func TestInWritableDirWindowsRename(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.Skipf("Tests not required")
		return
	}

	err := os.RemoveAll("testdata")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll("testdata")

	create := func(name string) error {
		fd, err := os.Create(name)
		if err != nil {
			return err
		}
		fd.Close()
		return nil
	}

	os.Mkdir("testdata", 0700)
	os.Mkdir("testdata/windows", 0500)
	os.Mkdir("testdata/windows/ro", 0500)
	create("testdata/windows/ro/readonly")
	os.Chmod("testdata/windows/ro/readonly", 0500)

	for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
		err := os.Rename(path, path+"new")
		if err == nil {
			t.Skipf("seems like this test doesn't work here")
			return
		}
	}

	rename := func(path string) error {
		return osutil.Rename(path, path+"new")
	}

	for _, path := range []string{"testdata/windows/ro/readonly", "testdata/windows/ro", "testdata/windows"} {
		err := osutil.InWritableDir(rename, path)
		if err != nil {
			t.Errorf("Unexpected error %s: %s", path, err)
		}
		_, err = os.Stat(path + "new")
		if err != nil {
			t.Errorf("Unexpected error %s: %s", path, err)
		}
	}
}
// Save writes the configuration to disk, and generates a ConfigSaved event.
func (w *ConfigWrapper) Save() error {
	fd, err := ioutil.TempFile(filepath.Dir(w.path), "cfg")
	if err != nil {
		return err
	}

	err = w.cfg.WriteXML(fd)
	if err != nil {
		fd.Close()
		return err
	}

	err = fd.Close()
	if err != nil {
		return err
	}

	events.Default.Log(events.ConfigSaved, w.cfg)

	return osutil.Rename(fd.Name(), w.path)
}
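// A minimal standard-library sketch of the same "write a temp file, then rename
// it over the target" pattern that Save uses, without the XML encoding or the
// event log. Function and variable names here are made up for illustration;
// osutil.Rename (used above) is presumably the cross-platform variant, since a
// plain rename onto an existing file can fail on Windows.
func atomicWriteSketch(path string, data []byte) error {
	fd, err := ioutil.TempFile(filepath.Dir(path), "tmp")
	if err != nil {
		return err
	}
	if _, err := fd.Write(data); err != nil {
		fd.Close()
		os.Remove(fd.Name())
		return err
	}
	if err := fd.Close(); err != nil {
		os.Remove(fd.Name())
		return err
	}
	// On POSIX systems this rename atomically replaces any existing file.
	return os.Rename(fd.Name(), path)
}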
func saveCsrfTokens() {
	name := filepath.Join(confDir, "csrftokens.txt")
	tmp := fmt.Sprintf("%s.tmp.%d", name, time.Now().UnixNano())

	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
	if err != nil {
		return
	}
	defer os.Remove(tmp)

	for _, t := range csrfTokens {
		_, err := fmt.Fprintln(f, t)
		if err != nil {
			return
		}
	}

	err = f.Close()
	if err != nil {
		return
	}

	osutil.Rename(tmp, name)
}
func (m *Model) SetIgnores(folder string, content []string) error {
	cfg, ok := m.folderCfgs[folder]
	if !ok {
		return fmt.Errorf("Folder %s does not exist", folder)
	}

	fd, err := ioutil.TempFile(cfg.Path(), ".syncthing.stignore-"+folder)
	if err != nil {
		l.Warnln("Saving .stignore:", err)
		return err
	}
	defer os.Remove(fd.Name())

	for _, line := range content {
		_, err = fmt.Fprintln(fd, line)
		if err != nil {
			l.Warnln("Saving .stignore:", err)
			return err
		}
	}

	err = fd.Close()
	if err != nil {
		l.Warnln("Saving .stignore:", err)
		return err
	}

	file := filepath.Join(cfg.Path(), ".stignore")
	err = osutil.Rename(fd.Name(), file)
	if err != nil {
		l.Warnln("Saving .stignore:", err)
		return err
	}

	return m.ScanFolder(folder)
}
// renameOld renames version files that use the old version name format.
func (v Staggered) renameOld() {
	err := filepath.Walk(v.versionsPath, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if f.Mode().IsRegular() {
			versionUnix, err := strconv.ParseInt(strings.Replace(filepath.Ext(path), ".v", "", 1), 10, 0)
			if err == nil {
				l.Infoln("Renaming file", path, "from old to new version format")
				versiondate := time.Unix(versionUnix, 0)
				name := path[:len(path)-len(filepath.Ext(path))]
				err = osutil.Rename(path, name+"~"+versiondate.Format(TimeLayout))
				if err != nil {
					l.Infoln("Error renaming to new format", err)
				}
			}
		}
		return nil
	})
	if err != nil {
		l.Infoln("Versioner: error scanning versions dir", err)
		return
	}
}
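// Illustrative only: a small standard-library sketch of the suffix conversion
// renameOld performs, on a made-up path. The old format stores the Unix time as
// a ".v<seconds>" extension; the new format appends "~" plus the formatted
// time. "20060102-150405" is assumed here as a stand-in for TimeLayout, to
// match the timestamp globs used elsewhere in this code.
func exampleOldToNewVersionName() (string, error) {
	path := "/folder/.stversions/report.txt.v1400000000" // hypothetical old-format name

	ext := filepath.Ext(path) // ".v1400000000"
	unix, err := strconv.ParseInt(strings.TrimPrefix(ext, ".v"), 10, 64)
	if err != nil {
		return "", err
	}

	base := strings.TrimSuffix(path, ext) // "/folder/.stversions/report.txt"
	return base + "~" + time.Unix(unix, 0).Format("20060102-150405"), nil
}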
func TestOverride(t *testing.T) {
	// Enable "Master" on s1/default
	id, _ := protocol.DeviceIDFromString(id1)
	cfg, _ := config.Load("h1/config.xml", id)
	fld := cfg.Folders()["default"]
	fld.ReadOnly = true
	cfg.SetFolder(fld)
	os.Rename("h1/config.xml", "h1/config.xml.orig")
	defer osutil.Rename("h1/config.xml.orig", "h1/config.xml")
	cfg.Save()

	log.Println("Cleaning...")
	err := removeAll("s1", "s2", "h1/index*", "h2/index*")
	if err != nil {
		t.Fatal(err)
	}

	log.Println("Generating files...")
	err = generateFiles("s1", 100, 20, "../LICENSE")
	if err != nil {
		t.Fatal(err)
	}

	fd, err := os.Create("s1/testfile.txt")
	if err != nil {
		t.Fatal(err)
	}
	_, err = fd.WriteString("hello\n")
	if err != nil {
		t.Fatal(err)
	}
	err = fd.Close()
	if err != nil {
		t.Fatal(err)
	}

	expected, err := directoryContents("s1")
	if err != nil {
		t.Fatal(err)
	}

	master := startInstance(t, 1)
	defer checkedStop(t, master)
	slave := startInstance(t, 2)
	defer checkedStop(t, slave)

	log.Println("Syncing...")
	rc.AwaitSync("default", master, slave)

	log.Println("Verifying...")
	actual, err := directoryContents("s2")
	if err != nil {
		t.Fatal(err)
	}
	err = compareDirectoryContents(actual, expected)
	if err != nil {
		t.Fatal(err)
	}

	log.Println("Changing file on slave side...")
	fd, err = os.OpenFile("s2/testfile.txt", os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		t.Fatal(err)
	}
	_, err = fd.WriteString("text added to s2\n")
	if err != nil {
		t.Fatal(err)
	}
	err = fd.Close()
	if err != nil {
		t.Fatal(err)
	}

	if err := slave.Rescan("default"); err != nil {
		t.Fatal(err)
	}

	log.Println("Waiting for index to send...")
	time.Sleep(10 * time.Second)

	log.Println("Hitting Override on master...")
	if _, err := master.Post("/rest/db/override?folder=default", nil); err != nil {
		t.Fatal(err)
	}

	log.Println("Syncing...")
	rc.AwaitSync("default", master, slave)

	// Verify that the override worked
	fd, err = os.Open("s1/testfile.txt")
	if err != nil {
		t.Fatal(err)
	}
	bs, err := ioutil.ReadAll(fd)
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()

	if strings.Contains(string(bs), "added to s2") {
		t.Error("Change should not have been synced to master")
	}

	fd, err = os.Open("s2/testfile.txt")
	if err != nil {
		t.Fatal(err)
	}
	bs, err = ioutil.ReadAll(fd)
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()

	if strings.Contains(string(bs), "added to s2") {
		t.Error("Change should have been overridden on slave")
	}
}
// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (v Simple) Archive(filePath string) error {
	fileInfo, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		if debug {
			l.Debugln("not archiving nonexistent file", filePath)
		}
		return nil
	} else if err != nil {
		return err
	}

	versionsDir := filepath.Join(v.folderPath, ".stversions")
	_, err = os.Stat(versionsDir)
	if err != nil {
		if os.IsNotExist(err) {
			if debug {
				l.Debugln("creating versions dir", versionsDir)
			}
			osutil.MkdirAll(versionsDir, 0755)
			osutil.HideFile(versionsDir)
		} else {
			return err
		}
	}

	if debug {
		l.Debugln("archiving", filePath)
	}

	file := filepath.Base(filePath)
	inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
	if err != nil {
		return err
	}

	dir := filepath.Join(versionsDir, inFolderPath)
	err = osutil.MkdirAll(dir, 0755)
	if err != nil && !os.IsExist(err) {
		return err
	}

	ver := taggedFilename(file, fileInfo.ModTime().Format(TimeFormat))
	dst := filepath.Join(dir, ver)
	if debug {
		l.Debugln("moving to", dst)
	}
	err = osutil.Rename(filePath, dst)
	if err != nil {
		return err
	}

	// Glob according to the new file~timestamp.ext pattern.
	newVersions, err := osutil.Glob(filepath.Join(dir, taggedFilename(file, TimeGlob)))
	if err != nil {
		l.Warnln("globbing:", err)
		return nil
	}

	// Also according to the old file.ext~timestamp pattern.
	oldVersions, err := osutil.Glob(filepath.Join(dir, file+"~"+TimeGlob))
	if err != nil {
		l.Warnln("globbing:", err)
		return nil
	}

	// Use all the found filenames. "~" sorts after "." so all old pattern
	// files will be deleted before any new, which is as it should be.
	versions := uniqueSortedStrings(append(oldVersions, newVersions...))

	if len(versions) > v.keep {
		for _, toRemove := range versions[:len(versions)-v.keep] {
			if debug {
				l.Debugln("cleaning out", toRemove)
			}
			err = os.Remove(toRemove)
			if err != nil {
				l.Warnln("removing old version:", err)
			}
		}
	}

	return nil
}
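// taggedFilename is called above but not shown in this excerpt. A minimal
// sketch of what such a helper could look like, assuming the "file~tag.ext"
// convention described by the glob comments; the real implementation may
// differ.
func taggedFilenameSketch(name, tag string) string {
	ext := filepath.Ext(name)             // ".txt" for "report.txt"
	base := strings.TrimSuffix(name, ext) // "report"
	return base + "~" + tag + ext         // e.g. "report~20060102-150405.txt"
}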
func syncthingMain() {
	var err error

	if len(os.Getenv("GOGC")) == 0 {
		debug.SetGCPercent(25)
	}

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	events.Default.Log(events.Starting, map[string]string{"home": confDir})

	// Ensure that we have a certificate and key.
	cert, err = loadCert(confDir, "")
	if err != nil {
		newCertificate(confDir, "", tlsDefaultCommonName)
		cert, err = loadCert(confDir, "")
		if err != nil {
			l.Fatalln("load cert:", err)
		}
	}

	// We reinitialize the predictable RNG with our device ID, to get a
	// sequence that is always the same but unique to this syncthing instance.
	predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))

	myID = protocol.NewDeviceID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Prepare to be able to save configuration

	cfgFile := filepath.Join(confDir, "config.xml")

	var myName string

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	if info, err := os.Stat(cfgFile); err == nil {
		if !info.Mode().IsRegular() {
			l.Fatalln("Config file is not a file?")
		}
		cfg, err = config.Load(cfgFile, myID)
		if err == nil {
			myCfg := cfg.Devices()[myID]
			if myCfg.Name == "" {
				myName, _ = os.Hostname()
			} else {
				myName = myCfg.Name
			}
		} else {
			l.Fatalln("Configuration:", err)
		}
	} else {
		l.Infoln("No config file; starting with empty defaults")
		myName, _ = os.Hostname()
		newCfg := defaultConfig(myName)
		cfg = config.Wrap(cfgFile, newCfg)
		cfg.Save()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if cfg.Raw().OriginalVersion != config.CurrentVersion {
		l.Infoln("Archiving a copy of old config file format")
		// Archive a copy
		osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion))
		// Save the new version
		cfg.Save()
	}

	if len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.
	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{"bep/1.0"},
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		},
	}

	// If the read or write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.
	opts := cfg.Options()

	if !opts.SymlinksEnabled {
		symlinks.Supported = false
	}

	if opts.MaxSendKbps > 0 {
		writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps))
	}
	if opts.MaxRecvKbps > 0 {
		readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
	}

	ldb, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{OpenFilesCacheCapacity: 100})
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

	// Remove database entries for folders that no longer exist in the config
	folders := cfg.Folders()
	for _, folder := range db.ListFolders(ldb) {
		if _, ok := folders[folder]; !ok {
			l.Infof("Cleaning data for dropped folder %q", folder)
			db.DropFolder(ldb, folder)
		}
	}

	m := model.NewModel(cfg, myName, "syncthing", Version, ldb)

	sanityCheckFolders(cfg, m)

	// GUI
	setupGUI(cfg, m)

	// Clear out old indexes for other devices. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, folderCfg := range cfg.Folders() {
		if folderCfg.Invalid != "" {
			continue
		}
		for _, device := range folderCfg.DeviceIDs() {
			if device == myID {
				continue
			}
			m.Index(device, folderCfg.ID, nil)
		}
	}

	// The default port we announce, possibly modified by setupUPnP next.
	addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0])
	if err != nil {
		l.Fatalln("Bad listen address:", err)
	}
	externalPort = addr.Port

	// UPnP
	igd = nil
	if opts.UPnPEnabled {
		setupUPnP()
	}

	// Routine to connect out to configured devices
	discoverer = discovery(externalPort)
	go listenConnect(myID, m, tlsCfg)

	for _, folder := range cfg.Folders() {
		if folder.Invalid != "" {
			continue
		}

		// Routine to pull blocks from other devices to synchronize the local
		// folder. Does not run when we are in read only (publish only) mode.
		if folder.ReadOnly {
			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folder.ID)
			m.StartFolderRO(folder.ID)
		} else {
			l.Okf("Ready to synchronize %s (read-write)", folder.ID)
			m.StartFolderRW(folder.ID)
		}
	}

	if cpuProfile {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	for _, device := range cfg.Devices() {
		if len(device.Name) > 0 {
			l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
		}
	}

	if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		opts.URAccepted = 0
		opts.URUniqueID = ""
		cfg.SetOptions(opts)
	}
	if opts.URAccepted >= usageReportVersion {
		if opts.URUniqueID == "" {
			// Previously the ID was generated from the node ID. We now need
			// to generate a new one.
			opts.URUniqueID = randomString(8)
			cfg.SetOptions(opts)
			cfg.Save()
		}
		go usageReportingLoop(m)
		go func() {
			time.Sleep(10 * time.Minute)
			err := sendUsageReport(m)
			if err != nil {
				l.Infoln("Usage report:", err)
			}
		}()
	}

	if opts.RestartOnWakeup {
		go standbyMonitor()
	}

	if opts.AutoUpgradeIntervalH > 0 {
		if noUpgrade {
			l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
		} else if IsRelease {
			go autoUpgrade()
		} else {
			l.Infof("No automatic upgrades; %s is not a release version.", Version)
		}
	}

	events.Default.Log(events.StartupComplete, nil)
	go generateEvents()

	code := <-stop

	l.Okln("Exiting")
	os.Exit(code)
}
func TestAddDeviceWithoutRestart(t *testing.T) {
	log.Println("Cleaning...")
	err := removeAll("s1", "h1/index*", "s4", "h4/index*")
	if err != nil {
		t.Fatal(err)
	}

	log.Println("Generating files...")
	err = generateFiles("s1", 100, 18, "../LICENSE")
	if err != nil {
		t.Fatal(err)
	}

	p1 := startInstance(t, 1)
	defer checkedStop(t, p1)
	p4 := startInstance(t, 4)
	defer checkedStop(t, p4)

	if ok, err := p1.ConfigInSync(); err != nil || !ok {
		t.Fatal("p1 should be in sync;", ok, err)
	}
	if ok, err := p4.ConfigInSync(); err != nil || !ok {
		t.Fatal("p4 should be in sync;", ok, err)
	}

	// Add the p1 device to p4. Back up and restore p4's config first.

	log.Println("Adding p1 to p4...")

	os.Remove("h4/config.xml.orig")
	os.Rename("h4/config.xml", "h4/config.xml.orig")
	defer osutil.Rename("h4/config.xml.orig", "h4/config.xml")

	cfg, err := p4.GetConfig()
	if err != nil {
		t.Fatal(err)
	}
	devCfg := config.DeviceConfiguration{
		DeviceID:    p1.ID(),
		Name:        "s1",
		Addresses:   []string{"127.0.0.1:22001"},
		Compression: protocol.CompressMetadata,
	}
	cfg.Devices = append(cfg.Devices, devCfg)

	cfg.Folders[0].Devices = append(cfg.Folders[0].Devices, config.FolderDeviceConfiguration{DeviceID: p1.ID()})

	if err = p4.PostConfig(cfg); err != nil {
		t.Fatal(err)
	}

	// The change should not require a restart, so the config should be "in sync"
	if ok, err := p4.ConfigInSync(); err != nil || !ok {
		t.Fatal("p4 should be in sync;", ok, err)
	}

	// Wait for the devices to connect and sync.

	log.Println("Waiting for p1 and p4 to connect and sync...")

	rc.AwaitSync("default", p1, p4)
}
// Move away the named file to a version archive. If this function returns
// nil, the named file does not exist any more (has been archived).
func (v Staggered) Archive(filePath string) error {
	if debug {
		l.Debugln("Waiting for lock on ", v.versionsPath)
	}
	v.mutex.Lock()
	defer v.mutex.Unlock()

	fileInfo, err := os.Stat(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			if debug {
				l.Debugln("not archiving nonexistent file", filePath)
			}
			return nil
		} else {
			return err
		}
	}

	_, err = os.Stat(v.versionsPath)
	if err != nil {
		if os.IsNotExist(err) {
			if debug {
				l.Debugln("creating versions dir", v.versionsPath)
			}
			os.MkdirAll(v.versionsPath, 0755)
			osutil.HideFile(v.versionsPath)
		} else {
			return err
		}
	}

	if debug {
		l.Debugln("archiving", filePath)
	}

	file := filepath.Base(filePath)
	inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
	if err != nil {
		return err
	}

	dir := filepath.Join(v.versionsPath, inFolderPath)
	err = os.MkdirAll(dir, 0755)
	if err != nil && !os.IsExist(err) {
		return err
	}

	ver := file + "~" + fileInfo.ModTime().Format(TimeLayout)
	dst := filepath.Join(dir, ver)
	if debug {
		l.Debugln("moving to", dst)
	}
	err = osutil.Rename(filePath, dst)
	if err != nil {
		return err
	}

	versions, err := filepath.Glob(filepath.Join(dir, file+"~[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]"))
	if err != nil {
		l.Warnln("Versioner: error finding versions for", file, err)
		return nil
	}

	sort.Strings(versions)
	v.expire(versions)

	return nil
}
func (p *rwFolder) performFinish(state *sharedPullerState) error {
	// Set the correct permission bits on the new file
	if !p.ignorePermissions(state.file) {
		if err := os.Chmod(state.tempName, os.FileMode(state.file.Flags&0777)); err != nil {
			return err
		}
	}

	// Set the correct timestamp on the new file
	t := time.Unix(state.file.Modified, 0)
	if err := os.Chtimes(state.tempName, t, t); err != nil {
		// Try using virtual mtimes instead
		info, err := os.Stat(state.tempName)
		if err != nil {
			return err
		}
		p.virtualMtimeRepo.UpdateMtime(state.file.Name, info.ModTime(), t)
	}

	var err error
	if p.inConflict(state.version, state.file.Version) {
		// The new file has been changed in conflict with the existing one. We
		// should file it away as a conflict instead of just removing or
		// archiving. Also merge with the version vector we had, to indicate
		// we have resolved the conflict.
		state.file.Version = state.file.Version.Merge(state.version)
		err = osutil.InWritableDir(moveForConflict, state.realName)
	} else if p.versioner != nil {
		// If we should use versioning, let the versioner archive the old
		// file before we replace it. Archiving a non-existent file is not
		// an error.
		err = p.versioner.Archive(state.realName)
	} else {
		err = nil
	}
	if err != nil {
		return err
	}

	// If the target path is a symlink or a directory, we cannot copy
	// over it, hence remove it before proceeding.
	stat, err := osutil.Lstat(state.realName)
	if err == nil && (stat.IsDir() || stat.Mode()&os.ModeSymlink != 0) {
		osutil.InWritableDir(osutil.Remove, state.realName)
	}

	// Replace the original content with the new one
	if err = osutil.Rename(state.tempName, state.realName); err != nil {
		return err
	}

	// If it's a symlink, the target of the symlink is inside the file.
	if state.file.IsSymlink() {
		content, err := ioutil.ReadFile(state.realName)
		if err != nil {
			return err
		}

		// Remove the file, and replace it with a symlink.
		err = osutil.InWritableDir(func(path string) error {
			os.Remove(path)
			return symlinks.Create(path, string(content), state.file.Flags)
		}, state.realName)
		if err != nil {
			return err
		}
	}

	// Record the updated file in the index
	p.dbUpdates <- dbUpdateJob{state.file, dbUpdateHandleFile}

	return nil
}
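// osutil.InWritableDir is used above but not shown in this excerpt. A minimal
// sketch of the general idea (temporarily make the parent directory writable,
// run the operation, then restore the permission bits), assuming POSIX-style
// permissions; the real osutil implementation may differ, notably on Windows.
func inWritableDirSketch(fn func(string) error, path string) error {
	dir := filepath.Dir(path)
	info, err := os.Stat(dir)
	if err != nil {
		return err
	}
	if info.Mode().Perm()&0200 == 0 {
		// Parent is read-only: add the owner write bit for the duration.
		if err := os.Chmod(dir, info.Mode().Perm()|0200); err != nil {
			return err
		}
		defer os.Chmod(dir, info.Mode().Perm()) // restore the original bits
	}
	return fn(path)
}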
func (p *Puller) performFinish(state *sharedPullerState) {
	// Verify the file against expected hashes
	fd, err := os.Open(state.tempName)
	if err != nil {
		l.Warnln("puller: final:", err)
		return
	}
	err = scanner.Verify(fd, protocol.BlockSize, state.file.Blocks)
	fd.Close()
	if err != nil {
		l.Infoln("puller:", state.file.Name, err, "(file changed during pull?)")
		return
	}

	// Set the correct permission bits on the new file
	if !p.ignorePerms {
		err = os.Chmod(state.tempName, os.FileMode(state.file.Flags&0777))
		if err != nil {
			l.Warnln("puller: final:", err)
			return
		}
	}

	// Set the correct timestamp on the new file
	t := time.Unix(state.file.Modified, 0)
	err = os.Chtimes(state.tempName, t, t)
	if err != nil {
		if p.lenientMtimes {
			// We accept the failure with a warning here and allow the sync to
			// continue. We'll sync the new mtime back to the other devices later.
			// If they have the same problem & setting, we might never get in
			// sync.
			l.Infof("Puller (folder %q, file %q): final: %v (continuing anyway as requested)", p.folder, state.file.Name, err)
		} else {
			l.Warnln("puller: final:", err)
			return
		}
	}

	// If we should use versioning, let the versioner archive the old
	// file before we replace it. Archiving a non-existent file is not
	// an error.
	if p.versioner != nil {
		err = p.versioner.Archive(state.realName)
		if err != nil {
			l.Warnln("puller: final:", err)
			return
		}
	}

	// If the target path is a symlink or a directory, we cannot copy
	// over it, hence remove it before proceeding.
	stat, err := os.Lstat(state.realName)
	isLink, _ := symlinks.IsSymlink(state.realName)
	if isLink || (err == nil && stat.IsDir()) {
		osutil.InWritableDir(os.Remove, state.realName)
	}

	// Replace the original content with the new one
	err = osutil.Rename(state.tempName, state.realName)
	if err != nil {
		l.Warnln("puller: final:", err)
		return
	}

	// If it's a symlink, the target of the symlink is inside the file.
	if state.file.IsSymlink() {
		content, err := ioutil.ReadFile(state.realName)
		if err != nil {
			l.Warnln("puller: final: reading symlink:", err)
			return
		}

		// Remove the file, and replace it with a symlink.
		err = osutil.InWritableDir(func(path string) error {
			os.Remove(path)
			return symlinks.Create(path, string(content), state.file.Flags)
		}, state.realName)
		if err != nil {
			l.Warnln("puller: final: creating symlink:", err)
			return
		}
	}

	// Record the updated file in the index
	p.model.updateLocal(p.folder, state.file)
}
func TestFolderWithoutRestart(t *testing.T) {
	log.Println("Cleaning...")
	err := removeAll("testfolder-p1", "testfolder-p4", "h1/index*", "h4/index*")
	if err != nil {
		t.Fatal(err)
	}
	defer removeAll("testfolder-p1", "testfolder-p4")

	if err := generateFiles("testfolder-p1", 50, 18, "../LICENSE"); err != nil {
		t.Fatal(err)
	}

	p1 := startInstance(t, 1)
	defer checkedStop(t, p1)
	p4 := startInstance(t, 4)
	defer checkedStop(t, p4)

	if ok, err := p1.ConfigInSync(); err != nil || !ok {
		t.Fatal("p1 should be in sync;", ok, err)
	}
	if ok, err := p4.ConfigInSync(); err != nil || !ok {
		t.Fatal("p4 should be in sync;", ok, err)
	}

	// Add a new folder to p1, shared with p4. Back up and restore the config
	// first.

	log.Println("Adding testfolder to p1...")

	os.Remove("h1/config.xml.orig")
	os.Rename("h1/config.xml", "h1/config.xml.orig")
	defer osutil.Rename("h1/config.xml.orig", "h1/config.xml")

	cfg, err := p1.GetConfig()
	if err != nil {
		t.Fatal(err)
	}
	newFolder := config.FolderConfiguration{
		ID:              "testfolder",
		RawPath:         "testfolder-p1",
		RescanIntervalS: 86400,
		Copiers:         1,
		Hashers:         1,
		Pullers:         1,
		Devices:         []config.FolderDeviceConfiguration{{DeviceID: p4.ID()}},
	}
	newDevice := config.DeviceConfiguration{
		DeviceID:    p4.ID(),
		Name:        "p4",
		Addresses:   []string{"dynamic"},
		Compression: protocol.CompressMetadata,
	}
	cfg.Folders = append(cfg.Folders, newFolder)
	cfg.Devices = append(cfg.Devices, newDevice)
	if err = p1.PostConfig(cfg); err != nil {
		t.Fatal(err)
	}

	// Add a new folder to p4, shared with p1. Back up and restore the config
	// first.

	log.Println("Adding testfolder to p4...")

	os.Remove("h4/config.xml.orig")
	os.Rename("h4/config.xml", "h4/config.xml.orig")
	defer osutil.Rename("h4/config.xml.orig", "h4/config.xml")

	cfg, err = p4.GetConfig()
	if err != nil {
		t.Fatal(err)
	}
	newFolder.RawPath = "testfolder-p4"
	newFolder.Devices = []config.FolderDeviceConfiguration{{DeviceID: p1.ID()}}
	newDevice.DeviceID = p1.ID()
	newDevice.Name = "p1"
	newDevice.Addresses = []string{"127.0.0.1:22001"}
	cfg.Folders = append(cfg.Folders, newFolder)
	cfg.Devices = append(cfg.Devices, newDevice)
	if err = p4.PostConfig(cfg); err != nil {
		t.Fatal(err)
	}

	// The change should not require a restart, so the config should be "in sync"
	if ok, err := p1.ConfigInSync(); err != nil || !ok {
		t.Fatal("p1 should be in sync;", ok, err)
	}
	if ok, err := p4.ConfigInSync(); err != nil || !ok {
		t.Fatal("p4 should be in sync;", ok, err)
	}

	// The folder should start and scan - wait for the event that signals this
	// has happened.

	log.Println("Waiting for testfolder to scan...")
	since := 0
outer:
	for {
		events, err := p4.Events(since)
		if err != nil {
			t.Fatal(err)
		}
		for _, event := range events {
			if event.Type == "StateChanged" {
				data := event.Data.(map[string]interface{})
				folder := data["folder"].(string)
				from := data["from"].(string)
				to := data["to"].(string)
				if folder == "testfolder" && from == "scanning" && to == "idle" {
					break outer
				}
			}
			since = event.ID
		}
	}

	// It should sync to the other side successfully
	log.Println("Waiting for p1 and p4 to connect and sync...")
	rc.AwaitSync("testfolder", p1, p4)
}
func syncthingMain() {
	// Create a main service manager. We'll add things to this as we go along.
	// We want any logging it does to go through our log system.
	mainSvc := suture.New("main", suture.Spec{
		Log: func(line string) {
			if debugSuture {
				l.Debugln(line)
			}
		},
	})
	mainSvc.ServeBackground()

	// Set a log prefix similar to the ID we will have later on, or early log
	// lines look ugly.
	l.SetPrefix("[start] ")

	if auditEnabled {
		startAuditing(mainSvc)
	}

	if verbose {
		mainSvc.Add(newVerboseSvc())
	}

	// Event subscription for the API; must start early to catch the early events.
	apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	// Ensure that we have a certificate and key.
	cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
	if err != nil {
		cert, err = newCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName)
		if err != nil {
			l.Fatalln("load cert:", err)
		}
	}

	// We reinitialize the predictable RNG with our device ID, to get a
	// sequence that is always the same but unique to this syncthing instance.
	predictableRandom.Seed(seedFromBytes(cert.Certificate[0]))

	myID = protocol.NewDeviceID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Emit the Starting event, now that we know who we are.
	events.Default.Log(events.Starting, map[string]string{
		"home": baseDirs["config"],
		"myID": myID.String(),
	})

	// Prepare to be able to save configuration

	cfgFile := locations[locConfigFile]

	var myName string

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	if info, err := os.Stat(cfgFile); err == nil {
		if !info.Mode().IsRegular() {
			l.Fatalln("Config file is not a file?")
		}
		cfg, err = config.Load(cfgFile, myID)
		if err == nil {
			myCfg := cfg.Devices()[myID]
			if myCfg.Name == "" {
				myName, _ = os.Hostname()
			} else {
				myName = myCfg.Name
			}
		} else {
			l.Fatalln("Configuration:", err)
		}
	} else {
		l.Infoln("No config file; starting with empty defaults")
		myName, _ = os.Hostname()
		newCfg := defaultConfig(myName)
		cfg = config.Wrap(cfgFile, newCfg)
		cfg.Save()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if cfg.Raw().OriginalVersion != config.CurrentVersion {
		l.Infoln("Archiving a copy of old config file format")
		// Archive a copy
		osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion))
		// Save the new version
		cfg.Save()
	}

	if err := checkShortIDs(cfg); err != nil {
		l.Fatalln("Short device IDs are in conflict. Unlucky!\n  Regenerate the device ID of one of the following:\n  ", err)
	}

	if len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.
	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{bepProtocolName},
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		},
	}

	// If the read or write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.

	opts := cfg.Options()

	if !opts.SymlinksEnabled {
		symlinks.Supported = false
	}

	protocol.PingTimeout = time.Duration(opts.PingTimeoutS) * time.Second
	protocol.PingIdleTime = time.Duration(opts.PingIdleTimeS) * time.Second

	if opts.MaxSendKbps > 0 {
		writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps))
	}
	if opts.MaxRecvKbps > 0 {
		readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
	}
	if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan {
		lans, _ = osutil.GetLans()
		networks := make([]string, 0, len(lans))
		for _, lan := range lans {
			networks = append(networks, lan.String())
		}
		l.Infoln("Local networks:", strings.Join(networks, ", "))
	}

	dbFile := locations[locDatabase]
	ldb, err := leveldb.OpenFile(dbFile, dbOpts())
	if err != nil && errors.IsCorrupted(err) {
		ldb, err = leveldb.RecoverFile(dbFile, dbOpts())
	}
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

	// Remove database entries for folders that no longer exist in the config
	folders := cfg.Folders()
	for _, folder := range db.ListFolders(ldb) {
		if _, ok := folders[folder]; !ok {
			l.Infof("Cleaning data for dropped folder %q", folder)
			db.DropFolder(ldb, folder)
		}
	}

	m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
	cfg.Subscribe(m)

	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
		if err == nil {
			m.StartDeadlockDetector(time.Duration(it) * time.Second)
		}
	} else if !IsRelease || IsBeta {
		m.StartDeadlockDetector(20 * 60 * time.Second)
	}

	// Clear out old indexes for other devices. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, folderCfg := range cfg.Folders() {
		m.AddFolder(folderCfg)
		for _, device := range folderCfg.DeviceIDs() {
			if device == myID {
				continue
			}
			m.Index(device, folderCfg.ID, nil, 0, nil)
		}
		// Routine to pull blocks from other devices to synchronize the local
		// folder. Does not run when we are in read only (publish only) mode.
		if folderCfg.ReadOnly {
			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folderCfg.ID)
			m.StartFolderRO(folderCfg.ID)
		} else {
			l.Okf("Ready to synchronize %s (read-write)", folderCfg.ID)
			m.StartFolderRW(folderCfg.ID)
		}
	}

	mainSvc.Add(m)

	// GUI
	setupGUI(mainSvc, cfg, m, apiSub)

	// The default port we announce, possibly modified by setupUPnP next.
	addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0])
	if err != nil {
		l.Fatalln("Bad listen address:", err)
	}

	// Start discovery
	localPort := addr.Port
	discoverer = discovery(localPort)

	// Start UPnP. The UPnP service will restart global discovery if the
	// external port changes.
	if opts.UPnPEnabled {
		upnpSvc := newUPnPSvc(cfg, localPort)
		mainSvc.Add(upnpSvc)
	}

	connectionSvc := newConnectionSvc(cfg, myID, m, tlsCfg)
	cfg.Subscribe(connectionSvc)
	mainSvc.Add(connectionSvc)

	if cpuProfile {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	for _, device := range cfg.Devices() {
		if len(device.Name) > 0 {
			l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
		}
	}

	if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		opts.URAccepted = 0
		opts.URUniqueID = ""
		cfg.SetOptions(opts)
	}
	if opts.URAccepted >= usageReportVersion {
		if opts.URUniqueID == "" {
			// Previously the ID was generated from the node ID. We now need
			// to generate a new one.
			opts.URUniqueID = randomString(8)
			cfg.SetOptions(opts)
			cfg.Save()
		}
	}

	// The usageReportingManager registers itself to listen to configuration
	// changes, and there's nothing more we need to tell it from the outside.
	// Hence we don't keep the returned pointer.
	newUsageReportingManager(m, cfg)

	if opts.RestartOnWakeup {
		go standbyMonitor()
	}

	if opts.AutoUpgradeIntervalH > 0 {
		if noUpgrade {
			l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.")
		} else if IsRelease {
			go autoUpgrade()
		} else {
			l.Infof("No automatic upgrades; %s is not a release version.", Version)
		}
	}

	events.Default.Log(events.StartupComplete, map[string]string{
		"myID": myID.String(),
	})
	go generatePingEvents()

	cleanConfigDirectory()

	code := <-stop

	mainSvc.Stop()
	l.Okln("Exiting")
	os.Exit(code)
}
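// A minimal sketch of how the writeRateLimit/readRateLimit buckets created with
// ratelimit.NewBucketWithRate above might be applied to a connection's writes.
// It assumes the juju/ratelimit Bucket API (Wait blocks until enough tokens are
// available); the wrapper type is illustrative, not the actual connection code.
type limitedWriterSketch struct {
	w      io.Writer
	bucket *ratelimit.Bucket
}

func (lw limitedWriterSketch) Write(p []byte) (int, error) {
	if lw.bucket != nil {
		lw.bucket.Wait(int64(len(p))) // one token per byte, matching the Kbps*1000 rates above
	}
	return lw.w.Write(p)
}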
func TestManyPeers(t *testing.T) {
	log.Println("Cleaning...")
	err := removeAll("s1", "s2", "h1/index", "h2/index")
	if err != nil {
		t.Fatal(err)
	}

	log.Println("Generating files...")
	err = generateFiles("s1", 200, 20, "../LICENSE")
	if err != nil {
		t.Fatal(err)
	}

	receiver := syncthingProcess{ // id2
		instance: "2",
		argv:     []string{"-home", "h2"},
		port:     8082,
		apiKey:   apiKey,
	}
	err = receiver.start()
	if err != nil {
		t.Fatal(err)
	}
	defer receiver.stop()

	resp, err := receiver.get("/rest/config")
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != 200 {
		t.Fatalf("Code %d != 200", resp.StatusCode)
	}

	var cfg config.Configuration
	json.NewDecoder(resp.Body).Decode(&cfg)
	resp.Body.Close()

	for len(cfg.Devices) < 100 {
		bs := make([]byte, 16)
		ReadRand(bs)
		id := protocol.NewDeviceID(bs)
		cfg.Devices = append(cfg.Devices, config.DeviceConfiguration{DeviceID: id})
		cfg.Folders[0].Devices = append(cfg.Folders[0].Devices, config.FolderDeviceConfiguration{DeviceID: id})
	}

	osutil.Rename("h2/config.xml", "h2/config.xml.orig")
	defer osutil.Rename("h2/config.xml.orig", "h2/config.xml")

	var buf bytes.Buffer
	json.NewEncoder(&buf).Encode(cfg)
	resp, err = receiver.post("/rest/config", &buf)
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != 200 {
		t.Fatalf("Code %d != 200", resp.StatusCode)
	}
	resp.Body.Close()

	log.Println("Starting up...")
	sender := syncthingProcess{ // id1
		instance: "1",
		argv:     []string{"-home", "h1"},
		port:     8081,
		apiKey:   apiKey,
	}
	err = sender.start()
	if err != nil {
		t.Fatal(err)
	}
	defer sender.stop()

	for {
		comp, err := sender.peerCompletion()
		if err != nil {
			if isTimeout(err) {
				time.Sleep(250 * time.Millisecond)
				continue
			}
			t.Fatal(err)
		}

		if comp[id2] == 100 {
			// Fully synced; break out so the directory comparison below runs.
			break
		}

		time.Sleep(2 * time.Second)
	}

	log.Println("Comparing directories...")
	err = compareDirectories("s1", "s2")
	if err != nil {
		t.Fatal(err)
	}
}
// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (v Staggered) Archive(filePath string) error {
	if debug {
		l.Debugln("Waiting for lock on ", v.versionsPath)
	}
	v.mutex.Lock()
	defer v.mutex.Unlock()

	_, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		if debug {
			l.Debugln("not archiving nonexistent file", filePath)
		}
		return nil
	} else if err != nil {
		return err
	}

	if _, err := os.Stat(v.versionsPath); err != nil {
		if os.IsNotExist(err) {
			if debug {
				l.Debugln("creating versions dir", v.versionsPath)
			}
			osutil.MkdirAll(v.versionsPath, 0755)
			osutil.HideFile(v.versionsPath)
		} else {
			return err
		}
	}

	if debug {
		l.Debugln("archiving", filePath)
	}

	file := filepath.Base(filePath)
	inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
	if err != nil {
		return err
	}

	dir := filepath.Join(v.versionsPath, inFolderPath)
	err = osutil.MkdirAll(dir, 0755)
	if err != nil && !os.IsExist(err) {
		return err
	}

	ver := taggedFilename(file, time.Now().Format(TimeFormat))
	dst := filepath.Join(dir, ver)
	if debug {
		l.Debugln("moving to", dst)
	}
	err = osutil.Rename(filePath, dst)
	if err != nil {
		return err
	}

	// Glob according to the new file~timestamp.ext pattern.
	newVersions, err := osutil.Glob(filepath.Join(dir, taggedFilename(file, TimeGlob)))
	if err != nil {
		l.Warnln("globbing:", err)
		return nil
	}

	// Also according to the old file.ext~timestamp pattern.
	oldVersions, err := osutil.Glob(filepath.Join(dir, file+"~"+TimeGlob))
	if err != nil {
		l.Warnln("globbing:", err)
		return nil
	}

	// Use all the found filenames.
	versions := append(oldVersions, newVersions...)
	v.expire(uniqueSortedStrings(versions))

	return nil
}
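// uniqueSortedStrings is called above but not shown in this excerpt. A minimal
// sketch of what such a helper could do (deduplicate, then sort); the real
// implementation may differ.
func uniqueSortedStringsSketch(ss []string) []string {
	seen := make(map[string]struct{}, len(ss))
	out := make([]string, 0, len(ss))
	for _, s := range ss {
		if _, ok := seen[s]; !ok {
			seen[s] = struct{}{}
			out = append(out, s)
		}
	}
	sort.Strings(out)
	return out
}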
// Move away the named file to a version archive. If this function returns
// nil, the named file does not exist any more (has been archived).
func (v Simple) Archive(filePath string) error {
	fileInfo, err := os.Stat(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			if debug {
				l.Debugln("not archiving nonexistent file", filePath)
			}
			return nil
		} else {
			return err
		}
	}

	versionsDir := filepath.Join(v.folderPath, ".stversions")
	_, err = os.Stat(versionsDir)
	if err != nil {
		if os.IsNotExist(err) {
			if debug {
				l.Debugln("creating versions dir", versionsDir)
			}
			os.MkdirAll(versionsDir, 0755)
			osutil.HideFile(versionsDir)
		} else {
			return err
		}
	}

	if debug {
		l.Debugln("archiving", filePath)
	}

	file := filepath.Base(filePath)
	inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
	if err != nil {
		return err
	}

	dir := filepath.Join(versionsDir, inFolderPath)
	err = os.MkdirAll(dir, 0755)
	if err != nil && !os.IsExist(err) {
		return err
	}

	ver := file + "~" + fileInfo.ModTime().Format("20060102-150405")
	dst := filepath.Join(dir, ver)
	if debug {
		l.Debugln("moving to", dst)
	}
	err = osutil.Rename(filePath, dst)
	if err != nil {
		return err
	}

	versions, err := filepath.Glob(filepath.Join(dir, file+"~[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]"))
	if err != nil {
		l.Warnln("globbing:", err)
		return nil
	}

	if len(versions) > v.keep {
		sort.Strings(versions)
		for _, toRemove := range versions[:len(versions)-v.keep] {
			if debug {
				l.Debugln("cleaning out", toRemove)
			}
			err = os.Remove(toRemove)
			if err != nil {
				l.Warnln("removing old version:", err)
			}
		}
	}

	return nil
}
func syncthingMain() {
	var err error

	if len(os.Getenv("GOGC")) == 0 {
		debug.SetGCPercent(25)
	}

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	events.Default.Log(events.Starting, map[string]string{"home": confDir})

	// Ensure that we have a certificate and key.
	cert, err = loadCert(confDir, "")
	if err != nil {
		newCertificate(confDir, "")
		cert, err = loadCert(confDir, "")
		if err != nil {
			l.Fatalln("load cert:", err)
		}
	}

	myID = protocol.NewDeviceID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Prepare to be able to save configuration

	cfgFile := filepath.Join(confDir, "config.xml")

	var myName string

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	cfg, err = config.Load(cfgFile, myID)
	if err == nil {
		myCfg := cfg.Devices()[myID]
		if myCfg.Name == "" {
			myName, _ = os.Hostname()
		} else {
			myName = myCfg.Name
		}
	} else {
		l.Infoln("No config file; starting with empty defaults")
		myName, _ = os.Hostname()
		defaultFolder, err := osutil.ExpandTilde("~/Sync")
		if err != nil {
			l.Fatalln("home:", err)
		}

		newCfg := config.New(myID)
		newCfg.Folders = []config.FolderConfiguration{
			{
				ID:              "default",
				Path:            defaultFolder,
				RescanIntervalS: 60,
				Devices:         []config.FolderDeviceConfiguration{{DeviceID: myID}},
			},
		}
		newCfg.Devices = []config.DeviceConfiguration{
			{
				DeviceID:  myID,
				Addresses: []string{"dynamic"},
				Name:      myName,
			},
		}

		port, err := getFreePort("127.0.0.1", 8080)
		if err != nil {
			l.Fatalln("get free port (GUI):", err)
		}
		newCfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port)

		port, err = getFreePort("0.0.0.0", 22000)
		if err != nil {
			l.Fatalln("get free port (BEP):", err)
		}
		newCfg.Options.ListenAddress = []string{fmt.Sprintf("0.0.0.0:%d", port)}

		cfg = config.Wrap(cfgFile, newCfg)
		cfg.Save()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if cfg.Raw().OriginalVersion != config.CurrentVersion {
		l.Infoln("Archiving a copy of old config file format")
		// Archive a copy
		osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion))
		// Save the new version
		cfg.Save()
	}

	if len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.
	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{"bep/1.0"},
		ServerName:             myID.String(),
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
	}

	// If the read or write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.
	opts := cfg.Options()
	if opts.MaxSendKbps > 0 {
		writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps))
	}
	if opts.MaxRecvKbps > 0 {
		readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps))
	}

	// If this is the first time the user runs v0.9, archive the old indexes and config.
	archiveLegacyConfig()

	db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{CachedOpenFiles: 100})
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

	// Remove database entries for folders that no longer exist in the config
	folders := cfg.Folders()
	for _, folder := range files.ListFolders(db) {
		if _, ok := folders[folder]; !ok {
			l.Infof("Cleaning data for dropped folder %q", folder)
			files.DropFolder(db, folder)
		}
	}

	m := model.NewModel(cfg, myName, "syncthing", Version, db)

nextFolder:
	for id, folder := range cfg.Folders() {
		if folder.Invalid != "" {
			continue
		}
		folder.Path, err = osutil.ExpandTilde(folder.Path)
		if err != nil {
			l.Fatalln("home:", err)
		}
		m.AddFolder(folder)

		fi, err := os.Stat(folder.Path)
		if m.CurrentLocalVersion(id) > 0 {
			// Safety check. If the cached index contains files but the
			// folder doesn't exist, we have a problem. We would assume
			// that all files have been deleted which might not be the case,
			// so mark it as invalid instead.
			if err != nil || !fi.IsDir() {
				l.Warnf("Stopping folder %q - path does not exist, but has files in index", folder.ID)
				cfg.InvalidateFolder(id, "folder path missing")
				continue nextFolder
			}
		} else if os.IsNotExist(err) {
			// If we don't have any files in the index, and the directory
			// doesn't exist, try creating it.
			err = os.MkdirAll(folder.Path, 0700)
		}

		if err != nil {
			// If there was another error or we could not create the
			// path, the folder is invalid.
			l.Warnf("Stopping folder %q - %v", folder.ID, err)
			cfg.InvalidateFolder(id, err.Error())
			continue nextFolder
		}
	}

	// GUI
	guiCfg := overrideGUIConfig(cfg.GUI(), guiAddress, guiAuthentication, guiAPIKey)

	if guiCfg.Enabled && guiCfg.Address != "" {
		addr, err := net.ResolveTCPAddr("tcp", guiCfg.Address)
		if err != nil {
			l.Fatalf("Cannot start GUI on %q: %v", guiCfg.Address, err)
		} else {
			var hostOpen, hostShow string
			switch {
			case addr.IP == nil:
				hostOpen = "localhost"
				hostShow = "0.0.0.0"
			case addr.IP.IsUnspecified():
				hostOpen = "localhost"
				hostShow = addr.IP.String()
			default:
				hostOpen = addr.IP.String()
				hostShow = hostOpen
			}

			var proto = "http"
			if guiCfg.UseTLS {
				proto = "https"
			}

			urlShow := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port)))
			l.Infoln("Starting web GUI on", urlShow)
			err := startGUI(guiCfg, guiAssets, m)
			if err != nil {
				l.Fatalln("Cannot start GUI:", err)
			}
			if opts.StartBrowser && !noBrowser && !stRestarting {
				urlOpen := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostOpen, strconv.Itoa(addr.Port)))
				openURL(urlOpen)
			}
		}
	}

	// Clear out old indexes for other devices. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, folderCfg := range cfg.Folders() {
		if folderCfg.Invalid != "" {
			continue
		}
		for _, device := range folderCfg.DeviceIDs() {
			if device == myID {
				continue
			}
			m.Index(device, folderCfg.ID, nil)
		}
	}

	// Remove all .idx* files that don't belong to an active folder.
	validIndexes := make(map[string]bool)
	for _, folder := range cfg.Folders() {
		dir, err := osutil.ExpandTilde(folder.Path)
		if err != nil {
			l.Fatalln("home:", err)
		}
		id := fmt.Sprintf("%x", sha1.Sum([]byte(dir)))
		validIndexes[id] = true
	}

	allIndexes, err := filepath.Glob(filepath.Join(confDir, "*.idx*"))
	if err == nil {
		for _, idx := range allIndexes {
			bn := filepath.Base(idx)
			fs := strings.Split(bn, ".")
			if len(fs) > 1 {
				if _, ok := validIndexes[fs[0]]; !ok {
					l.Infoln("Removing old index", bn)
					os.Remove(idx)
				}
			}
		}
	}

	// The default port we announce, possibly modified by setupUPnP next.
	addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0])
	if err != nil {
		l.Fatalln("Bad listen address:", err)
	}
	externalPort = addr.Port

	// UPnP
	if opts.UPnPEnabled {
		setupUPnP()
	}

	// Routine to connect out to configured devices
	discoverer = discovery(externalPort)
	go listenConnect(myID, m, tlsCfg)

	for _, folder := range cfg.Folders() {
		if folder.Invalid != "" {
			continue
		}

		// Routine to pull blocks from other devices to synchronize the local
		// folder. Does not run when we are in read only (publish only) mode.
		if folder.ReadOnly {
			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folder.ID)
			m.StartFolderRO(folder.ID)
		} else {
			l.Okf("Ready to synchronize %s (read-write)", folder.ID)
			m.StartFolderRW(folder.ID)
		}
	}

	if cpuProfile {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	for _, device := range cfg.Devices() {
		if len(device.Name) > 0 {
			l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses)
		}
	}

	if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		opts.URAccepted = 0
		cfg.SetOptions(opts)
	}
	if opts.URAccepted >= usageReportVersion {
		go usageReportingLoop(m)
		go func() {
			time.Sleep(10 * time.Minute)
			err := sendUsageReport(m)
			if err != nil {
				l.Infoln("Usage report:", err)
			}
		}()
	}

	if opts.RestartOnWakeup {
		go standbyMonitor()
	}

	if opts.AutoUpgradeIntervalH > 0 {
		go autoUpgrade()
	}

	events.Default.Log(events.StartupComplete, nil)
	go generateEvents()

	code := <-stop

	l.Okln("Exiting")
	os.Exit(code)
}