func init() { device1, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR") device2, _ = protocol.DeviceIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY") defaultFolderConfig = config.FolderConfiguration{ ID: "default", RawPath: "testdata", Devices: []config.FolderDeviceConfiguration{ { DeviceID: device1, }, }, } _defaultConfig := config.Configuration{ Folders: []config.FolderConfiguration{defaultFolderConfig}, Devices: []config.DeviceConfiguration{ { DeviceID: device1, }, }, Options: config.OptionsConfiguration{ // Don't remove temporaries directly on startup KeepTemporariesH: 1, }, } defaultConfig = config.Wrap("/tmp/test", _defaultConfig) }
func TestDeviceRename(t *testing.T) { ccm := protocol.ClusterConfigMessage{ ClientName: "syncthing", ClientVersion: "v0.9.4", } defer os.Remove("tmpconfig.xml") rawCfg := config.New(device1) rawCfg.Devices = []config.DeviceConfiguration{ { DeviceID: device1, }, } cfg := config.Wrap("tmpconfig.xml", rawCfg) db, _ := leveldb.Open(storage.NewMemStorage(), nil) m := NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", db) m.ServeBackground() if cfg.Devices()[device1].Name != "" { t.Errorf("Device already has a name") } m.ClusterConfig(device1, ccm) if cfg.Devices()[device1].Name != "" { t.Errorf("Device already has a name") } ccm.Options = []protocol.Option{ { Key: "name", Value: "tester", }, } m.ClusterConfig(device1, ccm) if cfg.Devices()[device1].Name != "tester" { t.Errorf("Device did not get a name") } ccm.Options[0].Value = "tester2" m.ClusterConfig(device1, ccm) if cfg.Devices()[device1].Name != "tester" { t.Errorf("Device name got overwritten") } cfgw, err := config.Load("tmpconfig.xml", protocol.LocalDeviceID) if err != nil { t.Error(err) return } if cfgw.Devices()[device1].Name != "tester" { t.Errorf("Device name not saved in config") } }
func TestHandleFileWithTemp(t *testing.T) { // After diff between required and existing we should: // Copy: 2, 5, 8 // Pull: 1, 3, 4, 6, 7 // After dropping out blocks already on the temp file we should: // Copy: 5, 8 // Pull: 1, 6 // Create existing file existingFile := protocol.FileInfo{ Name: "file", Flags: 0, Modified: 0, Blocks: []protocol.BlockInfo{ blocks[0], blocks[2], blocks[0], blocks[0], blocks[5], blocks[0], blocks[0], blocks[8], }, } // Create target file requiredFile := existingFile requiredFile.Blocks = blocks[1:] db, _ := leveldb.Open(storage.NewMemStorage(), nil) m := NewModel(config.Wrap("/tmp/test", config.Configuration{}), "device", "syncthing", "dev", db) m.AddFolder(config.FolderConfiguration{ID: "default", Path: "testdata"}) // Update index m.updateLocal("default", existingFile) p := Puller{ folder: "default", dir: "testdata", model: m, } copyChan := make(chan copyBlocksState, 1) p.handleFile(requiredFile, copyChan, nil) // Receive the results toCopy := <-copyChan if len(toCopy.blocks) != 4 { t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks)) } for i, eq := range []int{1, 5, 6, 8} { if string(toCopy.blocks[i].Hash) != string(blocks[eq].Hash) { t.Errorf("Block mismatch: %s != %s", toCopy.blocks[i].String(), blocks[eq].String()) } } }
// Test that updating a file removes it's old blocks from the blockmap func TestCopierCleanup(t *testing.T) { iterFn := func(folder, file string, index uint32) bool { return true } fcfg := config.FolderConfiguration{ID: "default", Path: "testdata"} cfg := config.Configuration{Folders: []config.FolderConfiguration{fcfg}} db, _ := leveldb.Open(storage.NewMemStorage(), nil) m := NewModel(config.Wrap("/tmp/test", cfg), "device", "syncthing", "dev", db) m.AddFolder(fcfg) // Create a file file := protocol.FileInfo{ Name: "test", Flags: 0, Modified: 0, Blocks: []protocol.BlockInfo{blocks[0]}, } // Add file to index m.updateLocal("default", file) if !m.finder.Iterate(blocks[0].Hash, iterFn) { t.Error("Expected block not found") } file.Blocks = []protocol.BlockInfo{blocks[1]} file.Version++ // Update index (removing old blocks) m.updateLocal("default", file) if m.finder.Iterate(blocks[0].Hash, iterFn) { t.Error("Unexpected block found") } if !m.finder.Iterate(blocks[1].Hash, iterFn) { t.Error("Expected block not found") } file.Blocks = []protocol.BlockInfo{blocks[0]} file.Version++ // Update index (removing old blocks) m.updateLocal("default", file) if !m.finder.Iterate(blocks[0].Hash, iterFn) { t.Error("Unexpected block found") } if m.finder.Iterate(blocks[1].Hash, iterFn) { t.Error("Expected block not found") } }
func TestProgressEmitter(t *testing.T) { l.Debugln("test progress emitter") w := events.Default.Subscribe(events.DownloadProgress) c := config.Wrap("/tmp/test", config.Configuration{}) c.SetOptions(config.OptionsConfiguration{ ProgressUpdateIntervalS: 0, }) p := NewProgressEmitter(c) go p.Serve() expectTimeout(w, t) s := sharedPullerState{} p.Register(&s) expectEvent(w, t, 1) expectTimeout(w, t) s.copyDone() expectEvent(w, t, 1) expectTimeout(w, t) s.copiedFromOrigin() expectEvent(w, t, 1) expectTimeout(w, t) s.pullStarted() expectEvent(w, t, 1) expectTimeout(w, t) s.pullDone() expectEvent(w, t, 1) expectTimeout(w, t) p.Deregister(&s) expectEvent(w, t, 0) expectTimeout(w, t) }
func setup() (*leveldb.DB, *BlockFinder) { // Setup db, err := leveldb.Open(storage.NewMemStorage(), nil) if err != nil { panic(err) } wrapper := config.Wrap("", config.Configuration{}) wrapper.SetFolder(config.FolderConfiguration{ ID: "folder1", }) wrapper.SetFolder(config.FolderConfiguration{ ID: "folder2", }) return db, NewBlockFinder(db, wrapper) }
// TestDeviceRename verifies that a device picks up its name from an incoming
// cluster config exactly once: no name option leaves the name empty, the
// first non-empty name sticks, and later names do not overwrite it.
//
// NOTE(review): the checks read cfg.Devices[0] (the raw Configuration) after
// mutating through the wrapper returned by config.Wrap; this presumably works
// because the wrapper shares the Devices slice's backing array — confirm.
func TestDeviceRename(t *testing.T) {
	ccm := protocol.ClusterConfigMessage{
		ClientName:    "syncthing",
		ClientVersion: "v0.9.4",
	}

	cfg := config.New(device1)
	cfg.Devices = []config.DeviceConfiguration{
		{
			DeviceID: device1,
		},
	}

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(config.Wrap("/tmp/test", cfg), "device", "syncthing", "dev", db)

	if cfg.Devices[0].Name != "" {
		t.Errorf("Device already has a name")
	}

	// A cluster config without a name option must not set a name.
	m.ClusterConfig(device1, ccm)
	if cfg.Devices[0].Name != "" {
		t.Errorf("Device already has a name")
	}

	// The first name option sets the (previously empty) name.
	ccm.Options = []protocol.Option{
		{
			Key:   "name",
			Value: "tester",
		},
	}
	m.ClusterConfig(device1, ccm)
	if cfg.Devices[0].Name != "tester" {
		t.Errorf("Device did not get a name")
	}

	// An already-set name must never be overwritten.
	ccm.Options[0].Value = "tester2"
	m.ClusterConfig(device1, ccm)
	if cfg.Devices[0].Name != "tester" {
		t.Errorf("Device name got overwritten")
	}
}
func TestRequest(t *testing.T) { db, _ := leveldb.Open(storage.NewMemStorage(), nil) m := NewModel(config.Wrap("/tmp/test", config.Configuration{}), "device", "syncthing", "dev", db) m.AddFolder(config.FolderConfiguration{ID: "default", Path: "testdata"}) m.ScanFolder("default") bs, err := m.Request(device1, "default", "foo", 0, 6) if err != nil { t.Fatal(err) } if bytes.Compare(bs, []byte("foobar")) != 0 { t.Errorf("Incorrect data from request: %q", string(bs)) } bs, err = m.Request(device1, "default", "../walk.go", 0, 6) if err == nil { t.Error("Unexpected nil error on insecure file read") } if bs != nil { t.Errorf("Unexpected non nil data on insecure file read: %q", string(bs)) } }
func TestCopierFinder(t *testing.T) { // After diff between required and existing we should: // Copy: 1, 2, 3, 4, 6, 7, 8 // Since there is no existing file, nor a temp file // After dropping out blocks found locally: // Pull: 1, 5, 6, 8 tempFile := filepath.Join("testdata", defTempNamer.TempName("file2")) err := os.Remove(tempFile) if err != nil && !os.IsNotExist(err) { t.Error(err) } // Create existing file existingFile := protocol.FileInfo{ Name: defTempNamer.TempName("file"), Flags: 0, Modified: 0, Blocks: []protocol.BlockInfo{ blocks[0], blocks[2], blocks[3], blocks[4], blocks[0], blocks[0], blocks[7], blocks[0], }, } // Create target file requiredFile := existingFile requiredFile.Blocks = blocks[1:] requiredFile.Name = "file2" fcfg := config.FolderConfiguration{ID: "default", Path: "testdata"} cfg := config.Configuration{Folders: []config.FolderConfiguration{fcfg}} db, _ := leveldb.Open(storage.NewMemStorage(), nil) m := NewModel(config.Wrap("/tmp/test", cfg), "device", "syncthing", "dev", db) m.AddFolder(fcfg) // Update index m.updateLocal("default", existingFile) iterFn := func(folder, file string, index uint32) bool { return true } // Verify that the blocks we say exist on file, really exist in the db. 
for _, idx := range []int{2, 3, 4, 7} { if m.finder.Iterate(blocks[idx].Hash, iterFn) == false { t.Error("Didn't find block") } } p := Puller{ folder: "default", dir: "testdata", model: m, } copyChan := make(chan copyBlocksState) pullChan := make(chan pullBlockState, 4) finisherChan := make(chan *sharedPullerState, 1) // Run a single fetcher routine go p.copierRoutine(copyChan, pullChan, finisherChan, false) p.handleFile(requiredFile, copyChan, finisherChan) pulls := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan} finish := <-finisherChan select { case <-pullChan: t.Fatal("Finisher channel has data to be read") case <-finisherChan: t.Fatal("Finisher channel has data to be read") default: } // Verify that the right blocks went into the pull list for i, eq := range []int{1, 5, 6, 8} { if string(pulls[i].block.Hash) != string(blocks[eq].Hash) { t.Errorf("Block %d mismatch: %s != %s", eq, pulls[i].block.String(), blocks[eq].String()) } if string(finish.file.Blocks[eq-1].Hash) != string(blocks[eq].Hash) { t.Errorf("Block %d mismatch: %s != %s", eq, finish.file.Blocks[eq-1].String(), blocks[eq].String()) } } // Verify that the fetched blocks have actually been written to the temp file blks, err := scanner.HashFile(tempFile, protocol.BlockSize) if err != nil { t.Log(err) } for _, eq := range []int{2, 3, 4, 7} { if string(blks[eq-1].Hash) != string(blocks[eq].Hash) { t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String()) } } finish.fd.Close() os.Remove(tempFile) }
func main() { if runtime.GOOS == "windows" { // On Windows, we use a log file by default. Setting the -logfile flag // to "-" disables this behavior. flag.StringVar(&logFile, "logfile", "", "Log file name (use \"-\" for stdout)") // We also add an option to hide the console window flag.BoolVar(&noConsole, "no-console", false, "Hide console window") } flag.StringVar(&generateDir, "generate", "", "Generate key and config in specified dir, then exit") flag.StringVar(&guiAddress, "gui-address", guiAddress, "Override GUI address") flag.StringVar(&guiAuthentication, "gui-authentication", guiAuthentication, "Override GUI authentication; username:password") flag.StringVar(&guiAPIKey, "gui-apikey", guiAPIKey, "Override GUI API key") flag.StringVar(&confDir, "home", "", "Set configuration directory") flag.IntVar(&logFlags, "logflags", logFlags, "Select information in log line prefix") flag.BoolVar(&noBrowser, "no-browser", false, "Do not start browser") flag.BoolVar(&noRestart, "no-restart", noRestart, "Do not restart; just exit") flag.BoolVar(&reset, "reset", false, "Reset the database") flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade") flag.BoolVar(&doUpgradeCheck, "upgrade-check", false, "Check for available upgrade") flag.BoolVar(&showVersion, "version", false, "Show version") flag.StringVar(&upgradeTo, "upgrade-to", upgradeTo, "Force upgrade directly from specified URL") flag.BoolVar(&auditEnabled, "audit", false, "Write events to audit file") flag.BoolVar(&verbose, "verbose", false, "Print verbose log output") flag.Usage = usageFor(flag.CommandLine, usage, fmt.Sprintf(extraUsage, baseDirs["config"])) flag.Parse() if noConsole { osutil.HideConsole() } if confDir != "" { // Not set as default above because the string can be really long. 
baseDirs["config"] = confDir } if err := expandLocations(); err != nil { l.Fatalln(err) } if guiAssets == "" { guiAssets = locations[locGUIAssets] } if runtime.GOOS == "windows" { if logFile == "" { // Use the default log file location logFile = locations[locLogFile] } else if logFile == "-" { // Don't use a logFile logFile = "" } } if showVersion { fmt.Println(LongVersion) return } l.SetFlags(logFlags) if generateDir != "" { dir, err := osutil.ExpandTilde(generateDir) if err != nil { l.Fatalln("generate:", err) } info, err := os.Stat(dir) if err == nil && !info.IsDir() { l.Fatalln(dir, "is not a directory") } if err != nil && os.IsNotExist(err) { err = osutil.MkdirAll(dir, 0700) if err != nil { l.Fatalln("generate:", err) } } certFile, keyFile := filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem") cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err == nil { l.Warnln("Key exists; will not overwrite.") l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0])) } else { cert, err = newCertificate(certFile, keyFile, tlsDefaultCommonName) myID = protocol.NewDeviceID(cert.Certificate[0]) if err != nil { l.Fatalln("load cert:", err) } if err == nil { l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0])) } } cfgFile := filepath.Join(dir, "config.xml") if _, err := os.Stat(cfgFile); err == nil { l.Warnln("Config exists; will not overwrite.") return } var myName, _ = os.Hostname() var newCfg = defaultConfig(myName) var cfg = config.Wrap(cfgFile, newCfg) err = cfg.Save() if err != nil { l.Warnln("Failed to save config", err) } return } if info, err := os.Stat(baseDirs["config"]); err == nil && !info.IsDir() { l.Fatalln("Config directory", baseDirs["config"], "is not a directory") } // Ensure that our home directory exists. 
ensureDir(baseDirs["config"], 0700) if upgradeTo != "" { err := upgrade.ToURL(upgradeTo) if err != nil { l.Fatalln("Upgrade:", err) // exits 1 } l.Okln("Upgraded from", upgradeTo) return } if doUpgrade || doUpgradeCheck { rel, err := upgrade.LatestRelease(Version) if err != nil { l.Fatalln("Upgrade:", err) // exits 1 } if upgrade.CompareVersions(rel.Tag, Version) <= 0 { l.Infof("No upgrade available (current %q >= latest %q).", Version, rel.Tag) os.Exit(exitNoUpgradeAvailable) } l.Infof("Upgrade available (current %q < latest %q)", Version, rel.Tag) if doUpgrade { // Use leveldb database locks to protect against concurrent upgrades _, err = leveldb.OpenFile(locations[locDatabase], &opt.Options{OpenFilesCacheCapacity: 100}) if err != nil { l.Infoln("Attempting upgrade through running Syncthing...") err = upgradeViaRest() if err != nil { l.Fatalln("Upgrade:", err) } l.Okln("Syncthing upgrading") return } err = upgrade.To(rel) if err != nil { l.Fatalln("Upgrade:", err) // exits 1 } l.Okf("Upgraded to %q", rel.Tag) } return } if reset { resetDB() return } if noRestart { syncthingMain() } else { monitorMain() } }
func syncthingMain() { // Create a main service manager. We'll add things to this as we go along. // We want any logging it does to go through our log system. mainSvc := suture.New("main", suture.Spec{ Log: func(line string) { if debugSuture { l.Debugln(line) } }, }) mainSvc.ServeBackground() // Set a log prefix similar to the ID we will have later on, or early log // lines look ugly. l.SetPrefix("[start] ") if auditEnabled { startAuditing(mainSvc) } if verbose { mainSvc.Add(newVerboseSvc()) } // Event subscription for the API; must start early to catch the early events. apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000) if len(os.Getenv("GOMAXPROCS")) == 0 { runtime.GOMAXPROCS(runtime.NumCPU()) } // Ensure that that we have a certificate and key. cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile]) if err != nil { cert, err = newCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName) if err != nil { l.Fatalln("load cert:", err) } } // We reinitialize the predictable RNG with our device ID, to get a // sequence that is always the same but unique to this syncthing instance. predictableRandom.Seed(seedFromBytes(cert.Certificate[0])) myID = protocol.NewDeviceID(cert.Certificate[0]) l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5])) l.Infoln(LongVersion) l.Infoln("My ID:", myID) // Emit the Starting event, now that we know who we are. events.Default.Log(events.Starting, map[string]string{ "home": baseDirs["config"], "myID": myID.String(), }) // Prepare to be able to save configuration cfgFile := locations[locConfigFile] var myName string // Load the configuration file, if it exists. // If it does not, create a template. 
if info, err := os.Stat(cfgFile); err == nil { if !info.Mode().IsRegular() { l.Fatalln("Config file is not a file?") } cfg, err = config.Load(cfgFile, myID) if err == nil { myCfg := cfg.Devices()[myID] if myCfg.Name == "" { myName, _ = os.Hostname() } else { myName = myCfg.Name } } else { l.Fatalln("Configuration:", err) } } else { l.Infoln("No config file; starting with empty defaults") myName, _ = os.Hostname() newCfg := defaultConfig(myName) cfg = config.Wrap(cfgFile, newCfg) cfg.Save() l.Infof("Edit %s to taste or use the GUI\n", cfgFile) } if cfg.Raw().OriginalVersion != config.CurrentVersion { l.Infoln("Archiving a copy of old config file format") // Archive a copy osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion)) // Save the new version cfg.Save() } if err := checkShortIDs(cfg); err != nil { l.Fatalln("Short device IDs are in conflict. Unlucky!\n Regenerate the device ID of one if the following:\n ", err) } if len(profiler) > 0 { go func() { l.Debugln("Starting profiler on", profiler) runtime.SetBlockProfileRate(1) err := http.ListenAndServe(profiler, nil) if err != nil { l.Fatalln(err) } }() } // The TLS configuration is used for both the listening socket and outgoing // connections. tlsCfg := &tls.Config{ Certificates: []tls.Certificate{cert}, NextProtos: []string{bepProtocolName}, ClientAuth: tls.RequestClientCert, SessionTicketsDisabled: true, InsecureSkipVerify: true, MinVersion: tls.VersionTLS12, CipherSuites: []uint16{ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, }, } // If the read or write rate should be limited, set up a rate limiter for it. // This will be used on connections created in the connect and listen routines. 
opts := cfg.Options() if !opts.SymlinksEnabled { symlinks.Supported = false } protocol.PingTimeout = time.Duration(opts.PingTimeoutS) * time.Second protocol.PingIdleTime = time.Duration(opts.PingIdleTimeS) * time.Second if opts.MaxSendKbps > 0 { writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps)) } if opts.MaxRecvKbps > 0 { readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps)) } if (opts.MaxRecvKbps > 0 || opts.MaxSendKbps > 0) && !opts.LimitBandwidthInLan { lans, _ = osutil.GetLans() networks := make([]string, 0, len(lans)) for _, lan := range lans { networks = append(networks, lan.String()) } l.Infoln("Local networks:", strings.Join(networks, ", ")) } dbFile := locations[locDatabase] ldb, err := leveldb.OpenFile(dbFile, dbOpts()) if err != nil && errors.IsCorrupted(err) { ldb, err = leveldb.RecoverFile(dbFile, dbOpts()) } if err != nil { l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?") } // Remove database entries for folders that no longer exist in the config folders := cfg.Folders() for _, folder := range db.ListFolders(ldb) { if _, ok := folders[folder]; !ok { l.Infof("Cleaning data for dropped folder %q", folder) db.DropFolder(ldb, folder) } } m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb) cfg.Subscribe(m) if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 { it, err := strconv.Atoi(t) if err == nil { m.StartDeadlockDetector(time.Duration(it) * time.Second) } } else if !IsRelease || IsBeta { m.StartDeadlockDetector(20 * 60 * time.Second) } // Clear out old indexes for other devices. Otherwise we'll start up and // start needing a bunch of files which are nowhere to be found. This // needs to be changed when we correctly do persistent indexes. 
for _, folderCfg := range cfg.Folders() { m.AddFolder(folderCfg) for _, device := range folderCfg.DeviceIDs() { if device == myID { continue } m.Index(device, folderCfg.ID, nil, 0, nil) } // Routine to pull blocks from other devices to synchronize the local // folder. Does not run when we are in read only (publish only) mode. if folderCfg.ReadOnly { l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folderCfg.ID) m.StartFolderRO(folderCfg.ID) } else { l.Okf("Ready to synchronize %s (read-write)", folderCfg.ID) m.StartFolderRW(folderCfg.ID) } } mainSvc.Add(m) // GUI setupGUI(mainSvc, cfg, m, apiSub) // The default port we announce, possibly modified by setupUPnP next. addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0]) if err != nil { l.Fatalln("Bad listen address:", err) } // Start discovery localPort := addr.Port discoverer = discovery(localPort) // Start UPnP. The UPnP service will restart global discovery if the // external port changes. if opts.UPnPEnabled { upnpSvc := newUPnPSvc(cfg, localPort) mainSvc.Add(upnpSvc) } connectionSvc := newConnectionSvc(cfg, myID, m, tlsCfg) cfg.Subscribe(connectionSvc) mainSvc.Add(connectionSvc) if cpuProfile { f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid())) if err != nil { log.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } for _, device := range cfg.Devices() { if len(device.Name) > 0 { l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses) } } if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion { l.Infoln("Anonymous usage report has changed; revoking acceptance") opts.URAccepted = 0 opts.URUniqueID = "" cfg.SetOptions(opts) } if opts.URAccepted >= usageReportVersion { if opts.URUniqueID == "" { // Previously the ID was generated from the node ID. We now need // to generate a new one. 
opts.URUniqueID = randomString(8) cfg.SetOptions(opts) cfg.Save() } } // The usageReportingManager registers itself to listen to configuration // changes, and there's nothing more we need to tell it from the outside. // Hence we don't keep the returned pointer. newUsageReportingManager(m, cfg) if opts.RestartOnWakeup { go standbyMonitor() } if opts.AutoUpgradeIntervalH > 0 { if noUpgrade { l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.") } else if IsRelease { go autoUpgrade() } else { l.Infof("No automatic upgrades; %s is not a release version.", Version) } } events.Default.Log(events.StartupComplete, map[string]string{ "myID": myID.String(), }) go generatePingEvents() cleanConfigDirectory() code := <-stop mainSvc.Stop() l.Okln("Exiting") os.Exit(code) }
func syncthingMain() { var err error if len(os.Getenv("GOGC")) == 0 { debug.SetGCPercent(25) } if len(os.Getenv("GOMAXPROCS")) == 0 { runtime.GOMAXPROCS(runtime.NumCPU()) } events.Default.Log(events.Starting, map[string]string{"home": confDir}) // Ensure that that we have a certificate and key. cert, err = loadCert(confDir, "") if err != nil { newCertificate(confDir, "", tlsDefaultCommonName) cert, err = loadCert(confDir, "") if err != nil { l.Fatalln("load cert:", err) } } // We reinitialize the predictable RNG with our device ID, to get a // sequence that is always the same but unique to this syncthing instance. predictableRandom.Seed(seedFromBytes(cert.Certificate[0])) myID = protocol.NewDeviceID(cert.Certificate[0]) l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5])) l.Infoln(LongVersion) l.Infoln("My ID:", myID) // Prepare to be able to save configuration cfgFile := filepath.Join(confDir, "config.xml") var myName string // Load the configuration file, if it exists. // If it does not, create a template. 
if info, err := os.Stat(cfgFile); err == nil { if !info.Mode().IsRegular() { l.Fatalln("Config file is not a file?") } cfg, err = config.Load(cfgFile, myID) if err == nil { myCfg := cfg.Devices()[myID] if myCfg.Name == "" { myName, _ = os.Hostname() } else { myName = myCfg.Name } } else { l.Fatalln("Configuration:", err) } } else { l.Infoln("No config file; starting with empty defaults") myName, _ = os.Hostname() newCfg := defaultConfig(myName) cfg = config.Wrap(cfgFile, newCfg) cfg.Save() l.Infof("Edit %s to taste or use the GUI\n", cfgFile) } if cfg.Raw().OriginalVersion != config.CurrentVersion { l.Infoln("Archiving a copy of old config file format") // Archive a copy osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion)) // Save the new version cfg.Save() } if len(profiler) > 0 { go func() { l.Debugln("Starting profiler on", profiler) runtime.SetBlockProfileRate(1) err := http.ListenAndServe(profiler, nil) if err != nil { l.Fatalln(err) } }() } // The TLS configuration is used for both the listening socket and outgoing // connections. tlsCfg := &tls.Config{ Certificates: []tls.Certificate{cert}, NextProtos: []string{"bep/1.0"}, ClientAuth: tls.RequestClientCert, SessionTicketsDisabled: true, InsecureSkipVerify: true, MinVersion: tls.VersionTLS12, CipherSuites: []uint16{ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, }, } // If the read or write rate should be limited, set up a rate limiter for it. // This will be used on connections created in the connect and listen routines. 
opts := cfg.Options() if !opts.SymlinksEnabled { symlinks.Supported = false } if opts.MaxSendKbps > 0 { writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps)) } if opts.MaxRecvKbps > 0 { readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps)) } ldb, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{OpenFilesCacheCapacity: 100}) if err != nil { l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?") } // Remove database entries for folders that no longer exist in the config folders := cfg.Folders() for _, folder := range db.ListFolders(ldb) { if _, ok := folders[folder]; !ok { l.Infof("Cleaning data for dropped folder %q", folder) db.DropFolder(ldb, folder) } } m := model.NewModel(cfg, myName, "syncthing", Version, ldb) sanityCheckFolders(cfg, m) // GUI setupGUI(cfg, m) // Clear out old indexes for other devices. Otherwise we'll start up and // start needing a bunch of files which are nowhere to be found. This // needs to be changed when we correctly do persistent indexes. for _, folderCfg := range cfg.Folders() { if folderCfg.Invalid != "" { continue } for _, device := range folderCfg.DeviceIDs() { if device == myID { continue } m.Index(device, folderCfg.ID, nil) } } // The default port we announce, possibly modified by setupUPnP next. addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0]) if err != nil { l.Fatalln("Bad listen address:", err) } externalPort = addr.Port // UPnP igd = nil if opts.UPnPEnabled { setupUPnP() } // Routine to connect out to configured devices discoverer = discovery(externalPort) go listenConnect(myID, m, tlsCfg) for _, folder := range cfg.Folders() { if folder.Invalid != "" { continue } // Routine to pull blocks from other devices to synchronize the local // folder. Does not run when we are in read only (publish only) mode. 
if folder.ReadOnly { l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folder.ID) m.StartFolderRO(folder.ID) } else { l.Okf("Ready to synchronize %s (read-write)", folder.ID) m.StartFolderRW(folder.ID) } } if cpuProfile { f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid())) if err != nil { log.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } for _, device := range cfg.Devices() { if len(device.Name) > 0 { l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses) } } if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion { l.Infoln("Anonymous usage report has changed; revoking acceptance") opts.URAccepted = 0 opts.URUniqueID = "" cfg.SetOptions(opts) } if opts.URAccepted >= usageReportVersion { if opts.URUniqueID == "" { // Previously the ID was generated from the node ID. We now need // to generate a new one. opts.URUniqueID = randomString(8) cfg.SetOptions(opts) cfg.Save() } go usageReportingLoop(m) go func() { time.Sleep(10 * time.Minute) err := sendUsageReport(m) if err != nil { l.Infoln("Usage report:", err) } }() } if opts.RestartOnWakeup { go standbyMonitor() } if opts.AutoUpgradeIntervalH > 0 { if noUpgrade { l.Infof("No automatic upgrades; STNOUPGRADE environment variable defined.") } else if IsRelease { go autoUpgrade() } else { l.Infof("No automatic upgrades; %s is not a relase version.", Version) } } events.Default.Log(events.StartupComplete, nil) go generateEvents() code := <-stop l.Okln("Exiting") os.Exit(code) }
// TestSanityCheck exercises sanityCheckFolders across four folder states:
// a brand-new folder (directory and marker created), an existing directory
// (marker created), a folder with index data but a missing marker (marked
// invalid), and a folder whose path is missing entirely (marked invalid).
// It depends on the on-disk state of testdata/ between the cases, so the
// cases must run in this exact order.
func TestSanityCheck(t *testing.T) {
	fcfg := config.FolderConfiguration{
		ID:   "folder",
		Path: "testdata/testfolder",
	}
	cfg := config.Wrap("/tmp/test", config.Configuration{
		Folders: []config.FolderConfiguration{fcfg},
	})

	// Precondition: none of the files/markers we are about to create exist.
	for _, file := range []string{".stfolder", "testfolder", "testfolder/.stfolder"} {
		_, err := os.Stat("testdata/" + file)
		if err == nil {
			t.Error("Found unexpected file")
		}
	}

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)

	// Case 1 - new folder, directory and marker created
	m := model.NewModel(cfg, "device", "syncthing", "dev", db)
	sanityCheckFolders(cfg, m)

	if cfg.Folders()["folder"].Invalid != "" {
		t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
	}

	s, err := os.Stat("testdata/testfolder")
	if err != nil || !s.IsDir() {
		t.Error(err)
	}

	_, err = os.Stat("testdata/testfolder/.stfolder")
	if err != nil {
		t.Error(err)
	}

	// Clean up what case 1 created before running case 2.
	os.Remove("testdata/testfolder/.stfolder")
	os.Remove("testdata/testfolder/")

	// Case 2 - new folder, marker created
	fcfg.Path = "testdata/"
	cfg = config.Wrap("/tmp/test", config.Configuration{
		Folders: []config.FolderConfiguration{fcfg},
	})

	m = model.NewModel(cfg, "device", "syncthing", "dev", db)
	sanityCheckFolders(cfg, m)

	if cfg.Folders()["folder"].Invalid != "" {
		t.Error("Unexpected error", cfg.Folders()["folder"].Invalid)
	}

	_, err = os.Stat("testdata/.stfolder")
	if err != nil {
		t.Error(err)
	}

	os.Remove("testdata/.stfolder")

	// Case 3 - marker missing
	// Give the folder some index data so it no longer counts as "new";
	// a non-new folder without a marker must be flagged invalid.
	set := files.NewSet("folder", db)
	set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
		{Name: "dummyfile"},
	})

	m = model.NewModel(cfg, "device", "syncthing", "dev", db)
	sanityCheckFolders(cfg, m)

	if cfg.Folders()["folder"].Invalid != "folder marker missing" {
		t.Error("Incorrect error")
	}

	// Case 4 - path missing
	fcfg.Path = "testdata/testfolder"
	cfg = config.Wrap("/tmp/test", config.Configuration{
		Folders: []config.FolderConfiguration{fcfg},
	})

	m = model.NewModel(cfg, "device", "syncthing", "dev", db)
	sanityCheckFolders(cfg, m)

	if cfg.Folders()["folder"].Invalid != "folder path missing" {
		t.Error("Incorrect error")
	}
}
// On the 10th iteration, we start hashing the content which we receive by
// following blockfinder's instructions. Make sure that the copier routine
// hashes the content when asked, and pulls if it fails to find the block.
func TestLastResortPulling(t *testing.T) {
	fcfg := config.FolderConfiguration{ID: "default", Path: "testdata"}
	cfg := config.Configuration{Folders: []config.FolderConfiguration{fcfg}}

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(config.Wrap("/tmp/test", cfg), "device", "syncthing", "dev", db)
	m.AddFolder(fcfg)

	// Add a file to index (with the incorrect block representation, as content
	// doesn't actually match the block list)
	file := protocol.FileInfo{
		Name:     "empty",
		Flags:    0,
		Modified: 0,
		Blocks:   []protocol.BlockInfo{blocks[0]},
	}
	m.updateLocal("default", file)

	// Pretend that we are handling a new file of the same content but
	// with a different name (causing to copy that particular block)
	file.Name = "newfile"

	// Iterator callback that accepts the first candidate; used only to probe
	// whether a hash is present in the block map at all.
	iterFn := func(folder, file string, index uint32) bool {
		return true
	}

	// Check that that particular block is there
	if !m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	p := Puller{
		folder: "default",
		dir:    "testdata",
		model:  m,
	}

	// copyChan is unbuffered so handleFile blocks until the copier routine
	// has accepted the work; the other channels hold one item each.
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 1)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single copier routine with checksumming enabled
	go p.copierRoutine(copyChan, pullChan, finisherChan, true)
	p.handleFile(file, copyChan, finisherChan)

	// Copier should hash empty file, realise that the region it has read
	// doesn't match the hash which was advertised by the block map, fix it
	// and ask to pull the block.
	<-pullChan

	// Verify that it did fix the incorrect hash.
	if m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Found unexpected block")
	}

	if !m.finder.Iterate(scanner.SHA256OfNothing, iterFn) {
		t.Error("Expected block not found")
	}

	// Close the temp file handle and remove the temp file left behind.
	(<-finisherChan).fd.Close()
	os.Remove(filepath.Join("testdata", defTempNamer.TempName("newfile")))
}
func main() { defConfDir, err := getDefaultConfDir() if err != nil { l.Fatalln("home:", err) } if runtime.GOOS == "windows" { // On Windows, we use a log file by default. Setting the -logfile flag // to the empty string disables this behavior. logFile = filepath.Join(defConfDir, "syncthing.log") flag.StringVar(&logFile, "logfile", logFile, "Log file name (blank for stdout)") // We also add an option to hide the console window flag.BoolVar(&noConsole, "no-console", false, "Hide console window") } flag.StringVar(&generateDir, "generate", "", "Generate key and config in specified dir, then exit") flag.StringVar(&guiAddress, "gui-address", guiAddress, "Override GUI address") flag.StringVar(&guiAuthentication, "gui-authentication", guiAuthentication, "Override GUI authentication; username:password") flag.StringVar(&guiAPIKey, "gui-apikey", guiAPIKey, "Override GUI API key") flag.StringVar(&confDir, "home", "", "Set configuration directory") flag.IntVar(&logFlags, "logflags", logFlags, "Select information in log line prefix") flag.BoolVar(&noBrowser, "no-browser", false, "Do not start browser") flag.BoolVar(&noRestart, "no-restart", noRestart, "Do not restart; just exit") flag.BoolVar(&reset, "reset", false, "Prepare to resync from cluster") flag.BoolVar(&doUpgrade, "upgrade", false, "Perform upgrade") flag.BoolVar(&doUpgradeCheck, "upgrade-check", false, "Check for available upgrade") flag.BoolVar(&showVersion, "version", false, "Show version") flag.StringVar(&upgradeTo, "upgrade-to", upgradeTo, "Force upgrade directly from specified URL") flag.Usage = usageFor(flag.CommandLine, usage, fmt.Sprintf(extraUsage, defConfDir)) flag.Parse() if noConsole { osutil.HideConsole() } if confDir == "" { // Not set as default above because the string can be really long. confDir = defConfDir } if confDir != defConfDir && filepath.Dir(logFile) == defConfDir { // The user changed the config dir with -home, but not the log file // location. 
In this case we assume they meant for the logfile to // still live in it's default location *relative to the config dir*. logFile = filepath.Join(confDir, "syncthing.log") } if showVersion { fmt.Println(LongVersion) return } l.SetFlags(logFlags) if generateDir != "" { dir, err := osutil.ExpandTilde(generateDir) if err != nil { l.Fatalln("generate:", err) } info, err := os.Stat(dir) if err == nil && !info.IsDir() { l.Fatalln(dir, "is not a directory") } if err != nil && os.IsNotExist(err) { err = os.MkdirAll(dir, 0700) if err != nil { l.Fatalln("generate:", err) } } cert, err := loadCert(dir, "") if err == nil { l.Warnln("Key exists; will not overwrite.") l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0])) } else { newCertificate(dir, "", tlsDefaultCommonName) cert, err = loadCert(dir, "") myID = protocol.NewDeviceID(cert.Certificate[0]) if err != nil { l.Fatalln("load cert:", err) } if err == nil { l.Infoln("Device ID:", protocol.NewDeviceID(cert.Certificate[0])) } } cfgFile := filepath.Join(dir, "config.xml") if _, err := os.Stat(cfgFile); err == nil { l.Warnln("Config exists; will not overwrite.") return } var myName, _ = os.Hostname() var newCfg = defaultConfig(myName) var cfg = config.Wrap(cfgFile, newCfg) err = cfg.Save() if err != nil { l.Warnln("Failed to save config", err) } return } confDir, err := osutil.ExpandTilde(confDir) if err != nil { l.Fatalln("home:", err) } if info, err := os.Stat(confDir); err == nil && !info.IsDir() { l.Fatalln("Config directory", confDir, "is not a directory") } // Ensure that our home directory exists. 
ensureDir(confDir, 0700) if upgradeTo != "" { err := upgrade.ToURL(upgradeTo) if err != nil { l.Fatalln("Upgrade:", err) // exits 1 } l.Okln("Upgraded from", upgradeTo) return } if doUpgrade || doUpgradeCheck { rel, err := upgrade.LatestRelease(IsBeta) if err != nil { l.Fatalln("Upgrade:", err) // exits 1 } if upgrade.CompareVersions(rel.Tag, Version) <= 0 { l.Infof("No upgrade available (current %q >= latest %q).", Version, rel.Tag) os.Exit(exitNoUpgradeAvailable) } l.Infof("Upgrade available (current %q < latest %q)", Version, rel.Tag) if doUpgrade { // Use leveldb database locks to protect against concurrent upgrades _, err = leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{OpenFilesCacheCapacity: 100}) if err != nil { l.Fatalln("Cannot upgrade, database seems to be locked. Is another copy of Syncthing already running?") } err = upgrade.To(rel) if err != nil { l.Fatalln("Upgrade:", err) // exits 1 } l.Okf("Upgraded to %q", rel.Tag) } return } if reset { resetFolders() return } if noRestart { syncthingMain() } else { monitorMain() } }
// TestDeregisterOnFailInPull verifies that when a pull fails (no device to
// pull from), the finisher routine deregisters the file from both the job
// queue and the progress emitter, and that feeding the same failed state
// through the finisher a second time is a harmless no-op.
func TestDeregisterOnFailInPull(t *testing.T) {
	file := protocol.FileInfo{
		Name:     "filex",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}
	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)

	cw := config.Wrap("/tmp/test", config.Configuration{})
	m := NewModel(cw, "device", "syncthing", "dev", db)
	m.AddFolder(config.FolderConfiguration{ID: "default", Path: "testdata"})
	emitter := NewProgressEmitter(cw)
	go emitter.Serve()

	p := Puller{
		folder:          "default",
		dir:             "testdata",
		model:           m,
		queue:           newJobQueue(),
		progressEmitter: emitter,
	}

	// queue.Done should be called by the finisher routine
	p.queue.Push("filex")
	p.queue.Pop()

	if len(p.queue.progress) != 1 {
		t.Fatal("Expected file in progress")
	}

	// All channels are unbuffered so each hand-off between the routines is
	// synchronous and observable from this goroutine.
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)

	go p.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go p.pullerRoutine(pullChan, finisherBufferChan)
	go p.finisherRoutine(finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if len(p.progressEmitter.registry) != 1 || len(p.queue.progress) != 1 || len(p.queue.queued) != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if state.fd != nil {
			t.Fatal("File not closed?")
		}

		if len(p.progressEmitter.registry) != 0 || len(p.queue.progress) != 0 || len(p.queue.queued) != 0 {
			t.Fatal("Still registered", len(p.progressEmitter.registry), len(p.queue.progress), len(p.queue.queued))
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if len(p.progressEmitter.registry) != 0 || len(p.queue.progress) != 0 || len(p.queue.queued) != 0 {
			t.Fatal("Still registered")
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
func TestClusterConfig(t *testing.T) { cfg := config.New(device1) cfg.Devices = []config.DeviceConfiguration{ { DeviceID: device1, Introducer: true, }, { DeviceID: device2, }, } cfg.Folders = []config.FolderConfiguration{ { ID: "folder1", Devices: []config.FolderDeviceConfiguration{ {DeviceID: device1}, {DeviceID: device2}, }, }, { ID: "folder2", Devices: []config.FolderDeviceConfiguration{ {DeviceID: device1}, {DeviceID: device2}, }, }, } db, _ := leveldb.Open(storage.NewMemStorage(), nil) m := NewModel(config.Wrap("/tmp/test", cfg), protocol.LocalDeviceID, "device", "syncthing", "dev", db) m.AddFolder(cfg.Folders[0]) m.AddFolder(cfg.Folders[1]) m.ServeBackground() cm := m.clusterConfig(device2) if l := len(cm.Folders); l != 2 { t.Fatalf("Incorrect number of folders %d != 2", l) } r := cm.Folders[0] if r.ID != "folder1" { t.Errorf("Incorrect folder %q != folder1", r.ID) } if l := len(r.Devices); l != 2 { t.Errorf("Incorrect number of devices %d != 2", l) } if id := r.Devices[0].ID; bytes.Compare(id, device1[:]) != 0 { t.Errorf("Incorrect device ID %x != %x", id, device1) } if r.Devices[0].Flags&protocol.FlagIntroducer == 0 { t.Error("Device1 should be flagged as Introducer") } if id := r.Devices[1].ID; bytes.Compare(id, device2[:]) != 0 { t.Errorf("Incorrect device ID %x != %x", id, device2) } if r.Devices[1].Flags&protocol.FlagIntroducer != 0 { t.Error("Device2 should not be flagged as Introducer") } r = cm.Folders[1] if r.ID != "folder2" { t.Errorf("Incorrect folder %q != folder2", r.ID) } if l := len(r.Devices); l != 2 { t.Errorf("Incorrect number of devices %d != 2", l) } if id := r.Devices[0].ID; bytes.Compare(id, device1[:]) != 0 { t.Errorf("Incorrect device ID %x != %x", id, device1) } if r.Devices[0].Flags&protocol.FlagIntroducer == 0 { t.Error("Device1 should be flagged as Introducer") } if id := r.Devices[1].ID; bytes.Compare(id, device2[:]) != 0 { t.Errorf("Incorrect device ID %x != %x", id, device2) } if r.Devices[1].Flags&protocol.FlagIntroducer != 0 
{ t.Error("Device2 should not be flagged as Introducer") } }
func TestIgnores(t *testing.T) { arrEqual := func(a, b []string) bool { if len(a) != len(b) { return false } for i := range a { if a[i] != b[i] { return false } } return true } db, _ := leveldb.Open(storage.NewMemStorage(), nil) fcfg := config.FolderConfiguration{ID: "default", Path: "testdata"} cfg := config.Wrap("/tmp", config.Configuration{ Folders: []config.FolderConfiguration{fcfg}, }) m := NewModel(cfg, "device", "syncthing", "dev", db) m.AddFolder(fcfg) expected := []string{ ".*", "quux", } ignores, _, err := m.GetIgnores("default") if err != nil { t.Error(err) } if !arrEqual(ignores, expected) { t.Errorf("Incorrect ignores: %v != %v", ignores, expected) } ignores = append(ignores, "pox") err = m.SetIgnores("default", ignores) if err != nil { t.Error(err) } ignores2, _, err := m.GetIgnores("default") if err != nil { t.Error(err) } if arrEqual(expected, ignores2) { t.Errorf("Incorrect ignores: %v == %v", ignores2, expected) } if !arrEqual(ignores, ignores2) { t.Errorf("Incorrect ignores: %v != %v", ignores2, ignores) } err = m.SetIgnores("default", expected) if err != nil { t.Error(err) } ignores, _, err = m.GetIgnores("default") if err != nil { t.Error(err) } if !arrEqual(ignores, expected) { t.Errorf("Incorrect ignores: %v != %v", ignores, expected) } ignores, _, err = m.GetIgnores("doesnotexist") if err == nil { t.Error("No error") } err = m.SetIgnores("doesnotexist", expected) if err == nil { t.Error("No error") } m.AddFolder(config.FolderConfiguration{ID: "fresh", Path: "XXX"}) ignores, _, err = m.GetIgnores("fresh") if err != nil { t.Error(err) } if len(ignores) > 0 { t.Errorf("Expected no ignores, got: %v", ignores) } }
func TestHandleFile(t *testing.T) { // After the diff between required and existing we should: // Copy: 2, 5, 8 // Pull: 1, 3, 4, 6, 7 // Create existing file, and update local index existingFile := protocol.FileInfo{ Name: "filex", Flags: 0, Modified: 0, Blocks: []protocol.BlockInfo{ blocks[0], blocks[2], blocks[0], blocks[0], blocks[5], blocks[0], blocks[0], blocks[8], }, } // Create target file requiredFile := existingFile requiredFile.Blocks = blocks[1:] db, _ := leveldb.Open(storage.NewMemStorage(), nil) m := NewModel(config.Wrap("/tmp/test", config.Configuration{}), "device", "syncthing", "dev", db) m.AddFolder(config.FolderConfiguration{ID: "default", Path: "testdata"}) m.updateLocal("default", existingFile) p := Puller{ folder: "default", dir: "testdata", model: m, } copyChan := make(chan copyBlocksState, 1) // Copy chan gets all blocks needed to copy in a wrapper struct pullChan := make(chan pullBlockState, 5) // Pull chan gets blocks one by one p.handleFile(requiredFile, copyChan, pullChan, nil) // Receive the results toCopy := <-copyChan toPull := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan, <-pullChan} select { case <-pullChan: t.Error("Channel not empty!") default: } if len(toCopy.blocks) != 3 { t.Errorf("Unexpected count of copy blocks: %d != 3", len(toCopy.blocks)) } for i, eq := range []int{2, 5, 8} { if string(toCopy.blocks[i].Hash) != string(blocks[eq].Hash) { t.Errorf("Block mismatch: %s != %s", toCopy.blocks[i].String(), blocks[eq].String()) } } for i, eq := range []int{1, 3, 4, 6, 7} { if string(toPull[i].block.Hash) != string(blocks[eq].Hash) { t.Errorf("Block mismatch: %s != %s", toPull[i].block.String(), blocks[eq].String()) } } }
func TestRequest(t *testing.T) { db, _ := leveldb.Open(storage.NewMemStorage(), nil) m := NewModel(config.Wrap("/tmp/test", config.Configuration{}), "device", "syncthing", "dev", db) // device1 shares default, but device2 doesn't m.AddFolder(config.FolderConfiguration{ID: "default", Path: "testdata", Devices: []config.FolderDeviceConfiguration{{DeviceID: device1}}}) m.ScanFolder("default") // Existing, shared file bs, err := m.Request(device1, "default", "foo", 0, 6) if err != nil { t.Error(err) } if bytes.Compare(bs, []byte("foobar")) != 0 { t.Errorf("Incorrect data from request: %q", string(bs)) } // Existing, nonshared file bs, err = m.Request(device2, "default", "foo", 0, 6) if err == nil { t.Error("Unexpected nil error on insecure file read") } if bs != nil { t.Errorf("Unexpected non nil data on insecure file read: %q", string(bs)) } // Nonexistent file bs, err = m.Request(device1, "default", "nonexistent", 0, 6) if err == nil { t.Error("Unexpected nil error on insecure file read") } if bs != nil { t.Errorf("Unexpected non nil data on insecure file read: %q", string(bs)) } // Shared folder, but disallowed file name bs, err = m.Request(device1, "default", "../walk.go", 0, 6) if err == nil { t.Error("Unexpected nil error on insecure file read") } if bs != nil { t.Errorf("Unexpected non nil data on insecure file read: %q", string(bs)) } // Larger block than available bs, err = m.Request(device1, "default", "foo", 0, 42) if err == nil { t.Error("Unexpected nil error on insecure file read") } if bs != nil { t.Errorf("Unexpected non nil data on insecure file read: %q", string(bs)) } // Negative offset bs, err = m.Request(device1, "default", "foo", -4, 6) if err == nil { t.Error("Unexpected nil error on insecure file read") } if bs != nil { t.Errorf("Unexpected non nil data on insecure file read: %q", string(bs)) } // Negative size bs, err = m.Request(device1, "default", "foo", 4, -4) if err == nil { t.Error("Unexpected nil error on insecure file read") } if bs != 
nil { t.Errorf("Unexpected non nil data on insecure file read: %q", string(bs)) } }
func TestRWScanRecovery(t *testing.T) { ldb, _ := leveldb.Open(storage.NewMemStorage(), nil) set := db.NewFileSet("default", ldb) set.Update(protocol.LocalDeviceID, []protocol.FileInfo{ {Name: "dummyfile"}, }) fcfg := config.FolderConfiguration{ ID: "default", RawPath: "testdata/rwtestfolder", RescanIntervalS: 1, } cfg := config.Wrap("/tmp/test", config.Configuration{ Folders: []config.FolderConfiguration{fcfg}, Devices: []config.DeviceConfiguration{ { DeviceID: device1, }, }, }) os.RemoveAll(fcfg.RawPath) m := NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb) m.AddFolder(fcfg) m.StartFolderRW("default") m.ServeBackground() waitFor := func(status string) error { timeout := time.Now().Add(2 * time.Second) for { if time.Now().After(timeout) { return fmt.Errorf("Timed out waiting for status: %s, current status: %s", status, m.cfg.Folders()["default"].Invalid) } _, _, err := m.State("default") if err == nil && status == "" { return nil } if err != nil && err.Error() == status { return nil } time.Sleep(10 * time.Millisecond) } } if err := waitFor("folder path missing"); err != nil { t.Error(err) return } os.Mkdir(fcfg.RawPath, 0700) if err := waitFor("folder marker missing"); err != nil { t.Error(err) return } fd, err := os.Create(filepath.Join(fcfg.RawPath, ".stfolder")) if err != nil { t.Error(err) return } fd.Close() if err := waitFor(""); err != nil { t.Error(err) return } os.Remove(filepath.Join(fcfg.RawPath, ".stfolder")) if err := waitFor("folder marker missing"); err != nil { t.Error(err) return } os.Remove(fcfg.RawPath) if err := waitFor("folder path missing"); err != nil { t.Error(err) return } }
func syncthingMain() { var err error if len(os.Getenv("GOGC")) == 0 { debug.SetGCPercent(25) } if len(os.Getenv("GOMAXPROCS")) == 0 { runtime.GOMAXPROCS(runtime.NumCPU()) } events.Default.Log(events.Starting, map[string]string{"home": confDir}) // Ensure that that we have a certificate and key. cert, err = loadCert(confDir, "") if err != nil { newCertificate(confDir, "") cert, err = loadCert(confDir, "") if err != nil { l.Fatalln("load cert:", err) } } myID = protocol.NewDeviceID(cert.Certificate[0]) l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5])) l.Infoln(LongVersion) l.Infoln("My ID:", myID) // Prepare to be able to save configuration cfgFile := filepath.Join(confDir, "config.xml") var myName string // Load the configuration file, if it exists. // If it does not, create a template. cfg, err = config.Load(cfgFile, myID) if err == nil { myCfg := cfg.Devices()[myID] if myCfg.Name == "" { myName, _ = os.Hostname() } else { myName = myCfg.Name } } else { l.Infoln("No config file; starting with empty defaults") myName, _ = os.Hostname() defaultFolder, err := osutil.ExpandTilde("~/Sync") if err != nil { l.Fatalln("home:", err) } newCfg := config.New(myID) newCfg.Folders = []config.FolderConfiguration{ { ID: "default", Path: defaultFolder, RescanIntervalS: 60, Devices: []config.FolderDeviceConfiguration{{DeviceID: myID}}, }, } newCfg.Devices = []config.DeviceConfiguration{ { DeviceID: myID, Addresses: []string{"dynamic"}, Name: myName, }, } port, err := getFreePort("127.0.0.1", 8080) if err != nil { l.Fatalln("get free port (GUI):", err) } newCfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port) port, err = getFreePort("0.0.0.0", 22000) if err != nil { l.Fatalln("get free port (BEP):", err) } newCfg.Options.ListenAddress = []string{fmt.Sprintf("0.0.0.0:%d", port)} cfg = config.Wrap(cfgFile, newCfg) cfg.Save() l.Infof("Edit %s to taste or use the GUI\n", cfgFile) } if cfg.Raw().OriginalVersion != config.CurrentVersion { l.Infoln("Archiving a copy of old config file 
format") // Archive a copy osutil.Rename(cfgFile, cfgFile+fmt.Sprintf(".v%d", cfg.Raw().OriginalVersion)) // Save the new version cfg.Save() } if len(profiler) > 0 { go func() { l.Debugln("Starting profiler on", profiler) runtime.SetBlockProfileRate(1) err := http.ListenAndServe(profiler, nil) if err != nil { l.Fatalln(err) } }() } // The TLS configuration is used for both the listening socket and outgoing // connections. tlsCfg := &tls.Config{ Certificates: []tls.Certificate{cert}, NextProtos: []string{"bep/1.0"}, ServerName: myID.String(), ClientAuth: tls.RequestClientCert, SessionTicketsDisabled: true, InsecureSkipVerify: true, MinVersion: tls.VersionTLS12, } // If the read or write rate should be limited, set up a rate limiter for it. // This will be used on connections created in the connect and listen routines. opts := cfg.Options() if opts.MaxSendKbps > 0 { writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxSendKbps), int64(5*1000*opts.MaxSendKbps)) } if opts.MaxRecvKbps > 0 { readRateLimit = ratelimit.NewBucketWithRate(float64(1000*opts.MaxRecvKbps), int64(5*1000*opts.MaxRecvKbps)) } // If this is the first time the user runs v0.9, archive the old indexes and config. 
archiveLegacyConfig() db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{CachedOpenFiles: 100}) if err != nil { l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?") } // Remove database entries for folders that no longer exist in the config folders := cfg.Folders() for _, folder := range files.ListFolders(db) { if _, ok := folders[folder]; !ok { l.Infof("Cleaning data for dropped folder %q", folder) files.DropFolder(db, folder) } } m := model.NewModel(cfg, myName, "syncthing", Version, db) nextFolder: for id, folder := range cfg.Folders() { if folder.Invalid != "" { continue } folder.Path, err = osutil.ExpandTilde(folder.Path) if err != nil { l.Fatalln("home:", err) } m.AddFolder(folder) fi, err := os.Stat(folder.Path) if m.CurrentLocalVersion(id) > 0 { // Safety check. If the cached index contains files but the // folder doesn't exist, we have a problem. We would assume // that all files have been deleted which might not be the case, // so mark it as invalid instead. if err != nil || !fi.IsDir() { l.Warnf("Stopping folder %q - path does not exist, but has files in index", folder.ID) cfg.InvalidateFolder(id, "folder path missing") continue nextFolder } } else if os.IsNotExist(err) { // If we don't have any files in the index, and the directory // doesn't exist, try creating it. err = os.MkdirAll(folder.Path, 0700) } if err != nil { // If there was another error or we could not create the // path, the folder is invalid. 
l.Warnf("Stopping folder %q - %v", err) cfg.InvalidateFolder(id, err.Error()) continue nextFolder } } // GUI guiCfg := overrideGUIConfig(cfg.GUI(), guiAddress, guiAuthentication, guiAPIKey) if guiCfg.Enabled && guiCfg.Address != "" { addr, err := net.ResolveTCPAddr("tcp", guiCfg.Address) if err != nil { l.Fatalf("Cannot start GUI on %q: %v", guiCfg.Address, err) } else { var hostOpen, hostShow string switch { case addr.IP == nil: hostOpen = "localhost" hostShow = "0.0.0.0" case addr.IP.IsUnspecified(): hostOpen = "localhost" hostShow = addr.IP.String() default: hostOpen = addr.IP.String() hostShow = hostOpen } var proto = "http" if guiCfg.UseTLS { proto = "https" } urlShow := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port))) l.Infoln("Starting web GUI on", urlShow) err := startGUI(guiCfg, guiAssets, m) if err != nil { l.Fatalln("Cannot start GUI:", err) } if opts.StartBrowser && !noBrowser && !stRestarting { urlOpen := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostOpen, strconv.Itoa(addr.Port))) openURL(urlOpen) } } } // Clear out old indexes for other devices. Otherwise we'll start up and // start needing a bunch of files which are nowhere to be found. This // needs to be changed when we correctly do persistent indexes. for _, folderCfg := range cfg.Folders() { if folderCfg.Invalid != "" { continue } for _, device := range folderCfg.DeviceIDs() { if device == myID { continue } m.Index(device, folderCfg.ID, nil) } } // Remove all .idx* files that don't belong to an active folder. 
validIndexes := make(map[string]bool) for _, folder := range cfg.Folders() { dir, err := osutil.ExpandTilde(folder.Path) if err != nil { l.Fatalln("home:", err) } id := fmt.Sprintf("%x", sha1.Sum([]byte(dir))) validIndexes[id] = true } allIndexes, err := filepath.Glob(filepath.Join(confDir, "*.idx*")) if err == nil { for _, idx := range allIndexes { bn := filepath.Base(idx) fs := strings.Split(bn, ".") if len(fs) > 1 { if _, ok := validIndexes[fs[0]]; !ok { l.Infoln("Removing old index", bn) os.Remove(idx) } } } } // The default port we announce, possibly modified by setupUPnP next. addr, err := net.ResolveTCPAddr("tcp", opts.ListenAddress[0]) if err != nil { l.Fatalln("Bad listen address:", err) } externalPort = addr.Port // UPnP if opts.UPnPEnabled { setupUPnP() } // Routine to connect out to configured devices discoverer = discovery(externalPort) go listenConnect(myID, m, tlsCfg) for _, folder := range cfg.Folders() { if folder.Invalid != "" { continue } // Routine to pull blocks from other devices to synchronize the local // folder. Does not run when we are in read only (publish only) mode. 
if folder.ReadOnly { l.Okf("Ready to synchronize %s (read only; no external updates accepted)", folder.ID) m.StartFolderRO(folder.ID) } else { l.Okf("Ready to synchronize %s (read-write)", folder.ID) m.StartFolderRW(folder.ID) } } if cpuProfile { f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid())) if err != nil { log.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } for _, device := range cfg.Devices() { if len(device.Name) > 0 { l.Infof("Device %s is %q at %v", device.DeviceID, device.Name, device.Addresses) } } if opts.URAccepted > 0 && opts.URAccepted < usageReportVersion { l.Infoln("Anonymous usage report has changed; revoking acceptance") opts.URAccepted = 0 cfg.SetOptions(opts) } if opts.URAccepted >= usageReportVersion { go usageReportingLoop(m) go func() { time.Sleep(10 * time.Minute) err := sendUsageReport(m) if err != nil { l.Infoln("Usage report:", err) } }() } if opts.RestartOnWakeup { go standbyMonitor() } if opts.AutoUpgradeIntervalH > 0 { go autoUpgrade() } events.Default.Log(events.StartupComplete, nil) go generateEvents() code := <-stop l.Okln("Exiting") os.Exit(code) }