func TestNodeRename(t *testing.T) {
	ccm := protocol.ClusterConfigMessage{
		ClientName:    "syncthing",
		ClientVersion: "v0.9.4",
	}

	cfg := config.New("test", node1)
	cfg.Nodes = []config.NodeConfiguration{
		{
			NodeID: node1,
		},
	}

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel("/tmp", &cfg, "node", "syncthing", "dev", db)

	if cfg.Nodes[0].Name != "" {
		t.Errorf("Node already has a name")
	}

	// A cluster config without a name option should not set a name.
	m.ClusterConfig(node1, ccm)
	if cfg.Nodes[0].Name != "" {
		t.Errorf("Node already has a name")
	}

	// A cluster config carrying a name option should set the name.
	ccm.Options = []protocol.Option{
		{
			Key:   "name",
			Value: "tester",
		},
	}
	m.ClusterConfig(node1, ccm)
	if cfg.Nodes[0].Name != "tester" {
		t.Errorf("Node did not get a name")
	}

	// A name that is already set should not be overwritten.
	ccm.Options[0].Value = "tester2"
	m.ClusterConfig(node1, ccm)
	if cfg.Nodes[0].Name != "tester" {
		t.Errorf("Node name got overwritten")
	}
}
func syncthingMain() {
	var err error

	if len(os.Getenv("GOGC")) == 0 {
		debug.SetGCPercent(25)
	}

	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	events.Default.Log(events.Starting, map[string]string{"home": confDir})

	if _, err = os.Stat(confDir); err != nil && confDir == getDefaultConfDir() {
		// We are supposed to use the default configuration directory. It
		// doesn't exist. In the past our default has been ~/.syncthing, so if
		// that directory exists we move it to the new default location and
		// continue. We don't much care if this fails at this point, we will
		// be checking that later.

		var oldDefault string
		if runtime.GOOS == "windows" {
			oldDefault = filepath.Join(os.Getenv("AppData"), "Syncthing")
		} else {
			oldDefault = expandTilde("~/.syncthing")
		}
		if _, err := os.Stat(oldDefault); err == nil {
			os.MkdirAll(filepath.Dir(confDir), 0700)
			if err := os.Rename(oldDefault, confDir); err == nil {
				l.Infoln("Moved config dir", oldDefault, "to", confDir)
			}
		}
	}

	// Ensure that our home directory exists and that we have a certificate and key.

	ensureDir(confDir, 0700)
	cert, err = loadCert(confDir, "")
	if err != nil {
		newCertificate(confDir, "")
		cert, err = loadCert(confDir, "")
		l.FatalErr(err)
	}

	myID = protocol.NewNodeID(cert.Certificate[0])
	l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5]))

	l.Infoln(LongVersion)
	l.Infoln("My ID:", myID)

	// Prepare to be able to save configuration

	cfgFile := filepath.Join(confDir, "config.xml")

	var myName string

	// Load the configuration file, if it exists.
	// If it does not, create a template.

	cfg, err = config.Load(cfgFile, myID)
	if err == nil {
		myCfg := cfg.GetNodeConfiguration(myID)
		if myCfg == nil || myCfg.Name == "" {
			myName, _ = os.Hostname()
		} else {
			myName = myCfg.Name
		}
	} else {
		l.Infoln("No config file; starting with empty defaults")
		myName, _ = os.Hostname()
		defaultRepo := filepath.Join(getHomeDir(), "Sync")

		cfg = config.New(cfgFile, myID)
		cfg.Repositories = []config.RepositoryConfiguration{
			{
				ID:              "default",
				Directory:       defaultRepo,
				RescanIntervalS: 60,
				Nodes:           []config.RepositoryNodeConfiguration{{NodeID: myID}},
			},
		}
		cfg.Nodes = []config.NodeConfiguration{
			{
				NodeID:    myID,
				Addresses: []string{"dynamic"},
				Name:      myName,
			},
		}

		port, err := getFreePort("127.0.0.1", 8080)
		l.FatalErr(err)
		cfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port)

		port, err = getFreePort("0.0.0.0", 22000)
		l.FatalErr(err)
		cfg.Options.ListenAddress = []string{fmt.Sprintf("0.0.0.0:%d", port)}

		cfg.Save()
		l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
	}

	if profiler := os.Getenv("STPROFILER"); len(profiler) > 0 {
		go func() {
			l.Debugln("Starting profiler on", profiler)
			runtime.SetBlockProfileRate(1)
			err := http.ListenAndServe(profiler, nil)
			if err != nil {
				l.Fatalln(err)
			}
		}()
	}

	// The TLS configuration is used for both the listening socket and outgoing
	// connections.

	tlsCfg := &tls.Config{
		Certificates:           []tls.Certificate{cert},
		NextProtos:             []string{"bep/1.0"},
		ServerName:             myID.String(),
		ClientAuth:             tls.RequestClientCert,
		SessionTicketsDisabled: true,
		InsecureSkipVerify:     true,
		MinVersion:             tls.VersionTLS12,
	}

	// If the read or write rate should be limited, set up a rate limiter for it.
	// This will be used on connections created in the connect and listen routines.
	if cfg.Options.MaxSendKbps > 0 {
		writeRateLimit = ratelimit.NewBucketWithRate(float64(1000*cfg.Options.MaxSendKbps), int64(5*1000*cfg.Options.MaxSendKbps))
	}
	if cfg.Options.MaxRecvKbps > 0 {
		readRateLimit = ratelimit.NewBucketWithRate(float64(1000*cfg.Options.MaxRecvKbps), int64(5*1000*cfg.Options.MaxRecvKbps))
	}

	// If this is the first time the user runs v0.9, archive the old indexes and config.
	archiveLegacyConfig()

	db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), &opt.Options{CachedOpenFiles: 100})
	if err != nil {
		l.Fatalln("Cannot open database:", err, "- Is another copy of Syncthing already running?")
	}

	// Remove database entries for repos that no longer exist in the config
	repoMap := cfg.RepoMap()
	for _, repo := range files.ListRepos(db) {
		if _, ok := repoMap[repo]; !ok {
			l.Infof("Cleaning data for dropped repo %q", repo)
			files.DropRepo(db, repo)
		}
	}

	m := model.NewModel(confDir, &cfg, myName, "syncthing", Version, db)

nextRepo:
	for i, repo := range cfg.Repositories {
		if repo.Invalid != "" {
			continue
		}

		repo.Directory = expandTilde(repo.Directory)

		fi, err := os.Stat(repo.Directory)
		if m.LocalVersion(repo.ID) > 0 {
			// Safety check. If the cached index contains files but the
			// repository doesn't exist, we have a problem. We would assume
			// that all files have been deleted which might not be the case,
			// so mark it as invalid instead.
			if err != nil || !fi.IsDir() {
				l.Warnf("Stopping repository %q - directory missing, but has files in index", repo.ID)
				cfg.Repositories[i].Invalid = "repo directory missing"
				continue nextRepo
			}
		} else if os.IsNotExist(err) {
			// If we don't have any files in the index, and the directory
			// doesn't exist, try creating it.
			err = os.MkdirAll(repo.Directory, 0700)
		}

		if err != nil {
			// If there was another error or we could not create the
			// directory, the repository is invalid.
			l.Warnf("Stopping repository %q - %v", repo.ID, err)
			cfg.Repositories[i].Invalid = err.Error()
			continue nextRepo
		}

		m.AddRepo(repo)
	}

	// GUI
	guiCfg := overrideGUIConfig(cfg.GUI, guiAddress, guiAuthentication, guiAPIKey)
	if guiCfg.Enabled && guiCfg.Address != "" {
		addr, err := net.ResolveTCPAddr("tcp", guiCfg.Address)
		if err != nil {
			l.Fatalf("Cannot start GUI on %q: %v", guiCfg.Address, err)
		} else {
			var hostOpen, hostShow string
			switch {
			case addr.IP == nil:
				hostOpen = "localhost"
				hostShow = "0.0.0.0"
			case addr.IP.IsUnspecified():
				hostOpen = "localhost"
				hostShow = addr.IP.String()
			default:
				hostOpen = addr.IP.String()
				hostShow = hostOpen
			}

			var proto = "http"
			if guiCfg.UseTLS {
				proto = "https"
			}

			l.Infof("Starting web GUI on %s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port)))
			err := startGUI(guiCfg, os.Getenv("STGUIASSETS"), m)
			if err != nil {
				l.Fatalln("Cannot start GUI:", err)
			}
			if !noBrowser && cfg.Options.StartBrowser && len(os.Getenv("STRESTART")) == 0 {
				openURL(fmt.Sprintf("%s://%s:%d", proto, hostOpen, addr.Port))
			}
		}
	}

	// Clear out old indexes for other nodes. Otherwise we'll start up and
	// start needing a bunch of files which are nowhere to be found. This
	// needs to be changed when we correctly do persistent indexes.
	for _, repoCfg := range cfg.Repositories {
		if repoCfg.Invalid != "" {
			continue
		}
		for _, node := range repoCfg.NodeIDs() {
			if node == myID {
				continue
			}
			m.Index(node, repoCfg.ID, nil)
		}
	}

	// Walk the repository and update the local model before establishing any
	// connections to other nodes.

	m.CleanRepos()
	l.Infoln("Performing initial repository scan")
	m.ScanRepos()

	// Remove all .idx* files that don't belong to an active repo.
	validIndexes := make(map[string]bool)
	for _, repo := range cfg.Repositories {
		dir := expandTilde(repo.Directory)
		id := fmt.Sprintf("%x", sha1.Sum([]byte(dir)))
		validIndexes[id] = true
	}

	allIndexes, err := filepath.Glob(filepath.Join(confDir, "*.idx*"))
	if err == nil {
		for _, idx := range allIndexes {
			bn := filepath.Base(idx)
			fs := strings.Split(bn, ".")
			if len(fs) > 1 {
				if _, ok := validIndexes[fs[0]]; !ok {
					l.Infoln("Removing old index", bn)
					os.Remove(idx)
				}
			}
		}
	}

	// The default port we announce, possibly modified by setupUPnP next.
	addr, err := net.ResolveTCPAddr("tcp", cfg.Options.ListenAddress[0])
	if err != nil {
		l.Fatalln("Bad listen address:", err)
	}
	externalPort = addr.Port

	// UPnP
	if cfg.Options.UPnPEnabled {
		setupUPnP()
	}

	// Routine to connect out to configured nodes
	discoverer = discovery(externalPort)
	go listenConnect(myID, m, tlsCfg)

	for _, repo := range cfg.Repositories {
		if repo.Invalid != "" {
			continue
		}

		// Routine to pull blocks from other nodes to synchronize the local
		// repository. Does not run when we are in read only (publish only) mode.
		if repo.ReadOnly {
			l.Okf("Ready to synchronize %s (read only; no external updates accepted)", repo.ID)
			m.StartRepoRO(repo.ID)
		} else {
			l.Okf("Ready to synchronize %s (read-write)", repo.ID)
			m.StartRepoRW(repo.ID, cfg.Options.ParallelRequests)
		}
	}

	if cpuprof := os.Getenv("STCPUPROFILE"); len(cpuprof) > 0 {
		f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	for _, node := range cfg.Nodes {
		if len(node.Name) > 0 {
			l.Infof("Node %s is %q at %v", node.NodeID, node.Name, node.Addresses)
		}
	}

	if cfg.Options.URAccepted > 0 && cfg.Options.URAccepted < usageReportVersion {
		l.Infoln("Anonymous usage report has changed; revoking acceptance")
		cfg.Options.URAccepted = 0
	}
	if cfg.Options.URAccepted >= usageReportVersion {
		go usageReportingLoop(m)
		go func() {
			time.Sleep(10 * time.Minute)
			err := sendUsageReport(m)
			if err != nil {
				l.Infoln("Usage report:", err)
			}
		}()
	}

	if cfg.Options.RestartOnWakeup {
		go standbyMonitor()
	}

	events.Default.Log(events.StartupComplete, nil)
	go generateEvents()

	code := <-stop

	l.Okln("Exiting")
	os.Exit(code)
}
func TestClusterConfig(t *testing.T) {
	cfg := config.New("test", node1)
	cfg.Nodes = []config.NodeConfiguration{
		{
			NodeID: node1,
		},
		{
			NodeID: node2,
		},
	}
	cfg.Repositories = []config.RepositoryConfiguration{
		{
			ID: "repo1",
			Nodes: []config.RepositoryNodeConfiguration{
				{NodeID: node1},
				{NodeID: node2},
			},
		},
		{
			ID: "repo2",
			Nodes: []config.RepositoryNodeConfiguration{
				{NodeID: node1},
				{NodeID: node2},
			},
		},
	}

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)

	m := NewModel("/tmp", &cfg, "node", "syncthing", "dev", db)
	m.AddRepo(cfg.Repositories[0])
	m.AddRepo(cfg.Repositories[1])

	cm := m.clusterConfig(node2)

	if l := len(cm.Repositories); l != 2 {
		t.Fatalf("Incorrect number of repos %d != 2", l)
	}

	r := cm.Repositories[0]
	if r.ID != "repo1" {
		t.Errorf("Incorrect repo %q != repo1", r.ID)
	}
	if l := len(r.Nodes); l != 2 {
		t.Errorf("Incorrect number of nodes %d != 2", l)
	}
	if id := r.Nodes[0].ID; bytes.Compare(id, node1[:]) != 0 {
		t.Errorf("Incorrect node ID %x != %x", id, node1)
	}
	if id := r.Nodes[1].ID; bytes.Compare(id, node2[:]) != 0 {
		t.Errorf("Incorrect node ID %x != %x", id, node2)
	}

	r = cm.Repositories[1]
	if r.ID != "repo2" {
		t.Errorf("Incorrect repo %q != repo2", r.ID)
	}
	if l := len(r.Nodes); l != 2 {
		t.Errorf("Incorrect number of nodes %d != 2", l)
	}
	if id := r.Nodes[0].ID; bytes.Compare(id, node1[:]) != 0 {
		t.Errorf("Incorrect node ID %x != %x", id, node1)
	}
	if id := r.Nodes[1].ID; bytes.Compare(id, node2[:]) != 0 {
		t.Errorf("Incorrect node ID %x != %x", id, node2)
	}
}