func (s *Service) ExternalAddresses() []string {
	s.listenersMut.RLock()
	var addrs []string
	for _, listener := range s.listeners {
		for _, wanAddr := range listener.WANAddresses() {
			addrs = append(addrs, wanAddr.String())
		}
	}
	s.listenersMut.RUnlock()
	return util.UniqueStrings(addrs)
}

func (w *Wrapper) ListenAddresses() []string {
	var addresses []string
	for _, addr := range w.cfg.Options.ListenAddresses {
		switch addr {
		case "default":
			addresses = append(addresses, DefaultListenAddresses...)
		default:
			addresses = append(addresses, addr)
		}
	}
	return util.UniqueStrings(addresses)
}

func (w *Wrapper) GlobalDiscoveryServers() []string {
	var servers []string
	for _, srv := range w.cfg.Options.GlobalAnnServers {
		switch srv {
		case "default":
			servers = append(servers, DefaultDiscoveryServers...)
		case "default-v4":
			servers = append(servers, DefaultDiscoveryServersV4...)
		case "default-v6":
			servers = append(servers, DefaultDiscoveryServersV6...)
		default:
			servers = append(servers, srv)
		}
	}
	return util.UniqueStrings(servers)
}

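// Every list builder in this section funnels its result through
// util.UniqueStrings. That helper is not shown here; the following is a
// minimal sketch of a deduplication function consistent with how it is used
// above (an assumption -- the real implementation may also trim or sort the
// strings).
func uniqueStringsSketch(ss []string) []string {
	seen := make(map[string]struct{}, len(ss))
	var out []string
	for _, s := range ss {
		if _, ok := seen[s]; ok {
			continue // drop later duplicates, keep the first occurrence
		}
		seen[s] = struct{}{}
		out = append(out, s)
	}
	return out
}
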
func (m *cachingMux) Cache() map[protocol.DeviceID]CacheEntry {
	// Res will be the "total" cache, i.e. the union of our cache and all our
	// children's caches.
	res := make(map[protocol.DeviceID]CacheEntry)

	m.mut.RLock()
	for i := range m.finders {
		// Each finder[i] has a corresponding cache at cache[i]. Go through
		// it and populate the total, appending any addresses and keeping
		// the newest "when" time. We skip any negative cache entries.
		for k, v := range m.caches[i].Cache() {
			if v.found {
				cur := res[k]
				if v.when.After(cur.when) {
					cur.when = v.when
				}
				cur.Addresses = append(cur.Addresses, v.Addresses...)
				res[k] = cur
			}
		}

		// Then ask the finder itself for its cache and do the same. If this
		// finder is a global discovery client, it will have no cache. If it's
		// a local discovery client, this will be its current state.
		for k, v := range m.finders[i].Cache() {
			if v.found {
				cur := res[k]
				if v.when.After(cur.when) {
					cur.when = v.when
				}
				cur.Addresses = append(cur.Addresses, v.Addresses...)
				res[k] = cur
			}
		}
	}
	m.mut.RUnlock()

	for k, v := range res {
		v.Addresses = util.UniqueStrings(v.Addresses)
		res[k] = v
	}

	return res
}

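// The two inner loops in Cache above apply the same merge to two different
// sources. A hypothetical helper (not part of the original code) capturing
// that merge step could look like this:
func mergeCacheEntries(res, src map[protocol.DeviceID]CacheEntry) {
	for k, v := range src {
		if !v.found {
			// Skip negative cache entries.
			continue
		}
		cur := res[k]
		if v.when.After(cur.when) {
			cur.when = v.when
		}
		cur.Addresses = append(cur.Addresses, v.Addresses...)
		res[k] = cur
	}
}
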
func (cfg *Configuration) prepare(myID protocol.DeviceID) {
	util.FillNilSlices(&cfg.Options)

	// Initialize any empty slices
	if cfg.Folders == nil {
		cfg.Folders = []FolderConfiguration{}
	}
	if cfg.IgnoredDevices == nil {
		cfg.IgnoredDevices = []protocol.DeviceID{}
	}
	if cfg.Options.AlwaysLocalNets == nil {
		cfg.Options.AlwaysLocalNets = []string{}
	}

	// Check for missing, bad or duplicate folder IDs
	var seenFolders = map[string]*FolderConfiguration{}
	for i := range cfg.Folders {
		folder := &cfg.Folders[i]
		folder.prepare()

		if seen, ok := seenFolders[folder.ID]; ok {
			l.Warnf("Multiple folders with ID %q; disabling", folder.ID)
			seen.Invalid = "duplicate folder ID"
			folder.Invalid = "duplicate folder ID"
		} else {
			seenFolders[folder.ID] = folder
		}
	}

	cfg.Options.ListenAddress = util.UniqueStrings(cfg.Options.ListenAddress)
	cfg.Options.GlobalAnnServers = util.UniqueStrings(cfg.Options.GlobalAnnServers)

	if cfg.Version > 0 && cfg.Version < OldestHandledVersion {
		l.Warnf("Configuration version %d is deprecated. Attempting best effort conversion, but please verify manually.", cfg.Version)
	}

	// Upgrade configuration versions as appropriate
	if cfg.Version <= 10 {
		convertV10V11(cfg)
	}
	if cfg.Version == 11 {
		convertV11V12(cfg)
	}
	if cfg.Version == 12 {
		convertV12V13(cfg)
	}

	// Build a list of available devices
	existingDevices := make(map[protocol.DeviceID]bool)
	for _, device := range cfg.Devices {
		existingDevices[device.DeviceID] = true
	}

	// Ensure this device is present in the config
	if !existingDevices[myID] {
		myName, _ := os.Hostname()
		cfg.Devices = append(cfg.Devices, DeviceConfiguration{
			DeviceID: myID,
			Name:     myName,
		})
		existingDevices[myID] = true
	}

	// Ensure that the device list is free from duplicates
	cfg.Devices = ensureNoDuplicateDevices(cfg.Devices)

	sort.Sort(DeviceConfigurationList(cfg.Devices))

	// Ensure that any loose devices are not present in the wrong places
	// Ensure that there are no duplicate devices
	// Ensure that puller settings are sane
	// Ensure that the versioning configuration parameter map is not nil
	for i := range cfg.Folders {
		cfg.Folders[i].Devices = ensureDevicePresent(cfg.Folders[i].Devices, myID)
		cfg.Folders[i].Devices = ensureExistingDevices(cfg.Folders[i].Devices, existingDevices)
		cfg.Folders[i].Devices = ensureNoDuplicateFolderDevices(cfg.Folders[i].Devices)
		if cfg.Folders[i].Versioning.Params == nil {
			cfg.Folders[i].Versioning.Params = map[string]string{}
		}
		sort.Sort(FolderDeviceConfigurationList(cfg.Folders[i].Devices))
	}

	// An empty address list is equivalent to a single "dynamic" entry
	for i := range cfg.Devices {
		n := &cfg.Devices[i]
		if len(n.Addresses) == 0 || len(n.Addresses) == 1 && n.Addresses[0] == "" {
			n.Addresses = []string{"dynamic"}
		}
	}

	// Very short reconnection intervals are annoying
	if cfg.Options.ReconnectIntervalS < 5 {
		cfg.Options.ReconnectIntervalS = 5
	}

	if cfg.GUI.APIKey == "" {
		cfg.GUI.APIKey = util.RandomString(32)
	}
}

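// prepare (and clean below) lean on a family of ensure* helpers that are not
// shown in this section. As an illustration, a de-duplication pass consistent
// with how ensureNoDuplicateDevices is used above might look like the
// following sketch (an assumption; the real implementation may differ in
// detail):
func ensureNoDuplicateDevicesSketch(devices []DeviceConfiguration) []DeviceConfiguration {
	seen := make(map[protocol.DeviceID]struct{}, len(devices))
	out := devices[:0] // filter in place, reusing the backing array
	for _, dev := range devices {
		if _, ok := seen[dev.DeviceID]; ok {
			// Drop the later duplicate, keeping the first occurrence.
			continue
		}
		seen[dev.DeviceID] = struct{}{}
		out = append(out, dev)
	}
	return out
}
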
func (cfg *Configuration) clean() error {
	util.FillNilSlices(&cfg.Options)

	// Initialize any empty slices
	if cfg.Folders == nil {
		cfg.Folders = []FolderConfiguration{}
	}
	if cfg.IgnoredDevices == nil {
		cfg.IgnoredDevices = []protocol.DeviceID{}
	}
	if cfg.Options.AlwaysLocalNets == nil {
		cfg.Options.AlwaysLocalNets = []string{}
	}
	if cfg.Options.UnackedNotificationIDs == nil {
		cfg.Options.UnackedNotificationIDs = []string{}
	}

	// Prepare folders and check for duplicates. Duplicates are bad and
	// dangerous, can't currently be resolved in the GUI, and shouldn't
	// happen when configured by the GUI. We return with an error in that
	// situation.
	seenFolders := make(map[string]struct{})
	for i := range cfg.Folders {
		folder := &cfg.Folders[i]
		folder.prepare()

		if _, ok := seenFolders[folder.ID]; ok {
			return fmt.Errorf("duplicate folder ID %q in configuration", folder.ID)
		}
		seenFolders[folder.ID] = struct{}{}
	}

	cfg.Options.ListenAddresses = util.UniqueStrings(cfg.Options.ListenAddresses)
	cfg.Options.GlobalAnnServers = util.UniqueStrings(cfg.Options.GlobalAnnServers)

	if cfg.Version > 0 && cfg.Version < OldestHandledVersion {
		l.Warnf("Configuration version %d is deprecated. Attempting best effort conversion, but please verify manually.", cfg.Version)
	}

	// Upgrade configuration versions as appropriate
	if cfg.Version <= 10 {
		convertV10V11(cfg)
	}
	if cfg.Version == 11 {
		convertV11V12(cfg)
	}
	if cfg.Version == 12 {
		convertV12V13(cfg)
	}
	if cfg.Version == 13 {
		convertV13V14(cfg)
	}
	if cfg.Version == 14 {
		convertV14V15(cfg)
	}
	if cfg.Version == 15 {
		convertV15V16(cfg)
	}
	if cfg.Version == 16 {
		convertV16V17(cfg)
	}

	// Build a list of available devices
	existingDevices := make(map[protocol.DeviceID]bool)
	for _, device := range cfg.Devices {
		existingDevices[device.DeviceID] = true
	}

	// Ensure that the device list is free from duplicates
	cfg.Devices = ensureNoDuplicateDevices(cfg.Devices)

	sort.Sort(DeviceConfigurationList(cfg.Devices))

	// Ensure that any loose devices are not present in the wrong places
	// Ensure that there are no duplicate devices
	// Ensure that the versioning configuration parameter map is not nil
	for i := range cfg.Folders {
		cfg.Folders[i].Devices = ensureExistingDevices(cfg.Folders[i].Devices, existingDevices)
		cfg.Folders[i].Devices = ensureNoDuplicateFolderDevices(cfg.Folders[i].Devices)
		if cfg.Folders[i].Versioning.Params == nil {
			cfg.Folders[i].Versioning.Params = map[string]string{}
		}
		sort.Sort(FolderDeviceConfigurationList(cfg.Folders[i].Devices))
	}

	// An empty address list is equivalent to a single "dynamic" entry
	for i := range cfg.Devices {
		n := &cfg.Devices[i]
		if len(n.Addresses) == 0 || len(n.Addresses) == 1 && n.Addresses[0] == "" {
			n.Addresses = []string{"dynamic"}
		}
	}

	// Very short reconnection intervals are annoying
	if cfg.Options.ReconnectIntervalS < 5 {
		cfg.Options.ReconnectIntervalS = 5
	}

	if cfg.GUI.APIKey == "" {
		cfg.GUI.APIKey = rand.String(32)
	}

	// The list of ignored devices should not contain any devices that have
	// been manually added to the config.
	newIgnoredDevices := []protocol.DeviceID{}
	for _, dev := range cfg.IgnoredDevices {
		if !existingDevices[dev] {
			newIgnoredDevices = append(newIgnoredDevices, dev)
		}
	}
	cfg.IgnoredDevices = newIgnoredDevices

	return nil
}

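// The chained "if cfg.Version == N { convertVNVM(cfg) }" blocks above step a
// configuration forward one schema version at a time, so an old config is
// migrated through every intermediate version before use. A hypothetical
// converter following that pattern (the field changes are illustrative only,
// not taken from the real migrations) would adjust whatever changed in that
// step and then record the new version so the next converter in the chain
// can run:
func convertVxVySketch(cfg *Configuration) {
	// ... migrate or rename the fields that changed in this step ...

	// The real converters set the explicit target version number here.
	cfg.Version++
}
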
// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (v Staggered) Archive(filePath string) error {
	l.Debugln("Waiting for lock on ", v.versionsPath)
	v.mutex.Lock()
	defer v.mutex.Unlock()

	_, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		l.Debugln("not archiving nonexistent file", filePath)
		return nil
	} else if err != nil {
		return err
	}

	if _, err := os.Stat(v.versionsPath); err != nil {
		if os.IsNotExist(err) {
			l.Debugln("creating versions dir", v.versionsPath)
			osutil.MkdirAll(v.versionsPath, 0755)
			osutil.HideFile(v.versionsPath)
		} else {
			return err
		}
	}

	l.Debugln("archiving", filePath)

	file := filepath.Base(filePath)
	inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
	if err != nil {
		return err
	}

	dir := filepath.Join(v.versionsPath, inFolderPath)
	err = osutil.MkdirAll(dir, 0755)
	if err != nil && !os.IsExist(err) {
		return err
	}

	ver := taggedFilename(file, time.Now().Format(TimeFormat))
	dst := filepath.Join(dir, ver)
	l.Debugln("moving to", dst)
	err = osutil.Rename(filePath, dst)
	if err != nil {
		return err
	}

	// Glob according to the new file~timestamp.ext pattern.
	pattern := filepath.Join(dir, taggedFilename(file, TimeGlob))
	newVersions, err := osutil.Glob(pattern)
	if err != nil {
		l.Warnln("globbing:", err, "for", pattern)
		return nil
	}

	// Also according to the old file.ext~timestamp pattern.
	pattern = filepath.Join(dir, file+"~"+TimeGlob)
	oldVersions, err := osutil.Glob(pattern)
	if err != nil {
		l.Warnln("globbing:", err, "for", pattern)
		return nil
	}

	// Use all the found filenames.
	versions := append(oldVersions, newVersions...)
	v.expire(util.UniqueStrings(versions))

	return nil
}

// Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived).
func (v Simple) Archive(filePath string) error {
	fileInfo, err := osutil.Lstat(filePath)
	if os.IsNotExist(err) {
		l.Debugln("not archiving nonexistent file", filePath)
		return nil
	} else if err != nil {
		return err
	}

	versionsDir := filepath.Join(v.folderPath, ".stversions")
	_, err = os.Stat(versionsDir)
	if err != nil {
		if os.IsNotExist(err) {
			l.Debugln("creating versions dir", versionsDir)
			osutil.MkdirAll(versionsDir, 0755)
			osutil.HideFile(versionsDir)
		} else {
			return err
		}
	}

	l.Debugln("archiving", filePath)

	file := filepath.Base(filePath)
	inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
	if err != nil {
		return err
	}

	dir := filepath.Join(versionsDir, inFolderPath)
	err = osutil.MkdirAll(dir, 0755)
	if err != nil && !os.IsExist(err) {
		return err
	}

	ver := taggedFilename(file, fileInfo.ModTime().Format(TimeFormat))
	dst := filepath.Join(dir, ver)
	l.Debugln("moving to", dst)
	err = osutil.Rename(filePath, dst)
	if err != nil {
		return err
	}

	// Glob according to the new file~timestamp.ext pattern.
	pattern := filepath.Join(dir, taggedFilename(file, TimeGlob))
	newVersions, err := osutil.Glob(pattern)
	if err != nil {
		l.Warnln("globbing:", err, "for", pattern)
		return nil
	}

	// Also according to the old file.ext~timestamp pattern.
	pattern = filepath.Join(dir, file+"~"+TimeGlob)
	oldVersions, err := osutil.Glob(pattern)
	if err != nil {
		l.Warnln("globbing:", err, "for", pattern)
		return nil
	}

	// Use all the found filenames. "~" sorts after "." so all old pattern
	// files will be deleted before any new, which is as it should be.
	versions := util.UniqueStrings(append(oldVersions, newVersions...))
	if len(versions) > v.keep {
		for _, toRemove := range versions[:len(versions)-v.keep] {
			l.Debugln("cleaning out", toRemove)
			err = os.Remove(toRemove)
			if err != nil {
				l.Warnln("removing old version:", err)
			}
		}
	}

	return nil
}

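// Both Archive implementations rely on taggedFilename to build the
// "file~timestamp.ext" names referred to in the comments above. That helper
// is not shown in this section; a minimal sketch consistent with the naming
// pattern could look like this (an assumption, not the original code):
func taggedFilenameSketch(name, tag string) string {
	ext := filepath.Ext(name)
	withoutExt := name[:len(name)-len(ext)]
	return withoutExt + "~" + tag + ext
}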