func NewBroadcast(port int) *Broadcast { b := &Broadcast{ Supervisor: suture.New("broadcastBeacon", suture.Spec{ // Don't retry too frenetically: an error to open a socket or // whatever is usually something that is either permanent or takes // a while to get solved... FailureThreshold: 2, FailureBackoff: 60 * time.Second, // Only log restarts in debug mode. Log: func(line string) { l.Debugln(line) }, }), port: port, inbox: make(chan []byte), outbox: make(chan recv, 16), } b.br = &broadcastReader{ port: port, outbox: b.outbox, connMut: sync.NewMutex(), } b.Add(b.br) b.bw = &broadcastWriter{ port: port, inbox: b.inbox, connMut: sync.NewMutex(), } b.Add(b.bw) return b }
func newAPIService(id protocol.DeviceID, cfg *config.Wrapper, assetDir string, m *model.Model, eventSub *events.BufferedSubscription, discoverer *discover.CachingMux, relayService *relay.Service, errors, systemLog *logger.Recorder) (*apiService, error) { service := &apiService{ id: id, cfg: cfg, assetDir: assetDir, model: m, eventSub: eventSub, discoverer: discoverer, relayService: relayService, systemConfigMut: sync.NewMutex(), stop: make(chan struct{}), configChanged: make(chan struct{}), listenerMut: sync.NewMutex(), guiErrors: errors, systemLog: systemLog, } seen := make(map[string]struct{}) for file := range auto.Assets() { theme := strings.Split(file, "/")[0] if _, ok := seen[theme]; !ok { seen[theme] = struct{}{} service.themes = append(service.themes, theme) } } var err error service.listener, err = service.getListener(cfg.GUI()) return service, err }
func newRWFolder(m *Model, shortID protocol.ShortID, cfg config.FolderConfiguration) *rwFolder { p := &rwFolder{ stateTracker: stateTracker{ folder: cfg.ID, mut: sync.NewMutex(), }, model: m, progressEmitter: m.progressEmitter, virtualMtimeRepo: db.NewVirtualMtimeRepo(m.db, cfg.ID), folder: cfg.ID, dir: cfg.Path(), scanIntv: time.Duration(cfg.RescanIntervalS) * time.Second, ignorePerms: cfg.IgnorePerms, copiers: cfg.Copiers, pullers: cfg.Pullers, shortID: shortID, order: cfg.Order, maxConflicts: cfg.MaxConflicts, allowSparse: !cfg.DisableSparseFiles, checkFreeSpace: cfg.MinDiskFreePct != 0, stop: make(chan struct{}), queue: newJobQueue(), pullTimer: time.NewTimer(time.Second), scanTimer: time.NewTimer(time.Millisecond), // The first scan should be done immediately. delayScan: make(chan time.Duration), scanNow: make(chan rescanRequest), remoteIndex: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a notification if we're busy doing a pull when it comes. errorsMut: sync.NewMutex(), } if p.copiers == 0 { p.copiers = defaultCopiers } if p.pullers == 0 { p.pullers = defaultPullers } if cfg.PullerPauseS == 0 { p.pause = defaultPullerPause } else { p.pause = time.Duration(cfg.PullerPauseS) * time.Second } if cfg.PullerSleepS == 0 { p.sleep = defaultPullerSleep } else { p.sleep = time.Duration(cfg.PullerSleepS) * time.Second } return p }
// newUPnPService constructs a UPnP port-mapping service for the given local
// listen port.
func newUPnPService(cfg *config.Wrapper, localPort int) *upnpService {
	svc := &upnpService{
		cfg:        cfg,
		localPort:  localPort,
		extPortMut: sync.NewMutex(),
	}
	return svc
}
func TestSourceFileOK(t *testing.T) { s := sharedPullerState{ realName: "testdata/foo", mut: sync.NewMutex(), } fd, err := s.sourceFile() if err != nil { t.Fatal(err) } if fd == nil { t.Fatal("Unexpected nil fd") } bs := make([]byte, 6) n, err := fd.Read(bs) if n != len(bs) { t.Fatalf("Wrong read length %d != %d", n, len(bs)) } if string(bs) != "foobar" { t.Fatalf("Wrong contents %s != foobar", string(bs)) } if err := s.failed(); err != nil { t.Fatal(err) } }
func NewFileSet(folder string, db *Instance) *FileSet { var s = FileSet{ localVersion: make(map[protocol.DeviceID]int64), folder: folder, db: db, blockmap: NewBlockMap(db, db.folderIdx.ID([]byte(folder))), mutex: sync.NewMutex(), } s.db.checkGlobals([]byte(folder), &s.globalSize) var deviceID protocol.DeviceID s.db.withAllFolderTruncated([]byte(folder), func(device []byte, f FileInfoTruncated) bool { copy(deviceID[:], device) if f.LocalVersion > s.localVersion[deviceID] { s.localVersion[deviceID] = f.LocalVersion } if deviceID == protocol.LocalDeviceID { s.localSize.addFile(f) } return true }) l.Debugf("loaded localVersion for %q: %#v", folder, s.localVersion) clock(s.localVersion[protocol.LocalDeviceID]) return &s }
func newFolderSummaryService(cfg *config.Wrapper, m *model.Model) *folderSummaryService { service := &folderSummaryService{ Supervisor: suture.NewSimple("folderSummaryService"), cfg: cfg, model: m, stop: make(chan struct{}), immediate: make(chan string), folders: make(map[string]struct{}), foldersMut: sync.NewMutex(), lastEventReqMut: sync.NewMutex(), } service.Add(serviceFunc(service.listenForUpdates)) service.Add(serviceFunc(service.calculateSummaries)) return service }
// setUpRwFolder returns a minimal rwFolder for tests, named "default",
// rooted in testdata and attached to the given model.
func setUpRwFolder(model *Model) rwFolder {
	f := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     model,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}
	return f
}
// Wrap wraps an existing Configuration structure and ties it to a file on // disk. func Wrap(path string, cfg Configuration) *Wrapper { w := &Wrapper{ cfg: cfg, path: path, mut: sync.NewMutex(), } w.replaces = make(chan Configuration) return w }
// NewProcess returns a new Process talking to Syncthing at the specified address. // Example: NewProcess("127.0.0.1:8082") func NewProcess(addr string) *Process { p := &Process{ addr: addr, localVersion: make(map[string]map[string]int64), done: make(map[string]bool), eventMut: sync.NewMutex(), } p.startCompleteCond = stdsync.NewCond(p.eventMut) return p }
func NewBufferedSubscription(s *Subscription, size int) *BufferedSubscription { bs := &BufferedSubscription{ sub: s, buf: make([]Event, size), mut: sync.NewMutex(), } bs.cond = stdsync.NewCond(bs.mut) go bs.pollingLoop() return bs }
func newSmallIndex(db *Instance, prefix []byte) *smallIndex { idx := &smallIndex{ db: db, prefix: prefix, id2val: make(map[uint32]string), val2id: make(map[string]uint32), mut: sync.NewMutex(), } idx.load() return idx }
func New(withCache bool) *Matcher { m := &Matcher{ withCache: withCache, stop: make(chan struct{}), mut: sync.NewMutex(), } if withCache { go m.clean(2 * time.Hour) } return m }
func newAutoclosedFile(name string, closeDelay, maxOpenTime time.Duration) *autoclosedFile { f := &autoclosedFile{ name: name, closeDelay: closeDelay, maxOpenTime: maxOpenTime, mut: sync.NewMutex(), closed: make(chan struct{}), closeTimer: time.NewTimer(time.Minute), } go f.closerLoop() return f }
// NewProgressEmitter creates a new progress emitter which emits // DownloadProgress events every interval. func NewProgressEmitter(cfg *config.Wrapper) *ProgressEmitter { t := &ProgressEmitter{ stop: make(chan struct{}), registry: make(map[string]*sharedPullerState), last: make(map[string]map[string]*pullerProgress), timer: time.NewTimer(time.Millisecond), mut: sync.NewMutex(), } t.CommitConfiguration(config.Configuration{}, cfg.Raw()) cfg.Subscribe(t) return t }
// newROFolder constructs a read-only folder runner that rescans the given
// folder at the given interval. The timer fires almost immediately so the
// first scan happens right away.
func newROFolder(model *Model, folder string, interval time.Duration) *roFolder {
	f := &roFolder{
		stateTracker: stateTracker{
			folder: folder,
			mut:    sync.NewMutex(),
		},
		folder:    folder,
		intv:      interval,
		timer:     time.NewTimer(time.Millisecond),
		model:     model,
		stop:      make(chan struct{}),
		scanNow:   make(chan rescanRequest),
		delayScan: make(chan time.Duration),
	}
	return f
}
// TestProgressEmitter exercises the DownloadProgress event flow: each state
// transition on a registered sharedPullerState should yield exactly one
// event, with no spurious events in between, and deregistration a final
// event reporting zero files.
func TestProgressEmitter(t *testing.T) {
	w := events.Default.Subscribe(events.DownloadProgress)

	c := config.Wrap("/tmp/test", config.Configuration{})
	// Interval 0 so events arrive promptly for the expectEvent/expectTimeout
	// checks below.
	c.SetOptions(config.OptionsConfiguration{
		ProgressUpdateIntervalS: 0,
	})

	p := NewProgressEmitter(c)
	go p.Serve()

	// Nothing registered yet; no event should be emitted.
	expectTimeout(w, t)

	s := sharedPullerState{
		mut: sync.NewMutex(),
	}
	p.Register(&s)

	// Registration itself yields one event covering one file, then silence.
	expectEvent(w, t, 1)
	expectTimeout(w, t)

	// Each subsequent state transition yields exactly one event.
	s.copyDone()

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.copiedFromOrigin()

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.pullStarted()

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.pullDone()

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	// Deregistration yields a final event with zero files in progress.
	p.Deregister(&s)

	expectEvent(w, t, 0)
	expectTimeout(w, t)
}
func TestSourceFileBad(t *testing.T) { s := sharedPullerState{ realName: "nonexistent", mut: sync.NewMutex(), } fd, err := s.sourceFile() if err == nil { t.Fatal("Unexpected nil error") } if fd != nil { t.Fatal("Unexpected non-nil fd") } if err := s.failed(); err == nil { t.Fatal("Unexpected nil failed()") } }
func NewStaggered(folderID, folderPath string, params map[string]string) Versioner { maxAge, err := strconv.ParseInt(params["maxAge"], 10, 0) if err != nil { maxAge = 31536000 // Default: ~1 year } cleanInterval, err := strconv.ParseInt(params["cleanInterval"], 10, 0) if err != nil { cleanInterval = 3600 // Default: clean once per hour } // Use custom path if set, otherwise .stversions in folderPath var versionsDir string if params["versionsPath"] == "" { l.Debugln("using default dir .stversions") versionsDir = filepath.Join(folderPath, ".stversions") } else { l.Debugln("using dir", params["versionsPath"]) versionsDir = params["versionsPath"] } s := Staggered{ versionsPath: versionsDir, cleanInterval: cleanInterval, folderPath: folderPath, interval: [4]Interval{ {30, 3600}, // first hour -> 30 sec between versions {3600, 86400}, // next day -> 1 h between versions {86400, 592000}, // next 30 days -> 1 day between versions {604800, maxAge}, // next year -> 1 week between versions }, mutex: sync.NewMutex(), } l.Debugf("instantiated %#v", s) go func() { s.clean() for _ = range time.Tick(time.Duration(cleanInterval) * time.Second) { s.clean() } }() return s }
// Test creating temporary file inside read-only directory func TestReadOnlyDir(t *testing.T) { // Create a read only directory, clean it up afterwards. os.Mkdir("testdata/read_only_dir", 0555) defer func() { os.Chmod("testdata/read_only_dir", 0755) os.RemoveAll("testdata/read_only_dir") }() s := sharedPullerState{ tempName: "testdata/read_only_dir/.temp_name", mut: sync.NewMutex(), } fd, err := s.tempFile() if err != nil { t.Fatal(err) } if fd == nil { t.Fatal("Unexpected nil fd") } s.fail("Test done", nil) s.finalClose() }
package db import ( "bytes" "fmt" "github.com/hernad/syncthing/lib/protocol" "github.com/hernad/syncthing/lib/sync" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/opt" ) var ( clockTick int64 clockMut = sync.NewMutex() ) func clock(v int64) int64 { clockMut.Lock() defer clockMut.Unlock() if v > clockTick { clockTick = v + 1 } else { clockTick++ } return clockTick } const ( KeyTypeDevice = iota
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile, hasCurFile := p.model.CurrentFolderFile(p.folder, file.Name)

	if hasCurFile && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		l.Debugln(p, "taking shortcut on", file.Name)

		events.Default.Log(events.ItemStarted, map[string]string{
			"folder": p.folder,
			"item":   file.Name,
			"type":   "file",
			"action": "metadata",
		})

		p.queue.Done(file.Name)

		// Symlinks and regular files take different metadata-update paths.
		var err error
		if file.IsSymlink() {
			err = p.shortcutSymlink(file)
		} else {
			err = p.shortcutFile(file)
		}

		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "metadata",
		})

		if err != nil {
			l.Infoln("Puller: shortcut:", err)
			p.newError(file.Name, err)
		} else {
			// Record the shortcut in the database update queue.
			p.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
		}

		return
	}

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(p.dir, file.Name)

	if hasCurFile && !curFile.IsDirectory() && !curFile.IsSymlink() {
		// Check that the file on disk is what we expect it to be according to
		// the database. If there's a mismatch here, there might be local
		// changes that we don't know about yet and we should scan before
		// touching the file. If we can't stat the file we'll just pull it.
		if info, err := osutil.Lstat(realName); err == nil {
			mtime := p.virtualMtimeRepo.GetMtime(file.Name, info.ModTime())
			if mtime.Unix() != curFile.Modified || info.Size() != curFile.Size() {
				l.Debugln("file modified but not rescanned; not pulling:", realName)
				// Scan() is synchronous (i.e. blocks until the scan is
				// completed and returns an error), but a scan can't happen
				// while we're in the puller routine. Request the scan in the
				// background and it'll be handled when the current pulling
				// sweep is complete. As we do retries, we'll queue the scan
				// for this file up to ten times, but the last nine of those
				// scans will be cheap...
				go p.Scan([]string{file.Name})
				return
			}
		}
	}

	scanner.PopulateOffsets(file.Blocks)

	// blocks accumulates the blocks we must actually fetch; blocksSize their
	// total byte size (used for the free-space check below).
	reused := 0
	var blocks []protocol.BlockInfo
	var blocksSize int64

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize, 0, nil)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]struct{}, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = struct{}{}
		}

		// Since the blocks are already there, we don't need to get them.
		for _, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
				blocksSize += int64(block.Size)
			}
		}

		// The sharedpullerstate will know which flags to use when opening the
		// temp file depending if we are reusing any blocks or not.
		reused = len(file.Blocks) - len(blocks)
		if reused == 0 {
			// Otherwise, discard the file ourselves in order for the
			// sharedpuller not to panic when it fails to exclusively create a
			// file which already exists
			osutil.InWritableDir(osutil.Remove, tempName)
		}
	} else {
		// No usable temp file: everything must be fetched.
		blocks = file.Blocks
		blocksSize = file.Size()
	}

	if p.checkFreeSpace {
		// Refuse to start a pull that cannot fit on disk.
		if free, err := osutil.DiskFreeBytes(p.dir); err == nil && free < blocksSize {
			l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, p.folder, p.dir, file.Name, float64(free)/1024/1024, float64(blocksSize)/1024/1024)
			p.newError(file.Name, errors.New("insufficient space"))
			return
		}
	}

	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "file",
		"action": "update",
	})

	s := sharedPullerState{
		file:        file,
		folder:      p.folder,
		tempName:    tempName,
		realName:    realName,
		copyTotal:   len(blocks),
		copyNeeded:  len(blocks),
		reused:      reused,
		ignorePerms: p.ignorePermissions(file),
		version:     curFile.Version,
		mut:         sync.NewMutex(),
		sparse:      p.allowSparse,
	}

	l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)

	// Hand the block list to the copier routine; it forwards to the puller
	// and finisher as needed.
	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}
"net/http" "os" "strings" "github.com/hernad/syncthing/lib/config" "github.com/hernad/syncthing/lib/osutil" "github.com/hernad/syncthing/lib/sync" ) // csrfTokens is a list of valid tokens. It is sorted so that the most // recently used token is first in the list. New tokens are added to the front // of the list (as it is the most recently used at that time). The list is // pruned to a maximum of maxCsrfTokens, throwing away the least recently used // tokens. var csrfTokens []string var csrfMut = sync.NewMutex() const maxCsrfTokens = 25 // Check for CSRF token on /rest/ URLs. If a correct one is not given, reject // the request with 403. For / and /index.html, set a new CSRF cookie if none // is currently set. func csrfMiddleware(unique string, prefix string, cfg config.GUIConfiguration, next http.Handler) http.Handler { loadCsrfTokens() return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Allow requests carrying a valid API key if cfg.IsValidAPIKey(r.Header.Get("X-API-Key")) { next.ServeHTTP(w, r) return }
// newDeviceActivity returns an empty per-device activity tracker.
func newDeviceActivity() *deviceActivity {
	activity := &deviceActivity{
		act: make(map[protocol.DeviceID]int),
		mut: sync.NewMutex(),
	}
	return activity
}
// NewLogger returns a Logger guarded by a fresh mutex.
func NewLogger() *Logger {
	logger := &Logger{
		mutex: sync.NewMutex(),
	}
	return logger
}
// TestDeregisterOnFailInPull verifies that when a pull fails (there is no
// device to pull from), the finisher routine closes the temp file and
// removes the file from both the job queue and the progress emitter — and
// that pushing the same state through the finisher a second time is a no-op.
func TestDeregisterOnFailInPull(t *testing.T) {
	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))

	db := db.OpenMemory()

	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	emitter := NewProgressEmitter(defaultConfig)
	go emitter.Serve()

	p := rwFolder{
		folder:          "default",
		dir:             "testdata",
		model:           m,
		queue:           newJobQueue(),
		progressEmitter: emitter,
		errors:          make(map[string]string),
		errorsMut:       sync.NewMutex(),
	}

	// queue.Done should be called by the finisher routine
	p.queue.Push("filex", 0, 0)
	p.queue.Pop()

	if p.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	// Wire up the full copier -> puller -> finisher pipeline.
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)

	go p.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go p.pullerRoutine(pullChan, finisherBufferChan)
	go p.finisherRoutine(finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if p.progressEmitter.lenRegistry() != 1 || p.queue.lenProgress() != 1 || p.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		// Read fd under the state's lock to avoid racing the finisher.
		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if p.progressEmitter.lenRegistry() != 0 || p.queue.lenProgress() != 0 || p.queue.lenQueued() != 0 {
			t.Fatal("Still registered", p.progressEmitter.lenRegistry(), p.queue.lenProgress(), p.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
"bytes" "encoding/base64" "math/rand" "net/http" "strings" "time" "github.com/hernad/syncthing/lib/config" "github.com/hernad/syncthing/lib/events" "github.com/hernad/syncthing/lib/sync" "golang.org/x/crypto/bcrypt" ) var ( sessions = make(map[string]bool) sessionsMut = sync.NewMutex() ) func emitLoginAttempt(success bool, username string) { events.Default.Log(events.LoginAttempt, map[string]interface{}{ "success": success, "username": username, }) } func basicAuthAndSessionMiddleware(cookieName string, cfg config.GUIConfiguration, next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if cfg.IsValidAPIKey(r.Header.Get("X-API-Key")) { next.ServeHTTP(w, r) return }
// newJobQueue returns an empty jobQueue protected by a fresh mutex.
func newJobQueue() *jobQueue {
	queue := &jobQueue{
		mut: sync.NewMutex(),
	}
	return queue
}
"fmt" "io" "os" "path/filepath" "runtime" "strings" "github.com/calmh/du" "github.com/hernad/syncthing/lib/sync" ) var ErrNoHome = errors.New("No home directory found - set $HOME (or the platform equivalent).") // Try to keep this entire operation atomic-like. We shouldn't be doing this // often enough that there is any contention on this lock. var renameLock = sync.NewMutex() // TryRename renames a file, leaving source file intact in case of failure. // Tries hard to succeed on various systems by temporarily tweaking directory // permissions and removing the destination file when necessary. func TryRename(from, to string) error { renameLock.Lock() defer renameLock.Unlock() return withPreparedTarget(from, to, func() error { return os.Rename(from, to) }) } // Rename moves a temporary file to it's final place. // Will make sure to delete the from file if the operation fails, so use only
"os" "os/exec" "os/signal" "runtime" "strings" "syscall" "time" "github.com/hernad/syncthing/lib/osutil" "github.com/hernad/syncthing/lib/sync" ) var ( stdoutFirstLines []string // The first 10 lines of stdout stdoutLastLines []string // The last 50 lines of stdout stdoutMut = sync.NewMutex() ) const ( countRestarts = 4 loopThreshold = 60 * time.Second logFileAutoCloseDelay = 5 * time.Second logFileMaxOpenTime = time.Minute ) func monitorMain(runtimeOptions RuntimeOptions) { os.Setenv("STNORESTART", "yes") os.Setenv("STMONITORED", "yes") l.SetPrefix("[monitor] ") var dst io.Writer = os.Stdout