func (d *Discoverer) StartGlobal(servers []string, extPort uint16) { d.mut.Lock() defer d.mut.Unlock() if len(d.clients) > 0 { d.stopGlobal() } d.extPort = extPort pkt := d.announcementPkt() wg := sync.NewWaitGroup() clients := make(chan Client, len(servers)) for _, address := range servers { wg.Add(1) go func(addr string) { defer wg.Done() client, err := New(addr, pkt) if err != nil { l.Infoln("Error creating discovery client", addr, err) return } clients <- client }(address) } wg.Wait() close(clients) for client := range clients { d.clients = append(d.clients, client) } }
// Discover discovers UPnP InternetGatewayDevices. // The order in which the devices appear in the results list is not deterministic. func Discover(timeout time.Duration) []IGD { var results []IGD interfaces, err := net.Interfaces() if err != nil { l.Infoln("Listing network interfaces:", err) return results } resultChan := make(chan IGD) wg := sync.NewWaitGroup() for _, intf := range interfaces { // Interface flags seem to always be 0 on Windows if runtime.GOOS != "windows" && (intf.Flags&net.FlagUp == 0 || intf.Flags&net.FlagMulticast == 0) { continue } for _, deviceType := range []string{"urn:schemas-upnp-org:device:InternetGatewayDevice:1", "urn:schemas-upnp-org:device:InternetGatewayDevice:2"} { wg.Add(1) go func(intf net.Interface, deviceType string) { discover(&intf, deviceType, timeout, resultChan) wg.Done() }(intf, deviceType) } } go func() { wg.Wait() close(resultChan) }() nextResult: for result := range resultChan { for _, existingResult := range results { if existingResult.uuid == result.uuid { if debug { l.Debugf("Skipping duplicate result %s with services:", result.uuid) for _, svc := range result.services { l.Debugf("* [%s] %s", svc.serviceID, svc.serviceURL) } } continue nextResult } } results = append(results, result) if debug { l.Debugf("UPnP discovery result %s with services:", result.uuid) for _, svc := range result.services { l.Debugf("* [%s] %s", svc.serviceID, svc.serviceURL) } } } return results }
// TestConcurrentSetClear hammers a leveldb database for a fixed duration:
// one goroutine alternately fills and clears it while another continuously
// scans it, to smoke out concurrency problems in the database layer.
// Skipped under -short.
func TestConcurrentSetClear(t *testing.T) {
	if testing.Short() {
		return
	}

	dur := 30 * time.Second
	t0 := time.Now()
	wg := sync.NewWaitGroup()

	os.RemoveAll("testdata/concurrent-set-clear.db")

	db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{OpenFilesCacheCapacity: 10})
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll("testdata/concurrent-set-clear.db")

	// Buffered for three senders (two workers plus the waiter goroutine)
	// so every sender can deliver without blocking even though only one
	// value is ever received below.
	errChan := make(chan error, 3)

	// Writer: repeatedly set and then clear items until the deadline, or
	// bail out on the first error.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for time.Since(t0) < dur {
			if err := setItems(db); err != nil {
				errChan <- err
				return
			}
			if err := clearItems(db); err != nil {
				errChan <- err
				return
			}
		}
	}()

	// Reader: repeatedly scan items until the deadline, concurrently with
	// the writer above.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for time.Since(t0) < dur {
			if err := scanItems(db); err != nil {
				errChan <- err
				return
			}
		}
	}()

	// Waiter: delivers nil once both workers finished cleanly.
	go func() {
		wg.Wait()
		errChan <- nil
	}()

	// The first value received is either the first worker error or the nil
	// sent after a clean run.
	err = <-errChan
	if err != nil {
		t.Error(err)
	}

	db.Close()
}
func init() { for _, proto := range []string{"udp", "udp4", "udp6"} { Register(proto, func(uri *url.URL, pkt *Announce) (Client, error) { c := &UDPClient{ wg: sync.NewWaitGroup(), mut: sync.NewRWMutex(), } err := c.Start(uri, pkt) if err != nil { return nil, err } return c, nil }) } }
func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo) { wg := sync.NewWaitGroup() wg.Add(workers) for i := 0; i < workers; i++ { go func() { hashFiles(dir, blockSize, outbox, inbox) wg.Done() }() } go func() { wg.Wait() close(outbox) }() }
func (m *Model) ScanFolders() map[string]error { m.fmut.RLock() folders := make([]string, 0, len(m.folderCfgs)) for folder := range m.folderCfgs { folders = append(folders, folder) } m.fmut.RUnlock() errors := make(map[string]error, len(m.folderCfgs)) errorsMut := sync.NewMutex() wg := sync.NewWaitGroup() wg.Add(len(folders)) for _, folder := range folders { folder := folder go func() { err := m.ScanFolder(folder) if err != nil { errorsMut.Lock() errors[folder] = err errorsMut.Unlock() // Potentially sets the error twice, once in the scanner just // by doing a check, and once here, if the error returned is // the same one as returned by CheckFolderHealth, though // duplicate set is handled by setError. m.fmut.RLock() srv := m.folderRunners[folder] m.fmut.RUnlock() srv.setError(err) } wg.Done() }() } wg.Wait() return errors }
// monitorMain runs the monitor process: it repeatedly starts the real
// syncthing binary as a child process, relays the child's stdout/stderr to
// the configured log destination(s), and restarts the child when it exits
// abnormally. It gives up if the child restarts too often within
// loopThreshold, and exits cleanly when the child does.
func monitorMain() {
	// Tell the child that it is being monitored and must not restart itself.
	os.Setenv("STNORESTART", "yes")
	os.Setenv("STMONITORED", "yes")
	l.SetPrefix("[monitor] ")

	var err error
	var dst io.Writer = os.Stdout

	if logFile != "" {
		var fileDst io.Writer

		fileDst, err = os.Create(logFile)
		if err != nil {
			l.Fatalln("log file:", err)
		}

		if runtime.GOOS == "windows" {
			// Translate line breaks to Windows standard
			fileDst = osutil.ReplacingWriter{
				Writer: fileDst,
				From:   '\n',
				To:     []byte{'\r', '\n'},
			}
		}

		// Log to both stdout and file.
		dst = io.MultiWriter(dst, fileDst)

		l.Infof(`Log output saved to file "%s"`, logFile)
	}

	args := os.Args
	// Sliding window of the most recent child start times, used to detect
	// a crash loop.
	var restarts [countRestarts]time.Time

	sign := make(chan os.Signal, 1)
	// 0xf is SIGTERM; spelled numerically since syscall.SIGTERM is not
	// available on all platforms this builds for.
	sigTerm := syscall.Signal(0xf)
	signal.Notify(sign, os.Interrupt, sigTerm, os.Kill)

	for {
		// If the oldest recorded start is still within the threshold we are
		// restarting too fast; give up instead of looping forever.
		if t := time.Since(restarts[0]); t < loopThreshold {
			l.Warnf("%d restarts in %v; not retrying further", countRestarts, t)
			os.Exit(exitError)
		}

		// Shift the window and record this start.
		copy(restarts[0:], restarts[1:])
		restarts[len(restarts)-1] = time.Now()

		cmd := exec.Command(args[0], args[1:]...)

		stderr, err := cmd.StderrPipe()
		if err != nil {
			l.Fatalln("stderr:", err)
		}

		stdout, err := cmd.StdoutPipe()
		if err != nil {
			l.Fatalln("stdout:", err)
		}

		l.Infoln("Starting syncthing")
		err = cmd.Start()
		if err != nil {
			l.Fatalln(err)
		}

		// Let the next child process know that this is not the first time
		// it's starting up.
		os.Setenv("STRESTART", "yes")

		// Reset the captured first/last output lines for the new child.
		stdoutMut.Lock()
		stdoutFirstLines = make([]string, 0, 10)
		stdoutLastLines = make([]string, 0, 50)
		stdoutMut.Unlock()

		wg := sync.NewWaitGroup()

		// Relay the child's output; the WaitGroup ensures both pipes are
		// fully drained before cmd.Wait is called below.
		wg.Add(1)
		go func() {
			copyStderr(stderr, dst)
			wg.Done()
		}()

		wg.Add(1)
		go func() {
			copyStdout(stdout, dst)
			wg.Done()
		}()

		exit := make(chan error)

		go func() {
			wg.Wait()
			exit <- cmd.Wait()
		}()

		select {
		case s := <-sign:
			// Forward the shutdown request to the child and wait for it.
			l.Infof("Signal %d received; exiting", s)
			cmd.Process.Kill()
			<-exit
			return

		case err = <-exit:
			if err == nil {
				// Successful exit indicates an intentional shutdown
				return
			} else if exiterr, ok := err.(*exec.ExitError); ok {
				if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
					switch status.ExitStatus() {
					case exitUpgrading:
						// Restart the monitor process to release the .old
						// binary as part of the upgrade process.
						l.Infoln("Restarting monitor...")
						os.Setenv("STNORESTART", "")
						err := exec.Command(args[0], args[1:]...).Start()
						if err != nil {
							l.Warnln("restart:", err)
						}
						return
					}
				}
			}
		}

		// Abnormal exit: log it, pause briefly, and loop to restart.
		l.Infoln("Syncthing exited:", err)
		time.Sleep(1 * time.Second)
	}
}
// Lookup returns the known addresses for the given device. Valid cached
// entries are returned directly. Otherwise — subject to a negative-result
// cooldown — all global discovery clients are queried in parallel and the
// merged, deduplicated result is cached and returned. Returns nil when no
// addresses are known.
func (d *Discoverer) Lookup(device protocol.DeviceID) []string {
	d.registryLock.RLock()
	cached := d.filterCached(d.registry[device])
	lastLookup := d.lastLookup[device]
	d.registryLock.RUnlock()

	// Held for the remainder of the call; guards d.clients and the
	// broadcast timing fields read below.
	d.mut.RLock()
	defer d.mut.RUnlock()

	if len(cached) > 0 {
		// There are cached address entries.
		addrs := make([]string, len(cached))
		for i := range cached {
			addrs[i] = cached[i].Address
		}
		return addrs
	}

	if time.Since(lastLookup) < d.negCacheCutoff {
		// We have recently tried to lookup this address and failed. Lets
		// chill for a while.
		return nil
	}

	if len(d.clients) != 0 && time.Since(d.localBcastStart) > d.localBcastIntv {
		// Only perform external lookups if we have at least one external
		// server client and one local announcement interval has passed. This is
		// to avoid finding local peers on their remote address at startup.

		// Buffered one slot per client so every goroutine can deliver its
		// result without blocking.
		results := make(chan []string, len(d.clients))
		wg := sync.NewWaitGroup()
		for _, client := range d.clients {
			wg.Add(1)
			go func(c Client) {
				defer wg.Done()
				results <- c.Lookup(device)
			}(client)
		}

		wg.Wait()
		close(results)

		// Merge the per-client results, deduplicating addresses, and build
		// fresh cache entries stamped with the current time.
		cached := []CacheEntry{}
		seen := make(map[string]struct{})
		now := time.Now()

		var addrs []string
		for result := range results {
			for _, addr := range result {
				_, ok := seen[addr]
				if !ok {
					cached = append(cached, CacheEntry{
						Address: addr,
						Seen:    now,
					})
					seen[addr] = struct{}{}
					addrs = append(addrs, addr)
				}
			}
		}

		// Record the merged result. An empty result still updates
		// lastLookup, which drives the negative cache above.
		d.registryLock.Lock()
		d.registry[device] = cached
		d.lastLookup[device] = time.Now()
		d.registryLock.Unlock()

		return addrs
	}

	return nil
}
// pullerIteration runs a single puller iteration for the given folder and // returns the number items that should have been synced (even those that // might have failed). One puller iteration handles all files currently // flagged as needed in the folder. func (p *rwFolder) pullerIteration(ignores *ignore.Matcher) int { pullChan := make(chan pullBlockState) copyChan := make(chan copyBlocksState) finisherChan := make(chan *sharedPullerState) updateWg := sync.NewWaitGroup() copyWg := sync.NewWaitGroup() pullWg := sync.NewWaitGroup() doneWg := sync.NewWaitGroup() if debug { l.Debugln(p, "c", p.copiers, "p", p.pullers) } p.dbUpdates = make(chan dbUpdateJob) updateWg.Add(1) go func() { // dbUpdaterRoutine finishes when p.dbUpdates is closed p.dbUpdaterRoutine() updateWg.Done() }() for i := 0; i < p.copiers; i++ { copyWg.Add(1) go func() { // copierRoutine finishes when copyChan is closed p.copierRoutine(copyChan, pullChan, finisherChan) copyWg.Done() }() } for i := 0; i < p.pullers; i++ { pullWg.Add(1) go func() { // pullerRoutine finishes when pullChan is closed p.pullerRoutine(pullChan, finisherChan) pullWg.Done() }() } doneWg.Add(1) // finisherRoutine finishes when finisherChan is closed go func() { p.finisherRoutine(finisherChan) doneWg.Done() }() p.model.fmut.RLock() folderFiles := p.model.folderFiles[p.folder] p.model.fmut.RUnlock() // !!! // WithNeed takes a database snapshot (by necessity). By the time we've // handled a bunch of files it might have become out of date and we might // be attempting to sync with an old version of a file... // !!! changed := 0 fileDeletions := map[string]protocol.FileInfo{} dirDeletions := []protocol.FileInfo{} buckets := map[string][]protocol.FileInfo{} folderFiles.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool { // Needed items are delivered sorted lexicographically. We'll handle // directories as they come along, so parents before children. Files // are queued and the order may be changed later. 
file := intf.(protocol.FileInfo) if ignores.Match(file.Name) { // This is an ignored file. Skip it, continue iteration. return true } if debug { l.Debugln(p, "handling", file.Name) } switch { case file.IsDeleted(): // A deleted file, directory or symlink if file.IsDirectory() { dirDeletions = append(dirDeletions, file) } else { fileDeletions[file.Name] = file df, ok := p.model.CurrentFolderFile(p.folder, file.Name) // Local file can be already deleted, but with a lower version // number, hence the deletion coming in again as part of // WithNeed, furthermore, the file can simply be of the wrong // type if we haven't yet managed to pull it. if ok && !df.IsDeleted() && !df.IsSymlink() && !df.IsDirectory() { // Put files into buckets per first hash key := string(df.Blocks[0].Hash) buckets[key] = append(buckets[key], df) } } case file.IsDirectory() && !file.IsSymlink(): // A new or changed directory if debug { l.Debugln("Creating directory", file.Name) } p.handleDir(file) default: // A new or changed file or symlink. This is the only case where we // do stuff concurrently in the background p.queue.Push(file.Name, file.Size(), file.Modified) } changed++ return true }) // Reorder the file queue according to configuration switch p.order { case config.OrderRandom: p.queue.Shuffle() case config.OrderAlphabetic: // The queue is already in alphabetic order. case config.OrderSmallestFirst: p.queue.SortSmallestFirst() case config.OrderLargestFirst: p.queue.SortLargestFirst() case config.OrderOldestFirst: p.queue.SortOldestFirst() case config.OrderNewestFirst: p.queue.SortOldestFirst() } // Process the file queue nextFile: for { fileName, ok := p.queue.Pop() if !ok { break } f, ok := p.model.CurrentGlobalFile(p.folder, fileName) if !ok { // File is no longer in the index. Mark it as done and drop it. 
p.queue.Done(fileName) continue } // Local file can be already deleted, but with a lower version // number, hence the deletion coming in again as part of // WithNeed, furthermore, the file can simply be of the wrong type if // the global index changed while we were processing this iteration. if !f.IsDeleted() && !f.IsSymlink() && !f.IsDirectory() { key := string(f.Blocks[0].Hash) for i, candidate := range buckets[key] { if scanner.BlocksEqual(candidate.Blocks, f.Blocks) { // Remove the candidate from the bucket lidx := len(buckets[key]) - 1 buckets[key][i] = buckets[key][lidx] buckets[key] = buckets[key][:lidx] // candidate is our current state of the file, where as the // desired state with the delete bit set is in the deletion // map. desired := fileDeletions[candidate.Name] // Remove the pending deletion (as we perform it by renaming) delete(fileDeletions, candidate.Name) p.renameFile(desired, f) p.queue.Done(fileName) continue nextFile } } } // Not a rename or a symlink, deal with it. p.handleFile(f, copyChan, finisherChan) } // Signal copy and puller routines that we are done with the in data for // this iteration. Wait for them to finish. close(copyChan) copyWg.Wait() close(pullChan) pullWg.Wait() // Signal the finisher chan that there will be no more input. close(finisherChan) // Wait for the finisherChan to finish. doneWg.Wait() for _, file := range fileDeletions { if debug { l.Debugln("Deleting file", file.Name) } p.deleteFile(file) } for i := range dirDeletions { dir := dirDeletions[len(dirDeletions)-i-1] if debug { l.Debugln("Deleting dir", dir.Name) } p.deleteDir(dir) } // Wait for db updates to complete close(p.dbUpdates) updateWg.Wait() return changed }
// TestUDP4Success exercises the UDP discovery client happy path against a
// local UDP listener standing in for the announce server: default options
// are parsed, the client announces and gets its announcement verified,
// status turns OK, and a lookup returns the announced address.
func TestUDP4Success(t *testing.T) {
	conn, err := net.ListenUDP("udp4", nil)
	if err != nil {
		t.Fatal(err)
	}

	// Point the client at our local listener.
	port := conn.LocalAddr().(*net.UDPAddr).Port
	address := fmt.Sprintf("udp4://127.0.0.1:%d", port)

	pkt := &Announce{
		Magic: AnnouncementMagic,
		This: Device{
			device[:],
			[]Address{{
				IP:   net.IPv4(123, 123, 123, 123),
				Port: 1234,
			}},
		},
	}

	client, err := New(address, pkt)
	if err != nil {
		t.Fatal(err)
	}

	// No URL options given, so defaults should apply: default retry
	// interval, unspecified listen address and port.
	udpclient := client.(*UDPClient)
	if udpclient.errorRetryInterval != DefaultErrorRetryInternval {
		t.Fatal("Incorrect retry interval")
	}

	if udpclient.listenAddress.IP != nil || udpclient.listenAddress.Port != 0 {
		t.Fatal("Wrong listen IP or port", udpclient.listenAddress)
	}

	if client.Address() != address {
		t.Fatal("Incorrect address")
	}

	buf := make([]byte, 2048)

	// First announcement
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, err = conn.Read(buf)
	if err != nil {
		t.Fatal(err)
	}

	// Announcement verification
	conn.SetDeadline(time.Now().Add(time.Millisecond * 1100))
	_, addr, err := conn.ReadFromUDP(buf)
	if err != nil {
		t.Fatal(err)
	}

	// Reply to it.
	_, err = conn.WriteToUDP(pkt.MustMarshalXDR(), addr)
	if err != nil {
		t.Fatal(err)
	}

	// We should get nothing else
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, err = conn.Read(buf)
	if err == nil {
		t.Fatal("Expected error")
	}

	// Status should be ok
	if !client.StatusOK() {
		t.Fatal("Wrong status")
	}

	// Do a lookup in a separate routine
	addrs := []string{}
	wg := sync.NewWaitGroup()
	wg.Add(1)
	go func() {
		addrs = client.Lookup(device)
		wg.Done()
	}()

	// Receive the lookup and reply
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, addr, err = conn.ReadFromUDP(buf)
	if err != nil {
		t.Fatal(err)
	}
	conn.WriteToUDP(pkt.MustMarshalXDR(), addr)

	// Wait for the lookup to arrive, verify that the number of answers is correct
	wg.Wait()
	if len(addrs) != 1 || addrs[0] != "123.123.123.123:1234" {
		t.Fatal("Wrong number of answers")
	}

	client.Stop()
}
// TestUDP4Failure exercises the UDP discovery client failure path against a
// local UDP listener that never replies: the listenaddress and retry URL
// options are parsed, the announcement goes unverified so status is not OK,
// and a lookup that receives no reply yields no addresses.
func TestUDP4Failure(t *testing.T) {
	conn, err := net.ListenUDP("udp4", nil)
	if err != nil {
		t.Fatal(err)
	}

	// Point the client at our local listener, with explicit URL options.
	port := conn.LocalAddr().(*net.UDPAddr).Port
	address := fmt.Sprintf("udp4://127.0.0.1:%d/?listenaddress=127.0.0.1&retry=5", port)

	pkt := &Announce{
		Magic: AnnouncementMagic,
		This: Device{
			device[:],
			[]Address{{
				IP:   net.IPv4(123, 123, 123, 123),
				Port: 1234,
			}},
		},
	}

	client, err := New(address, pkt)
	if err != nil {
		t.Fatal(err)
	}

	// retry=5 should map to a five second retry interval, and
	// listenaddress should pin the local IP (port still unspecified).
	udpclient := client.(*UDPClient)
	if udpclient.errorRetryInterval != time.Second*5 {
		t.Fatal("Incorrect retry interval")
	}

	if !udpclient.listenAddress.IP.Equal(net.IPv4(127, 0, 0, 1)) || udpclient.listenAddress.Port != 0 {
		t.Fatal("Wrong listen IP or port", udpclient.listenAddress)
	}

	if client.Address() != address {
		t.Fatal("Incorrect address")
	}

	buf := make([]byte, 2048)

	// First announcement
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, err = conn.Read(buf)
	if err != nil {
		t.Fatal(err)
	}

	// Announcement verification
	conn.SetDeadline(time.Now().Add(time.Millisecond * 1100))
	_, _, err = conn.ReadFromUDP(buf)
	if err != nil {
		t.Fatal(err)
	}

	// Don't reply

	// We should get nothing else
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, err = conn.Read(buf)
	if err == nil {
		t.Fatal("Expected error")
	}

	// Status should be failure
	if client.StatusOK() {
		t.Fatal("Wrong status")
	}

	// Do a lookup in a separate routine
	addrs := []string{}
	wg := sync.NewWaitGroup()
	wg.Add(1)
	go func() {
		addrs = client.Lookup(device)
		wg.Done()
	}()

	// Receive the lookup and don't reply
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, _, err = conn.ReadFromUDP(buf)
	if err != nil {
		t.Fatal(err)
	}

	// Wait for the lookup to timeout, verify that the number of answers is none
	wg.Wait()
	if len(addrs) != 0 {
		t.Fatal("Wrong number of answers")
	}

	client.Stop()
}