Example #1
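Note: these examples come from the Syncthing codebase, where "sync" is the project's own wrapper package rather than the standard library, which has no NewWaitGroup constructor. A minimal sketch of what the wrapper is assumed to provide (the real package may add instrumentation such as deadlock detection in debug builds):

package sync

import stdsync "sync"

// WaitGroup exposes the standard WaitGroup methods behind an interface so
// that an instrumented implementation can be swapped in for debugging.
type WaitGroup interface {
	Add(delta int)
	Done()
	Wait()
}

// NewWaitGroup returns a plain standard-library WaitGroup.
func NewWaitGroup() WaitGroup {
	return &stdsync.WaitGroup{}
}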
func discoverAll(renewal, timeout time.Duration) map[string]Device {
	wg := sync.NewWaitGroup()
	wg.Add(len(providers))

	c := make(chan Device)
	done := make(chan struct{})

	for _, discoverFunc := range providers {
		go func(f DiscoverFunc) {
			for _, dev := range f(renewal, timeout) {
				c <- dev
			}
			wg.Done()
		}(discoverFunc)
	}

	nats := make(map[string]Device)

	go func() {
		for dev := range c {
			nats[dev.ID()] = dev
		}
		close(done)
	}()

	wg.Wait()
	close(c)
	<-done

	return nats
}
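discoverAll above is a complete fan-out/fan-in shape: one goroutine per provider sends into an unbuffered channel, a single consumer goroutine builds the map, and the done channel keeps the function from returning before the consumer has drained everything. Below is a minimal self-contained sketch of the same shape using the standard library's WaitGroup; the names (fanIn, producers) are illustrative, not from the original code.

package main

import (
	"fmt"
	"sync"
)

// fanIn runs each producer in its own goroutine and merges their output
// into a single set. Closing c after wg.Wait() ends the consumer's range
// loop; <-done then guarantees the map is complete before it is returned.
func fanIn(producers []func() []string) map[string]struct{} {
	var wg sync.WaitGroup
	wg.Add(len(producers))

	c := make(chan string)
	done := make(chan struct{})

	for _, p := range producers {
		go func(p func() []string) {
			defer wg.Done()
			for _, v := range p() {
				c <- v
			}
		}(p)
	}

	out := make(map[string]struct{})
	go func() {
		for v := range c {
			out[v] = struct{}{}
		}
		close(done)
	}()

	wg.Wait() // all producers have finished sending
	close(c)  // ends the consumer's range loop
	<-done    // the consumer has processed every value

	return out
}

func main() {
	merged := fanIn([]func() []string{
		func() []string { return []string{"a", "b"} },
		func() []string { return []string{"b", "c"} },
	})
	fmt.Println(len(merged)) // 3 unique values
}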
Example #2
func (d *Discoverer) StartGlobal(servers []string, extPort uint16) {
	d.mut.Lock()
	defer d.mut.Unlock()

	if len(d.clients) > 0 {
		d.stopGlobal()
	}

	d.extPort = extPort
	wg := sync.NewWaitGroup()
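	// Buffered to len(servers) so every goroutine below can deliver its
	// client without a waiting reader; the channel is drained after wg.Wait().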
	clients := make(chan Client, len(servers))
	for _, address := range servers {
		wg.Add(1)
		go func(addr string) {
			defer wg.Done()
			client, err := New(addr, d)
			if err != nil {
				l.Infoln("Error creating discovery client", addr, err)
				return
			}
			clients <- client
		}(address)
	}

	wg.Wait()
	close(clients)

	for client := range clients {
		d.clients = append(d.clients, client)
	}
}
Example #3
// Discover discovers UPnP InternetGatewayDevices.
// The order in which the devices appear in the results list is not deterministic.
func Discover(timeout time.Duration) []IGD {
	var results []IGD

	interfaces, err := net.Interfaces()
	if err != nil {
		l.Infoln("Listing network interfaces:", err)
		return results
	}

	resultChan := make(chan IGD)

	wg := sync.NewWaitGroup()

	for _, intf := range interfaces {
		// Interface flags seem to always be 0 on Windows
		if runtime.GOOS != "windows" && (intf.Flags&net.FlagUp == 0 || intf.Flags&net.FlagMulticast == 0) {
			continue
		}

		for _, deviceType := range []string{"urn:schemas-upnp-org:device:InternetGatewayDevice:1", "urn:schemas-upnp-org:device:InternetGatewayDevice:2"} {
			wg.Add(1)
			go func(intf net.Interface, deviceType string) {
				discover(&intf, deviceType, timeout, resultChan)
				wg.Done()
			}(intf, deviceType)
		}
	}

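	// Close resultChan once every discover goroutine has finished, so the
	// range loop below terminates.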
	go func() {
		wg.Wait()
		close(resultChan)
	}()

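	// The nextResult label lets the duplicate check continue the outer
	// receive loop from inside the inner comparison loop.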
nextResult:
	for result := range resultChan {
		for _, existingResult := range results {
			if existingResult.uuid == result.uuid {
				if shouldDebug() {
					l.Debugf("Skipping duplicate result %s with services:", result.uuid)
					for _, svc := range result.services {
						l.Debugf("* [%s] %s", svc.serviceID, svc.serviceURL)
					}
				}
				continue nextResult
			}
		}

		results = append(results, result)
		if shouldDebug() {
			l.Debugf("UPnP discovery result %s with services:", result.uuid)
			for _, svc := range result.services {
				l.Debugf("* [%s] %s", svc.serviceID, svc.serviceURL)
			}
		}
	}

	return results
}
Example #4
// Discover discovers UPnP InternetGatewayDevices.
// The order in which the devices appear in the results list is not deterministic.
func Discover(renewal, timeout time.Duration) []nat.Device {
	var results []nat.Device

	interfaces, err := net.Interfaces()
	if err != nil {
		l.Infoln("Listing network interfaces:", err)
		return results
	}

	resultChan := make(chan IGD)

	wg := sync.NewWaitGroup()

	for _, intf := range interfaces {
		// Interface flags seem to always be 0 on Windows
		if runtime.GOOS != "windows" && (intf.Flags&net.FlagUp == 0 || intf.Flags&net.FlagMulticast == 0) {
			continue
		}

		for _, deviceType := range []string{"urn:schemas-upnp-org:device:InternetGatewayDevice:1", "urn:schemas-upnp-org:device:InternetGatewayDevice:2"} {
			wg.Add(1)
			go func(intf net.Interface, deviceType string) {
				discover(&intf, deviceType, timeout, resultChan)
				wg.Done()
			}(intf, deviceType)
		}
	}

	go func() {
		wg.Wait()
		close(resultChan)
	}()

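	// Track seen device IDs in a map so duplicates are skipped in constant
	// time, rather than rescanning the results list.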
	seenResults := make(map[string]bool)
nextResult:
	for result := range resultChan {
		if seenResults[result.ID()] {
			l.Debugf("Skipping duplicate result %s with services:", result.uuid)
			for _, service := range result.services {
				l.Debugf("* [%s] %s", service.ID, service.URL)
			}
			continue nextResult
		}

		result := result // Reallocate as we need to keep a pointer
		results = append(results, &result)
		seenResults[result.ID()] = true

		l.Debugf("UPnP discovery result %s with services:", result.uuid)
		for _, service := range result.services {
			l.Debugf("* [%s] %s", service.ID, service.URL)
		}
	}

	return results
}
Example #5
func TestConcurrentSetClear(t *testing.T) {
	if testing.Short() {
		return
	}

	dur := 30 * time.Second
	t0 := time.Now()
	wg := sync.NewWaitGroup()

	os.RemoveAll("testdata/concurrent-set-clear.db")
	db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{OpenFilesCacheCapacity: 10})
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll("testdata/concurrent-set-clear.db")

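	// Buffered to 3: each of the two workers plus the wg-waiter below can
	// send without blocking.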
	errChan := make(chan error, 3)

	wg.Add(1)
	go func() {
		defer wg.Done()
		for time.Since(t0) < dur {
			if err := setItems(db); err != nil {
				errChan <- err
				return
			}
			if err := clearItems(db); err != nil {
				errChan <- err
				return
			}
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for time.Since(t0) < dur {
			if err := scanItems(db); err != nil {
				errChan <- err
				return
			}
		}
	}()

	go func() {
		wg.Wait()
		errChan <- nil
	}()

	err = <-errChan
	if err != nil {
		t.Error(err)
	}
	db.Close()
}
Example #6
func init() {
	for _, proto := range []string{"udp", "udp4", "udp6"} {
		Register(proto, func(uri *url.URL, pkt *Announce) (Client, error) {
			c := &UDPClient{
				wg:  sync.NewWaitGroup(),
				mut: sync.NewRWMutex(),
			}
			err := c.Start(uri, pkt)
			if err != nil {
				return nil, err
			}
			return c, nil
		})
	}
}
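For context, a hypothetical sketch of the Register/New pair that this init function assumes, following the factory signature used above; the registry map, names, and error text are illustrative guesses, and the stub types stand in for the package's real ones.

package discover

import (
	"fmt"
	"net/url"
)

// Stubs standing in for the package's real types.
type Announce struct{}

type Client interface {
	Address() string
	Stop()
}

// Factory is the constructor signature used by the Register calls above.
type Factory func(uri *url.URL, pkt *Announce) (Client, error)

var factories = make(map[string]Factory)

// Register runs from init functions, which execute on a single goroutine,
// so the map writes need no locking.
func Register(scheme string, factory Factory) {
	factories[scheme] = factory
}

// New parses the address and dispatches to the factory registered for the
// URL scheme.
func New(addr string, pkt *Announce) (Client, error) {
	uri, err := url.Parse(addr)
	if err != nil {
		return nil, err
	}
	factory, ok := factories[uri.Scheme]
	if !ok {
		return nil, fmt.Errorf("unknown scheme %q", uri.Scheme)
	}
	return factory(uri, pkt)
}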
Example #7
func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo) {
	wg := sync.NewWaitGroup()
	wg.Add(workers)

	for i := 0; i < workers; i++ {
		go func() {
			hashFiles(dir, blockSize, outbox, inbox)
			wg.Done()
		}()
	}

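	// Close the shared outbox only after every worker has returned, so the
	// downstream consumer sees a clean end-of-stream.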
	go func() {
		wg.Wait()
		close(outbox)
	}()
}
Example #8
func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan protocol.FileInfo, counter Counter, done, cancel chan struct{}) {
	wg := sync.NewWaitGroup()
	wg.Add(workers)

	for i := 0; i < workers; i++ {
		go func() {
			hashFiles(dir, blockSize, outbox, inbox, counter, cancel)
			wg.Done()
		}()
	}

	go func() {
		wg.Wait()
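		// done is optional; signal it only if the caller asked to be notified.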
		if done != nil {
			close(done)
		}
		close(outbox)
	}()
}
Example #9
// pullerIteration runs a single puller iteration for the given folder and
// returns the number of items that should have been synced (even those that
// might have failed). One puller iteration handles all files currently
// flagged as needed in the folder.
func (f *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
	pullChan := make(chan pullBlockState)
	copyChan := make(chan copyBlocksState)
	finisherChan := make(chan *sharedPullerState)

	updateWg := sync.NewWaitGroup()
	copyWg := sync.NewWaitGroup()
	pullWg := sync.NewWaitGroup()
	doneWg := sync.NewWaitGroup()

	l.Debugln(f, "c", f.copiers, "p", f.pullers)

	f.dbUpdates = make(chan dbUpdateJob)
	updateWg.Add(1)
	go func() {
		// dbUpdaterRoutine finishes when f.dbUpdates is closed
		f.dbUpdaterRoutine()
		updateWg.Done()
	}()

	for i := 0; i < f.copiers; i++ {
		copyWg.Add(1)
		go func() {
			// copierRoutine finishes when copyChan is closed
			f.copierRoutine(copyChan, pullChan, finisherChan)
			copyWg.Done()
		}()
	}

	for i := 0; i < f.pullers; i++ {
		pullWg.Add(1)
		go func() {
			// pullerRoutine finishes when pullChan is closed
			f.pullerRoutine(pullChan, finisherChan)
			pullWg.Done()
		}()
	}

	doneWg.Add(1)
	// finisherRoutine finishes when finisherChan is closed
	go func() {
		f.finisherRoutine(finisherChan)
		doneWg.Done()
	}()

	f.model.fmut.RLock()
	folderFiles := f.model.folderFiles[f.folderID]
	f.model.fmut.RUnlock()

	// !!!
	// WithNeed takes a database snapshot (by necessity). By the time we've
	// handled a bunch of files it might have become out of date and we might
	// be attempting to sync with an old version of a file...
	// !!!

	changed := 0

	fileDeletions := map[string]protocol.FileInfo{}
	dirDeletions := []protocol.FileInfo{}
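	// buckets groups still-present local files by their first block hash so
	// that an incoming file with identical blocks can be handled as a rename
	// of a pending deletion instead of a fresh pull (see the nextFile loop).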
	buckets := map[string][]protocol.FileInfo{}

	handleItem := func(fi protocol.FileInfo) bool {
		switch {
		case fi.IsDeleted():
			// A deleted file, directory or symlink
			if fi.IsDirectory() {
				dirDeletions = append(dirDeletions, fi)
			} else {
				fileDeletions[fi.Name] = fi
				df, ok := f.model.CurrentFolderFile(f.folderID, fi.Name)
				// The local file may already be deleted, but with a lower
				// version number, hence the deletion coming in again as part
				// of WithNeed. Furthermore, the file can simply be of the
				// wrong type if we haven't yet managed to pull it.
				if ok && !df.IsDeleted() && !df.IsSymlink() && !df.IsDirectory() {
					// Put files into buckets per first hash
					key := string(df.Blocks[0].Hash)
					buckets[key] = append(buckets[key], df)
				}
			}
			changed++

		case fi.IsDirectory() && !fi.IsSymlink():
			l.Debugln("Handling directory", fi.Name)
			f.handleDir(fi)
			changed++

		case fi.IsSymlink():
			l.Debugln("Handling symlink", fi.Name)
			f.handleSymlink(fi)
			changed++

		default:
			return false
		}
		return true
	}

	folderFiles.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
		// Needed items are delivered sorted lexicographically. We'll handle
		// directories as they come along, so parents before children. Files
		// are queued and the order may be changed later.

		if shouldIgnore(intf, ignores, f.ignoreDelete) {
			return true
		}

		if err := fileValid(intf); err != nil {
			// The file isn't valid so we can't process it. Pretend that we
			// tried and set the error for the file.
			f.newError(intf.FileName(), err)
			changed++
			return true
		}

		file := intf.(protocol.FileInfo)
		l.Debugln(f, "handling", file.Name)
		if !handleItem(file) {
			// A new or changed file or symlink. This is the only case where
			// we do stuff concurrently in the background. We only queue
			// files where we are connected to at least one device that has
			// the file.

			devices := folderFiles.Availability(file.Name)
			for _, dev := range devices {
				if f.model.ConnectedTo(dev) {
					f.queue.Push(file.Name, file.Size, file.ModTime())
					changed++
					break
				}
			}
		}

		return true
	})

	// Reorder the file queue according to configuration

	switch f.order {
	case config.OrderRandom:
		f.queue.Shuffle()
	case config.OrderAlphabetic:
		// The queue is already in alphabetic order.
	case config.OrderSmallestFirst:
		f.queue.SortSmallestFirst()
	case config.OrderLargestFirst:
		f.queue.SortLargestFirst()
	case config.OrderOldestFirst:
		f.queue.SortOldestFirst()
	case config.OrderNewestFirst:
		f.queue.SortNewestFirst()
	}

	// Process the file queue

nextFile:
	for {
		select {
		case <-f.stop:
			// Stop processing files if the puller has been told to stop.
			break nextFile
		default:
		}

		fileName, ok := f.queue.Pop()
		if !ok {
			break
		}

		fi, ok := f.model.CurrentGlobalFile(f.folderID, fileName)
		if !ok {
			// File is no longer in the index. Mark it as done and drop it.
			f.queue.Done(fileName)
			continue
		}

		// Handles races where an index update arrives changing what the file
		// is between queueing and retrieving it from the queue, effectively
		// changing how the file should be handled.
		if handleItem(fi) {
			continue
		}

		// Check our list of files to be removed for a match, in which case
		// we can just do a rename instead.
		key := string(fi.Blocks[0].Hash)
		for i, candidate := range buckets[key] {
			if scanner.BlocksEqual(candidate.Blocks, fi.Blocks) {
				// Remove the candidate from the bucket
				lidx := len(buckets[key]) - 1
				buckets[key][i] = buckets[key][lidx]
				buckets[key] = buckets[key][:lidx]

				// candidate is our current state of the file, whereas the
				// desired state with the delete bit set is in the deletion
				// map.
				desired := fileDeletions[candidate.Name]
				// Remove the pending deletion (as we perform it by renaming)
				delete(fileDeletions, candidate.Name)

				f.renameFile(desired, fi)

				f.queue.Done(fileName)
				continue nextFile
			}
		}

		// Handle the file normally, by copying and pulling, etc.
		f.handleFile(fi, copyChan, finisherChan)
	}

	// Signal copy and puller routines that we are done with the in data for
	// this iteration. Wait for them to finish.
	close(copyChan)
	copyWg.Wait()
	close(pullChan)
	pullWg.Wait()

	// Signal the finisher chan that there will be no more input.
	close(finisherChan)

	// Wait for the finisherChan to finish.
	doneWg.Wait()

	for _, file := range fileDeletions {
		l.Debugln("Deleting file", file.Name)
		f.deleteFile(file)
	}

	for i := range dirDeletions {
		dir := dirDeletions[len(dirDeletions)-i-1]
		l.Debugln("Deleting dir", dir.Name)
		f.deleteDir(dir, ignores)
	}

	// Wait for db updates to complete
	close(f.dbUpdates)
	updateWg.Wait()

	return changed
}
Example #10
// pullerIteration runs a single puller iteration for the given folder and
// returns the number of items that should have been synced (even those that
// might have failed). One puller iteration handles all files currently
// flagged as needed in the folder.
func (p *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
	pullChan := make(chan pullBlockState)
	copyChan := make(chan copyBlocksState)
	finisherChan := make(chan *sharedPullerState)

	updateWg := sync.NewWaitGroup()
	copyWg := sync.NewWaitGroup()
	pullWg := sync.NewWaitGroup()
	doneWg := sync.NewWaitGroup()

	if debug {
		l.Debugln(p, "c", p.copiers, "p", p.pullers)
	}

	p.dbUpdates = make(chan dbUpdateJob)
	updateWg.Add(1)
	go func() {
		// dbUpdaterRoutine finishes when p.dbUpdates is closed
		p.dbUpdaterRoutine()
		updateWg.Done()
	}()

	for i := 0; i < p.copiers; i++ {
		copyWg.Add(1)
		go func() {
			// copierRoutine finishes when copyChan is closed
			p.copierRoutine(copyChan, pullChan, finisherChan)
			copyWg.Done()
		}()
	}

	for i := 0; i < p.pullers; i++ {
		pullWg.Add(1)
		go func() {
			// pullerRoutine finishes when pullChan is closed
			p.pullerRoutine(pullChan, finisherChan)
			pullWg.Done()
		}()
	}

	doneWg.Add(1)
	// finisherRoutine finishes when finisherChan is closed
	go func() {
		p.finisherRoutine(finisherChan)
		doneWg.Done()
	}()

	p.model.fmut.RLock()
	folderFiles := p.model.folderFiles[p.folder]
	p.model.fmut.RUnlock()

	// !!!
	// WithNeed takes a database snapshot (by necessity). By the time we've
	// handled a bunch of files it might have become out of date and we might
	// be attempting to sync with an old version of a file...
	// !!!

	changed := 0
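	// pullFileSize accumulates the bytes queued for pulling, used by the
	// free-disk-space check below.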
	pullFileSize := int64(0)

	fileDeletions := map[string]protocol.FileInfo{}
	dirDeletions := []protocol.FileInfo{}
	buckets := map[string][]protocol.FileInfo{}

	folderFiles.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
		// Needed items are delivered sorted lexicographically. We'll handle
		// directories as they come along, so parents before children. Files
		// are queued and the order may be changed later.

		file := intf.(protocol.FileInfo)

		if ignores.Match(file.Name) {
			// This is an ignored file. Skip it, continue iteration.
			return true
		}

		if debug {
			l.Debugln(p, "handling", file.Name)
		}

		switch {
		case file.IsDeleted():
			// A deleted file, directory or symlink
			if file.IsDirectory() {
				dirDeletions = append(dirDeletions, file)
			} else {
				fileDeletions[file.Name] = file
				df, ok := p.model.CurrentFolderFile(p.folder, file.Name)
				// The local file may already be deleted, but with a lower
				// version number, hence the deletion coming in again as part
				// of WithNeed. Furthermore, the file can simply be of the
				// wrong type if we haven't yet managed to pull it.
				if ok && !df.IsDeleted() && !df.IsSymlink() && !df.IsDirectory() {
					// Put files into buckets per first hash
					key := string(df.Blocks[0].Hash)
					buckets[key] = append(buckets[key], df)
				}
			}
		case file.IsDirectory() && !file.IsSymlink():
			// A new or changed directory
			if debug {
				l.Debugln("Creating directory", file.Name)
			}
			p.handleDir(file)
		default:
			// A new or changed file or symlink. This is the only case where we
			// do stuff concurrently in the background
			pullFileSize += file.Size()
			p.queue.Push(file.Name, file.Size(), file.Modified)
		}

		changed++
		return true
	})

	// Check if we are able to store all files on disk
	if pullFileSize > 0 {
		folder, ok := p.model.cfg.Folders()[p.folder]
		if ok {
			if free, err := osutil.DiskFreeBytes(folder.Path()); err == nil && free < pullFileSize {
				l.Infof("Puller (folder %q): insufficient disk space available to pull %d files (%.2fMB)", p.folder, changed, float64(pullFileSize)/1024/1024)
				return 0
			}
		}
	}

	// Reorder the file queue according to configuration

	switch p.order {
	case config.OrderRandom:
		p.queue.Shuffle()
	case config.OrderAlphabetic:
		// The queue is already in alphabetic order.
	case config.OrderSmallestFirst:
		p.queue.SortSmallestFirst()
	case config.OrderLargestFirst:
		p.queue.SortLargestFirst()
	case config.OrderOldestFirst:
		p.queue.SortOldestFirst()
	case config.OrderNewestFirst:
		p.queue.SortNewestFirst()
	}

	// Process the file queue

nextFile:
	for {
		fileName, ok := p.queue.Pop()
		if !ok {
			break
		}

		f, ok := p.model.CurrentGlobalFile(p.folder, fileName)
		if !ok {
			// File is no longer in the index. Mark it as done and drop it.
			p.queue.Done(fileName)
			continue
		}

		// The local file may already be deleted, but with a lower version
		// number, hence the deletion coming in again as part of WithNeed.
		// Furthermore, the file can simply be of the wrong type if the
		// global index changed while we were processing this iteration.
		if !f.IsDeleted() && !f.IsSymlink() && !f.IsDirectory() {
			key := string(f.Blocks[0].Hash)
			for i, candidate := range buckets[key] {
				if scanner.BlocksEqual(candidate.Blocks, f.Blocks) {
					// Remove the candidate from the bucket
					lidx := len(buckets[key]) - 1
					buckets[key][i] = buckets[key][lidx]
					buckets[key] = buckets[key][:lidx]

					// candidate is our current state of the file, whereas the
					// desired state with the delete bit set is in the deletion
					// map.
					desired := fileDeletions[candidate.Name]
					// Remove the pending deletion (as we perform it by renaming)
					delete(fileDeletions, candidate.Name)

					p.renameFile(desired, f)

					p.queue.Done(fileName)
					continue nextFile
				}
			}
		}

		// Not a rename or a symlink, deal with it.
		p.handleFile(f, copyChan, finisherChan)
	}

	// Signal copy and puller routines that we are done with the in data for
	// this iteration. Wait for them to finish.
	close(copyChan)
	copyWg.Wait()
	close(pullChan)
	pullWg.Wait()

	// Signal the finisher chan that there will be no more input.
	close(finisherChan)

	// Wait for the finisherChan to finish.
	doneWg.Wait()

	for _, file := range fileDeletions {
		if debug {
			l.Debugln("Deleting file", file.Name)
		}
		p.deleteFile(file)
	}

	for i := range dirDeletions {
		dir := dirDeletions[len(dirDeletions)-i-1]
		if debug {
			l.Debugln("Deleting dir", dir.Name)
		}
		p.deleteDir(dir)
	}

	// Wait for db updates to complete
	close(p.dbUpdates)
	updateWg.Wait()

	return changed
}
Example #11
// Lookup returns a list of addresses the device is available at, as well as
// a list of relays the device is supposed to be available on, sorted by the
// sum of latencies between this device and the device in question.
func (d *Discoverer) Lookup(device protocol.DeviceID) ([]string, []string) {
	d.registryLock.RLock()
	cachedAddresses := d.filterCached(d.addressRegistry[device])
	cachedRelays := d.filterCached(d.relayRegistry[device])
	lastLookup := d.lastLookup[device]
	d.registryLock.RUnlock()

	d.mut.RLock()
	defer d.mut.RUnlock()

	relays := make([]string, len(cachedRelays))
	for i := range cachedRelays {
		relays[i] = cachedRelays[i].Address
	}

	if len(cachedAddresses) > 0 {
		// There are cached address entries.
		addrs := make([]string, len(cachedAddresses))
		for i := range cachedAddresses {
			addrs[i] = cachedAddresses[i].Address
		}
		return addrs, relays
	}

	if time.Since(lastLookup) < d.negCacheCutoff {
		// We have recently tried to look up this address and failed. Let's
		// chill for a while.
		return nil, relays
	}

	if len(d.clients) != 0 && time.Since(d.localBcastStart) > d.localBcastIntv {
		// Only perform external lookups if we have at least one external
		// server client and one local announcement interval has passed. This is
		// to avoid finding local peers on their remote address at startup.
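		// Buffered to the number of clients so every lookup goroutine can
		// send its answer before the channel is drained.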
		results := make(chan Announce, len(d.clients))
		wg := sync.NewWaitGroup()
		for _, client := range d.clients {
			wg.Add(1)
			go func(c Client) {
				defer wg.Done()
				ann, err := c.Lookup(device)
				if err == nil {
					results <- ann
				}
			}(client)
		}

		wg.Wait()
		close(results)

		cachedAddresses := []CacheEntry{}
		availableRelays := []Relay{}
		seenAddresses := make(map[string]struct{})
		seenRelays := make(map[string]struct{})
		now := time.Now()

		var addrs []string
		for result := range results {
			for _, addr := range result.This.Addresses {
				_, ok := seenAddresses[addr]
				if !ok {
					cachedAddresses = append(cachedAddresses, CacheEntry{
						Address: addr,
						Seen:    now,
					})
					seenAddresses[addr] = struct{}{}
					addrs = append(addrs, addr)
				}
			}

			for _, relay := range result.This.Relays {
				_, ok := seenRelays[relay.Address]
				if !ok {
					availableRelays = append(availableRelays, relay)
					seenRelays[relay.Address] = struct{}{}
				}
			}
		}

		relays = addressesSortedByLatency(availableRelays)
		cachedRelays := make([]CacheEntry, len(relays))
		for i := range relays {
			cachedRelays[i] = CacheEntry{
				Address: relays[i],
				Seen:    now,
			}
		}

		d.registryLock.Lock()
		d.addressRegistry[device] = cachedAddresses
		d.relayRegistry[device] = cachedRelays
		d.lastLookup[device] = time.Now()
		d.registryLock.Unlock()

		return addrs, relays
	}

	return nil, relays
}
Example #12
func monitorMain(runtimeOptions RuntimeOptions) {
	os.Setenv("STNORESTART", "yes")
	os.Setenv("STMONITORED", "yes")
	l.SetPrefix("[monitor] ")

	var dst io.Writer = os.Stdout

	logFile := runtimeOptions.logFile
	if logFile != "-" {
		var fileDst io.Writer = newAutoclosedFile(logFile, logFileAutoCloseDelay, logFileMaxOpenTime)

		if runtime.GOOS == "windows" {
			// Translate line breaks to Windows standard
			fileDst = osutil.ReplacingWriter{
				Writer: fileDst,
				From:   '\n',
				To:     []byte{'\r', '\n'},
			}
		}

		// Log to both stdout and file.
		dst = io.MultiWriter(dst, fileDst)

		l.Infof(`Log output saved to file "%s"`, logFile)
	}

	args := os.Args
	var restarts [countRestarts]time.Time

	stopSign := make(chan os.Signal, 1)
	sigTerm := syscall.Signal(15)
	signal.Notify(stopSign, os.Interrupt, sigTerm)
	restartSign := make(chan os.Signal, 1)
	sigHup := syscall.Signal(1)
	signal.Notify(restartSign, sigHup)

	for {
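		// restarts holds the timestamps of the last countRestarts starts; if
		// the oldest is within loopThreshold of now we are in a crash loop.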
		if t := time.Since(restarts[0]); t < loopThreshold {
			l.Warnf("%d restarts in %v; not retrying further", countRestarts, t)
			os.Exit(exitError)
		}

		copy(restarts[0:], restarts[1:])
		restarts[len(restarts)-1] = time.Now()

		cmd := exec.Command(args[0], args[1:]...)

		stderr, err := cmd.StderrPipe()
		if err != nil {
			l.Fatalln("stderr:", err)
		}

		stdout, err := cmd.StdoutPipe()
		if err != nil {
			l.Fatalln("stdout:", err)
		}

		l.Infoln("Starting syncthing")
		err = cmd.Start()
		if err != nil {
			l.Fatalln(err)
		}

		// Let the next child process know that this is not the first time
		// it's starting up.
		os.Setenv("STRESTART", "yes")

		stdoutMut.Lock()
		stdoutFirstLines = make([]string, 0, 10)
		stdoutLastLines = make([]string, 0, 50)
		stdoutMut.Unlock()

		wg := sync.NewWaitGroup()

		wg.Add(1)
		go func() {
			copyStderr(stderr, dst)
			wg.Done()
		}()

		wg.Add(1)
		go func() {
			copyStdout(stdout, dst)
			wg.Done()
		}()

		exit := make(chan error)

		go func() {
			wg.Wait()
			exit <- cmd.Wait()
		}()

		select {
		case s := <-stopSign:
			l.Infof("Signal %d received; exiting", s)
			cmd.Process.Kill()
			<-exit
			return

		case s := <-restartSign:
			l.Infof("Signal %d received; restarting", s)
			cmd.Process.Signal(sigHup)
			err = <-exit

		case err = <-exit:
			if err == nil {
				// Successful exit indicates an intentional shutdown
				return
			} else if exiterr, ok := err.(*exec.ExitError); ok {
				if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
					switch status.ExitStatus() {
					case exitUpgrading:
						// Restart the monitor process to release the .old
						// binary as part of the upgrade process.
						l.Infoln("Restarting monitor...")
						os.Setenv("STNORESTART", "")
						if err = restartMonitor(args); err != nil {
							l.Warnln("Restart:", err)
						}
						return
					}
				}
			}
		}

		l.Infoln("Syncthing exited:", err)
		time.Sleep(1 * time.Second)
	}
}
Example #13
func (d *Discoverer) Lookup(device protocol.DeviceID) []string {
	d.registryLock.RLock()
	cached := d.filterCached(d.registry[device])
	lastLookup := d.lastLookup[device]
	d.registryLock.RUnlock()

	d.mut.RLock()
	defer d.mut.RUnlock()

	if len(cached) > 0 {
		// There are cached address entries.
		addrs := make([]string, len(cached))
		for i := range cached {
			addrs[i] = cached[i].Address
		}
		return addrs
	}

	if time.Since(lastLookup) < d.negCacheCutoff {
		// We have recently tried to look up this address and failed. Let's
		// chill for a while.
		return nil
	}

	if len(d.clients) != 0 && time.Since(d.localBcastStart) > d.localBcastIntv {
		// Only perform external lookups if we have at least one external
		// server client and one local announcement interval has passed. This is
		// to avoid finding local peers on their remote address at startup.
		results := make(chan []string, len(d.clients))
		wg := sync.NewWaitGroup()
		for _, client := range d.clients {
			wg.Add(1)
			go func(c Client) {
				defer wg.Done()
				results <- c.Lookup(device)
			}(client)
		}

		wg.Wait()
		close(results)

		cached := []CacheEntry{}
		seen := make(map[string]struct{})
		now := time.Now()

		var addrs []string
		for result := range results {
			for _, addr := range result {
				_, ok := seen[addr]
				if !ok {
					cached = append(cached, CacheEntry{
						Address: addr,
						Seen:    now,
					})
					seen[addr] = struct{}{}
					addrs = append(addrs, addr)
				}
			}
		}

		d.registryLock.Lock()
		d.registry[device] = cached
		d.lastLookup[device] = time.Now()
		d.registryLock.Unlock()

		return addrs
	}

	return nil
}
Example #14
func TestUDP4Success(t *testing.T) {
	conn, err := net.ListenUDP("udp4", nil)
	if err != nil {
		t.Fatal(err)
	}

	port := conn.LocalAddr().(*net.UDPAddr).Port

	address := fmt.Sprintf("udp4://127.0.0.1:%d", port)
	pkt := Announce{
		Magic: AnnouncementMagic,
		This: Device{
			device[:],
			[]string{"tcp://123.123.123.123:1234"},
			nil,
		},
	}
	ann := &FakeAnnouncer{
		pkt: pkt,
	}

	client, err := New(address, ann)
	if err != nil {
		t.Fatal(err)
	}

	udpclient := client.(*UDPClient)
	if udpclient.errorRetryInterval != DefaultErrorRetryInternval {
		t.Fatal("Incorrect retry interval")
	}

	if udpclient.listenAddress.IP != nil || udpclient.listenAddress.Port != 0 {
		t.Fatal("Wrong listen IP or port", udpclient.listenAddress)
	}

	if client.Address() != address {
		t.Fatal("Incorrect address")
	}

	buf := make([]byte, 2048)

	// First announcement
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, err = conn.Read(buf)
	if err != nil {
		t.Fatal(err)
	}

	// Announcement verification
	conn.SetDeadline(time.Now().Add(time.Millisecond * 1100))
	_, addr, err := conn.ReadFromUDP(buf)
	if err != nil {
		t.Fatal(err)
	}

	// Reply to it.
	_, err = conn.WriteToUDP(pkt.MustMarshalXDR(), addr)
	if err != nil {
		t.Fatal(err)
	}

	// We should get nothing else
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, err = conn.Read(buf)
	if err == nil {
		t.Fatal("Expected error")
	}

	// Status should be ok
	if !client.StatusOK() {
		t.Fatal("Wrong status")
	}

	// Do a lookup in a separate routine
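	// addrs is written only inside the goroutine and read only after
	// wg.Wait(), which provides the necessary happens-before ordering.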
	addrs := []string{}
	wg := sync.NewWaitGroup()
	wg.Add(1)
	go func() {
		pkt, err := client.Lookup(device)
		if err == nil {
			for _, addr := range pkt.This.Addresses {
				addrs = append(addrs, addr)
			}
		}
		wg.Done()
	}()

	// Receive the lookup and reply
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, addr, err = conn.ReadFromUDP(buf)
	if err != nil {
		t.Fatal(err)
	}

	conn.WriteToUDP(pkt.MustMarshalXDR(), addr)

	// Wait for the lookup to arrive, verify that the number of answers is correct
	wg.Wait()

	if len(addrs) != 1 || addrs[0] != "tcp://123.123.123.123:1234" {
		t.Fatal("Wrong number of answers")
	}

	client.Stop()
}
Example #15
func TestUDP4Failure(t *testing.T) {
	conn, err := net.ListenUDP("udp4", nil)
	if err != nil {
		t.Fatal(err)
	}

	port := conn.LocalAddr().(*net.UDPAddr).Port

	address := fmt.Sprintf("udp4://127.0.0.1:%d/?listenaddress=127.0.0.1&retry=5", port)

	pkt := Announce{
		Magic: AnnouncementMagic,
		This: Device{
			device[:],
			[]string{"tcp://123.123.123.123:1234"},
			nil,
		},
	}
	ann := &FakeAnnouncer{
		pkt: pkt,
	}

	client, err := New(address, ann)
	if err != nil {
		t.Fatal(err)
	}

	udpclient := client.(*UDPClient)
	if udpclient.errorRetryInterval != time.Second*5 {
		t.Fatal("Incorrect retry interval")
	}

	if !udpclient.listenAddress.IP.Equal(net.IPv4(127, 0, 0, 1)) || udpclient.listenAddress.Port != 0 {
		t.Fatal("Wrong listen IP or port", udpclient.listenAddress)
	}

	if client.Address() != address {
		t.Fatal("Incorrect address")
	}

	buf := make([]byte, 2048)

	// First announcement
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, err = conn.Read(buf)
	if err != nil {
		t.Fatal(err)
	}

	// Announcement verification
	conn.SetDeadline(time.Now().Add(time.Millisecond * 1100))
	_, _, err = conn.ReadFromUDP(buf)
	if err != nil {
		t.Fatal(err)
	}

	// Don't reply
	// We should get nothing else
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, err = conn.Read(buf)
	if err == nil {
		t.Fatal("Expected error")
	}

	// Status should be failure
	if client.StatusOK() {
		t.Fatal("Wrong status")
	}

	// Do a lookup in a separate routine
	addrs := []string{}
	wg := sync.NewWaitGroup()
	wg.Add(1)
	go func() {
		pkt, err := client.Lookup(device)
		if err == nil {
			for _, addr := range pkt.This.Addresses {
				addrs = append(addrs, addr)
			}
		}
		wg.Done()
	}()

	// Receive the lookup and don't reply
	conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
	_, _, err = conn.ReadFromUDP(buf)
	if err != nil {
		t.Fatal(err)
	}

	// Wait for the lookup to timeout, verify that the number of answers is none
	wg.Wait()

	if len(addrs) != 0 {
		t.Fatal("Wrong number of answers")
	}

	client.Stop()
}