Example #1
func NewModel(cfg *config.Wrapper, db *bolt.DB) *Model {
	m := &Model{
		cfg: cfg,
		db:  db,

		protoConn: make(map[protocol.DeviceID]stmodel.Connection),
		pmut:      sync.NewRWMutex(),

		blockCaches:   make(map[string]*fileblockcache.FileBlockCache),
		treeCaches:    make(map[string]*filetreecache.FileTreeCache),
		devicesByFile: make(map[string]map[string][]protocol.DeviceID),
		filesByDevice: make(map[string]map[protocol.DeviceID][]string),
		fmut:          sync.NewRWMutex(),
	}

	for _, folderCfg := range m.cfg.Folders() {
		folder := folderCfg.ID

		fbc, err := fileblockcache.NewFileBlockCache(m.cfg, db, folderCfg)
		if err != nil {
			l.Warnln("Skipping folder", folder, "because fileblockcache init failed:", err)
			continue
		}
		m.blockCaches[folder] = fbc
		m.treeCaches[folder] = filetreecache.NewFileTreeCache(m.cfg, db, folder)

		m.devicesByFile[folder] = make(map[string][]protocol.DeviceID)
		m.filesByDevice[folder] = make(map[protocol.DeviceID][]string)
	}

	return m
}
Example #2
func NewDiscoverer(id protocol.DeviceID, addresses []string) *Discoverer {
	return &Discoverer{
		myID:           id,
		listenAddrs:    addresses,
		localBcastIntv: 30 * time.Second,
		cacheLifetime:  5 * time.Minute,
		negCacheCutoff: 3 * time.Minute,
		registry:       make(map[protocol.DeviceID][]CacheEntry),
		lastLookup:     make(map[protocol.DeviceID]time.Time),
		registryLock:   sync.NewRWMutex(),
		mut:            sync.NewRWMutex(),
	}
}
Example #3
func NewModel(cfg *config.Wrapper, db *bolt.DB) *Model {
	var lmutex sync.Mutex
	m := &Model{
		cfg:         cfg,
		db:          db,
		pinnedFiles: make(map[string][]string),

		blockCaches:   make(map[string]*fileblockcache.FileBlockCache),
		treeCaches:    make(map[string]*filetreecache.FileTreeCache),
		folderDevices: make(map[string][]protocol.DeviceID),
		pulls:         make(map[string]map[string]*blockPullStatus),
		fmut:          stsync.NewRWMutex(),

		lmut: sync.NewCond(&lmutex),

		protoConn: make(map[protocol.DeviceID]connections.Connection),
		pmut:      stsync.NewRWMutex(),
	}

	for _, folderCfg := range m.cfg.Folders() {
		folder := folderCfg.ID

		fbc, err := fileblockcache.NewFileBlockCache(m.cfg, db, folderCfg)
		if err != nil {
			l.Warnln("Skipping folder", folder, "because fileblockcache init failed:", err)
			continue
		}
		m.blockCaches[folder] = fbc
		m.treeCaches[folder] = filetreecache.NewFileTreeCache(folderCfg, db, folder)

		m.folderDevices[folder] = make([]protocol.DeviceID, len(folderCfg.Devices))
		for i, device := range folderCfg.Devices {
			m.folderDevices[folder][i] = device.DeviceID
		}

		m.pulls[folder] = make(map[string]*blockPullStatus)

		m.pinnedFiles[folder] = make([]string, len(folderCfg.PinnedFiles))
		copy(m.pinnedFiles[folder], folderCfg.PinnedFiles)
		sort.Strings(m.pinnedFiles[folder])
		m.unpinUnnecessaryBlocks(folder)
	}

	m.removeUnconfiguredFolders()

	for i := 0; i < 4; i++ {
		go m.backgroundPinnerRoutine()
	}

	return m
}
Example #4
func TestSourceFileOK(t *testing.T) {
	s := sharedPullerState{
		realName: "testdata/foo",
		mut:      sync.NewRWMutex(),
	}

	fd, err := s.sourceFile()
	if err != nil {
		t.Fatal(err)
	}
	if fd == nil {
		t.Fatal("Unexpected nil fd")
	}

	bs := make([]byte, 6)
	n, err := fd.Read(bs)
	if err != nil {
		t.Fatal(err)
	}

	if n != len(bs) {
		t.Fatalf("Wrong read length %d != %d", n, len(bs))
	}
	if string(bs) != "foobar" {
		t.Fatalf("Wrong contents %s != foobar", string(bs))
	}

	if err := s.failed(); err != nil {
		t.Fatal(err)
	}
}
Example #5
func newStaticsServer(theme, assetDir string) *staticsServer {
	s := &staticsServer{
		assetDir: assetDir,
		assets:   auto.Assets(),
		mut:      sync.NewRWMutex(),
		theme:    theme,
	}

	seen := make(map[string]struct{})
	// Load themes from compiled in assets.
	for file := range auto.Assets() {
		theme := strings.Split(file, "/")[0]
		if _, ok := seen[theme]; !ok {
			seen[theme] = struct{}{}
			s.availableThemes = append(s.availableThemes, theme)
		}
	}
	if assetDir != "" {
		// Load any extra themes from the asset override dir.
		for _, dir := range dirNames(assetDir) {
			if _, ok := seen[dir]; !ok {
				seen[dir] = struct{}{}
				s.availableThemes = append(s.availableThemes, dir)
			}
		}
	}

	return s
}
Example #6
func newStaticClient(uri *url.URL, certs []tls.Certificate, invitations chan protocol.SessionInvitation, timeout time.Duration) RelayClient {
	closeInvitationsOnFinish := false
	if invitations == nil {
		closeInvitationsOnFinish = true
		invitations = make(chan protocol.SessionInvitation)
	}

	return &staticClient{
		uri:         uri,
		invitations: invitations,

		closeInvitationsOnFinish: closeInvitationsOnFinish,

		config: configForCerts(certs),

		messageTimeout: time.Minute * 2,
		connectTimeout: timeout,

		stop:    make(chan struct{}),
		stopped: make(chan struct{}),

		mut:       sync.NewRWMutex(),
		connected: false,
	}
}
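This constructor, like Examples #13 and #22 below, follows one ownership convention: when the caller passes a nil invitations channel, the client allocates its own and records, via closeInvitationsOnFinish, that it is responsible for closing it. A minimal sketch of the matching teardown side (the finish method is hypothetical; these examples show only the constructors):

// Hypothetical teardown for the ownership pattern above: the invitations
// channel is closed only if this client created it, so a caller-owned
// channel is never closed out from under the caller.
func (c *staticClient) finish() {
	if c.closeInvitationsOnFinish {
		close(c.invitations)
	}
}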
Example #7
func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *tls.Config, discoverer discover.Finder,
	bepProtocolName string, tlsDefaultCommonName string, lans []*net.IPNet) *Service {

	service := &Service{
		Supervisor: suture.New("connections.Service", suture.Spec{
			Log: func(line string) {
				l.Infoln(line)
			},
		}),
		cfg:                  cfg,
		myID:                 myID,
		model:                mdl,
		tlsCfg:               tlsCfg,
		discoverer:           discoverer,
		conns:                make(chan internalConn),
		bepProtocolName:      bepProtocolName,
		tlsDefaultCommonName: tlsDefaultCommonName,
		lans:                 lans,
		limiter:              newLimiter(cfg),
		natService:           nat.NewService(myID, cfg),

		listenersMut:   sync.NewRWMutex(),
		listeners:      make(map[string]genericListener),
		listenerTokens: make(map[string]suture.ServiceToken),

		// A listener can fail twice, rapidly. Any more than that and it
		// will be put on suspension for ten minutes. Restarts and changes
		// due to config are done by removing and adding services, so are
		// not subject to these limitations.
		listenerSupervisor: suture.New("c.S.listenerSupervisor", suture.Spec{
			Log: func(line string) {
				l.Infoln(line)
			},
			FailureThreshold: 2,
			FailureBackoff:   600 * time.Second,
		}),

		curConMut:         sync.NewMutex(),
		currentConnection: make(map[protocol.DeviceID]completeConn),
	}
	cfg.Subscribe(service)

	// There are several moving parts here; one routine per listening address
	// (handled in configuration changing) to handle incoming connections,
	// one routine to periodically attempt outgoing connections, one routine to
	// do the common handling regardless of whether the connection was
	// incoming or outgoing.

	service.Add(serviceFunc(service.connect))
	service.Add(serviceFunc(service.handle))
	service.Add(service.listenerSupervisor)

	raw := cfg.RawCopy()
	// Actually starts the listeners and NAT service
	service.CommitConfiguration(raw, raw)

	return service
}
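The comment on listenerSupervisor describes suture's failure accounting: up to FailureThreshold rapid failures are restarted immediately, and anything beyond that waits out FailureBackoff. A self-contained sketch of that behaviour, assuming the suture v1-style Service interface (Serve/Stop) these examples use:

package main

import (
	"time"

	"github.com/thejerf/suture"
)

// flaky's Serve returns immediately, which suture treats as a failure and
// answers with a restart. With FailureThreshold: 2, the first rapid
// failures restart promptly; beyond that the service sits out
// FailureBackoff (ten minutes here) before the next attempt.
type flaky struct{}

func (flaky) Serve() {}
func (flaky) Stop()  {}

func main() {
	sup := suture.New("example", suture.Spec{
		FailureThreshold: 2,
		FailureBackoff:   600 * time.Second,
	})
	sup.Add(flaky{})
	sup.Serve() // blocks: restart, restart, then ten-minute suspensions
}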
Example #8
func NewService(id protocol.DeviceID, cfg *config.Wrapper) *Service {
	return &Service{
		id:  id,
		cfg: cfg,

		timer: time.NewTimer(0),
		mut:   sync.NewRWMutex(),
	}
}
Example #9
func NewService(id protocol.DeviceID, cfg *config.Wrapper) *Service {
	return &Service{
		id:  id,
		cfg: cfg,

		immediate: make(chan chan struct{}),
		timer:     time.NewTimer(time.Second),

		mut: sync.NewRWMutex(),
	}
}
Example #10
func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *tls.Config, discoverer discover.Finder,
	bepProtocolName string, tlsDefaultCommonName string, lans []*net.IPNet) *Service {

	service := &Service{
		Supervisor:           suture.NewSimple("connections.Service"),
		cfg:                  cfg,
		myID:                 myID,
		model:                mdl,
		tlsCfg:               tlsCfg,
		discoverer:           discoverer,
		conns:                make(chan IntermediateConnection),
		bepProtocolName:      bepProtocolName,
		tlsDefaultCommonName: tlsDefaultCommonName,
		lans:                 lans,
		natService:           nat.NewService(myID, cfg),

		listenersMut:   sync.NewRWMutex(),
		listeners:      make(map[string]genericListener),
		listenerTokens: make(map[string]suture.ServiceToken),

		curConMut:         sync.NewMutex(),
		currentConnection: make(map[protocol.DeviceID]Connection),
	}
	cfg.Subscribe(service)

	// The rate variables are in KiB/s in the UI (despite the camel casing
	// of the name). We multiply by 1024 here to get B/s.
	options := service.cfg.Options()
	if options.MaxSendKbps > 0 {
		service.writeRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxSendKbps), int64(5*1024*options.MaxSendKbps))
	}

	if options.MaxRecvKbps > 0 {
		service.readRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxRecvKbps), int64(5*1024*options.MaxRecvKbps))
	}

	// There are several moving parts here; one routine per listening address
	// (handled in configuration changing) to handle incoming connections,
	// one routine to periodically attempt outgoing connections, one routine to
	// do the common handling regardless of whether the connection was
	// incoming or outgoing.

	service.Add(serviceFunc(service.connect))
	service.Add(serviceFunc(service.handle))

	raw := cfg.Raw()
	// Actually starts the listeners and NAT service
	service.CommitConfiguration(raw, raw)

	return service
}
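The KiB/s comment above is worth working through once with numbers. For a hypothetical UI setting of 100, the rate handed to the token bucket is 100*1024 B/s and the capacity is five seconds' worth of that rate:

package main

import "fmt"

func main() {
	const maxSendKbps = 100 // hypothetical UI value, in KiB/s

	rate := float64(1024 * maxSendKbps)    // 102400 B/s sustained rate
	burst := int64(5 * 1024 * maxSendKbps) // 512000 B bucket capacity (5 s worth)

	fmt.Println(rate, burst) // 102400 512000
}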
Example #11
func TestProgressEmitter(t *testing.T) {
	w := events.Default.Subscribe(events.DownloadProgress)

	c := config.Wrap("/tmp/test", config.Configuration{})
	c.SetOptions(config.OptionsConfiguration{
		ProgressUpdateIntervalS: 0,
	})

	p := NewProgressEmitter(c)
	go p.Serve()
	p.interval = 0

	expectTimeout(w, t)

	s := sharedPullerState{
		updated: time.Now(),
		mut:     sync.NewRWMutex(),
	}
	p.Register(&s)

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.copyDone(protocol.BlockInfo{})

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.copiedFromOrigin()

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.pullStarted()

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.pullDone(protocol.BlockInfo{})

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	p.Deregister(&s)

	expectEvent(w, t, 0)
	expectTimeout(w, t)

}
Example #12
func NewBlockFinder(db *leveldb.DB, cfg *config.Wrapper) *BlockFinder {
	if blockFinder != nil {
		return blockFinder
	}

	f := &BlockFinder{
		db:  db,
		mut: sync.NewRWMutex(),
	}

	f.CommitConfiguration(config.Configuration{}, cfg.Raw())
	cfg.Subscribe(f)

	return f
}
Example #13
func newDynamicClient(uri *url.URL, certs []tls.Certificate, invitations chan protocol.SessionInvitation) RelayClient {
	closeInvitationsOnFinish := false
	if invitations == nil {
		closeInvitationsOnFinish = true
		invitations = make(chan protocol.SessionInvitation)
	}
	return &dynamicClient{
		pooladdr:                 uri,
		certs:                    certs,
		invitations:              invitations,
		closeInvitationsOnFinish: closeInvitationsOnFinish,

		mut: sync.NewRWMutex(),
	}
}
Example #14
func init() {
	for _, proto := range []string{"udp", "udp4", "udp6"} {
		Register(proto, func(uri *url.URL, pkt *Announce) (Client, error) {
			c := &UDPClient{
				wg:  sync.NewWaitGroup(),
				mut: sync.NewRWMutex(),
			}
			err := c.Start(uri, pkt)
			if err != nil {
				return nil, err
			}
			return c, nil
		})
	}
}
Example #15
func TestAssetsDir(t *testing.T) {
	// For any given request to $FILE, we should return the first found of
	//  - assetsdir/$THEME/$FILE
	//  - compiled in asset $THEME/$FILE
	//  - assetsdir/default/$FILE
	//  - compiled in asset default/$FILE

	// The asset map contains compressed assets, so create a couple of gzip compressed assets here.
	buf := new(bytes.Buffer)
	gw := gzip.NewWriter(buf)
	gw.Write([]byte("default"))
	gw.Close()
	def := buf.Bytes()

	buf = new(bytes.Buffer)
	gw = gzip.NewWriter(buf)
	gw.Write([]byte("foo"))
	gw.Close()
	foo := buf.Bytes()

	e := &staticsServer{
		theme:    "foo",
		mut:      sync.NewRWMutex(),
		assetDir: "testdata",
		assets: map[string][]byte{
			"foo/a":     foo, // overridden in foo/a
			"foo/b":     foo,
			"default/a": def, // overridden in default/a (but foo/a takes precedence)
			"default/b": def, // overridden in default/b (but foo/b takes precedence)
			"default/c": def,
		},
	}

	s := httptest.NewServer(e)
	defer s.Close()

	// assetsdir/foo/a exists, overrides compiled in
	expectURLToContain(t, s.URL+"/a", "overridden-foo")

	// foo/b is compiled in, default/b is overridden, return compiled in
	expectURLToContain(t, s.URL+"/b", "foo")

	// only exists as compiled in default/c so use that
	expectURLToContain(t, s.URL+"/c", "default")

	// only exists as overridden default/d so use that
	expectURLToContain(t, s.URL+"/d", "overridden-default")
}
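The precedence list at the top of this test reads as a four-step lookup. A sketch of that resolution order follows (lookupAsset is illustrative, not the server's actual code, and it ignores the gzip wrapping the real assets carry):

import (
	"io/ioutil"
	"path/filepath"
)

// lookupAsset tries, in order: the override dir for the active theme, the
// compiled-in asset for that theme, the override dir for "default", and
// finally the compiled-in default asset.
func lookupAsset(assetDir, theme, file string, assets map[string][]byte) ([]byte, bool) {
	if assetDir != "" {
		if bs, err := ioutil.ReadFile(filepath.Join(assetDir, theme, file)); err == nil {
			return bs, true
		}
	}
	if bs, ok := assets[theme+"/"+file]; ok {
		return bs, true
	}
	if assetDir != "" {
		if bs, err := ioutil.ReadFile(filepath.Join(assetDir, "default", file)); err == nil {
			return bs, true
		}
	}
	bs, ok := assets["default/"+file]
	return bs, ok
}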
Example #16
func TestSourceFileBad(t *testing.T) {
	s := sharedPullerState{
		realName: "nonexistent",
		mut:      sync.NewRWMutex(),
	}

	fd, err := s.sourceFile()
	if err == nil {
		t.Fatal("Unexpected nil error")
	}
	if fd != nil {
		t.Fatal("Unexpected non-nil fd")
	}
	if err := s.failed(); err == nil {
		t.Fatal("Unexpected nil failed()")
	}
}
Example #17
// Update updates internal state of what has been downloaded into the temporary
// files by the remote device for this specific folder.
func (t *deviceDownloadState) Update(folder string, updates []protocol.FileDownloadProgressUpdate) {
	t.mut.RLock()
	f, ok := t.folders[folder]
	t.mut.RUnlock()

	if !ok {
		f = &deviceFolderDownloadState{
			mut:   sync.NewRWMutex(),
			files: make(map[string]deviceFolderFileDownloadState),
		}
		t.mut.Lock()
		t.folders[folder] = f
		t.mut.Unlock()
	}

	f.Update(updates)
}
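Update takes the read lock for the common case and the write lock only to create missing folder state. Between RUnlock and Lock another goroutine can insert the same folder; a sketch of the usual variant that re-checks under the write lock to avoid the duplicate allocation (same types as above, hypothetical method name):

// update is a sketch of Update with a re-check; only the locking
// discipline differs from the original.
func (t *deviceDownloadState) update(folder string, updates []protocol.FileDownloadProgressUpdate) {
	t.mut.RLock()
	f, ok := t.folders[folder]
	t.mut.RUnlock()

	if !ok {
		t.mut.Lock()
		// Re-check: another goroutine may have inserted the entry while
		// no lock was held.
		if f, ok = t.folders[folder]; !ok {
			f = &deviceFolderDownloadState{
				mut:   sync.NewRWMutex(),
				files: make(map[string]deviceFolderFileDownloadState),
			}
			t.folders[folder] = f
		}
		t.mut.Unlock()
	}

	f.Update(updates)
}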
Example #18
func (s *Service) NewMapping(protocol Protocol, ip net.IP, port int) *Mapping {
	mapping := &Mapping{
		protocol: protocol,
		address: Address{
			IP:   ip,
			Port: port,
		},
		extAddresses: make(map[string]Address),
		mut:          sync.NewRWMutex(),
	}

	s.mut.Lock()
	s.mappings = append(s.mappings, mapping)
	s.mut.Unlock()

	return mapping
}
Example #19
func NewSvc(cfg *config.Wrapper, tlsCfg *tls.Config) *Svc {
	conns := make(chan *tls.Conn)

	svc := &Svc{
		Supervisor: suture.New("Svc", suture.Spec{
			Log: func(log string) {
				if debug {
					l.Debugln(log)
				}
			},
			FailureBackoff:   5 * time.Minute,
			FailureDecay:     float64((10 * time.Minute) / time.Second),
			FailureThreshold: 5,
		}),
		cfg:    cfg,
		tlsCfg: tlsCfg,

		tokens:      make(map[string]suture.ServiceToken),
		clients:     make(map[string]*client.ProtocolClient),
		mut:         sync.NewRWMutex(),
		invitations: make(chan protocol.SessionInvitation),
		conns:       conns,
	}

	rcfg := cfg.Raw()
	svc.CommitConfiguration(rcfg, rcfg)
	cfg.Subscribe(svc)

	receiver := &invitationReceiver{
		tlsCfg:      tlsCfg,
		conns:       conns,
		invitations: svc.invitations,
		stop:        make(chan struct{}),
	}

	eventBc := &eventBroadcaster{
		svc: svc,
	}

	svc.Add(receiver)
	svc.Add(eventBc)

	return svc
}
Example #20
func (s *Service) NewMapping(protocol Protocol, ip net.IP, port int) *Mapping {
	mapping := &Mapping{
		protocol: protocol,
		address: Address{
			IP:   ip,
			Port: port,
		},
		extAddresses: make(map[string]Address),
		mut:          sync.NewRWMutex(),
	}

	s.mut.Lock()
	s.mappings = append(s.mappings, mapping)
	// Reset the timer while holding the lock, see process() for explanation
	s.timer.Reset(time.Second)
	s.mut.Unlock()

	return mapping
}
Example #21
// Test creating temporary file inside read-only directory
func TestReadOnlyDir(t *testing.T) {
	// Create a read only directory, clean it up afterwards.
	os.Mkdir("testdata/read_only_dir", 0555)
	defer func() {
		os.Chmod("testdata/read_only_dir", 0755)
		os.RemoveAll("testdata/read_only_dir")
	}()

	s := sharedPullerState{
		tempName: "testdata/read_only_dir/.temp_name",
		mut:      sync.NewRWMutex(),
	}

	fd, err := s.tempFile()
	if err != nil {
		t.Fatal(err)
	}
	if fd == nil {
		t.Fatal("Unexpected nil fd")
	}

	s.fail("Test done", nil)
	s.finalClose()
}
Example #22
func NewProtocolClient(uri *url.URL, certs []tls.Certificate, invitations chan protocol.SessionInvitation) *ProtocolClient {
	closeInvitationsOnFinish := false
	if invitations == nil {
		closeInvitationsOnFinish = true
		invitations = make(chan protocol.SessionInvitation)
	}

	return &ProtocolClient{
		URI:         uri,
		Invitations: invitations,

		closeInvitationsOnFinish: closeInvitationsOnFinish,

		config: configForCerts(certs),

		timeout: time.Minute * 2,

		stop:    make(chan struct{}),
		stopped: make(chan struct{}),

		mut:       sync.NewRWMutex(),
		connected: false,
	}
}
Example #23
	go restart()
}

func (s *apiService) postSystemShutdown(w http.ResponseWriter, r *http.Request) {
	s.flushResponse(`{"ok": "shutting down"}`, w)
	go shutdown()
}

func (s *apiService) flushResponse(resp string, w http.ResponseWriter) {
	w.Write([]byte(resp + "\n"))
	f := w.(http.Flusher)
	f.Flush()
}

var cpuUsagePercent [10]float64 // The last ten seconds
var cpuUsageLock = sync.NewRWMutex()

func (s *apiService) getSystemStatus(w http.ResponseWriter, r *http.Request) {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	tilde, _ := osutil.ExpandTilde("~")
	res := make(map[string]interface{})
	res["myID"] = myID.String()
	res["goroutines"] = runtime.NumGoroutine()
	res["alloc"] = m.Alloc
	res["sys"] = m.Sys - m.HeapReleased
	res["tilde"] = tilde
	if s.cfg.Options().LocalAnnEnabled || s.cfg.Options().GlobalAnnEnabled {
		res["discoveryEnabled"] = true
		discoErrors := make(map[string]string)
Example #24
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (f *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile, hasCurFile := f.model.CurrentFolderFile(f.folderID, file.Name)

	if hasCurFile && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		l.Debugln(f, "taking shortcut on", file.Name)

		events.Default.Log(events.ItemStarted, map[string]string{
			"folder": f.folderID,
			"item":   file.Name,
			"type":   "file",
			"action": "metadata",
		})

		f.queue.Done(file.Name)

		err := f.shortcutFile(file)
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": f.folderID,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "metadata",
		})

		if err != nil {
			l.Infoln("Puller: shortcut:", err)
			f.newError(file.Name, err)
		} else {
			f.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
		}

		return
	}

	// Figure out the absolute filenames we need once and for all
	tempName, err := rootedJoinedPath(f.dir, defTempNamer.TempName(file.Name))
	if err != nil {
		f.newError(file.Name, err)
		return
	}
	realName, err := rootedJoinedPath(f.dir, file.Name)
	if err != nil {
		f.newError(file.Name, err)
		return
	}

	if hasCurFile && !curFile.IsDirectory() && !curFile.IsSymlink() {
		// Check that the file on disk is what we expect it to be according to
		// the database. If there's a mismatch here, there might be local
		// changes that we don't know about yet and we should scan before
		// touching the file. If we can't stat the file we'll just pull it.
		if info, err := f.mtimeFS.Lstat(realName); err == nil {
			if !info.ModTime().Equal(curFile.ModTime()) || info.Size() != curFile.Size {
				l.Debugln("file modified but not rescanned; not pulling:", realName)
				// Scan() is synchronous (i.e. blocks until the scan is
				// completed and returns an error), but a scan can't happen
				// while we're in the puller routine. Request the scan in the
				// background and it'll be handled when the current pulling
				// sweep is complete. As we do retries, we'll queue the scan
				// for this file up to ten times, but the last nine of those
				// scans will be cheap...
				go f.scan.Scan([]string{file.Name})
				return
			}
		}
	}

	scanner.PopulateOffsets(file.Blocks)

	var blocks []protocol.BlockInfo
	var blocksSize int64
	var reused []int32

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize, nil)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]struct{}, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = struct{}{}
		}

		// Since the blocks are already there, we don't need to get them.
		for i, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
				blocksSize += int64(block.Size)
			} else {
				reused = append(reused, int32(i))
			}
		}

		// The sharedpullerstate will know which flags to use when opening the
		// temp file, depending on whether we are reusing any blocks or not.
		if len(reused) == 0 {
			// We are not reusing any blocks, so discard the temp file
			// ourselves so the sharedpuller doesn't panic when it fails to
			// exclusively create a file which already exists.
			osutil.InWritableDir(os.Remove, tempName)
		}
	} else {
		// Copy the blocks, as we don't want to shuffle them on the FileInfo
		blocks = append(blocks, file.Blocks...)
		blocksSize = file.Size
	}

	if f.checkFreeSpace {
		if free, err := osutil.DiskFreeBytes(f.dir); err == nil && free < blocksSize {
			l.Warnf(`Folder "%s": insufficient disk space in %s for %s: have %.2f MiB, need %.2f MiB`, f.folderID, f.dir, file.Name, float64(free)/1024/1024, float64(blocksSize)/1024/1024)
			f.newError(file.Name, errors.New("insufficient space"))
			return
		}
	}

	// Shuffle the blocks
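	// (An inside-out Fisher-Yates shuffle: each position i swaps with a
	// uniformly chosen index j in [0, i].)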
	for i := range blocks {
		j := rand.Intn(i + 1)
		blocks[i], blocks[j] = blocks[j], blocks[i]
	}

	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": f.folderID,
		"item":   file.Name,
		"type":   "file",
		"action": "update",
	})

	s := sharedPullerState{
		file:             file,
		folder:           f.folderID,
		tempName:         tempName,
		realName:         realName,
		copyTotal:        len(blocks),
		copyNeeded:       len(blocks),
		reused:           len(reused),
		updated:          time.Now(),
		available:        reused,
		availableUpdated: time.Now(),
		ignorePerms:      f.ignorePermissions(file),
		version:          curFile.Version,
		mut:              sync.NewRWMutex(),
		sparse:           f.allowSparse,
		created:          time.Now(),
	}

	l.Debugf("%v need file %s; copy %d, reused %v", f, file.Name, len(blocks), reused)

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}
Example #25
func (s *apiService) Serve() {
	s.stop = make(chan struct{})

	// The GET handlers
	getRestMux := http.NewServeMux()
	getRestMux.HandleFunc("/rest/db/completion", s.getDBCompletion)              // device folder
	getRestMux.HandleFunc("/rest/db/file", s.getDBFile)                          // folder file
	getRestMux.HandleFunc("/rest/db/ignores", s.getDBIgnores)                    // folder
	getRestMux.HandleFunc("/rest/db/need", s.getDBNeed)                          // folder [perpage] [page]
	getRestMux.HandleFunc("/rest/db/status", s.getDBStatus)                      // folder
	getRestMux.HandleFunc("/rest/db/browse", s.getDBBrowse)                      // folder [prefix] [dirsonly] [levels]
	getRestMux.HandleFunc("/rest/events", s.getEvents)                           // since [limit]
	getRestMux.HandleFunc("/rest/stats/device", s.getDeviceStats)                // -
	getRestMux.HandleFunc("/rest/stats/folder", s.getFolderStats)                // -
	getRestMux.HandleFunc("/rest/svc/deviceid", s.getDeviceID)                   // id
	getRestMux.HandleFunc("/rest/svc/lang", s.getLang)                           // -
	getRestMux.HandleFunc("/rest/svc/report", s.getReport)                       // -
	getRestMux.HandleFunc("/rest/system/browse", s.getSystemBrowse)              // current
	getRestMux.HandleFunc("/rest/system/config", s.getSystemConfig)              // -
	getRestMux.HandleFunc("/rest/system/config/insync", s.getSystemConfigInsync) // -
	getRestMux.HandleFunc("/rest/system/connections", s.getSystemConnections)    // -
	getRestMux.HandleFunc("/rest/system/discovery", s.getSystemDiscovery)        // -
	getRestMux.HandleFunc("/rest/system/error", s.getSystemError)                // -
	getRestMux.HandleFunc("/rest/system/ping", s.restPing)                       // -
	getRestMux.HandleFunc("/rest/system/status", s.getSystemStatus)              // -
	getRestMux.HandleFunc("/rest/system/upgrade", s.getSystemUpgrade)            // -
	getRestMux.HandleFunc("/rest/system/version", s.getSystemVersion)            // -
	getRestMux.HandleFunc("/rest/system/debug", s.getSystemDebug)                // -
	getRestMux.HandleFunc("/rest/system/log", s.getSystemLog)                    // [since]
	getRestMux.HandleFunc("/rest/system/log.txt", s.getSystemLogTxt)             // [since]

	// The POST handlers
	postRestMux := http.NewServeMux()
	postRestMux.HandleFunc("/rest/db/prio", s.postDBPrio)                      // folder file [perpage] [page]
	postRestMux.HandleFunc("/rest/db/ignores", s.postDBIgnores)                // folder
	postRestMux.HandleFunc("/rest/db/override", s.postDBOverride)              // folder
	postRestMux.HandleFunc("/rest/db/scan", s.postDBScan)                      // folder [sub...] [delay]
	postRestMux.HandleFunc("/rest/system/config", s.postSystemConfig)          // <body>
	postRestMux.HandleFunc("/rest/system/error", s.postSystemError)            // <body>
	postRestMux.HandleFunc("/rest/system/error/clear", s.postSystemErrorClear) // -
	postRestMux.HandleFunc("/rest/system/ping", s.restPing)                    // -
	postRestMux.HandleFunc("/rest/system/reset", s.postSystemReset)            // [folder]
	postRestMux.HandleFunc("/rest/system/restart", s.postSystemRestart)        // -
	postRestMux.HandleFunc("/rest/system/shutdown", s.postSystemShutdown)      // -
	postRestMux.HandleFunc("/rest/system/upgrade", s.postSystemUpgrade)        // -
	postRestMux.HandleFunc("/rest/system/pause", s.postSystemPause)            // device
	postRestMux.HandleFunc("/rest/system/resume", s.postSystemResume)          // device
	postRestMux.HandleFunc("/rest/system/debug", s.postSystemDebug)            // [enable] [disable]

	// Debug endpoints, not for general use
	getRestMux.HandleFunc("/rest/debug/peerCompletion", s.getPeerCompletion)
	getRestMux.HandleFunc("/rest/debug/httpmetrics", s.getSystemHTTPMetrics)

	// A handler that splits requests between the two above and disables
	// caching
	restMux := noCacheMiddleware(metricsMiddleware(getPostHandler(getRestMux, postRestMux)))

	// The main routing handler
	mux := http.NewServeMux()
	mux.Handle("/rest/", restMux)
	mux.HandleFunc("/qr/", s.getQR)

	// Serve compiled in assets unless an asset directory was set (for development)
	assets := &embeddedStatic{
		theme:        s.cfg.GUI().Theme,
		lastModified: time.Now(),
		mut:          sync.NewRWMutex(),
		assetDir:     s.assetDir,
		assets:       auto.Assets(),
	}
	mux.Handle("/", assets)

	s.cfg.Subscribe(assets)

	guiCfg := s.cfg.GUI()

	// Wrap everything in CSRF protection. The /rest prefix should be
	// protected, other requests will grant cookies.
	handler := csrfMiddleware(s.id.String()[:5], "/rest", guiCfg.APIKey(), mux)

	// Add our version and ID as a header to responses
	handler = withDetailsMiddleware(s.id, handler)

	// Wrap everything in basic auth, if user/password is set.
	if len(guiCfg.User) > 0 && len(guiCfg.Password) > 0 {
		handler = basicAuthAndSessionMiddleware("sessionid-"+s.id.String()[:5], guiCfg, handler)
	}

	// Redirect to HTTPS if we are supposed to
	if guiCfg.UseTLS() {
		handler = redirectToHTTPSMiddleware(handler)
	}

	handler = debugMiddleware(handler)

	srv := http.Server{
		Handler:     handler,
		ReadTimeout: 10 * time.Second,
	}

	s.fss = newFolderSummaryService(s.cfg, s.model)
	defer s.fss.Stop()
	s.fss.ServeBackground()

	l.Infoln("API listening on", s.listener.Addr())
	l.Infoln("GUI URL is", guiCfg.URL())
	err := srv.Serve(s.listener)

	// The return could be due to an intentional close. Wait for the stop
	// signal before returning. If there is no stop signal within a second, we
	// assume it was unintentional and log the error before retrying.
	select {
	case <-s.stop:
	case <-time.After(time.Second):
		l.Warnln("API:", err)
	}
}
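Serve assembles the handler as an onion: each middleware wraps the previous handler value, so the wrapper applied last (debugMiddleware) is the first to see a request, and csrfMiddleware sits innermost, just before the mux. A small self-contained sketch of why the assembly order reads bottom-up:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// wrap logs its name before delegating, making the nesting visible.
func wrap(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("enter", name)
		next.ServeHTTP(w, r)
	})
}

func main() {
	var h http.Handler = http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})
	h = wrap("csrf", h)
	h = wrap("auth", h)
	h = wrap("debug", h) // applied last, runs first

	h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest("GET", "/", nil))
	// Prints: enter debug, enter auth, enter csrf
}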
Example #26
func newDeviceDownloadState() *deviceDownloadState {
	return &deviceDownloadState{
		mut:     sync.NewRWMutex(),
		folders: make(map[string]*deviceFolderDownloadState),
	}
}
Example #27
	listen         string        = ":80"
	dir            string        = ""
	evictionTime   time.Duration = time.Hour
	debug          bool          = false
	getLRUSize     int           = 10 << 10
	getLimitBurst  int64         = 10
	getLimitAvg                  = 1
	postLRUSize    int           = 1 << 10
	postLimitBurst int64         = 2
	postLimitAvg                 = 1
	getLimit       time.Duration
	postLimit      time.Duration
	permRelaysFile string
	ipHeader       string

	getMut      sync.RWMutex = sync.NewRWMutex()
	getLRUCache *lru.Cache

	postMut      sync.RWMutex = sync.NewRWMutex()
	postLRUCache *lru.Cache

	requests = make(chan request, 10)

	mut             sync.RWMutex           = sync.NewRWMutex()
	knownRelays     []relay                = make([]relay, 0)
	permanentRelays []relay                = make([]relay, 0)
	evictionTimers  map[string]*time.Timer = make(map[string]*time.Timer)
)

func main() {
	flag.StringVar(&listen, "listen", listen, "Listen address")
Example #28
func TestSendDownloadProgressMessages(t *testing.T) {
	c := config.Wrap("/tmp/test", config.Configuration{})
	c.SetOptions(config.OptionsConfiguration{
		ProgressUpdateIntervalS: 0,
		TempIndexMinBlocks:      10,
	})

	fc := &FakeConnection{}

	p := NewProgressEmitter(c)
	p.temporaryIndexSubscribe(fc, []string{"folder", "folder2"})

	expect := func(updateIdx int, state *sharedPullerState, updateType protocol.FileDownloadProgressUpdateType, version protocol.Vector, blocks []int32, remove bool) {
		messageIdx := -1
		for i, msg := range fc.downloadProgressMessages {
			if msg.folder == state.folder {
				messageIdx = i
				break
			}
		}
		if messageIdx < 0 {
			t.Errorf("Message for folder %s does not exist at %s", state.folder, caller(1))
			return
		}

		msg := fc.downloadProgressMessages[messageIdx]

		// Don't know the index (it's random due to iterating maps)
		if updateIdx == -1 {
			for i, upd := range msg.updates {
				if upd.Name == state.file.Name {
					updateIdx = i
					break
				}
			}
		}

		if updateIdx == -1 {
			t.Errorf("Could not find update for %s at %s", state.file.Name, caller(1))
			return
		}

		if updateIdx > len(msg.updates)-1 {
			t.Errorf("Update at index %d does not exist at %s", updateIdx, caller(1))
			return
		}

		update := msg.updates[updateIdx]

		if update.UpdateType != updateType {
			t.Errorf("Wrong update type at %s", caller(1))
		}

		if !update.Version.Equal(version) {
			t.Errorf("Wrong version at %s", caller(1))
		}

		if len(update.BlockIndexes) != len(blocks) {
			t.Errorf("Wrong indexes. Have %d expect %d at %s", len(update.BlockIndexes), len(blocks), caller(1))
		}
		for i := range update.BlockIndexes {
			if update.BlockIndexes[i] != blocks[i] {
				t.Errorf("Index %d incorrect at %s", i, caller(1))
			}
		}

		if remove {
			fc.downloadProgressMessages = append(fc.downloadProgressMessages[:messageIdx], fc.downloadProgressMessages[messageIdx+1:]...)
		}
	}
	expectEmpty := func() {
		if len(fc.downloadProgressMessages) > 0 {
			t.Errorf("Still have something at %s: %#v", caller(1), fc.downloadProgressMessages)
		}
	}

	now := time.Now()
	tick := func() time.Time {
		now = now.Add(time.Second)
		return now
	}

	if len(fc.downloadProgressMessages) != 0 {
		t.Error("Expected no requests")
	}

	v1 := (protocol.Vector{}).Update(0)
	v2 := (protocol.Vector{}).Update(1)

	// Requires more than 10 blocks to work.
	blocks := make([]protocol.BlockInfo, 11)

	state1 := &sharedPullerState{
		folder: "folder",
		file: protocol.FileInfo{
			Name:    "state1",
			Version: v1,
			Blocks:  blocks,
		},
		mut:              sync.NewRWMutex(),
		availableUpdated: time.Now(),
	}
	p.registry["1"] = state1

	// Has no blocks, hence no message is sent
	p.sendDownloadProgressMessages()
	expectEmpty()

	// Returns update for puller with new extra blocks
	state1.available = []int32{1}
	p.sendDownloadProgressMessages()

	expect(0, state1, protocol.UpdateTypeAppend, v1, []int32{1}, true)
	expectEmpty()

	// Does nothing if nothing changes
	p.sendDownloadProgressMessages()
	expectEmpty()

	// Does nothing if timestamp updated, but no new blocks (should never happen)
	state1.availableUpdated = tick()

	p.sendDownloadProgressMessages()
	expectEmpty()

	// Does not return an update if the blocks change but the date does not (should never happen)
	state1.available = []int32{1, 2}

	p.sendDownloadProgressMessages()
	expectEmpty()

	// If both the date and the blocks change, returns only the diff
	state1.availableUpdated = tick()

	p.sendDownloadProgressMessages()

	expect(0, state1, protocol.UpdateTypeAppend, v1, []int32{2}, true)
	expectEmpty()

	// Returns forget and update if puller version has changed
	state1.file.Version = v2

	p.sendDownloadProgressMessages()

	expect(0, state1, protocol.UpdateTypeForget, v1, nil, false)
	expect(1, state1, protocol.UpdateTypeAppend, v2, []int32{1, 2}, true)
	expectEmpty()

	// Returns forget and append if the sharedPullerState creation time changes.

	state1.available = []int32{1}
	state1.availableUpdated = tick()
	state1.created = tick()

	p.sendDownloadProgressMessages()

	expect(0, state1, protocol.UpdateTypeForget, v2, nil, false)
	expect(1, state1, protocol.UpdateTypeAppend, v2, []int32{1}, true)
	expectEmpty()

	// Sends an empty update if new file exists, but does not have any blocks yet. (To indicate that the old blocks are no longer available)
	state1.file.Version = v1
	state1.available = nil
	state1.availableUpdated = tick()

	p.sendDownloadProgressMessages()

	expect(0, state1, protocol.UpdateTypeForget, v2, nil, false)
	expect(1, state1, protocol.UpdateTypeAppend, v1, nil, true)
	expectEmpty()

	// Updates for multiple files and folders can be combined
	state1.available = []int32{1, 2, 3}
	state1.availableUpdated = tick()

	state2 := &sharedPullerState{
		folder: "folder2",
		file: protocol.FileInfo{
			Name:    "state2",
			Version: v1,
			Blocks:  blocks,
		},
		mut:              sync.NewRWMutex(),
		available:        []int32{1, 2, 3},
		availableUpdated: time.Now(),
	}
	state3 := &sharedPullerState{
		folder: "folder",
		file: protocol.FileInfo{
			Name:    "state3",
			Version: v1,
			Blocks:  blocks,
		},
		mut:              sync.NewRWMutex(),
		available:        []int32{1, 2, 3},
		availableUpdated: time.Now(),
	}
	state4 := &sharedPullerState{
		folder: "folder2",
		file: protocol.FileInfo{
			Name:    "state4",
			Version: v1,
			Blocks:  blocks,
		},
		mut:              sync.NewRWMutex(),
		available:        []int32{1, 2, 3},
		availableUpdated: time.Now(),
	}
	p.registry["2"] = state2
	p.registry["3"] = state3
	p.registry["4"] = state4

	p.sendDownloadProgressMessages()

	expect(-1, state1, protocol.UpdateTypeAppend, v1, []int32{1, 2, 3}, false)
	expect(-1, state3, protocol.UpdateTypeAppend, v1, []int32{1, 2, 3}, true)
	expect(-1, state2, protocol.UpdateTypeAppend, v1, []int32{1, 2, 3}, false)
	expect(-1, state4, protocol.UpdateTypeAppend, v1, []int32{1, 2, 3}, true)
	expectEmpty()

	// Returns forget if puller no longer exists, as well as updates if it has been updated.
	state1.available = []int32{1, 2, 3, 4, 5}
	state1.availableUpdated = tick()
	state2.available = []int32{1, 2, 3, 4, 5}
	state2.availableUpdated = tick()

	delete(p.registry, "3")
	delete(p.registry, "4")

	p.sendDownloadProgressMessages()

	expect(-1, state1, protocol.UpdateTypeAppend, v1, []int32{4, 5}, false)
	expect(-1, state3, protocol.UpdateTypeForget, v1, nil, true)
	expect(-1, state2, protocol.UpdateTypeAppend, v1, []int32{4, 5}, false)
	expect(-1, state4, protocol.UpdateTypeForget, v1, nil, true)
	expectEmpty()

	// Deletions are sent only once (actual bug I found writing the tests)
	p.sendDownloadProgressMessages()
	p.sendDownloadProgressMessages()
	expectEmpty()

	// Not sent for "inactive" (symlinks, dirs, or wrong folder) pullers
	// Directory
	state5 := &sharedPullerState{
		folder: "folder",
		file: protocol.FileInfo{
			Name:    "state5",
			Version: v1,
			Type:    protocol.FileInfoTypeDirectory,
			Blocks:  blocks,
		},
		mut:              sync.NewRWMutex(),
		available:        []int32{1, 2, 3},
		availableUpdated: time.Now(),
	}
	// Symlink
	state6 := &sharedPullerState{
		folder: "folder",
		file: protocol.FileInfo{
			Name:    "state6",
			Version: v1,
			Type:    protocol.FileInfoTypeSymlinkUnknown,
		},
		mut:              sync.NewRWMutex(),
		available:        []int32{1, 2, 3},
		availableUpdated: time.Now(),
	}
	// Some other directory
	state7 := &sharedPullerState{
		folder: "folderXXX",
		file: protocol.FileInfo{
			Name:    "state7",
			Version: v1,
			Blocks:  blocks,
		},
		mut:              sync.NewRWMutex(),
		available:        []int32{1, 2, 3},
		availableUpdated: time.Now(),
	}
	// Less than 10 blocks
	state8 := &sharedPullerState{
		folder: "folder",
		file: protocol.FileInfo{
			Name:    "state8",
			Version: v1,
			Blocks:  blocks[:3],
		},
		mut:              sync.NewRWMutex(),
		available:        []int32{1, 2, 3},
		availableUpdated: time.Now(),
	}
	p.registry["5"] = state5
	p.registry["6"] = state6
	p.registry["7"] = state7
	p.registry["8"] = state8

	p.sendDownloadProgressMessages()

	expectEmpty()

	// Device is no longer subscribed to a particular folder
	delete(p.registry, "1") // Clean up first
	delete(p.registry, "2") // Clean up first

	p.sendDownloadProgressMessages()
	expect(-1, state1, protocol.UpdateTypeForget, v1, nil, true)
	expect(-1, state2, protocol.UpdateTypeForget, v1, nil, true)

	expectEmpty()

	p.registry["1"] = state1
	p.registry["2"] = state2
	p.registry["3"] = state3
	p.registry["4"] = state4

	p.sendDownloadProgressMessages()

	expect(-1, state1, protocol.UpdateTypeAppend, v1, []int32{1, 2, 3, 4, 5}, false)
	expect(-1, state3, protocol.UpdateTypeAppend, v1, []int32{1, 2, 3}, true)
	expect(-1, state2, protocol.UpdateTypeAppend, v1, []int32{1, 2, 3, 4, 5}, false)
	expect(-1, state4, protocol.UpdateTypeAppend, v1, []int32{1, 2, 3}, true)
	expectEmpty()

	p.temporaryIndexUnsubscribe(fc)
	p.temporaryIndexSubscribe(fc, []string{"folder"})

	p.sendDownloadProgressMessages()

	// See progressemitter.go for explanation why this is commented out.
	// Search for state.cleanup
	//expect(-1, state2, protocol.UpdateTypeForget, v1, nil, false)
	//expect(-1, state4, protocol.UpdateTypeForget, v1, nil, true)

	expectEmpty()

	// Cleanup when device no longer exists
	p.temporaryIndexUnsubscribe(fc)

	p.sendDownloadProgressMessages()
	_, ok := p.sentDownloadStates[fc.ID()]
	if ok {
		t.Error("Should not be there")
	}
}
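The expectations above all follow one rule: per file, the emitter remembers what it last sent and transmits only the difference: an UpdateTypeAppend carrying the newly available tail while the version is unchanged, or an UpdateTypeForget for the old version followed by a fresh append when it changes. A condensed sketch of that rule (lastSent and diffUpdates are illustrative, not the emitter's actual types):

type lastSent struct {
	version protocol.Vector
	blocks  []int32
}

// diffUpdates computes what must be sent so the remote's view catches up
// with the current state, mirroring the behaviour the test exercises.
func diffUpdates(name string, prev lastSent, curVersion protocol.Vector, curBlocks []int32) []protocol.FileDownloadProgressUpdate {
	if !prev.version.Equal(curVersion) {
		// Version changed: forget everything sent for the old version,
		// then announce all currently available blocks.
		return []protocol.FileDownloadProgressUpdate{
			{Name: name, UpdateType: protocol.UpdateTypeForget, Version: prev.version},
			{Name: name, UpdateType: protocol.UpdateTypeAppend, Version: curVersion, BlockIndexes: curBlocks},
		}
	}
	if len(curBlocks) > len(prev.blocks) {
		// Same version: append only the new tail.
		return []protocol.FileDownloadProgressUpdate{
			{Name: name, UpdateType: protocol.UpdateTypeAppend, Version: curVersion, BlockIndexes: curBlocks[len(prev.blocks):]},
		}
	}
	return nil
}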
Example #29
	evictionTime   = time.Hour
	debug          bool
	getLRUSize           = 10 << 10
	getLimitBurst  int64 = 10
	getLimitAvg          = 1
	postLRUSize          = 1 << 10
	postLimitBurst int64 = 2
	postLimitAvg         = 1
	getLimit       time.Duration
	postLimit      time.Duration
	permRelaysFile string
	ipHeader       string
	geoipPath      string
	proto          string

	getMut      = sync.NewRWMutex()
	getLRUCache *lru.Cache

	postMut      = sync.NewRWMutex()
	postLRUCache *lru.Cache

	requests = make(chan request, 10)

	mut             = sync.NewRWMutex()
	knownRelays     = make([]relay, 0)
	permanentRelays = make([]relay, 0)
	evictionTimers  = make(map[string]*time.Timer)
)

func main() {
	flag.StringVar(&listen, "listen", listen, "Listen address")
Example #30
func NewCachingMux() CachingMux {
	return &cachingMux{
		Supervisor: suture.NewSimple("discover.cachingMux"),
		mut:        sync.NewRWMutex(),
	}
}