Example #1
func newAPIService(id protocol.DeviceID, cfg *config.Wrapper, assetDir string, m *model.Model, eventSub *events.BufferedSubscription, discoverer *discover.CachingMux, relayService *relay.Service, errors, systemLog *logger.Recorder) (*apiService, error) {
	service := &apiService{
		id:              id,
		cfg:             cfg,
		assetDir:        assetDir,
		model:           m,
		eventSub:        eventSub,
		discoverer:      discoverer,
		relayService:    relayService,
		systemConfigMut: sync.NewMutex(),
		stop:            make(chan struct{}),
		configChanged:   make(chan struct{}),
		listenerMut:     sync.NewMutex(),
		guiErrors:       errors,
		systemLog:       systemLog,
	}

	seen := make(map[string]struct{})
	for file := range auto.Assets() {
		theme := strings.Split(file, "/")[0]
		if _, ok := seen[theme]; !ok {
			seen[theme] = struct{}{}
			service.themes = append(service.themes, theme)
		}
	}

	var err error
	service.listener, err = service.getListener(cfg.GUI())
	return service, err
}
Example #2
func NewBroadcast(port int) *Broadcast {
	b := &Broadcast{
		Supervisor: suture.New("broadcastBeacon", suture.Spec{
			// Don't retry too frenetically: an error opening a socket or
			// similar is usually either permanent or takes a while to get
			// resolved...
			FailureThreshold: 2,
			FailureBackoff:   60 * time.Second,
			// Only log restarts in debug mode.
			Log: func(line string) {
				l.Debugln(line)
			},
		}),
		port:   port,
		inbox:  make(chan []byte),
		outbox: make(chan recv, 16),
	}

	b.br = &broadcastReader{
		port:    port,
		outbox:  b.outbox,
		connMut: sync.NewMutex(),
	}
	b.Add(b.br)
	b.bw = &broadcastWriter{
		port:    port,
		inbox:   b.inbox,
		connMut: sync.NewMutex(),
	}
	b.Add(b.bw)

	return b
}
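The broadcastReader and broadcastWriter registered with b.Add above have to satisfy suture's supervision contract. A minimal sketch of that contract, assuming the classic suture API implied by the Spec fields used here (Serve/Stop rather than the later context-based form):

// Sketch of the interface b.Add expects from broadcastReader and
// broadcastWriter (github.com/thejerf/suture, pre-context API). Serve blocks
// while the service runs; if it returns, the supervisor restarts it subject
// to the FailureThreshold/FailureBackoff settings above. Stop asks the
// service to shut down.
type Service interface {
	Serve()
	Stop()
}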
Example #3
func newRWFolder(m *Model, shortID uint64, cfg config.FolderConfiguration) *rwFolder {
	return &rwFolder{
		stateTracker: stateTracker{
			folder: cfg.ID,
			mut:    sync.NewMutex(),
		},

		model:            m,
		progressEmitter:  m.progressEmitter,
		virtualMtimeRepo: db.NewVirtualMtimeRepo(m.db, cfg.ID),

		folder:      cfg.ID,
		dir:         cfg.Path(),
		scanIntv:    time.Duration(cfg.RescanIntervalS) * time.Second,
		ignorePerms: cfg.IgnorePerms,
		copiers:     cfg.Copiers,
		pullers:     cfg.Pullers,
		shortID:     shortID,
		order:       cfg.Order,

		stop:        make(chan struct{}),
		queue:       newJobQueue(),
		pullTimer:   time.NewTimer(shortPullIntv),
		scanTimer:   time.NewTimer(time.Millisecond), // The first scan should be done immediately.
		delayScan:   make(chan time.Duration),
		scanNow:     make(chan rescanRequest),
		remoteIndex: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a notification if we're busy doing a pull when it comes.

		errorsMut: sync.NewMutex(),
	}
}
Example #4
// Wrap wraps an existing Configuration structure and ties it to a file on
// disk.
func Wrap(path string, cfg Configuration) *Wrapper {
	w := &Wrapper{
		cfg:  cfg,
		path: path,
		mut:  sync.NewMutex(),
		sMut: sync.NewMutex(),
	}
	w.replaces = make(chan Configuration)
	return w
}
Example #5
func newRWFolder(m *Model, shortID uint64, cfg config.FolderConfiguration) *rwFolder {
	p := &rwFolder{
		stateTracker: stateTracker{
			folder: cfg.ID,
			mut:    sync.NewMutex(),
		},

		model:            m,
		progressEmitter:  m.progressEmitter,
		virtualMtimeRepo: db.NewVirtualMtimeRepo(m.db, cfg.ID),

		folder:         cfg.ID,
		dir:            cfg.Path(),
		scanIntv:       time.Duration(cfg.RescanIntervalS) * time.Second,
		ignorePerms:    cfg.IgnorePerms,
		copiers:        cfg.Copiers,
		pullers:        cfg.Pullers,
		shortID:        shortID,
		order:          cfg.Order,
		maxConflicts:   cfg.MaxConflicts,
		allowSparse:    !cfg.DisableSparseFiles,
		checkFreeSpace: cfg.MinDiskFreePct != 0,

		stop:        make(chan struct{}),
		queue:       newJobQueue(),
		pullTimer:   time.NewTimer(time.Second),
		scanTimer:   time.NewTimer(time.Millisecond), // The first scan should be done immediately.
		delayScan:   make(chan time.Duration),
		scanNow:     make(chan rescanRequest),
		remoteIndex: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a notification if we're busy doing a pull when it comes.

		errorsMut: sync.NewMutex(),
	}

	if p.copiers == 0 {
		p.copiers = defaultCopiers
	}
	if p.pullers == 0 {
		p.pullers = defaultPullers
	}

	if cfg.PullerPauseS == 0 {
		p.pause = defaultPullerPause
	} else {
		p.pause = time.Duration(cfg.PullerPauseS) * time.Second
	}

	if cfg.PullerSleepS == 0 {
		p.sleep = defaultPullerSleep
	} else {
		p.sleep = time.Duration(cfg.PullerSleepS) * time.Second
	}

	return p
}
Example #6
func NewFileSet(folder string, db *leveldb.DB) *FileSet {
	var s = FileSet{
		localVersion: make(map[protocol.DeviceID]int64),
		folder:       folder,
		db:           db,
		blockmap:     NewBlockMap(db, folder),
		mutex:        sync.NewMutex(),
	}

	ldbCheckGlobals(db, []byte(folder))

	var deviceID protocol.DeviceID
	ldbWithAllFolderTruncated(db, []byte(folder), func(device []byte, f FileInfoTruncated) bool {
		copy(deviceID[:], device)
		if f.LocalVersion > s.localVersion[deviceID] {
			s.localVersion[deviceID] = f.LocalVersion
		}
		return true
	})
	if debug {
		l.Debugf("loaded localVersion for %q: %#v", folder, s.localVersion)
	}
	clock(s.localVersion[protocol.LocalDeviceID])

	return &s
}
Example #7
func NewFileSet(folder string, db *Instance) *FileSet {
	var s = FileSet{
		remoteSequence: make(map[protocol.DeviceID]int64),
		folder:         folder,
		db:             db,
		blockmap:       NewBlockMap(db, db.folderIdx.ID([]byte(folder))),
		updateMutex:    sync.NewMutex(),
	}

	s.db.checkGlobals([]byte(folder), &s.globalSize)

	var deviceID protocol.DeviceID
	s.db.withAllFolderTruncated([]byte(folder), func(device []byte, f FileInfoTruncated) bool {
		copy(deviceID[:], device)
		if deviceID == protocol.LocalDeviceID {
			if f.Sequence > s.sequence {
				s.sequence = f.Sequence
			}
			s.localSize.addFile(f)
		} else if f.Sequence > s.remoteSequence[deviceID] {
			s.remoteSequence[deviceID] = f.Sequence
		}
		return true
	})
	l.Debugf("loaded sequence for %q: %#v", folder, s.sequence)

	return &s
}
Example #8
func NewFileSet(folder string, db *Instance) *FileSet {
	var s = FileSet{
		localVersion: make(map[protocol.DeviceID]int64),
		folder:       folder,
		db:           db,
		blockmap:     NewBlockMap(db, db.folderIdx.ID([]byte(folder))),
		mutex:        sync.NewMutex(),
	}

	s.db.checkGlobals([]byte(folder), &s.globalSize)

	var deviceID protocol.DeviceID
	s.db.withAllFolderTruncated([]byte(folder), func(device []byte, f FileInfoTruncated) bool {
		copy(deviceID[:], device)
		if f.LocalVersion > s.localVersion[deviceID] {
			s.localVersion[deviceID] = f.LocalVersion
		}
		if deviceID == protocol.LocalDeviceID {
			s.localSize.addFile(f)
		}
		return true
	})
	l.Debugf("loaded localVersion for %q: %#v", folder, s.localVersion)
	clock(s.localVersion[protocol.LocalDeviceID])

	return &s
}
Example #9
func newUPnPSvc(cfg *config.Wrapper, localPort int) *upnpSvc {
	return &upnpSvc{
		cfg:        cfg,
		localPort:  localPort,
		extPortMut: sync.NewMutex(),
	}
}
Example #10
func TestSourceFileOK(t *testing.T) {
	s := sharedPullerState{
		realName: "testdata/foo",
		mut:      sync.NewMutex(),
	}

	fd, err := s.sourceFile()
	if err != nil {
		t.Fatal(err)
	}
	if fd == nil {
		t.Fatal("Unexpected nil fd")
	}

	bs := make([]byte, 6)
	n, err := fd.Read(bs)

	if n != len(bs) {
		t.Fatalf("Wrong read length %d != %d", n, len(bs))
	}
	if string(bs) != "foobar" {
		t.Fatalf("Wrong contents %s != foobar", string(bs))
	}

	if err := s.failed(); err != nil {
		t.Fatal(err)
	}
}
Example #11
func newRWFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner, mtimeFS *fs.MtimeFS) service {
	f := &rwFolder{
		folder: folder{
			stateTracker: newStateTracker(cfg.ID),
			scan:         newFolderScanner(cfg),
			stop:         make(chan struct{}),
			model:        model,
		},

		mtimeFS:        mtimeFS,
		dir:            cfg.Path(),
		versioner:      ver,
		ignorePerms:    cfg.IgnorePerms,
		copiers:        cfg.Copiers,
		pullers:        cfg.Pullers,
		order:          cfg.Order,
		maxConflicts:   cfg.MaxConflicts,
		allowSparse:    !cfg.DisableSparseFiles,
		checkFreeSpace: cfg.MinDiskFreePct != 0,
		ignoreDelete:   cfg.IgnoreDelete,
		fsync:          cfg.Fsync,

		queue:       newJobQueue(),
		pullTimer:   time.NewTimer(time.Second),
		remoteIndex: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a notification if we're busy doing a pull when it comes.

		errorsMut: sync.NewMutex(),

		initialScanCompleted: make(chan struct{}),
	}

	f.configureCopiersAndPullers(cfg)

	return f
}
Example #12
func NewUPnPService(cfg *config.Wrapper, localPort int) *Service {
	return &Service{
		cfg:        cfg,
		localPort:  localPort,
		extPortMut: sync.NewMutex(),
	}
}
Example #13
func newFolderSummaryService(cfg *config.Wrapper, m *model.Model) *folderSummaryService {
	service := &folderSummaryService{
		Supervisor:      suture.NewSimple("folderSummaryService"),
		cfg:             cfg,
		model:           m,
		stop:            make(chan struct{}),
		immediate:       make(chan string),
		folders:         make(map[string]struct{}),
		foldersMut:      sync.NewMutex(),
		lastEventReqMut: sync.NewMutex(),
	}

	service.Add(serviceFunc(service.listenForUpdates))
	service.Add(serviceFunc(service.calculateSummaries))

	return service
}
Example #14
// Wrap wraps an existing Configuration structure and ties it to a file on
// disk.
func Wrap(path string, cfg Configuration) *Wrapper {
	w := &Wrapper{
		cfg:  cfg,
		path: path,
		mut:  sync.NewMutex(),
	}
	return w
}
Example #15
func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *tls.Config, discoverer discover.Finder,
	bepProtocolName string, tlsDefaultCommonName string, lans []*net.IPNet) *Service {

	service := &Service{
		Supervisor: suture.New("connections.Service", suture.Spec{
			Log: func(line string) {
				l.Infoln(line)
			},
		}),
		cfg:                  cfg,
		myID:                 myID,
		model:                mdl,
		tlsCfg:               tlsCfg,
		discoverer:           discoverer,
		conns:                make(chan internalConn),
		bepProtocolName:      bepProtocolName,
		tlsDefaultCommonName: tlsDefaultCommonName,
		lans:                 lans,
		limiter:              newLimiter(cfg),
		natService:           nat.NewService(myID, cfg),

		listenersMut:   sync.NewRWMutex(),
		listeners:      make(map[string]genericListener),
		listenerTokens: make(map[string]suture.ServiceToken),

		// A listener can fail twice, rapidly. Any more than that and it
		// will be put on suspension for ten minutes. Restarts and changes
		// due to config are done by removing and adding services, so are
		// not subject to these limitations.
		listenerSupervisor: suture.New("c.S.listenerSupervisor", suture.Spec{
			Log: func(line string) {
				l.Infoln(line)
			},
			FailureThreshold: 2,
			FailureBackoff:   600 * time.Second,
		}),

		curConMut:         sync.NewMutex(),
		currentConnection: make(map[protocol.DeviceID]completeConn),
	}
	cfg.Subscribe(service)

	// There are several moving parts here; one routine per listening address
	// (handled in configuration changing) to handle incoming connections,
	// one routine to periodically attempt outgoing connections, one routine to
	// do the common handling regardless of whether the connection was
	// incoming or outgoing.

	service.Add(serviceFunc(service.connect))
	service.Add(serviceFunc(service.handle))
	service.Add(service.listenerSupervisor)

	raw := cfg.RawCopy()
	// Actually starts the listeners and NAT service
	service.CommitConfiguration(raw, raw)

	return service
}
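serviceFunc above is presumably a small adapter that lets a plain function be supervised; a minimal sketch of such an adapter under the same Serve/Stop suture contract (hypothetical, the codebase's own helper may differ):

// serviceFunc adapts a plain function to the suture Service interface so
// that service.connect and service.handle can be added to the supervisor.
type serviceFunc func()

// Serve runs the wrapped function as the service body.
func (f serviceFunc) Serve() { f() }

// Stop is a no-op; the wrapped functions run for the lifetime of the process.
func (f serviceFunc) Stop() {}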
Example #16
func setUpRwFolder(model *Model) rwFolder {
	return rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     model,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}
}
Example #17
func NewBufferedSubscription(s *Subscription, size int) *BufferedSubscription {
	bs := &BufferedSubscription{
		sub: s,
		buf: make([]Event, size),
		mut: sync.NewMutex(),
	}
	bs.cond = stdsync.NewCond(bs.mut)
	go bs.pollingLoop()
	return bs
}
Example #18
// NewProcess returns a new Process talking to Syncthing at the specified address.
// Example: NewProcess("127.0.0.1:8082")
func NewProcess(addr string) *Process {
	p := &Process{
		addr:         addr,
		localVersion: make(map[string]map[string]int64),
		done:         make(map[string]bool),
		eventMut:     sync.NewMutex(),
	}
	p.startCompleteCond = stdsync.NewCond(p.eventMut)
	return p
}
Example #19
func TestHandleFileWithTemp(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6

	// Create existing file
	existingFile := protocol.FileInfo{
		Name:     "file",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}

	// Create target file
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)
	// Update index
	m.updateLocals("default", []protocol.FileInfo{existingFile})

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState, 1)

	p.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for i, eq := range []int{1, 5, 6, 8} {
		if string(toCopy.blocks[i].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block mismatch: %s != %s", toCopy.blocks[i].String(), blocks[eq].String())
		}
	}
}
Example #20
func newAPIService(id protocol.DeviceID, cfg configIntf, httpsCertFile, httpsKeyFile, assetDir string, m modelIntf, eventSub events.BufferedSubscription, discoverer discover.CachingMux, relayService relay.Service, errors, systemLog logger.Recorder) (*apiService, error) {
	service := &apiService{
		id:              id,
		cfg:             cfg,
		httpsCertFile:   httpsCertFile,
		httpsKeyFile:    httpsKeyFile,
		assetDir:        assetDir,
		model:           m,
		eventSub:        eventSub,
		discoverer:      discoverer,
		relayService:    relayService,
		systemConfigMut: sync.NewMutex(),
		stop:            make(chan struct{}),
		configChanged:   make(chan struct{}),
		listenerMut:     sync.NewMutex(),
		guiErrors:       errors,
		systemLog:       systemLog,
	}

	seen := make(map[string]struct{})
	// Load themes from compiled in assets.
	for file := range auto.Assets() {
		theme := strings.Split(file, "/")[0]
		if _, ok := seen[theme]; !ok {
			seen[theme] = struct{}{}
			service.themes = append(service.themes, theme)
		}
	}
	if assetDir != "" {
		// Load any extra themes from the asset override dir.
		for _, dir := range dirNames(assetDir) {
			if _, ok := seen[dir]; !ok {
				seen[dir] = struct{}{}
				service.themes = append(service.themes, dir)
			}
		}
	}

	var err error
	service.listener, err = service.getListener(cfg.GUI())
	return service, err
}
Example #21
func newSmallIndex(db *Instance, prefix []byte) *smallIndex {
	idx := &smallIndex{
		db:     db,
		prefix: prefix,
		id2val: make(map[uint32]string),
		val2id: make(map[string]uint32),
		mut:    sync.NewMutex(),
	}
	idx.load()
	return idx
}
Example #22
func New(withCache bool) *Matcher {
	m := &Matcher{
		withCache: withCache,
		stop:      make(chan struct{}),
		mut:       sync.NewMutex(),
	}
	if withCache {
		go m.clean(2 * time.Hour)
	}
	return m
}
Example #23
func newRWFolder(model *Model, cfg config.FolderConfiguration, ver versioner.Versioner) service {
	f := &rwFolder{
		folder: folder{
			stateTracker: stateTracker{
				folderID: cfg.ID,
				mut:      sync.NewMutex(),
			},
			scan: folderscan{
				interval: time.Duration(cfg.RescanIntervalS) * time.Second,
				timer:    time.NewTimer(time.Millisecond), // The first scan should be done immediately.
				now:      make(chan rescanRequest),
				delay:    make(chan time.Duration),
			},
			stop:  make(chan struct{}),
			model: model,
		},

		virtualMtimeRepo: db.NewVirtualMtimeRepo(model.db, cfg.ID),
		dir:              cfg.Path(),
		ignorePerms:      cfg.IgnorePerms,
		copiers:          cfg.Copiers,
		pullers:          cfg.Pullers,
		order:            cfg.Order,
		maxConflicts:     cfg.MaxConflicts,
		allowSparse:      !cfg.DisableSparseFiles,
		checkFreeSpace:   cfg.MinDiskFreePct != 0,
		versioner:        ver,

		queue:       newJobQueue(),
		pullTimer:   time.NewTimer(time.Second),
		remoteIndex: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a notification if we're busy doing a pull when it comes.

		errorsMut: sync.NewMutex(),

		initialScanCompleted: make(chan struct{}),
	}

	f.configureCopiersAndPullers(cfg)

	return f
}
Example #24
func setUpRwFolder(model *Model) rwFolder {
	return rwFolder{
		folder: folder{
			stateTracker: newStateTracker("default"),
			model:        model,
		},
		dir:       "testdata",
		queue:     newJobQueue(),
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}
}
Example #25
func newAutoclosedFile(name string, closeDelay, maxOpenTime time.Duration) *autoclosedFile {
	f := &autoclosedFile{
		name:        name,
		closeDelay:  closeDelay,
		maxOpenTime: maxOpenTime,
		mut:         sync.NewMutex(),
		closed:      make(chan struct{}),
		closeTimer:  time.NewTimer(time.Minute),
	}
	go f.closerLoop()
	return f
}
Example #26
func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *tls.Config, discoverer discover.Finder,
	bepProtocolName string, tlsDefaultCommonName string, lans []*net.IPNet) *Service {

	service := &Service{
		Supervisor:           suture.NewSimple("connections.Service"),
		cfg:                  cfg,
		myID:                 myID,
		model:                mdl,
		tlsCfg:               tlsCfg,
		discoverer:           discoverer,
		conns:                make(chan IntermediateConnection),
		bepProtocolName:      bepProtocolName,
		tlsDefaultCommonName: tlsDefaultCommonName,
		lans:                 lans,
		natService:           nat.NewService(myID, cfg),

		listenersMut:   sync.NewRWMutex(),
		listeners:      make(map[string]genericListener),
		listenerTokens: make(map[string]suture.ServiceToken),

		curConMut:         sync.NewMutex(),
		currentConnection: make(map[protocol.DeviceID]Connection),
	}
	cfg.Subscribe(service)

	// The rate variables are in KiB/s in the UI (despite the camel casing
	// of the name). We multiply by 1024 here to get B/s.
	options := service.cfg.Options()
	if options.MaxSendKbps > 0 {
		service.writeRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxSendKbps), int64(5*1024*options.MaxSendKbps))
	}

	if options.MaxRecvKbps > 0 {
		service.readRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxRecvKbps), int64(5*1024*options.MaxRecvKbps))
	}

	// There are several moving parts here; one routine per listening address
	// (handled in configuration changing) to handle incoming connections,
	// one routine to periodically attempt outgoing connections, one routine to
	// do the common handling regardless of whether the connection was
	// incoming or outgoing.

	service.Add(serviceFunc(service.connect))
	service.Add(serviceFunc(service.handle))

	raw := cfg.Raw()
	// Actually starts the listeners and NAT service
	service.CommitConfiguration(raw, raw)

	return service
}
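As a worked instance of the KiB/s to B/s conversion described in the comment above (the value is hypothetical, and the ratelimit package is assumed to be github.com/juju/ratelimit, whose NewBucketWithRate takes a fill rate in units per second and a burst capacity):

package main

import (
	"fmt"

	"github.com/juju/ratelimit"
)

func main() {
	const maxSendKbps = 100 // hypothetical UI value, in KiB/s
	// Fill rate: 100 * 1024 = 102400 B/s; burst capacity: 5 * 1024 * 100 = 512000 B,
	// i.e. roughly five seconds' worth of traffic may be sent in one burst.
	bucket := ratelimit.NewBucketWithRate(float64(1024*maxSendKbps), int64(5*1024*maxSendKbps))
	fmt.Println(bucket.Rate(), bucket.Capacity()) // effective fill rate (B/s) and capacity (B)
}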
Example #27
func TestHandleFile(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	// Create existing file
	existingFile := protocol.FileInfo{
		Name:     "filex",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}

	// Create target file
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)
	// Update index
	m.updateLocals("default", []protocol.FileInfo{existingFile})

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState, 1)

	p.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 8 {
		t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
	}

	for i, block := range toCopy.blocks {
		if string(block.Hash) != string(blocks[i+1].Hash) {
			t.Errorf("Block mismatch: %s != %s", block.String(), blocks[i+1].String())
		}
	}
}
Example #28
func NewStaggered(folderID, folderPath string, params map[string]string) Versioner {
	maxAge, err := strconv.ParseInt(params["maxAge"], 10, 0)
	if err != nil {
		maxAge = 31536000 // Default: ~1 year
	}
	cleanInterval, err := strconv.ParseInt(params["cleanInterval"], 10, 0)
	if err != nil {
		cleanInterval = 3600 // Default: clean once per hour
	}

	// Use custom path if set, otherwise .stversions in folderPath
	var versionsDir string
	if params["versionsPath"] == "" {
		if debug {
			l.Debugln("using default dir .stversions")
		}
		versionsDir = filepath.Join(folderPath, ".stversions")
	} else {
		if debug {
			l.Debugln("using dir", params["versionsPath"])
		}
		versionsDir = params["versionsPath"]
	}

	s := Staggered{
		versionsPath:  versionsDir,
		cleanInterval: cleanInterval,
		folderPath:    folderPath,
		interval: [4]Interval{
			{30, 3600},       // first hour -> 30 sec between versions
			{3600, 86400},    // next day -> 1 h between versions
			{86400, 2592000}, // next 30 days -> 1 day between versions
			{604800, maxAge}, // next year -> 1 week between versions
		},
		mutex: sync.NewMutex(),
	}

	if debug {
		l.Debugf("instantiated %#v", s)
	}

	go func() {
		s.clean()
		for range time.Tick(time.Duration(cleanInterval) * time.Second) {
			s.clean()
		}
	}()

	return s
}
Example #29
func setUpSendReceiveFolder(model *Model) sendReceiveFolder {
	return sendReceiveFolder{
		folder: folder{
			stateTracker: newStateTracker("default"),
			model:        model,
		},

		mtimeFS:   fs.NewMtimeFS(db.NewNamespacedKV(model.db, "mtime")),
		dir:       "testdata",
		queue:     newJobQueue(),
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}
}
Example #30
func newAPISvc(id protocol.DeviceID, cfg config.GUIConfiguration, assetDir string, m *model.Model, eventSub *events.BufferedSubscription) (*apiSvc, error) {
	svc := &apiSvc{
		id:              id,
		cfg:             cfg,
		assetDir:        assetDir,
		model:           m,
		systemConfigMut: sync.NewMutex(),
		eventSub:        eventSub,
	}

	var err error
	svc.listener, err = svc.getListener(cfg)
	return svc, err
}