Example #1
func newRWFolder(m *Model, shortID uint64, cfg config.FolderConfiguration) *rwFolder {
	return &rwFolder{
		stateTracker: stateTracker{
			folder: cfg.ID,
			mut:    sync.NewMutex(),
		},

		model:            m,
		progressEmitter:  m.progressEmitter,
		virtualMtimeRepo: db.NewVirtualMtimeRepo(m.db, cfg.ID),

		folder:      cfg.ID,
		dir:         cfg.Path(),
		scanIntv:    time.Duration(cfg.RescanIntervalS) * time.Second,
		ignorePerms: cfg.IgnorePerms,
		copiers:     cfg.Copiers,
		pullers:     cfg.Pullers,
		shortID:     shortID,
		order:       cfg.Order,

		encrypt: cfg.Encrypt,
		key:     cfg.Passphrase,

		stop:        make(chan struct{}),
		queue:       newJobQueue(),
		pullTimer:   time.NewTimer(shortPullIntv),
		scanTimer:   time.NewTimer(time.Millisecond), // The first scan should be done immediately.
		delayScan:   make(chan time.Duration),
		scanNow:     make(chan rescanRequest),
		remoteIndex: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a notification if we're busy doing a pull when it comes.

		errorsMut: sync.NewMutex(),
	}
}
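The remoteIndex channel above is 1-buffered so that an index notification arriving while a pull is in progress is queued rather than lost. A minimal sketch of that coalescing-notification pattern, assuming only a plain channel (the notify helper is illustrative and not part of the example above):

package main

import "fmt"

// notify performs a non-blocking send into a 1-buffered channel. At most one
// notification is queued; extra signals arriving while the receiver is busy
// are dropped, because a recheck is already pending anyway.
func notify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	remoteIndex := make(chan struct{}, 1)
	notify(remoteIndex)           // queued
	notify(remoteIndex)           // dropped: one notification is already pending
	fmt.Println(len(remoteIndex)) // 1
}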
Example #2
// Wrap wraps an existing Configuration structure and ties it to a file on
// disk.
func Wrap(path string, cfg Configuration) *Wrapper {
	w := &Wrapper{
		cfg:  cfg,
		path: path,
		mut:  sync.NewMutex(),
		sMut: sync.NewMutex(),
	}
	w.replaces = make(chan Configuration)
	return w
}
Example #3
func NewFileSet(folder string, db *leveldb.DB) *FileSet {
	var s = FileSet{
		localVersion: make(map[protocol.DeviceID]int64),
		folder:       folder,
		db:           db,
		blockmap:     NewBlockMap(db, folder),
		mutex:        sync.NewMutex(),
	}

	ldbCheckGlobals(db, []byte(folder))

	var deviceID protocol.DeviceID
	ldbWithAllFolderTruncated(db, []byte(folder), func(device []byte, f FileInfoTruncated) bool {
		copy(deviceID[:], device)
		if f.LocalVersion > s.localVersion[deviceID] {
			s.localVersion[deviceID] = f.LocalVersion
		}
		return true
	})
	if debug {
		l.Debugf("loaded localVersion for %q: %#v", folder, s.localVersion)
	}
	clock(s.localVersion[protocol.LocalDeviceID])

	return &s
}
Example #4
// NewProcess returns a new Process talking to Syncthing at the specified address.
// Example: NewProcess("127.0.0.1:8082")
func NewProcess(addr string) *Process {
	p := &Process{
		addr:         addr,
		localVersion: make(map[string]map[string]int64),
		done:         make(map[string]bool),
		eventMut:     sync.NewMutex(),
	}
	p.startCompleteCond = stdsync.NewCond(p.eventMut)
	return p
}
Example #5
func NewBufferedSubscription(s *Subscription, size int) *BufferedSubscription {
	bs := &BufferedSubscription{
		sub: s,
		buf: make([]Event, size),
		mut: sync.NewMutex(),
	}
	bs.cond = stdsync.NewCond(bs.mut)
	go bs.pollingLoop()
	return bs
}
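Both constructors above pair a mutex with a stdsync.Cond so that consumers can sleep until the producer has buffered something new. A minimal, self-contained sketch of that pattern using only the standard library (ringBuffer and its methods are illustrative names, not part of the Syncthing API):

package main

import (
	"fmt"
	stdsync "sync"
)

// ringBuffer is a stand-in for the buffered-subscription pattern: a producer
// appends items under the mutex and signals the cond, while consumers wait on
// the cond until enough items are available.
type ringBuffer struct {
	mut   stdsync.Mutex
	cond  *stdsync.Cond
	items []string
}

func newRingBuffer() *ringBuffer {
	rb := &ringBuffer{}
	rb.cond = stdsync.NewCond(&rb.mut)
	return rb
}

func (rb *ringBuffer) put(item string) {
	rb.mut.Lock()
	rb.items = append(rb.items, item)
	rb.mut.Unlock()
	rb.cond.Broadcast()
}

func (rb *ringBuffer) waitFor(n int) []string {
	rb.mut.Lock()
	defer rb.mut.Unlock()
	for len(rb.items) < n {
		rb.cond.Wait() // releases the mutex while waiting, reacquires on wakeup
	}
	return append([]string(nil), rb.items...)
}

func main() {
	rb := newRingBuffer()
	go rb.put("event")
	fmt.Println(rb.waitFor(1))
}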
Example #6
func TestHandleFileWithTemp(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6

	// Create existing file
	existingFile := protocol.FileInfo{
		Name:     "file",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}

	// Create target file
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)
	// Update index
	m.updateLocals("default", []protocol.FileInfo{existingFile})

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState, 1)

	p.handleFile(requiredFile, copyChan, nil)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for i, eq := range []int{1, 5, 6, 8} {
		if string(toCopy.blocks[i].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block mismatch: %s != %s", toCopy.blocks[i].String(), blocks[eq].String())
		}
	}
}
Example #7
func New(withCache bool) *Matcher {
	m := &Matcher{
		withCache: withCache,
		stop:      make(chan struct{}),
		mut:       sync.NewMutex(),
	}
	if withCache {
		go m.clean(2 * time.Hour)
	}
	return m
}
Example #8
func NewStaggered(folderID, folderPath string, params map[string]string) Versioner {
	maxAge, err := strconv.ParseInt(params["maxAge"], 10, 0)
	if err != nil {
		maxAge = 31536000 // Default: ~1 year
	}
	cleanInterval, err := strconv.ParseInt(params["cleanInterval"], 10, 0)
	if err != nil {
		cleanInterval = 3600 // Default: clean once per hour
	}

	// Use custom path if set, otherwise .stversions in folderPath
	var versionsDir string
	if params["versionsPath"] == "" {
		if debug {
			l.Debugln("using default dir .stversions")
		}
		versionsDir = filepath.Join(folderPath, ".stversions")
	} else {
		if debug {
			l.Debugln("using dir", params["versionsPath"])
		}
		versionsDir = params["versionsPath"]
	}

	s := Staggered{
		versionsPath:  versionsDir,
		cleanInterval: cleanInterval,
		folderPath:    folderPath,
		interval: [4]Interval{
			{30, 3600},        // first hour -> 30 sec between versions
			{3600, 86400},     // next day -> 1 h between versions
			{86400, 2592000},  // next 30 days -> 1 day between versions
			{604800, maxAge},  // next year -> 1 week between versions
		},
		mutex: sync.NewMutex(),
	}

	if debug {
		l.Debugf("instantiated %#v", s)
	}

	go func() {
		s.clean()
		for range time.Tick(time.Duration(cleanInterval) * time.Second) {
			s.clean()
		}
	}()

	return s
}
Example #9
// NewProgressEmitter creates a new progress emitter which emits
// DownloadProgress events every interval.
func NewProgressEmitter(cfg *config.Wrapper) *ProgressEmitter {
	t := &ProgressEmitter{
		stop:     make(chan struct{}),
		registry: make(map[string]*sharedPullerState),
		last:     make(map[string]map[string]*pullerProgress),
		timer:    time.NewTimer(time.Millisecond),
		mut:      sync.NewMutex(),
	}

	t.CommitConfiguration(config.Configuration{}, cfg.Raw())
	cfg.Subscribe(t)

	return t
}
Example #10
func newAPISvc(id protocol.DeviceID, cfg config.GUIConfiguration, assetDir string, m *model.Model, eventSub *events.BufferedSubscription) (*apiSvc, error) {
	svc := &apiSvc{
		id:              id,
		cfg:             cfg,
		assetDir:        assetDir,
		model:           m,
		systemConfigMut: sync.NewMutex(),
		eventSub:        eventSub,
	}

	var err error
	svc.listener, err = svc.getListener(cfg)
	return svc, err
}
Example #11
func newROFolder(model *Model, folder string, interval time.Duration) *roFolder {
	return &roFolder{
		stateTracker: stateTracker{
			folder: folder,
			mut:    sync.NewMutex(),
		},
		folder:    folder,
		intv:      interval,
		timer:     time.NewTimer(time.Millisecond),
		model:     model,
		stop:      make(chan struct{}),
		scanNow:   make(chan rescanRequest),
		delayScan: make(chan time.Duration),
	}
}
Example #12
func TestProgressEmitter(t *testing.T) {
	w := events.Default.Subscribe(events.DownloadProgress)

	c := config.Wrap("/tmp/test", config.Configuration{})
	c.SetOptions(config.OptionsConfiguration{
		ProgressUpdateIntervalS: 0,
	})

	p := NewProgressEmitter(c)
	go p.Serve()

	expectTimeout(w, t)

	s := sharedPullerState{
		mut: sync.NewMutex(),
	}
	p.Register(&s)

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.copyDone()

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.copiedFromOrigin()

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.pullStarted()

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	s.pullDone()

	expectEvent(w, t, 1)
	expectTimeout(w, t)

	p.Deregister(&s)

	expectEvent(w, t, 0)
	expectTimeout(w, t)

}
Example #13
func (m *Model) ScanFolders() map[string]error {
	m.fmut.RLock()
	folders := make([]string, 0, len(m.folderCfgs))
	for folder := range m.folderCfgs {
		folders = append(folders, folder)
	}
	m.fmut.RUnlock()

	errors := make(map[string]error, len(m.folderCfgs))
	errorsMut := sync.NewMutex()

	wg := sync.NewWaitGroup()
	wg.Add(len(folders))
	for _, folder := range folders {
		folder := folder // capture the loop variable for the goroutine below
		go func() {
			err := m.ScanFolder(folder)
			if err != nil {
				errorsMut.Lock()
				errors[folder] = err
				errorsMut.Unlock()

				// This may set the error twice: once in the scanner, just by
				// doing a check, and once here if the error returned is the
				// same one returned by CheckFolderHealth. setError handles the
				// duplicate set.
				m.fmut.RLock()
				srv := m.folderRunners[folder]
				m.fmut.RUnlock()
				srv.setError(err)
			}
			wg.Done()
		}()
	}
	wg.Wait()
	return errors
}
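ScanFolders fans the scans out to one goroutine per folder and serialises writes to the shared errors map with the mutex. The same collect-errors-in-parallel pattern in isolation, assuming only the standard library (scanAll and its arguments are illustrative names):

package main

import (
	"fmt"
	stdsync "sync"
)

// scanAll runs scan once per name concurrently and returns the per-name
// errors, guarding the shared map with a mutex just as ScanFolders does.
func scanAll(names []string, scan func(string) error) map[string]error {
	errs := make(map[string]error, len(names))
	var mut stdsync.Mutex
	var wg stdsync.WaitGroup

	wg.Add(len(names))
	for _, name := range names {
		name := name // capture the loop variable for the goroutine
		go func() {
			defer wg.Done()
			if err := scan(name); err != nil {
				mut.Lock()
				errs[name] = err
				mut.Unlock()
			}
		}()
	}
	wg.Wait()
	return errs
}

func main() {
	errs := scanAll([]string{"default", "photos"}, func(name string) error {
		if name == "photos" {
			return fmt.Errorf("scan failed for %s", name)
		}
		return nil
	})
	fmt.Println(errs)
}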
Example #14
func newDeviceActivity() *deviceActivity {
	return &deviceActivity{
		act: make(map[protocol.DeviceID]int),
		mut: sync.NewMutex(),
	}
}
Example #15
func TestCopierFinder(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 1, 2, 3, 4, 6, 7, 8
	// since there is neither an existing file nor a temp file

	// After dropping out blocks found locally:
	// Pull: 1, 5, 6, 8

	tempFile := filepath.Join("testdata", defTempNamer.TempName("file2"))
	err := os.Remove(tempFile)
	if err != nil && !os.IsNotExist(err) {
		t.Error(err)
	}

	// Create existing file
	existingFile := protocol.FileInfo{
		Name:     defTempNamer.TempName("file"),
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[3], blocks[4],
			blocks[0], blocks[0], blocks[7], blocks[0],
		},
	}

	// Create target file
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)
	// Update index
	m.updateLocals("default", []protocol.FileInfo{existingFile})

	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Verify that the blocks we claim exist in the file really exist in the db.
	for _, idx := range []int{2, 3, 4, 7} {
		if m.finder.Iterate(blocks[idx].Hash, iterFn) == false {
			t.Error("Didn't find block")
		}
	}

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single fetcher routine
	go p.copierRoutine(copyChan, pullChan, finisherChan)

	p.handleFile(requiredFile, copyChan, finisherChan)

	pulls := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list
	for i, eq := range []int{1, 5, 6, 8} {
		if string(pulls[i].block.Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, pulls[i].block.String(), blocks[eq].String())
		}
		if string(finish.file.Blocks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, finish.file.Blocks[eq-1].String(), blocks[eq].String())
		}
	}

	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(tempFile, protocol.BlockSize)
	if err != nil {
		t.Log(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}
	finish.fd.Close()

	os.Remove(tempFile)
}
Example #16
// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)

	if ok && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		if debug {
			l.Debugln(p, "taking shortcut on", file.Name)
		}

		events.Default.Log(events.ItemStarted, map[string]string{
			"folder": p.folder,
			"item":   file.Name,
			"type":   "file",
			"action": "metadata",
		})

		p.queue.Done(file.Name)

		var err error
		if file.IsSymlink() {
			err = p.shortcutSymlink(file)
		} else {
			err = p.shortcutFile(file)
		}

		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  events.Error(err),
			"type":   "file",
			"action": "metadata",
		})

		if err != nil {
			l.Infoln("Puller: shortcut:", err)
			p.newError(file.Name, err)
		} else {
			p.dbUpdates <- dbUpdateJob{file, dbUpdateShortcutFile}
		}

		return
	}

	events.Default.Log(events.ItemStarted, map[string]string{
		"folder": p.folder,
		"item":   file.Name,
		"type":   "file",
		"action": "update",
	})

	scanner.PopulateOffsets(file.Blocks)

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(p.dir, file.Name)

	reused := 0
	var blocks []protocol.BlockInfo

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]struct{}, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = struct{}{}
		}

		// Since the blocks are already there, we don't need to get them.
		for _, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
			}
		}

		// The sharedpullerstate will know which flags to use when opening the
		// temp file, depending on whether we are reusing any blocks.
		reused = len(file.Blocks) - len(blocks)
		if reused == 0 {
			// Nothing to reuse, so remove the temp file ourselves so that the
			// sharedpuller does not panic when it fails to exclusively create
			// a file that already exists.
			os.Remove(tempName)
		}
	} else {
		blocks = file.Blocks
	}

	s := sharedPullerState{
		file:        file,
		folder:      p.folder,
		tempName:    tempName,
		realName:    realName,
		copyTotal:   len(blocks),
		copyNeeded:  len(blocks),
		reused:      reused,
		ignorePerms: p.ignorePermissions(file),
		version:     curFile.Version,
		mut:         sync.NewMutex(),
	}

	if debug {
		l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)
	}

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}
Example #17
func TestDeregisterOnFailInPull(t *testing.T) {
	file := protocol.FileInfo{
		Name:     "filex",
		Flags:    0,
		Modified: 0,
		Blocks: []protocol.BlockInfo{
			blocks[0], blocks[2], blocks[0], blocks[0],
			blocks[5], blocks[0], blocks[0], blocks[8],
		},
	}
	defer os.Remove("testdata/" + defTempNamer.TempName("filex"))

	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)

	emitter := NewProgressEmitter(defaultConfig)
	go emitter.Serve()

	p := rwFolder{
		folder:          "default",
		dir:             "testdata",
		model:           m,
		queue:           newJobQueue(),
		progressEmitter: emitter,
		errors:          make(map[string]string),
		errorsMut:       sync.NewMutex(),
	}

	// queue.Done should be called by the finisher routine
	p.queue.Push("filex", 0, 0)
	p.queue.Pop()

	if len(p.queue.progress) != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)

	go p.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go p.pullerRoutine(pullChan, finisherBufferChan)
	go p.finisherRoutine(finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// Receive at the finisher; we should error out as the puller has nowhere
	// to pull from.
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if len(p.progressEmitter.registry) != 1 || len(p.queue.progress) != 1 || len(p.queue.queued) != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if state.fd != nil {
			t.Fatal("File not closed?")
		}

		if len(p.progressEmitter.registry) != 0 || len(p.queue.progress) != 0 || len(p.queue.queued) != 0 {
			t.Fatal("Still registered", len(p.progressEmitter.registry), len(p.queue.progress), len(p.queue.queued))
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if len(p.progressEmitter.registry) != 0 || len(p.queue.progress) != 0 || len(p.queue.queued) != 0 {
			t.Fatal("Still registered")
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
Example #18
func NewLogger() *Logger {
	return &Logger{
		subs:  make(map[int]*Subscription),
		mutex: sync.NewMutex(),
	}
}
Example #19
	"bytes"
	"fmt"
	"runtime"
	"sort"

	"github.com/syncthing/protocol"
	"github.com/syncthing/syncthing/internal/sync"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/util"
)

var (
	clockTick int64
	clockMut  = sync.NewMutex()
)

// clock returns a logical timestamp that is strictly greater than both the
// previous value handed out and the supplied value v, so local version
// numbers stay monotonically increasing.
func clock(v int64) int64 {
	clockMut.Lock()
	defer clockMut.Unlock()
	if v > clockTick {
		clockTick = v + 1
	} else {
		clockTick++
	}
	return clockTick
}

const (
	KeyTypeDevice = iota
Example #20
func newJobQueue() *jobQueue {
	return &jobQueue{
		mut: sync.NewMutex(),
	}
}
Example #21
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/syncthing/syncthing/internal/sync"
)

var ErrNoHome = errors.New("No home directory found - set $HOME (or the platform equivalent).")

// Try to keep this entire operation atomic-like. We shouldn't be doing this
// often enough that there is any contention on this lock.
var renameLock = sync.NewMutex()

// TryRename renames a file, leaving source file intact in case of failure.
// Tries hard to succeed on various systems by temporarily tweaking directory
// permissions and removing the destination file when necessary.
func TryRename(from, to string) error {
	renameLock.Lock()
	defer renameLock.Unlock()

	return withPreparedTarget(from, to, func() error {
		return os.Rename(from, to)
	})
}

// Rename moves a temporary file to its final place.
// Will make sure to delete the from file if the operation fails, so use only
Example #22
	"github.com/syncthing/syncthing/internal/osutil"
	"github.com/syncthing/syncthing/internal/sync"
	"github.com/syncthing/syncthing/internal/upgrade"
	"github.com/vitrun/qart/qr"
	"golang.org/x/crypto/bcrypt"
)

type guiError struct {
	Time  time.Time `json:"time"`
	Error string    `json:"error"`
}

var (
	configInSync = true
	guiErrors    = []guiError{}
	guiErrorsMut = sync.NewMutex()
	startTime    = time.Now()
)

type apiSvc struct {
	id              protocol.DeviceID
	cfg             config.GUIConfiguration
	assetDir        string
	model           *model.Model
	listener        net.Listener
	fss             *folderSummarySvc
	stop            chan struct{}
	systemConfigMut sync.Mutex
	eventSub        *events.BufferedSubscription
}
Example #23
import (
	"bytes"
	"encoding/base64"
	"math/rand"
	"net/http"
	"strings"
	"time"

	"github.com/syncthing/syncthing/internal/config"
	"github.com/syncthing/syncthing/internal/sync"
	"golang.org/x/crypto/bcrypt"
)

var (
	sessions    = make(map[string]bool)
	sessionsMut = sync.NewMutex()
)

func basicAuthAndSessionMiddleware(cookieName string, cfg config.GUIConfiguration, next http.Handler) http.Handler {

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if cfg.APIKey != "" && r.Header.Get("X-API-Key") == cfg.APIKey {
			next.ServeHTTP(w, r)
			return
		}

		cookie, err := r.Cookie(cookieName)
		if err == nil && cookie != nil {
			sessionsMut.Lock()
			_, ok := sessions[cookie.Value]
			sessionsMut.Unlock()
Example #24
// Make sure that the copier routine hashes the content when asked, and pulls
// if it fails to find the block.
func TestLastResortPulling(t *testing.T) {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
	m.AddFolder(defaultFolderConfig)

	// Add a file to the index (with an incorrect block representation, as the
	// content doesn't actually match the block list)
	file := protocol.FileInfo{
		Name:     "empty",
		Flags:    0,
		Modified: 0,
		Blocks:   []protocol.BlockInfo{blocks[0]},
	}
	m.updateLocals("default", []protocol.FileInfo{file})

	// Pretend that we are handling a new file with the same content but a
	// different name (causing that particular block to be copied)
	file.Name = "newfile"

	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Check that that particular block is there
	if !m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	p := rwFolder{
		folder:    "default",
		dir:       "testdata",
		model:     m,
		errors:    make(map[string]string),
		errorsMut: sync.NewMutex(),
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 1)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single copier routine
	go p.copierRoutine(copyChan, pullChan, finisherChan)

	p.handleFile(file, copyChan, finisherChan)

	// Copier should hash empty file, realise that the region it has read
	// doesn't match the hash which was advertised by the block map, fix it
	// and ask to pull the block.
	<-pullChan

	// Verify that it did fix the incorrect hash.
	if m.finder.Iterate(blocks[0].Hash, iterFn) {
		t.Error("Found unexpected block")
	}

	if !m.finder.Iterate(scanner.SHA256OfNothing, iterFn) {
		t.Error("Expected block not found")
	}

	(<-finisherChan).fd.Close()
	os.Remove(filepath.Join("testdata", defTempNamer.TempName("newfile")))
}
Example #25
	"os"
	"os/exec"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/syncthing/syncthing/internal/osutil"
	"github.com/syncthing/syncthing/internal/sync"
)

var (
	stdoutFirstLines []string // The first 10 lines of stdout
	stdoutLastLines  []string // The last 50 lines of stdout
	stdoutMut        = sync.NewMutex()
)

const (
	countRestarts = 4
	loopThreshold = 60 * time.Second
)

func monitorMain() {
	os.Setenv("STNORESTART", "yes")
	os.Setenv("STMONITORED", "yes")
	l.SetPrefix("[monitor] ")

	var err error
	var dst io.Writer = os.Stdout
Example #26
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"os"
	"strings"

	"github.com/syncthing/syncthing/internal/osutil"
	"github.com/syncthing/syncthing/internal/sync"
)

var csrfTokens []string
var csrfMut = sync.NewMutex()

// Check for CSRF token on /rest/ URLs. If a correct one is not given, reject
// the request with 403. For / and /index.html, set a new CSRF cookie if none
// is currently set.
func csrfMiddleware(unique, prefix, apiKey string, next http.Handler) http.Handler {
	loadCsrfTokens()
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Allow requests carrying a valid API key
		if apiKey != "" && r.Header.Get("X-API-Key") == apiKey {
			next.ServeHTTP(w, r)
			return
		}

		// Allow requests for the front page, and set a CSRF cookie if there isn't already a valid one.
		if !strings.HasPrefix(r.URL.Path, prefix) {