Example 1
func TestMain(m *testing.M) {
	// Make sure that the temp file required by the tests does not get removed
	// during the run, and set the prefix so it is found correctly regardless
	// of platform.
	originalPrefix := ignore.TempPrefix
	ignore.TempPrefix = ignore.WindowsTempPrefix

	future := time.Now().Add(time.Hour)
	err := os.Chtimes(filepath.Join("testdata", ignore.TempName("file")), future, future)
	if err != nil {
		panic(err)
	}

	code := m.Run()

	// Restore the prefix explicitly rather than in a defer: deferred
	// functions are skipped when os.Exit is called.
	ignore.TempPrefix = originalPrefix
	os.Exit(code)
}
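
A note on the epilogue above: deferred functions do not run once os.Exit is called, so the prefix is restored explicitly before exiting rather than in a defer. The minimal standalone program below (not Syncthing code) demonstrates the behaviour.

package main

import (
	"fmt"
	"os"
)

func main() {
	// This deferred call never executes: os.Exit terminates the process
	// immediately, without running deferred functions.
	defer fmt.Println("never printed")

	fmt.Println("exiting")
	os.Exit(0)
}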
Example 2
// Make sure that the copier routine hashes the content when asked, and pulls
// if it fails to find the block.
func TestLastResortPulling(t *testing.T) {
	// Add a file to the index (with an incorrect block representation, as the
	// content doesn't actually match the block list)
	file := setUpFile("empty", []int{0})
	m := setUpModel(file)

	// Pretend that we are handling a new file with the same content but a
	// different name (causing it to copy that particular block)
	file.Name = "newfile"

	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	f := setUpSendReceiveFolder(m)

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 1)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single copier routine
	go f.copierRoutine(copyChan, pullChan, finisherChan)

	f.handleFile(file, copyChan, finisherChan)

	// The copier should hash the empty file, realise that the region it read
	// doesn't match the hash advertised by the block map, fix the block map
	// entry, and ask to pull the block.
	<-pullChan

	// Verify that it did fix the incorrect hash.
	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Found unexpected block")
	}

	if !m.finder.Iterate(folders, scanner.SHA256OfNothing, iterFn) {
		t.Error("Expected block not found")
	}

	(<-finisherChan).fd.Close()
	os.Remove(filepath.Join("testdata", ignore.TempName("newfile")))
}
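
The pipeline exercised here is handleFile feeding the copier routine over a channel, with blocks it cannot satisfy locally handed on to the pull channel. As a rough, self-contained sketch of that fan-out pattern (hypothetical types and function names, not Syncthing's actual copyBlocksState/pullBlockState or copierRoutine), a copier that verifies candidate data against the advertised hash and forwards mismatches for pulling could look like this:

package main

import (
	"crypto/sha256"
	"fmt"
)

// blockReq is a hypothetical stand-in for Syncthing's richer block state
// types: a named block, candidate local data, and the advertised hash.
type blockReq struct {
	name string
	data []byte
	hash [sha256.Size]byte
}

// copier verifies local candidate data against the advertised hash and
// forwards mismatches to the pull channel, mirroring the "last resort
// pulling" behaviour the test asserts on.
func copier(in <-chan blockReq, pulls, done chan<- blockReq) {
	for req := range in {
		if sha256.Sum256(req.data) == req.hash {
			done <- req // local copy is good, finish it
			continue
		}
		pulls <- req // hash mismatch: this block must be pulled
	}
	close(pulls)
	close(done)
}

func main() {
	in := make(chan blockReq)
	pulls := make(chan blockReq, 1)
	done := make(chan blockReq, 1)

	go copier(in, pulls, done)

	// The candidate data does not match the advertised hash, so the
	// copier should queue the block for pulling.
	in <- blockReq{
		name: "block0",
		data: []byte("stale content"),
		hash: sha256.Sum256([]byte("expected content")),
	}
	close(in)

	fmt.Println("queued for pull:", (<-pulls).name)
}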
Example 3
func TestDeregisterOnFailInPull(t *testing.T) {
	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer os.Remove("testdata/" + ignore.TempName("filex"))

	db := db.OpenMemory()
	m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	f := setUpSendReceiveFolder(m)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.pullerRoutine(pullChan, finisherBufferChan)
	go f.finisherRoutine(finisherChan)

	f.handleFile(file, copyChan, finisherChan)

	// Receive at the finisher; we should error out, as the puller has nowhere
	// to pull from.
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
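
The second half of the test checks that finishing the same failed item twice is harmless, i.e. that deregistration from progress tracking is idempotent. A minimal sketch of that property, using a hypothetical registry type rather than Syncthing's progressEmitter:

package main

import (
	"fmt"
	"sync"
)

// registry is a hypothetical progress tracker. Deregister is safe to call
// more than once for the same name, mirroring the "doing it again should
// have no effect" assertion in the test.
type registry struct {
	mut   sync.Mutex
	items map[string]struct{}
}

func newRegistry() *registry {
	return &registry{items: make(map[string]struct{})}
}

func (r *registry) Register(name string) {
	r.mut.Lock()
	defer r.mut.Unlock()
	r.items[name] = struct{}{}
}

func (r *registry) Deregister(name string) {
	r.mut.Lock()
	defer r.mut.Unlock()
	delete(r.items, name) // deleting a missing key is a no-op
}

func (r *registry) Len() int {
	r.mut.Lock()
	defer r.mut.Unlock()
	return len(r.items)
}

func main() {
	r := newRegistry()
	r.Register("filex")
	fmt.Println("registered:", r.Len()) // 1

	r.Deregister("filex")
	r.Deregister("filex") // the second deregistration changes nothing
	fmt.Println("registered:", r.Len()) // 0
}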
Example 4
func TestWeakHash(t *testing.T) {
	tempFile := filepath.Join("testdata", ignore.TempName("weakhash"))
	var shift int64 = 10
	var size int64 = 1 << 20
	expectBlocks := int(size / protocol.BlockSize)
	expectPulls := int(shift / protocol.BlockSize)
	if shift > 0 {
		expectPulls++
	}

	cleanup := func() {
		for _, path := range []string{tempFile, "testdata/weakhash"} {
			os.Remove(path)
		}
	}

	cleanup()
	defer cleanup()

	f, err := os.Create("testdata/weakhash")
	if err != nil {
		t.Error(err)
	}
	defer f.Close()
	_, err = io.CopyN(f, rand.Reader, size)
	if err != nil {
		t.Error(err)
	}
	info, err := f.Stat()
	if err != nil {
		t.Error(err)
	}

	// Create two files, where the second file has a random prefix of `shift`
	// bytes, yet both are of the same length, for example:
	// File 1: abcdefgh
	// File 2: xyabcdef
	f.Seek(0, io.SeekStart)
	existing, err := scanner.Blocks(f, protocol.BlockSize, size, nil)
	if err != nil {
		t.Error(err)
	}

	f.Seek(0, io.SeekStart)
	remainder := io.LimitReader(f, size-shift)
	prefix := io.LimitReader(rand.Reader, shift)
	nf := io.MultiReader(prefix, remainder)
	desired, err := scanner.Blocks(nf, protocol.BlockSize, size, nil)
	if err != nil {
		t.Error(err)
	}

	existingFile := protocol.FileInfo{
		Name:       "weakhash",
		Blocks:     existing,
		Size:       size,
		ModifiedS:  info.ModTime().Unix(),
		ModifiedNs: int32(info.ModTime().Nanosecond()),
	}
	desiredFile := protocol.FileInfo{
		Name:      "weakhash",
		Size:      size,
		Blocks:    desired,
		ModifiedS: info.ModTime().Unix() + 1,
	}

	// Set up the model/pull environment
	m := setUpModel(existingFile)
	fo := setUpSendReceiveFolder(m)
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, expectBlocks)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single fetcher routine
	go fo.copierRoutine(copyChan, pullChan, finisherChan)

	// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
	fo.WeakHashThresholdPct = 101
	fo.handleFile(desiredFile, copyChan, finisherChan)

	var pulls []pullBlockState
	for len(pulls) < expectBlocks {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
		}
	}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	finish.fd.Close()
	if err := os.Remove(tempFile); err != nil && !os.IsNotExist(err) {
		t.Error(err)
	}

	// Test 2 - using weak hash, expectPulls blocks pulled.
	fo.WeakHashThresholdPct = -1
	fo.handleFile(desiredFile, copyChan, finisherChan)

	pulls = pulls[:0]
	for len(pulls) < expectPulls {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
		}
	}

	finish = <-finisherChan
	finish.fd.Close()

	expectShifted := expectBlocks - expectPulls
	if finish.copyOriginShifted != expectShifted {
		t.Errorf("did not copy %d shifted", expectShifted)
	}
}
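
The point of the weak hash here is that a cheap rolling checksum can recognise identical content at a shifted offset, so most of the shifted file can be copied locally instead of pulled. Syncthing's actual weak hash is not reproduced below; the sketch uses a deliberately simple additive rolling sum purely to illustrate the rolling property.

package main

import "fmt"

// rollingSum is a deliberately weak hash: the byte sum over a window.
// Real weak hashes (rsync-style rolling checksums) are stronger, but the
// constant-time "roll" update is the same idea.
func rollingSum(window []byte) uint32 {
	var s uint32
	for _, b := range window {
		s += uint32(b)
	}
	return s
}

// roll updates the sum when the window slides one byte to the right.
func roll(sum uint32, outByte, inByte byte) uint32 {
	return sum - uint32(outByte) + uint32(inByte)
}

func main() {
	const window = 4
	data := []byte("xyabcdef") // "abcdef..." shifted right by two bytes
	target := rollingSum([]byte("abcd"))

	sum := rollingSum(data[:window])
	for i := 0; ; i++ {
		if sum == target {
			fmt.Printf("weak match at offset %d: %q\n", i, data[i:i+window])
			return
		}
		if i+window >= len(data) {
			break
		}
		sum = roll(sum, data[i], data[i+window])
	}
	fmt.Println("no match")
}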
Example 5
func TestCopierFinder(t *testing.T) {
	// After the diff between required and existing blocks we should:
	// Copy: 1, 2, 3, 4, 5, 6, 7, 8
	// since there is neither an existing file nor a temp file to reuse.

	// After dropping out the blocks found locally:
	// Pull: 1, 5, 6, 8

	tempFile := filepath.Join("testdata", ignore.TempName("file2"))
	err := os.Remove(tempFile)
	if err != nil && !os.IsNotExist(err) {
		t.Error(err)
	}

	existingBlocks := []int{0, 2, 3, 4, 0, 0, 7, 0}
	existingFile := setUpFile(ignore.TempName("file"), existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	m := setUpModel(existingFile)
	f := setUpSendReceiveFolder(m)
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)

	// Run a single fetcher routine
	go f.copierRoutine(copyChan, pullChan, finisherChan)

	f.handleFile(requiredFile, copyChan, finisherChan)

	pulls := []pullBlockState{<-pullChan, <-pullChan, <-pullChan, <-pullChan}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list.
	// They are pulled in random order.
	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, pulledBlock := range pulls {
			if string(pulledBlock.block.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
		if string(finish.file.Blocks[idx-1].Hash) != string(blocks[idx].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", idx, finish.file.Blocks[idx-1].String(), blocks[idx].String())
		}
	}

	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(tempFile, protocol.BlockSize, nil)
	if err != nil {
		t.Fatal(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}
	finish.fd.Close()

	os.Remove(tempFile)
}
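
What the test pins down is the split between blocks that can be copied from local data and blocks that must be pulled from the network. A minimal sketch of that split by block hash (hypothetical helper, not Syncthing's handleFile/copierRoutine logic):

package main

import "fmt"

// blockDiff reports which required block hashes are available locally
// (copy candidates) and which are not (must be pulled). Illustration
// only; Syncthing's real diff works on full block metadata.
func blockDiff(required []string, local map[string]bool) (copyLocal, pull []string) {
	for _, h := range required {
		if local[h] {
			copyLocal = append(copyLocal, h)
		} else {
			pull = append(pull, h)
		}
	}
	return copyLocal, pull
}

func main() {
	// Stand-ins for the numbered blocks in the test comment above.
	required := []string{"b1", "b2", "b3", "b4", "b5", "b6", "b7", "b8"}
	local := map[string]bool{"b2": true, "b3": true, "b4": true, "b7": true}

	copyLocal, pull := blockDiff(required, local)
	fmt.Println("copy locally:", copyLocal) // [b2 b3 b4 b7]
	fmt.Println("pull:", pull)              // [b1 b5 b6 b8]
}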