Example #1
func TestChunker(t *testing.T) {
	// setup data source
	buf := getRandom(23, 32*1024*1024)
	ch := chunker.New(bytes.NewReader(buf), testPol, sha256.New())
	chunks := testWithData(t, ch, chunks1)

	// test reader
	for i, c := range chunks {
		rd := c.Reader(bytes.NewReader(buf))

		h := sha256.New()
		n, err := io.Copy(h, rd)
		if err != nil {
			t.Fatalf("io.Copy(): %v", err)
		}

		if uint(n) != chunks1[i].Length {
			t.Fatalf("reader returned wrong number of bytes: expected %d, got %d",
				chunks1[i].Length, n)
		}

		d := h.Sum(nil)
		if !bytes.Equal(d, chunks1[i].Digest) {
			t.Fatalf("wrong hash returned: expected %02x, got %02x",
				chunks1[i].Digest, d)
		}
	}

	// setup nullbyte data source
	buf = bytes.Repeat([]byte{0}, len(chunks2)*chunker.MinSize)
	ch = chunker.New(bytes.NewReader(buf), testPol, sha256.New())

	testWithData(t, ch, chunks2)
}
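The testWithData helper and the chunks1/chunks2 fixtures are defined elsewhere in the test file. A minimal sketch of what they might look like, based only on how they are used in these examples (the testChunk struct, its field set, and the return type are assumptions, not the original definitions):

// testChunk mirrors how the chunks1/chunks2 fixtures are accessed in these
// examples (Length, CutFP, Digest); the concrete definition is an assumption.
type testChunk struct {
	Length uint
	CutFP  uint64
	Digest []byte
}

// testWithData drains the chunker and checks each chunk's length (and, when a
// digest is expected, its hash) against the fixture, returning the chunks so
// callers can run further checks such as Chunk.Reader.
func testWithData(t *testing.T, chnker *chunker.Chunker, testChunks []testChunk) []chunker.Chunk {
	var chunks []chunker.Chunk
	for i, expected := range testChunks {
		c, err := chnker.Next()
		if err != nil {
			t.Fatalf("Next() for chunk %d returned error: %v", i, err)
		}

		if c.Length != expected.Length {
			t.Fatalf("chunk %d: expected length %d, got %d", i, expected.Length, c.Length)
		}

		if expected.Digest != nil && !bytes.Equal(c.Digest, expected.Digest) {
			t.Fatalf("chunk %d: wrong digest", i)
		}

		chunks = append(chunks, c)
	}

	if _, err := chnker.Next(); err != io.EOF {
		t.Fatalf("expected io.EOF after the last chunk, got %v", err)
	}

	return chunks
}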
Example #2
func BenchmarkNewChunker(b *testing.B) {
	p, err := chunker.RandomPolynomial()
	OK(b, err)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		chunker.New(bytes.NewBuffer(nil), p, nil)
	}
}
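The OK and Assert helpers used throughout these examples are simple assertion wrappers; a minimal sketch consistent with how they are called here (the exact failure messages are assumptions):

// OK fails the test or benchmark immediately if err is non-nil.
func OK(tb testing.TB, err error) {
	if err != nil {
		tb.Fatalf("unexpected error: %v", err)
	}
}

// Assert fails with the formatted message if the condition does not hold.
func Assert(tb testing.TB, condition bool, msg string, args ...interface{}) {
	if !condition {
		tb.Fatalf(msg, args...)
	}
}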
Example #3
func benchmarkChunker(b *testing.B, hash hash.Hash) {
	var (
		rd   io.ReadSeeker
		size int
	)

	if *benchmarkFile != "" {
		b.Logf("using file %q for benchmark", *benchmarkFile)
		f, err := os.Open(*benchmarkFile)
		if err != nil {
			b.Fatalf("open(%q): %v", *benchmarkFile, err)
		}

		fi, err := f.Stat()
		if err != nil {
			b.Fatalf("lstat(%q): %v", *benchmarkFile, err)
		}

		size = int(fi.Size())
		rd = f
	} else {
		size = 10 * 1024 * 1024
		rd = bytes.NewReader(getRandom(23, size))
	}

	b.ResetTimer()
	b.SetBytes(int64(size))

	var chunks int
	for i := 0; i < b.N; i++ {
		chunks = 0

		rd.Seek(0, 0)
		ch := chunker.New(rd, testPol, hash)

		for {
			_, err := ch.Next()

			if err == io.EOF {
				break
			}

			if err != nil {
				b.Fatalf("Unexpected error occurred: %v", err)
			}

			chunks++
		}
	}

	b.Logf("%d chunks, average chunk size: %d bytes", chunks, size/chunks)
}
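Presumably this helper is wired into named benchmarks so the cost of hashing can be compared; a hypothetical set of wrappers (requiring crypto/md5 and crypto/sha256; the benchmark names are illustrative, not from the original source):

// Illustrative drivers for benchmarkChunker with different hash configurations.
func BenchmarkChunkerWithSHA256(b *testing.B) {
	benchmarkChunker(b, sha256.New())
}

func BenchmarkChunkerWithMD5(b *testing.B) {
	benchmarkChunker(b, md5.New())
}

func BenchmarkChunkerWithoutHash(b *testing.B) {
	benchmarkChunker(b, nil)
}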
Example #4
func TestChunkerWithoutHash(t *testing.T) {
	// setup data source
	buf := getRandom(23, 32*1024*1024)

	ch := chunker.New(bytes.NewReader(buf), testPol, nil)
	chunks := testWithData(t, ch, chunks1)

	// test reader
	for i, c := range chunks {
		rd := c.Reader(bytes.NewReader(buf))

		buf2, err := ioutil.ReadAll(rd)
		if err != nil {
			t.Fatalf("ReadAll(): %v", err)
		}

		if uint(len(buf2)) != chunks1[i].Length {
			t.Fatalf("reader returned wrong number of bytes: expected %d, got %d",
				chunks1[i].Length, uint(len(buf2)))
		}

		if !bytes.Equal(buf[c.Start:c.Start+c.Length], buf2) {
			t.Fatalf("invalid data for chunk returned: expected %02x, got %02x",
				buf[c.Start:c.Start+c.Length], buf2)
		}
	}

	// setup nullbyte data source
	buf = bytes.Repeat([]byte{0}, len(chunks2)*chunker.MinSize)
	ch = chunker.New(bytes.NewReader(buf), testPol, sha256.New())

	testWithData(t, ch, chunks2)
}
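The getRandom helper that supplies the test data is not shown; a plausible sketch, assuming it only needs a deterministic pseudo-random buffer derived from the given seed (math/rand is sufficient here, since the data is not security-sensitive):

// getRandom returns count bytes of deterministic pseudo-random data derived
// from seed, so repeated runs chunk identical input.
func getRandom(seed int64, count int) []byte {
	buf := make([]byte, count)
	rnd := rand.New(rand.NewSource(seed))
	for i := range buf {
		buf[i] = byte(rnd.Intn(256))
	}
	return buf
}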
Example #5
func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) {
	ch := chunker.New(rd, testPol, sha256.New())

	for {
		chunk, err := ch.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			// *testing.PB has no Fatalf, so surface unexpected errors directly
			panic(err)
		}

		// reduce length of buf to the current chunk
		buf = buf[:chunk.Length]
		io.ReadFull(chunk.Reader(rd), buf)
		crypto.Encrypt(key, buf, buf)
	}
}
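A *testing.PB is only handed out by b.RunParallel, so a driver for this helper presumably looks roughly like the sketch below; the benchmark name, buffer sizes, and the setupKey helper are assumptions, and Rdr is taken to be an interface satisfied by *bytes.Reader:

// Hypothetical driver that runs the parallel chunk-and-encrypt helper.
func BenchmarkChunkEncryptParallel(b *testing.B) {
	data := getRandom(23, 10*1024*1024)
	buf := make([]byte, 8*1024*1024) // large enough for any single chunk
	key := setupKey(b)               // hypothetical helper returning a *crypto.Key

	b.SetBytes(int64(len(data)))
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rd := bytes.NewReader(data)
			benchmarkChunkEncryptP(pb, buf, rd, key)
		}
	})
}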
Example #6
// SaveFile stores the content of the file on the backend as a Blob by calling
// Save for each chunk.
func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
	file, err := node.OpenForReading()
	if err != nil {
		return err
	}
	defer file.Close()

	node, err = arch.reloadFileIfChanged(node, file)
	if err != nil {
		return err
	}

	chnker := chunker.New(file, arch.repo.Config.ChunkerPolynomial, sha256.New())
	resultChannels := [](<-chan saveResult){}

	for {
		chunk, err := chnker.Next()
		if err == io.EOF {
			break
		}

		if err != nil {
			return errors.Annotate(err, "SaveFile() chunker.Next()")
		}

		resCh := make(chan saveResult, 1)
		go arch.saveChunk(chunk, p, <-arch.blobToken, file, resCh)
		resultChannels = append(resultChannels, resCh)
	}

	results, err := waitForResults(resultChannels)
	if err != nil {
		return err
	}

	err = updateNodeContent(node, results)
	return err
}
Example #7
func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.Key) {
	rd.Seek(0, 0)
	ch := chunker.New(rd, testPol, sha256.New())

	for {
		chunk, err := ch.Next()

		if err == io.EOF {
			break
		}

		OK(b, err)

		// reduce length of buf
		buf = buf[:chunk.Length]
		n, err := io.ReadFull(chunk.Reader(rd), buf)
		OK(b, err)
		Assert(b, uint(n) == chunk.Length, "invalid length: got %d, expected %d", n, chunk.Length)

		_, err = crypto.Encrypt(key, buf2, buf)
		OK(b, err)
	}
}
Example #8
func TestChunkerWithRandomPolynomial(t *testing.T) {
	// setup data source
	buf := getRandom(23, 32*1024*1024)

	// generate a new random polynomial
	start := time.Now()
	p, err := chunker.RandomPolynomial()
	OK(t, err)
	t.Logf("generating random polynomial took %v", time.Since(start))

	start = time.Now()
	ch := chunker.New(bytes.NewReader(buf), p, sha256.New())
	t.Logf("creating chunker took %v", time.Since(start))

	// make sure that first chunk is different
	c, err := ch.Next()
	OK(t, err)

	Assert(t, c.Cut != chunks1[0].CutFP,
		"Cut point is the same")
	Assert(t, c.Length != chunks1[0].Length,
		"Length is the same")
	Assert(t, !bytes.Equal(c.Digest, chunks1[0].Digest),
		"Digest is the same")
}