func TestChunks(t *testing.T) {
	expectedChunkSizes := []int{7141, 6482, 7778, 8192, 7500, 8192, 2101, 7995, 4888, 8192, 8192, 8111}
	if buffer, err := ioutil.ReadFile("input/testfile"); err == nil {
		chunker := util.DefaultChunker()
		chunks := chunker.Chunks(buffer)
		assertWithMsg(t, len(chunks) == 12, "Expected 12 chunks.")
		for i, expected := range expectedChunkSizes {
			assertWithMsg(t, len(chunks[i]) == expected,
				"Expected chunk "+strconv.Itoa(i)+" to be "+strconv.Itoa(expected)+" bytes long.")
		}
	} else {
		t.Logf("Couldn't read input file.")
		t.Fail()
	}
}
Example #2
func (file *File) commitChunks() {
	chunker := util.DefaultChunker()
	chunks := chunker.Chunks(file.data)
	file.DataBlocks = make([][]byte, len(chunks))
	for i, chunk := range chunks {
		hasher := sha1.New()
		hasher.Write(chunk)
		dataHash := hasher.Sum(nil)
		// Log the chunk hash in readable hex form rather than as raw bytes.
		util.P_out(hex.EncodeToString(dataHash))
		file.DataBlocks[i] = dataHash
		// Academic assumption: SHA-1 collisions never happen, so a chunk whose hash is
		// already in the DB is byte-identical and the write can be skipped.
		if !filesystem.DbContains(dataHash) {
			if dbErr := filesystem.PutChunk(dataHash, chunk); dbErr != nil {
				util.P_err("Failed to write chunk to db: ", dbErr)
			}
		}
	}
}
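For context, the read path would be roughly the inverse of commitChunks: walk file.DataBlocks and look each hash up in the chunk store. This is only a sketch; filesystem.GetChunk is a hypothetical lookup helper (only DbContains and PutChunk appear above), and the real project may reassemble file data differently.
// Hypothetical sketch of the inverse of commitChunks. filesystem.GetChunk is
// assumed for illustration and does not appear in the example above.
func (file *File) restoreChunks() error {
	var data []byte
	for _, dataHash := range file.DataBlocks {
		// Look up each content-addressed block by its SHA-1 hash (assumed API).
		chunk, err := filesystem.GetChunk(dataHash)
		if err != nil {
			return err
		}
		data = append(data, chunk...)
	}
	file.data = data
	return nil
}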
func TestSingleChunk(t *testing.T) {
	chunker := util.DefaultChunker()
	chunks := chunker.Chunks(make([]byte, 10))
	assert(t, len(chunks) == 1)
}
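The assert and assertWithMsg helpers used by these tests are defined elsewhere in the project; minimal stand-ins consistent with the calls above might look like this (the bodies are assumptions, not the project's actual helpers).
// Minimal stand-ins for the test helpers used above; assumed, not the
// project's actual implementations.
func assert(t *testing.T, condition bool) {
	if !condition {
		t.Fail()
	}
}

func assertWithMsg(t *testing.T, condition bool, msg string) {
	if !condition {
		t.Log(msg)
		t.Fail()
	}
}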