func TestRecordAllocator_Update(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(6)
	if err := core.FormatDataFileIfNeeded(fakeDataFile); err != nil {
		t.Fatal(err)
	}

	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 4)
	allocator := core.NewRecordAllocator(dataBuffer)

	// Fill a datablock up to its limit
	maxData := dbio.DATABLOCK_SIZE - core.MIN_UTILIZATION - core.RECORD_HEADER_SIZE
	contents := ""
	for i := uint16(0); i < maxData; i++ {
		contents += fmt.Sprintf("%d", i%10)
	}
	allocator.Add(&core.Record{ID: 1, Data: []byte(contents)})

	// Add a new record that will go into the next datablock on the list
	allocator.Add(&core.Record{ID: 2, Data: []byte("Some data")})

	// Update records
	rowID := core.RowID{DataBlockID: 3, LocalID: 0}
	if err := allocator.Update(rowID, &core.Record{ID: 1, Data: []byte("NEW CONTENTS")}); err != nil {
		t.Fatal(err)
	}
	rowID = core.RowID{DataBlockID: 4, LocalID: 0}
	if err := allocator.Update(rowID, &core.Record{ID: 2, Data: []byte("EVEN MORE!")}); err != nil {
		t.Fatal(err)
	}

	// Flush data to data blocks and ensure that things work after a reload
	dataBuffer.Sync()
	dataBuffer = dbio.NewDataBuffer(fakeDataFile, 4)
	repo := core.NewDataBlockRepository(dataBuffer)

	// Ensure blocks have been updated
	recordBlock := repo.RecordBlock(3)
	data, err := recordBlock.ReadRecordData(0)
	if err != nil {
		t.Fatal(err)
	}
	if string(data) != "NEW CONTENTS" {
		t.Errorf("First record did not get updated, read `%s`", data)
	}

	recordBlock = repo.RecordBlock(4)
	data, err = recordBlock.ReadRecordData(0)
	if err != nil {
		t.Fatal(err)
	}
	if string(data) != "EVEN MORE!" {
		t.Errorf("Second record did not get updated, read `%s`", data)
	}
}
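The block-filling loop above reappears in several tests later in this listing; as a hedged sketch, a shared helper along these lines (the name maxBlockContents is hypothetical, and it assumes the same core/dbio imports the tests already use) would remove the duplication:

func maxBlockContents() []byte {
	// Largest payload that fits a single datablock alongside its record
	// header while still respecting the minimum utilization threshold.
	maxData := dbio.DATABLOCK_SIZE - core.MIN_UTILIZATION - core.RECORD_HEADER_SIZE
	contents := make([]byte, 0, maxData)
	for i := uint16(0); i < maxData; i++ {
		contents = append(contents, byte('0'+i%10))
	}
	return contents
}

The first insertion above would then read allocator.Add(&core.Record{ID: 1, Data: maxBlockContents()}).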
func TestEvictsBlocksAfterFillingInAllFrames(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFileWithBlocks([][]byte{
		[]byte{}, []byte{}, []byte{},
	})

	readCount := 0
	original := fakeDataFile.ReadBlockFunc
	fakeDataFile.ReadBlockFunc = func(id uint16, data []byte) error {
		readCount += 1
		return original(id, data)
	}

	buffer := dbio.NewDataBuffer(fakeDataFile, 2)

	// From this point on, we fetch blocks in a way that yields multiple hits
	// on different blocks and also forces a couple of cache misses

	for blockId := 0; blockId < 3; blockId++ {
		for i := 0; i < 10; i++ {
			buffer.FetchBlock(uint16(blockId))
		}
	}
	// Fetch block 1 again to ensure it is still in memory
	buffer.FetchBlock(uint16(1))
	if readCount != 3 {
		t.Errorf("Expected exactly 3 reads from the datafile, got %d", readCount)
	}

	// Force 2 cache misses
	buffer.FetchBlock(0)
	buffer.FetchBlock(1)
	if readCount != 5 {
		t.Errorf("Expected 5 total reads after forcing 2 cache misses, got %d", readCount)
	}
}
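The ReadBlockFunc-wrapping pattern above is repeated in TestFetchBlockCachesData further down; one possible extraction, assuming nothing beyond the func signature already visible in these tests (the helper name is hypothetical):

func countingReadFunc(original func(uint16, []byte) error, readCount *int) func(uint16, []byte) error {
	// Returns a wrapper that bumps the counter before delegating each
	// block read to the original function.
	return func(id uint16, data []byte) error {
		*readCount++
		return original(id, data)
	}
}

With it, the setup shrinks to fakeDataFile.ReadBlockFunc = countingReadFunc(fakeDataFile.ReadBlockFunc, &readCount).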
func TestReturnsErrorsWhenSyncing(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFileWithBlocks([][]byte{
		[]byte{}, []byte{},
	})
	expectedError := errors.New("BOOM")
	fakeDataFile.WriteBlockFunc = func(id uint16, data []byte) error {
		return expectedError
	}

	buffer := dbio.NewDataBuffer(fakeDataFile, 2)

	buffer.FetchBlock(0)
	buffer.FetchBlock(1)
	if err := buffer.Sync(); err != nil {
		t.Fatal("Unexpected error", err)
	}

	buffer.MarkAsDirty(1)
	err := buffer.Sync()
	if err == nil {
		t.Fatal("Error not raised")
	} else if err != expectedError {
		t.Fatal("Unknown error raised")
	}
}
func TestRecordAllocator_Add(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(7)
	if err := core.FormatDataFileIfNeeded(fakeDataFile); err != nil {
		t.Fatal(err)
	}
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 4)
	allocator := core.NewRecordAllocator(dataBuffer)

	// Fill a datablock up to its limit
	maxData := dbio.DATABLOCK_SIZE - core.MIN_UTILIZATION - core.RECORD_HEADER_SIZE
	contents := ""
	for i := uint16(0); i < maxData; i++ {
		contents += fmt.Sprintf("%d", i%10)
	}
	allocator.Add(&core.Record{ID: uint32(1), Data: []byte(contents)})

	// Add a new record that will go into the next datablock on the list
	allocator.Add(&core.Record{ID: uint32(2), Data: []byte("Some data")})

	// Flush data to data blocks and ensure that things work after a reload
	dataBuffer.Sync()
	dataBuffer = dbio.NewDataBuffer(fakeDataFile, 4)
	repo := core.NewDataBlockRepository(dataBuffer)
	blockMap := repo.DataBlocksMap()

	// Ensure the new blocks have been marked as in use
	if !blockMap.IsInUse(3) || !blockMap.IsInUse(4) {
		t.Errorf("Blocks 3 and 4 should have been marked as in use")
	}

	// Ensure the blocks point to each other
	firstRecordBlock := repo.RecordBlock(3)
	if firstRecordBlock.NextBlockID() != 4 {
		t.Errorf("First allocated block does not point to the next one")
	}
	secondRecordBlock := repo.RecordBlock(4)
	if secondRecordBlock.PrevBlockID() != 3 {
		t.Errorf("Second allocated block does not point to the previous one")
	}

	// Ensure the pointer for the next datablock that has free space has been updated
	controlBlock := repo.ControlBlock()
	if controlBlock.NextAvailableRecordsDataBlockID() != 4 {
		t.Errorf("Did not update the pointer to the next datablock that allows insertion, got %d", controlBlock.NextAvailableRecordsDataBlockID())
	}
}
func NewWithDataFile(dataFile dbio.DataFile) (SimpleJSONDB, error) {
	if err := core.FormatDataFileIfNeeded(dataFile); err != nil {
		return nil, err
	}

	dataBuffer := dbio.NewDataBuffer(dataFile, BUFFER_SIZE)
	repo := core.NewDataBlockRepository(dataBuffer)
	index := core.NewUint32Index(dataBuffer, BTREE_IDX_BRANCH_MAX_ENTRIES, BTREE_IDX_LEAF_MAX_ENTRIES)
	return &simpleJSONDB{dataFile, dataBuffer, repo, index}, nil
}
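For context, a caller might use this constructor roughly as follows. This is a minimal sketch: openDataFile stands in for whatever dbio.DataFile constructor the dbio package actually provides, and its name and signature are assumptions here.

func exampleOpen() (SimpleJSONDB, error) {
	// openDataFile is hypothetical; any dbio.DataFile implementation works.
	dataFile, err := openDataFile("/tmp/example.jsondb")
	if err != nil {
		return nil, err
	}
	// NewWithDataFile formats the file on first use and wires up the
	// buffer, repository and index.
	return NewWithDataFile(dataFile)
}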
func createIndex(t *testing.T, totalUsableBlocks, bufferFrames, branchCapacity int, leafCapacity int) core.Uint32Index {
	fakeDataFile := utils.NewFakeDataFile(totalUsableBlocks + 4)
	if err := core.FormatDataFileIfNeeded(fakeDataFile); err != nil {
		t.Fatal(err)
	}

	dataBuffer := dbio.NewDataBuffer(fakeDataFile, bufferFrames)
	index := core.NewUint32Index(dataBuffer, branchCapacity, leafCapacity)
	index.Init()
	return index
}
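A usage sketch for this helper, mainly to document its parameters; the Uint32Index API itself is not shown in this listing, so no index operations are called here:

func TestUint32Index_UsageSketch(t *testing.T) {
	// 16 usable blocks, 8 buffer frames, branch and leaf capacity of 4
	// entries each. createIndex fails the test itself if formatting or
	// initialization goes wrong.
	index := createIndex(t, 16, 8, 4, 4)
	_ = index
}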
func TestFetchesBlockFromDataFile(t *testing.T) {
	fakeDataBlock := []byte{0x10, 0xF0}
	fakeDataFile := utils.NewFakeDataFileWithBlocks([][]byte{nil, fakeDataBlock})

	dataBlock, err := dbio.NewDataBuffer(fakeDataFile, 1).FetchBlock(1)
	if err != nil {
		t.Fatal("Unexpected error", err)
	}
	if dataBlock.ID != 1 {
		t.Errorf("ID doesn't match (expected %d got %d)", 1, dataBlock.ID)
	}
	if !utils.SlicesEqual(dataBlock.Data[0:2], fakeDataBlock) {
		t.Errorf("Data blocks do not match (expected %x got %x)", fakeDataBlock, dataBlock.Data[0:2])
	}
}
func TestSavesDirtyFramesOnSync(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFileWithBlocks([][]byte{
		[]byte{}, []byte{}, []byte{},
	})

	blocksThatWereWritten := []uint16{}
	fakeDataFile.WriteBlockFunc = func(id uint16, data []byte) error {
		blocksThatWereWritten = append(blocksThatWereWritten, id)
		return nil
	}

	buffer := dbio.NewDataBuffer(fakeDataFile, 3)

	// Read the 3 blocks and flag two as dirty
	buffer.FetchBlock(0)
	buffer.MarkAsDirty(0)

	buffer.FetchBlock(1)
	// Not dirty

	buffer.FetchBlock(2)
	buffer.MarkAsDirty(2)

	if err := buffer.Sync(); err != nil {
		t.Fatal(err)
	}

	if len(blocksThatWereWritten) != 2 {
		t.Fatalf("Should have written 2 blocks, wrote %v", blocksThatWereWritten)
	}

	if blocksThatWereWritten[0] != 0 && blocksThatWereWritten[1] != 0 {
		t.Errorf("Should have written block 0, wrote %v", blocksThatWereWritten)
	}

	if blocksThatWereWritten[0] != 2 && blocksThatWereWritten[1] != 2 {
		t.Errorf("Should have written block 2, wrote %v", blocksThatWereWritten)
	}

	blocksThatWereWritten = []uint16{}
	if err := buffer.Sync(); err != nil {
		t.Fatal(err)
	}

	if len(blocksThatWereWritten) != 0 {
		t.Fatalf("Blocks have already been writen, wrote %v again", blocksThatWereWritten)
	}
}
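The two order-independent membership checks above could be captured by a tiny helper; a sketch (wroteBlock is a hypothetical name):

func wroteBlock(written []uint16, id uint16) bool {
	// Reports whether the block ID appears anywhere in the write log,
	// since Sync makes no ordering guarantee in this test.
	for _, w := range written {
		if w == id {
			return true
		}
	}
	return false
}

Each check then becomes, for example, if !wroteBlock(blocksThatWereWritten, 0) { ... }.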
func TestFetchBlockCachesData(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFileWithBlocks([][]byte{nil, []byte{}})

	readCount := 0
	original := fakeDataFile.ReadBlockFunc
	fakeDataFile.ReadBlockFunc = func(id uint16, data []byte) error {
		readCount += 1
		return original(id, data)
	}

	buffer := dbio.NewDataBuffer(fakeDataFile, 1)
	for i := 0; i < 10; i++ {
		buffer.FetchBlock(1)
	}

	if readCount > 1 {
		t.Errorf("Read from datafile more than once (total: %d times)", readCount)
	}
}
func TestSavesDirtyFramesWhenEvicting(t *testing.T) {
	fakeDataBlock := []byte{0x00, 0x01, 0x02}
	fakeDataFile := utils.NewFakeDataFileWithBlocks([][]byte{
		fakeDataBlock, []byte{}, []byte{}, []byte{},
	})

	blockThatWasWritten := uint16(999)
	bytesWritten := []byte{}
	fakeDataFile.WriteBlockFunc = func(id uint16, data []byte) error {
		blockThatWasWritten = id
		bytesWritten = data
		return nil
	}

	buffer := dbio.NewDataBuffer(fakeDataFile, 2)

	// Read the first 2 blocks and flag the first one as dirty
	buffer.FetchBlock(0)
	buffer.FetchBlock(1)
	buffer.MarkAsDirty(0)

	// Evict the frame 1 by loading a third frame
	buffer.FetchBlock(2)

	// Evict the frame 0 by loading a fourth frame
	buffer.FetchBlock(3)

	if blockThatWasWritten == 999 {
		t.Fatal("No block was saved to disk")
	}
	if blockThatWasWritten != 0 {
		t.Errorf("Unknown block saved to disk (%d)", blockThatWasWritten)
	}
	if !utils.SlicesEqual(bytesWritten[0:3], fakeDataBlock) {
		t.Errorf("Invalid data saved to disk %x", bytesWritten[0:3])
	}
}
func TestDiscardsUnmodifiedFrames(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFileWithBlocks([][]byte{
		[]byte{}, []byte{}, []byte{},
	})

	wroteToDisk := false
	fakeDataFile.WriteBlockFunc = func(id uint16, data []byte) error {
		wroteToDisk = true
		return nil
	}

	buffer := dbio.NewDataBuffer(fakeDataFile, 2)

	// Read the first 2 blocks
	buffer.FetchBlock(0)
	buffer.FetchBlock(1)

	// Evict the first frame (by loading a third frame)
	buffer.FetchBlock(2)

	if wroteToDisk {
		t.Fatal("No blocks should have been saved to disk")
	}
}
func FormatDataFileIfNeeded(dataFile dbio.DataFile) error {
	dataBuffer := dbio.NewDataBuffer(dataFile, 5)
	repo := NewDataBlockRepository(dataBuffer)

	controlBlock := repo.ControlBlock()
	if controlBlock.NextAvailableRecordsDataBlockID() != 0 {
		log.Println("DB_FORMAT_SKIPPED")
		return nil
	}

	log.Println("DB_FORMAT_DATA_FILE")
	controlBlock.Format()
	dataBuffer.MarkAsDirty(controlBlock.DataBlockID())

	blockMap := repo.DataBlocksMap()
	// 4 -> 1 for the control block
	//      + 2 for the datablocks bitmap
	//      + 1 for the first datablock used by records
	for i := uint16(0); i < 4; i++ {
		blockMap.MarkAsUsed(i)
	}

	return dataBuffer.Sync()
}
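Pieced together from the comment and loop above, the reserved layout appears to be the following. The constant names are hypothetical, introduced only to spell out the reading; they are not part of the core package:

const (
	controlBlockID      uint16 = 0 // control block
	bitmapFirstBlockID  uint16 = 1 // datablocks bitmap, first half
	bitmapSecondBlockID uint16 = 2 // datablocks bitmap, second half
	firstRecordsBlockID uint16 = 3 // first datablock used by records
)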
func TestDataBlocksMap(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(3)
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 2)
	dbm := &dataBlocksMap{dataBuffer}

	// First position is free by default
	if free := dbm.FirstFree(); free != 0 {
		t.Errorf("Unexpected first free block, got %d", free)
	}

	// Mark some blocks as being in use
	dbm.MarkAsUsed(0)
	dbm.MarkAsUsed(1)
	dbm.MarkAsUsed(2)
	dbm.MarkAsUsed(5)
	dbm.MarkAsUsed(dbio.DATABLOCK_SIZE + 100)

	// Ensure it spots the gap
	if free := dbm.FirstFree(); free != 3 {
		t.Errorf("Unexpected first free block after marking blocks as used, got %d", free)
	}

	// Ensure it reclaims the freed block
	dbm.MarkAsFree(2)
	if free := dbm.FirstFree(); free != 2 {
		t.Errorf("Did not reclaim the newly freed block, got %d", free)
	}

	// Ensure it works across data blocks
	if !dbm.IsInUse(1) {
		t.Errorf("Expected datablock 1 to be in use")
	}
	if !dbm.IsInUse(dbio.DATABLOCK_SIZE + 100) {
		t.Errorf("Expected datablock %d to be in use", dbio.DATABLOCK_SIZE+100)
	}

	// Clear all positions first
	max := dbio.DATABLOCK_SIZE * 2
	for i := 0; i < max; i++ {
		dbm.MarkAsFree(uint16(i))
	}

	// Fill in the whole map
	for i := 0; i < max; i++ {
		dbm.MarkAsUsed(uint16(i))
		if free := dbm.FirstFree(); free != uint16(i+1) {
			t.Fatalf("Something is wrong with detecting the first free block after %d was marked as being in use, got %d", i, free)
		}
	}
	// Ensure it detects that there are no more available blocks
	if !dbm.AllInUse() {
		t.Error("Expected all positions to be in use")
	}
	dbm.MarkAsFree(2)
	if dbm.AllInUse() {
		t.Error("Expected AllInUse to be false after freeing a block")
	}

	// Ensure that the blocks / frames were flagged as dirty
	blocksThatWereWritten := []uint16{}
	fakeDataFile.WriteBlockFunc = func(id uint16, data []byte) error {
		blocksThatWereWritten = append(blocksThatWereWritten, id)
		return nil
	}
	dataBuffer.Sync()
	if len(blocksThatWereWritten) != 2 {
		t.Fatalf("Should have written 2 blocks, wrote %v", blocksThatWereWritten)
	}
}
func TestRecordAllocator_Remove(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(8)
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 5)
	if err := core.FormatDataFileIfNeeded(fakeDataFile); err != nil {
		t.Fatal(err)
	}

	allocator := core.NewRecordAllocator(dataBuffer)

	// Prepare data to fill a datablock up to its limit
	maxData := dbio.DATABLOCK_SIZE - core.MIN_UTILIZATION - core.RECORD_HEADER_SIZE
	contents := ""
	for i := uint16(0); i < maxData; i++ {
		contents += fmt.Sprintf("%d", i%10)
	}

	// Insert data into 3 different blocks
	allocator.Add(&core.Record{ID: uint32(3), Data: []byte(contents)})
	allocator.Add(&core.Record{ID: uint32(4), Data: []byte(contents)})
	allocator.Add(&core.Record{ID: uint32(5), Data: []byte(contents)})
	allocator.Add(&core.Record{ID: uint32(6), Data: []byte("Some data")})
	allocator.Add(&core.Record{ID: uint32(7), Data: []byte("More data")})

	// Free up some datablocks
	allocator.Remove(core.RowID{DataBlockID: 3, LocalID: 0})
	allocator.Remove(core.RowID{DataBlockID: 5, LocalID: 0})

	// Free part of another datablock
	allocator.Remove(core.RowID{DataBlockID: 6, LocalID: 0})

	// Flush data to data blocks and ensure that things work after a reload
	dataBuffer.Sync()

	dataBuffer = dbio.NewDataBuffer(fakeDataFile, 4)
	repo := core.NewDataBlockRepository(dataBuffer)
	blockMap := repo.DataBlocksMap()

	// Ensure blocks have been marked as free again
	if blockMap.IsInUse(3) {
		t.Errorf("Block 3 should have been marked as free")
	}
	if blockMap.IsInUse(5) {
		t.Errorf("Block 5 should have been marked as free")
	}

	// Ensure the linked list is set up properly
	// First records datablock is now at block 4
	controlBlock := repo.ControlBlock()
	if controlBlock.FirstRecordDataBlock() != 4 {
		t.Fatalf("First record datablock is set to the wrong block, found %d", controlBlock.FirstRecordDataBlock())
	}

	// Then the next block on the chain is at block 6
	recordBlock := repo.RecordBlock(4)
	if recordBlock.NextBlockID() != 6 {
		t.Fatalf("First record datablock next block pointer is set to the wrong block (%d)", recordBlock.NextBlockID())
	}

	// And the block 6 points back to the block 4
	recordBlock = repo.RecordBlock(6)
	if recordBlock.PrevBlockID() != 4 {
		t.Fatalf("Second record datablock previous block pointer is incorrect (%d)", recordBlock.PrevBlockID())
	}
}
func TestRecordAllocator_ChainedRows(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(11)
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 5)
	if err := core.FormatDataFileIfNeeded(fakeDataFile); err != nil {
		t.Fatal(err)
	}

	allocator := core.NewRecordAllocator(dataBuffer)

	// Prepare data to fill a datablock close to its limit
	maxData := dbio.DATABLOCK_SIZE - core.MIN_UTILIZATION - core.RECORD_HEADER_SIZE
	contents := ""
	for i := uint16(0); i < maxData; i++ {
		contents += fmt.Sprintf("%d", i%10)
	}

	// Insert data into 3 different blocks
	dummy, _ := allocator.Add(&core.Record{ID: uint32(3), Data: []byte(contents[0 : maxData-100])})
	chainedRowRowID, _ := allocator.Add(&core.Record{ID: uint32(4), Data: []byte(contents)})
	removedChainedRowID, _ := allocator.Add(&core.Record{ID: uint32(5), Data: []byte(contents)})
	allocator.Add(&core.Record{ID: uint32(6), Data: []byte("Some data")})
	allocator.Add(&core.Record{ID: uint32(7), Data: []byte("More data")})

	// Ensure that the blocks are chained
	if dummy.DataBlockID != chainedRowRowID.DataBlockID {
		t.Fatalf("Did not create a chained row, expected record to be written on block %d but was written on block %d", dummy.DataBlockID, chainedRowRowID.DataBlockID)
	}

	// Ensure we exercise the code path that deletes chained rows
	allocator.Remove(dummy)
	allocator.Remove(removedChainedRowID)

	// Flush data to data blocks and ensure that things work after a reload
	dataBuffer.Sync()
	dataBuffer = dbio.NewDataBuffer(fakeDataFile, 10)
	repo := core.NewDataBlockRepository(dataBuffer)
	allocator = core.NewRecordAllocator(dataBuffer)

	// Ensure the records can be read after a reload
	recordBlock := repo.RecordBlock(chainedRowRowID.DataBlockID)
	first, err := recordBlock.ReadRecordData(chainedRowRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	chainedRowID, err := recordBlock.ChainedRowID(chainedRowRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	recordBlock = repo.RecordBlock(chainedRowID.DataBlockID)
	second, err := recordBlock.ReadRecordData(chainedRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	if string(first)+string(second) != contents {
		t.Errorf("Invalid contents found for record, found `%s` and `%s`, expected `%s`", first, second, contents)
	}

	// Ensure deletes clear out headers properly
	recordBlock = repo.RecordBlock(removedChainedRowID.DataBlockID)
	if _, err = recordBlock.ReadRecordData(removedChainedRowID.LocalID); err == nil {
		t.Fatal("Did not clear out the record header of one of the deleted chained rows")
	}
	recordBlock = repo.RecordBlock(removedChainedRowID.DataBlockID + 1)
	if _, err = recordBlock.ReadRecordData(0); err == nil {
		t.Fatal("Did not clear out the record header of the chained row's continuation record")
	}

	dataBuffer.Sync()
	dataBuffer = dbio.NewDataBuffer(fakeDataFile, 10)
	repo = core.NewDataBlockRepository(dataBuffer)
	allocator = core.NewRecordAllocator(dataBuffer)

	// Add and update a chained row that spans 3 blocks
	bigContents := contents + contents + contents
	chainedUpdateRowID, _ := allocator.Add(&core.Record{ID: uint32(9), Data: []byte(bigContents)})

	// Collect the row IDs of the continuation records that follow the chained row
	rowIDs := []core.RowID{}
	recordBlock = repo.RecordBlock(chainedUpdateRowID.DataBlockID)
	nextRowID, err := recordBlock.ChainedRowID(chainedUpdateRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	rowIDs = append(rowIDs, nextRowID)

	recordBlock = repo.RecordBlock(nextRowID.DataBlockID)
	nextRowID, err = recordBlock.ChainedRowID(nextRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	rowIDs = append(rowIDs, nextRowID)
	if len(rowIDs) != 2 {
		t.Errorf("Record was spread across more blocks than expected: %+v", rowIDs)
	}

	// Change record to be really small
	allocator.Update(chainedUpdateRowID, &core.Record{ID: uint32(9), Data: []byte("a string")})

	// Ensure the continuation records of the chained row were cleared
	for _, rowID := range rowIDs {
		_, err := repo.RecordBlock(rowID.DataBlockID).ReadRecordData(rowID.LocalID)
		if err == nil {
			t.Errorf("Did not clear chained row %+v", rowID)
		}
	}

	// Ensure the updated record can be read back
	recordBlock = repo.RecordBlock(chainedUpdateRowID.DataBlockID)
	data, err := recordBlock.ReadRecordData(chainedUpdateRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	if string(data) != "a string" {
		t.Errorf("Invalid contents found for record, read `%s`", data)
	}
}
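From the behavior this test exercises, reading a full chained record amounts to following ChainedRowID links and concatenating fragments. A hedged sketch, using only methods seen above and assuming NewDataBlockRepository's return type is named core.DataBlockRepository; the terminal condition (a zero DataBlockID) is an assumption, and the real implementation may signal the end of the chain differently:

func readChainedRecord(repo core.DataBlockRepository, rowID core.RowID) ([]byte, error) {
	data := []byte{}
	for {
		recordBlock := repo.RecordBlock(rowID.DataBlockID)
		fragment, err := recordBlock.ReadRecordData(rowID.LocalID)
		if err != nil {
			return nil, err
		}
		data = append(data, fragment...)
		next, err := recordBlock.ChainedRowID(rowID.LocalID)
		if err != nil {
			return nil, err
		}
		// Assumption: a zero DataBlockID means there is no continuation.
		if next.DataBlockID == 0 {
			return data, nil
		}
		rowID = next
	}
}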