func TestCreateAndRemoveRecords(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(20)
	db, err := jsondb.NewWithDataFile(fakeDataFile)
	if err != nil {
		t.Fatalf("Unexpected error returned '%s'", err)
	}

	for i := 0; i < 10; i++ {
		id := uint32(i + 1)
		data := fmt.Sprintf(`{"a":%d}`, i)

		err := db.InsertRecord(id, data)
		if err != nil {
			t.Fatalf("Unexpected error returned when inserting '%s'", err)
		}

		err = db.DeleteRecord(id)
		if err != nil {
			t.Fatalf("Unexpected error returned when removing '%s'", err)
		}

		if record, err := db.FindRecord(id); err == nil {
			t.Errorf("Expected error to be returned when finding %d, got nil and data '%s'", id, string(record.Data))
		}
	}
}
func TestRecordAllocator_Update(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(6)
	if err := core.FormatDataFileIfNeeded(fakeDataFile); err != nil {
		t.Fatal(err)
	}
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 4)
	allocator := core.NewRecordAllocator(dataBuffer)

	// Fill up a datablock up to its limit
	maxData := dbio.DATABLOCK_SIZE - core.MIN_UTILIZATION - core.RECORD_HEADER_SIZE
	contents := ""
	for i := uint16(0); i < maxData; i++ {
		contents += fmt.Sprintf("%d", i%10)
	}
	allocator.Add(&core.Record{ID: 1, Data: []byte(contents)})

	// Add a new record that will go into the next datablock on the list
	allocator.Add(&core.Record{ID: 2, Data: []byte("Some data")})

	// Update records
	rowID := core.RowID{DataBlockID: 3, LocalID: 0}
	if err := allocator.Update(rowID, &core.Record{ID: 1, Data: []byte("NEW CONTENTS")}); err != nil {
		t.Fatal(err)
	}
	rowID = core.RowID{DataBlockID: 4, LocalID: 0}
	if err := allocator.Update(rowID, &core.Record{ID: 2, Data: []byte("EVEN MORE!")}); err != nil {
		t.Fatal(err)
	}

	// Flush data to data blocks and ensure that things work after a reload
	dataBuffer.Sync()
	dataBuffer = dbio.NewDataBuffer(fakeDataFile, 4)
	repo := core.NewDataBlockRepository(dataBuffer)

	// Ensure blocks have been updated
	recordBlock := repo.RecordBlock(3)
	data, err := recordBlock.ReadRecordData(0)
	if err != nil {
		t.Fatal(err)
	}
	if string(data) != "NEW CONTENTS" {
		t.Errorf("First record did not get updated, read `%s`", data)
	}

	recordBlock = repo.RecordBlock(4)
	data, err = recordBlock.ReadRecordData(0)
	if err != nil {
		t.Fatal(err)
	}
	if string(data) != "EVEN MORE!" {
		t.Errorf("Second record did not get updated, read `%s`", data)
	}
}
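// The block-filling payload above is rebuilt with the same string-concatenation
// loop in several allocator tests below. A minimal sketch of a shared helper
// that produces the identical digit pattern (hypothetical; not part of the
// package, and it assumes the "strings" package is imported):
func maxBlockContents() string {
	maxData := dbio.DATABLOCK_SIZE - core.MIN_UTILIZATION - core.RECORD_HEADER_SIZE
	var b strings.Builder
	b.Grow(int(maxData))
	for i := uint16(0); i < maxData; i++ {
		b.WriteByte(byte('0' + i%10)) // same repeating 0123456789 pattern as the tests
	}
	return b.String()
}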
func createIndex(t *testing.T, totalUsableBlocks, bufferFrames, branchCapacity, leafCapacity int) core.Uint32Index {
	fakeDataFile := utils.NewFakeDataFile(totalUsableBlocks + 4)
	if err := core.FormatDataFileIfNeeded(fakeDataFile); err != nil {
		t.Fatal(err)
	}
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, bufferFrames)
	index := core.NewUint32Index(dataBuffer, branchCapacity, leafCapacity)
	index.Init()
	return index
}
func TestRecordAllocator_Add(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(7)
	if err := core.FormatDataFileIfNeeded(fakeDataFile); err != nil {
		t.Fatal(err)
	}
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 4)
	allocator := core.NewRecordAllocator(dataBuffer)

	// Fill up a datablock up to its limit
	maxData := dbio.DATABLOCK_SIZE - core.MIN_UTILIZATION - core.RECORD_HEADER_SIZE
	contents := ""
	for i := uint16(0); i < maxData; i++ {
		contents += fmt.Sprintf("%d", i%10)
	}
	allocator.Add(&core.Record{ID: uint32(1), Data: []byte(contents)})

	// Add a new record that will go into the next datablock on the list
	allocator.Add(&core.Record{ID: uint32(2), Data: []byte("Some data")})

	// Flush data to data blocks and ensure that things work after a reload
	dataBuffer.Sync()
	dataBuffer = dbio.NewDataBuffer(fakeDataFile, 4)
	repo := core.NewDataBlockRepository(dataBuffer)
	blockMap := repo.DataBlocksMap()

	// Ensure the new blocks have been marked as used
	if !blockMap.IsInUse(3) || !blockMap.IsInUse(4) {
		t.Errorf("Blocks 3 and 4 should have been marked as in use")
	}

	// Ensure the blocks point to each other
	firstRecordBlock := repo.RecordBlock(3)
	if firstRecordBlock.NextBlockID() != 4 {
		t.Errorf("First allocated block does not point to the next one")
	}
	secondRecordBlock := repo.RecordBlock(4)
	if secondRecordBlock.PrevBlockID() != 3 {
		t.Errorf("Second allocated block does not point to the previous one")
	}

	// Ensure the pointer to the next datablock that has free space has been updated
	controlBlock := repo.ControlBlock()
	if controlBlock.NextAvailableRecordsDataBlockID() != 4 {
		t.Errorf("Did not update the pointer to the next datablock that allows insertion, got %d", controlBlock.NextAvailableRecordsDataBlockID())
	}
}
func TestSearchByTag(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(20)
	db, err := jsondb.NewWithDataFile(fakeDataFile)
	if err != nil {
		t.Fatalf("Unexpected error returned '%s'", err)
	}

	// Insert some data
	states := []string{"RS", "BA", "SC"}
	for i := 0; i < 10; i++ {
		id := uint32(i + 1)
		data := fmt.Sprintf(`{"id":%d,"state":"%s"}`, id, states[i%len(states)])
		err := db.InsertRecord(id, data)
		if err != nil {
			t.Fatalf("Unexpected error returned when inserting '%s'", err)
		}
	}

	// Search by state; the states cycle over RS, BA, SC, so ids 1, 4, 7 and 10
	// end up with state "RS"
	result, err := db.SearchRecords("state", "RS")
	if err != nil {
		t.Fatal(err)
	}
	if len(result) != 4 {
		t.Errorf("Unexpected results found, expected 4 items, got %d", len(result))
	}
	for i, record := range result {
		document, err := record.ParseJSON()
		if err != nil {
			t.Fatal(err)
		}
		if document["state"].(string) != "RS" {
			t.Errorf("Invalid document returned with state != `RS`: %v", document)
		} else if document["id"].(float64) != float64(i*3)+1 {
			t.Errorf("Invalid document returned: %v", document)
		}
	}
}
func TestCreateAndRetrieveLotsOfRecords(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(80)
	db, err := jsondb.NewWithDataFile(fakeDataFile)
	if err != nil {
		t.Fatalf("Unexpected error returned '%s'", err)
	}

	for i := 0; i < 4500; i++ {
		id := uint32(i + 1)
		data := fmt.Sprintf(`{"a":%d}`, i)

		err := db.InsertRecord(id, data)
		if err != nil {
			t.Fatalf("Unexpected error returned for the %d-th record: '%s'", id, err)
		}

		record, err := db.FindRecord(id)
		if err != nil {
			t.Fatalf("Unexpected error returned while reading %d (%s)", id, err)
		}
		if string(record.Data) != data {
			t.Errorf("Unexpected data returned, got %s, expected %s", string(record.Data), data)
		}
	}
}
func TestDataBlocksMap(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(3)
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 2)
	dbm := &dataBlocksMap{dataBuffer}

	// First position is free by default
	if free := dbm.FirstFree(); free != 0 {
		t.Errorf("Unexpected result for fetching the first free block")
	}

	// Mark some blocks as being in use
	dbm.MarkAsUsed(0)
	dbm.MarkAsUsed(1)
	dbm.MarkAsUsed(2)
	dbm.MarkAsUsed(5)
	dbm.MarkAsUsed(dbio.DATABLOCK_SIZE + 100)

	// Ensure it spots the gap
	if free := dbm.FirstFree(); free != 3 {
		t.Errorf("Unexpected result for fetching the first free block after some interaction with the map")
	}

	// Ensure it reclaims the free block
	dbm.MarkAsFree(2)
	if free := dbm.FirstFree(); free != 2 {
		t.Errorf("Did not reclaim the new free block")
	}

	// Ensure it works across data blocks
	if !dbm.IsInUse(1) {
		t.Errorf("Expected datablock 1 to be in use")
	}
	if !dbm.IsInUse(dbio.DATABLOCK_SIZE + 100) {
		t.Errorf("Expected datablock %d to be in use", dbio.DATABLOCK_SIZE+100)
	}

	// Clear all positions first
	max := dbio.DATABLOCK_SIZE * 2
	for i := 0; i < max; i++ {
		dbm.MarkAsFree(uint16(i))
	}

	// Fill in the whole map
	for i := 0; i < max; i++ {
		dbm.MarkAsUsed(uint16(i))
		if free := dbm.FirstFree(); free != uint16(i+1) {
			t.Fatalf("Something is wrong with detecting the first free block after %d was marked as being in use, got %d", i, free)
		}
	}

	// Ensure it detects that there are no more available blocks
	if !dbm.AllInUse() {
		t.Error("Expected all positions to be in use")
	}
	dbm.MarkAsFree(2)
	if dbm.AllInUse() {
		t.Error("Expected AllInUse to be false after freeing a block")
	}

	// Ensure that the blocks / frames were flagged as dirty
	blocksThatWereWritten := []uint16{}
	fakeDataFile.WriteBlockFunc = func(id uint16, data []byte) error {
		blocksThatWereWritten = append(blocksThatWereWritten, id)
		return nil
	}
	dataBuffer.Sync()
	if len(blocksThatWereWritten) != 2 {
		t.Fatalf("Should have written 2 blocks, wrote %v", blocksThatWereWritten)
	}
}
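// A hedged variation of the dirty-frame check in TestDataBlocksMap above: the
// same WriteBlockFunc hook can capture the written bytes as well as the block
// IDs, which helps when inspecting on-disk layout. This is a sketch, not part
// of the original suite; it assumes only the hook signature shown above.
func TestDataBlocksMapWriteContents(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(3)
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 2)
	dbm := &dataBlocksMap{dataBuffer}

	// Dirty the first frame of the map
	dbm.MarkAsUsed(0)

	written := map[uint16][]byte{}
	fakeDataFile.WriteBlockFunc = func(id uint16, data []byte) error {
		buf := make([]byte, len(data))
		copy(buf, data) // defensive copy in case the caller reuses the buffer
		written[id] = buf
		return nil
	}
	dataBuffer.Sync()

	if len(written) == 0 {
		t.Fatal("Expected at least one dirty block to be flushed")
	}
}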
func TestCreateAndUpdateRecords(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(20)
	db, err := jsondb.NewWithDataFile(fakeDataFile)
	if err != nil {
		t.Fatalf("Unexpected error returned '%s'", err)
	}

	// Insert some data
	for i := 0; i < 1000; i++ {
		data := fmt.Sprintf(`{"longest":%d}`, i)
		id := uint32(i + 1)
		err := db.InsertRecord(id, data)
		if err != nil {
			t.Fatalf("Unexpected error returned when inserting '%s'", err)
		}
	}

	// Shrink records
	for i := uint32(0); i < 1000; i++ {
		data := fmt.Sprintf(`{"a":%d}`, -int(i))
		id := i + 1
		err := db.UpdateRecord(id, data)
		if err != nil {
			t.Errorf("Unexpected error returned when updating record `%d` '%s'", id, err)
			continue
		}
		record, err := db.FindRecord(id)
		if err != nil {
			t.Errorf("Unexpected error returned while reading %d (%s)", id, err)
			continue
		}
		if string(record.Data) != data {
			t.Errorf("Unexpected data returned, got `%s`, expected `%s`", string(record.Data), data)
			continue
		}
	}

	// Grow records again
	for i := uint32(0); i < 1000; i++ {
		data := fmt.Sprintf(`{"waaaaat":%d}`, int(i))
		id := i + 1
		err := db.UpdateRecord(id, data)
		if err != nil {
			t.Errorf("Unexpected error returned when updating record `%d` '%s'", id, err)
			continue
		}
		record, err := db.FindRecord(id)
		if err != nil {
			t.Errorf("Unexpected error returned while reading %d (%s)", id, err)
			continue
		}
		if string(record.Data) != data {
			t.Errorf("Unexpected data returned, got `%s`, expected `%s`", string(record.Data), data)
			continue
		}
	}
}
func TestRecordAllocator_Remove(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(8)
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 5)
	if err := core.FormatDataFileIfNeeded(fakeDataFile); err != nil {
		t.Fatal(err)
	}
	allocator := core.NewRecordAllocator(dataBuffer)

	// Prepare data to fill up a datablock up to its limit
	maxData := dbio.DATABLOCK_SIZE - core.MIN_UTILIZATION - core.RECORD_HEADER_SIZE
	contents := ""
	for i := uint16(0); i < maxData; i++ {
		contents += fmt.Sprintf("%d", i%10)
	}

	// Insert data into 3 different blocks
	allocator.Add(&core.Record{ID: uint32(3), Data: []byte(contents)})
	allocator.Add(&core.Record{ID: uint32(4), Data: []byte(contents)})
	allocator.Add(&core.Record{ID: uint32(5), Data: []byte(contents)})
	allocator.Add(&core.Record{ID: uint32(6), Data: []byte("Some data")})
	allocator.Add(&core.Record{ID: uint32(7), Data: []byte("More data")})

	// Free up some datablocks
	allocator.Remove(core.RowID{DataBlockID: 3, LocalID: 0})
	allocator.Remove(core.RowID{DataBlockID: 5, LocalID: 0})
	// Free part of another datablock
	allocator.Remove(core.RowID{DataBlockID: 6, LocalID: 0})

	// Flush data to data blocks and ensure that things work after a reload
	dataBuffer.Sync()
	dataBuffer = dbio.NewDataBuffer(fakeDataFile, 4)
	repo := core.NewDataBlockRepository(dataBuffer)
	blockMap := repo.DataBlocksMap()

	// Ensure blocks have been marked as free again
	if blockMap.IsInUse(3) {
		t.Errorf("Block 3 should have been marked as free")
	}
	if blockMap.IsInUse(5) {
		t.Errorf("Block 5 should have been marked as free")
	}

	// Ensure the linked list is set up properly

	// First records datablock is now at block 4
	controlBlock := repo.ControlBlock()
	if controlBlock.FirstRecordDataBlock() != 4 {
		t.Fatalf("First record datablock is set to the wrong block, found %d", controlBlock.FirstRecordDataBlock())
	}

	// Then the next block on the chain is at block 6
	recordBlock := repo.RecordBlock(4)
	if recordBlock.NextBlockID() != 6 {
		t.Fatalf("First record datablock next block pointer is set to the wrong block (%d)", recordBlock.NextBlockID())
	}

	// And block 6 points back to block 4
	recordBlock = repo.RecordBlock(6)
	if recordBlock.PrevBlockID() != 4 {
		t.Fatalf("Second record datablock previous block pointer is incorrect (%d)", recordBlock.PrevBlockID())
	}
}
func TestRecordAllocator_ChainedRows(t *testing.T) {
	fakeDataFile := utils.NewFakeDataFile(11)
	dataBuffer := dbio.NewDataBuffer(fakeDataFile, 5)
	if err := core.FormatDataFileIfNeeded(fakeDataFile); err != nil {
		t.Fatal(err)
	}
	allocator := core.NewRecordAllocator(dataBuffer)

	// Prepare data to fill up a datablock close to its limit
	maxData := dbio.DATABLOCK_SIZE - core.MIN_UTILIZATION - core.RECORD_HEADER_SIZE
	contents := ""
	for i := uint16(0); i < maxData; i++ {
		contents += fmt.Sprintf("%d", i%10)
	}

	// Insert records, forcing some of them to chain across blocks
	dummy, _ := allocator.Add(&core.Record{ID: uint32(3), Data: []byte(contents[0 : maxData-100])})
	chainedRowRowID, _ := allocator.Add(&core.Record{ID: uint32(4), Data: []byte(contents)})
	removedChainedRowID, _ := allocator.Add(&core.Record{ID: uint32(5), Data: []byte(contents)})
	allocator.Add(&core.Record{ID: uint32(6), Data: []byte("Some data")})
	allocator.Add(&core.Record{ID: uint32(7), Data: []byte("More data")})

	// Ensure that the rows are chained
	if dummy.DataBlockID != chainedRowRowID.DataBlockID {
		t.Fatalf("Did not create a chained row, expected record to be written on block %d but was written on block %d", dummy.DataBlockID, chainedRowRowID.DataBlockID)
	}

	// Ensure we exercise the code path that deletes chained rows
	allocator.Remove(dummy)
	allocator.Remove(removedChainedRowID)

	// Flush data to data blocks and ensure that things work after a reload
	dataBuffer.Sync()
	dataBuffer = dbio.NewDataBuffer(fakeDataFile, 10)
	repo := core.NewDataBlockRepository(dataBuffer)
	allocator = core.NewRecordAllocator(dataBuffer)

	// Ensure the records can be read after a reload
	recordBlock := repo.RecordBlock(chainedRowRowID.DataBlockID)
	first, err := recordBlock.ReadRecordData(chainedRowRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	chainedRowID, err := recordBlock.ChainedRowID(chainedRowRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	recordBlock = repo.RecordBlock(chainedRowID.DataBlockID)
	second, err := recordBlock.ReadRecordData(chainedRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	if string(first)+string(second) != contents {
		t.Errorf("Invalid contents found for record, found `%s` and `%s`, expected `%s`", first, second, contents)
	}

	// Ensure deletes clear out headers properly
	recordBlock = repo.RecordBlock(removedChainedRowID.DataBlockID)
	if _, err = recordBlock.ReadRecordData(removedChainedRowID.LocalID); err == nil {
		t.Fatal("Did not clear out the record header of one of the deleted chained rows")
	}
	recordBlock = repo.RecordBlock(removedChainedRowID.DataBlockID + 1)
	if _, err = recordBlock.ReadRecordData(0); err == nil {
		t.Fatal("Did not clear out the record header of the next block of the chained row")
	}

	dataBuffer.Sync()
	dataBuffer = dbio.NewDataBuffer(fakeDataFile, 10)
	repo = core.NewDataBlockRepository(dataBuffer)
	allocator = core.NewRecordAllocator(dataBuffer)

	// Add and update a chained row that spans 3 blocks
	bigContents := contents + contents + contents
	chainedUpdateRowID, _ := allocator.Add(&core.Record{ID: uint32(9), Data: []byte(bigContents)})

	// Keep track of the row ids that follow the head of the chained row
	rowIDs := []core.RowID{}
	recordBlock = repo.RecordBlock(chainedUpdateRowID.DataBlockID)
	nextRowID, err := recordBlock.ChainedRowID(chainedUpdateRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	rowIDs = append(rowIDs, nextRowID)
	recordBlock = repo.RecordBlock(nextRowID.DataBlockID)
	nextRowID, err = recordBlock.ChainedRowID(nextRowID.LocalID)
	if err != nil {
		t.Fatal(err)
	}
	rowIDs = append(rowIDs, nextRowID)
	if len(rowIDs) != 2 {
		t.Errorf("Spread record over more blocks than expected\n%+v", rowIDs)
	}

	// Change record to be really small
	allocator.Update(chainedUpdateRowID, &core.Record{ID: uint32(9), Data: []byte("a string")})

	// Ensure the following elements on the chained row list got cleared
	for _, rowID := range rowIDs {
		_, err := repo.RecordBlock(rowID.DataBlockID).ReadRecordData(rowID.LocalID)
		if err == nil {
			t.Errorf("Did not clear chained row %+v", rowID)
		}
	}

	// Ensure we can still read the updated record
	recordBlock = repo.RecordBlock(chainedUpdateRowID.DataBlockID)
	data, _ := recordBlock.ReadRecordData(chainedUpdateRowID.LocalID)
	if string(data) != "a string" {
		t.Errorf("Invalid contents found for record, read `%s`", data)
	}
}