Example #1
func write(fp io.WriterAt,
	c *cache.CacheMap,
	devid, offset, blocks uint32,
	buffer []byte) {

	here := make(chan *message.Message, blocks)
	cacheoffset := cache.Address64(cache.Address{Devid: devid, Block: offset})

	// Send invalidates for each block
	iopkt := &message.IoPkt{
		Address: cacheoffset,
		Blocks:  blocks,
	}
	c.Invalidate(iopkt)

	// Write to storage back end
	// :TODO: check return status
	fp.WriteAt(buffer, int64(offset)*4*KB)

	// Now write to cache
	msg := message.NewMsgPut()
	msg.RetChan = here
	iopkt = msg.IoPkt()
	iopkt.Blocks = blocks
	iopkt.Address = cacheoffset
	iopkt.Buffer = buffer
	c.Put(msg)

	<-here
}
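
The write() helper above depends on two definitions that are not part of the listing: the KB constant used for the 4 KiB block size, and cache.Address64, which folds a device ID and a block offset into one 64-bit cache key. The following is only a sketch of what those might look like, stated as an assumption to make the example self-contained; the real pblcache definitions may differ.

// Assumed block-size constant used by write() and read() in these examples.
const KB = 1024 // 4 * KB == 4096-byte cache block

// Hypothetical sketch of the packing implied by
// cache.Address64(cache.Address{Devid: devid, Block: offset}):
// device id in the upper 32 bits, block number in the lower 32 bits.
type Address struct {
	Devid uint32
	Block uint32
}

func Address64(a Address) uint64 {
	return uint64(a.Devid)<<32 | uint64(a.Block)
}
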
Example #2
func readandstore(fp io.ReaderAt,
	c *cache.CacheMap,
	devid, offset, blocks uint32,
	buffer []byte,
	retchan chan *message.Message) {

	fp.ReadAt(buffer, int64(offset)*4*KB)

	m := message.NewMsgPut()
	m.RetChan = retchan
	io := m.IoPkt()
	io.Address = cache.Address64(cache.Address{Devid: devid, Block: offset})
	io.Buffer = buffer
	io.Blocks = blocks

	c.Put(m)
}
Example #3
func (c *CacheMap) Put(msg *message.Message) error {

	err := msg.Check()
	if err != nil {
		return err
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	io := msg.IoPkt()
	if io.Blocks > 1 {
		// Have parent message wait for its children
		defer msg.Done()

		//
		// It does not matter that we send small blocks to the Log, since
		// it will buffer them before sending them out to the cache device
		//
		// We do need to send each block separately now so that the cache
		// policy can hopefully place them one after the other.
		//
		for block := uint32(0); block < io.Blocks; block++ {
			child := message.NewMsgPut()
			msg.Add(child)

			child_io := child.IoPkt()
			child_io.Address = io.Address + uint64(block)
			child_io.Buffer = SubBlockBuffer(io.Buffer, c.blocksize, block, 1)
			child_io.LogBlock = c.put(child_io.Address)
			child_io.Blocks = 1

			// Send to next one in line
			c.pipeline <- child
		}
	} else {
		io.LogBlock = c.put(io.Address)
		c.pipeline <- msg
	}

	return nil
}
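
Put() splits a multi-block request into one child message per block and carves the parent buffer with SubBlockBuffer. That helper is not shown here; assuming it is a plain block-aligned re-slice (which is how the call sites above and in the read examples use it), a minimal sketch would be:

// Hypothetical sketch of SubBlockBuffer as called above: return the
// sub-slice of buffer covering `blocks` blocks starting at block `at`,
// where every block is blocksize bytes long.
func SubBlockBuffer(buffer []byte, blocksize, at, blocks uint32) []byte {
	return buffer[at*blocksize : (at+blocks)*blocksize]
}
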
Example #4
// Should wrap four times
func TestWrapPut(t *testing.T) {
	// Simple log
	blocks := uint32(16)

	testcachefile := tests.Tempfile()
	err := tests.CreateFile(testcachefile, 16*4096)
	tests.Assert(t, nil == err)
	defer os.Remove(testcachefile)

	l, logblocks, err := NewLog(testcachefile, 4096, 2, 4096*2, false)
	tests.Assert(t, err == nil)
	tests.Assert(t, l != nil)
	tests.Assert(t, blocks == logblocks)
	l.Start()

	here := make(chan *message.Message)
	wraps := uint32(4)

	// Write enough blocks to wrap around the log
	// as many times as determined by the value in 'wraps'
	for io := uint32(0); io < (blocks * wraps); io++ {
		buf := make([]byte, 4096)
		buf[0] = byte(io)

		msg := message.NewMsgPut()
		msg.RetChan = here

		iopkt := msg.IoPkt()
		iopkt.Buffer = buf
		iopkt.LogBlock = io % blocks

		l.Msgchan <- msg
		<-here
	}

	// Close will also empty all the channels
	l.Close()

	// Check that we have wrapped the correct number of times
	tests.Assert(t, l.Stats().Wraps == uint64(wraps))
}
Example #5
func read(fp io.ReaderAt,
	c *cache.CacheMap,
	devid, offset, blocks uint32,
	buffer []byte) {

	godbc.Require(len(buffer)%(4*KB) == 0)

	here := make(chan *message.Message, blocks)
	cacheoffset := cache.Address64(cache.Address{Devid: devid, Block: offset})
	msg := message.NewMsgGet()
	msg.RetChan = here
	iopkt := msg.IoPkt()
	iopkt.Buffer = buffer
	iopkt.Address = cacheoffset
	iopkt.Blocks = blocks

	msgs := 0
	hitpkt, err := c.Get(msg)
	if err != nil {
		//fmt.Printf("|blocks:%d::hits:0--", blocks)
		// None found
		// Read the whole thing from backend
		fp.ReadAt(buffer, int64(offset)*4*KB)

		m := message.NewMsgPut()
		m.RetChan = here

		io := m.IoPkt()
		io.Address = cacheoffset
		io.Buffer = buffer
		io.Blocks = blocks
		c.Put(m)
		msgs++

	} else if hitpkt.Hits != blocks {
		//fmt.Printf("|******blocks:%d::hits:%d--", blocks, hitpkt.Hits)
		// Read from storage the blocks that were not found
		// in the hit map.
		var be_offset, be_block, be_blocks uint32
		var be_read_ready = false
		for block := uint32(0); block < blocks; block++ {
			if !hitpkt.Hitmap[int(block)] {
				if be_read_ready {
					be_blocks++
				} else {
					be_read_ready = true
					be_offset = offset + block
					be_block = block
					be_blocks++
				}
			} else {
				if be_read_ready {
					// Send read
					msgs++
					go readandstore(fp, c, devid,
						be_offset,
						be_blocks,
						cache.SubBlockBuffer(buffer, 4*KB, be_block, be_blocks),
						here)
					be_read_ready = false
					be_blocks = 0
					be_offset = 0
					be_block = 0
				}
			}
		}
		if be_read_ready {
			msgs++
			go readandstore(fp, c, devid,
				be_offset,
				be_blocks,
				cache.SubBlockBuffer(buffer, 4*KB, be_block, be_blocks),
				here)
		}

	} else {
		msgs = 1
	}

	// Wait for blocks to be returned
	for msg := range here {

		msgs--
		godbc.Check(msg.Err == nil, msg)
		godbc.Check(msgs >= 0, msgs)

		if msgs == 0 {
			return
		}
	}
}
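
The partial-hit branch of read() walks the hit map and coalesces runs of consecutive misses so that each run becomes a single readandstore() call against the backend. The standalone function below is only an illustration of that coalescing (it is not part of the cache package): given a hit map, it returns the (first missed block, block count) pairs that read() would fetch from storage. For the hit map [T F F T F F] it returns [[1 2] [4 2]], i.e. two backend reads.

// missRanges restates the coalescing loop in read(): each returned pair
// is (first missed block, number of blocks) for one run of consecutive
// cache misses.
func missRanges(hitmap []bool) [][2]uint32 {
	var ranges [][2]uint32
	var start, count uint32
	active := false
	for block := uint32(0); block < uint32(len(hitmap)); block++ {
		if !hitmap[block] {
			if !active {
				active = true
				start = block
				count = 0
			}
			count++
		} else if active {
			ranges = append(ranges, [2]uint32{start, count})
			active = false
		}
	}
	if active {
		ranges = append(ranges, [2]uint32{start, count})
	}
	return ranges
}
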
Example #6
func TestLogMultiBlock(t *testing.T) {

	// 256 blocks available in the log
	seeklen := int64(256 * 4096)

	// Setup Mockfile
	mockfile := tests.NewMockFile()
	mockfile.MockSeek = func(offset int64, whence int) (int64, error) {
		return seeklen, nil
	}

	mock_byteswritten := 0
	mock_written := 0
	mock_off_written := int64(0)
	continue_test := make(chan bool, 1)
	mockfile.MockWriteAt = func(p []byte, off int64) (n int, err error) {
		mock_written++
		mock_off_written = off
		mock_byteswritten += len(p)
		continue_test <- true
		return len(p), nil
	}

	mock_bytesread := 0
	mock_read := 0
	mock_off_read := int64(0)
	mockfile.MockReadAt = func(p []byte, off int64) (n int, err error) {
		mock_read++
		mock_off_read = off
		mock_bytesread += len(p)
		continue_test <- true
		return len(p), nil
	}

	// Mock openFile
	defer tests.Patch(&openFile,
		func(name string, flag int, perm os.FileMode) (Filer, error) {
			return mockfile, nil
		}).Restore()

	// Simple log
	l, blocks, err := NewLog("file", 4096, 4, 0, false)
	tests.Assert(t, err == nil)
	tests.Assert(t, l != nil)
	tests.Assert(t, blocks == 256)
	l.Start()

	// Send 8 blocks
	here := make(chan *message.Message)
	m := message.NewMsgPut()
	iopkt := m.IoPkt()
	m.RetChan = here
	iopkt.Buffer = make([]byte, 8*4096)
	iopkt.Blocks = 8

	for block := uint32(0); block < iopkt.Blocks; block++ {
		child := message.NewMsgPut()
		m.Add(child)

		child_io := child.IoPkt()
		child_io.Address = iopkt.Address + uint64(block)
		child_io.Buffer = SubBlockBuffer(iopkt.Buffer, 4096, block, 1)
		child_io.LogBlock = block
		child_io.Blocks = 1

		l.Msgchan <- child
	}

	m.Done()
	<-here
	<-continue_test

	tests.Assert(t, mock_byteswritten == 4*4096)
	tests.Assert(t, mock_written == 1)
	tests.Assert(t, mock_off_written == 0)
	tests.Assert(t, mock_read == 0)
	tests.Assert(t, len(continue_test) == 0)

	// At this point we have 4 blocks written to the log storage
	// and 4 blocks in the current segment.

	// Read log blocks 0-3
	m = message.NewMsgGet()
	iopkt = m.IoPkt()
	m.RetChan = here
	iopkt.Blocks = 4
	iopkt.Buffer = make([]byte, 4*4096)
	iopkt.LogBlock = 0

	mock_written = 0
	mock_byteswritten = 0
	l.Msgchan <- m
	<-here
	<-continue_test

	tests.Assert(t, mock_byteswritten == 0)
	tests.Assert(t, mock_written == 0)
	tests.Assert(t, mock_read == 1)
	tests.Assert(t, mock_bytesread == 4*4096)
	tests.Assert(t, mock_off_read == 0)
	tests.Assert(t, len(continue_test) == 0)

	// Now read log blocks 1,2,3,4,5.  Blocks 1,2,3 will be on the storage
	// device, and blocks 4,5 will be in ram
	m = message.NewMsgGet()
	iopkt = m.IoPkt()
	m.RetChan = here
	iopkt.Blocks = 5
	iopkt.Buffer = make([]byte, 5*4096)
	iopkt.LogBlock = 1

	mock_written = 0
	mock_byteswritten = 0
	mock_bytesread = 0
	mock_read = 0
	l.Msgchan <- m
	<-here
	<-continue_test

	tests.Assert(t, mock_byteswritten == 0)
	tests.Assert(t, mock_written == 0)
	tests.Assert(t, mock_read == 1)
	tests.Assert(t, mock_bytesread == 3*4096)
	tests.Assert(t, mock_off_read == 1*4096)
	tests.Assert(t, len(continue_test) == 0)

	// Cleanup
	l.Close()
}
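
TestLogMultiBlock only works because NewLog opens its backing file through a package-level openFile variable that returns a small Filer interface, which tests.Patch temporarily replaces with the mock. Those definitions are not in the listing; the sketch below is an assumption about what that seam looks like (io and os imports assumed), not the project's actual code.

// Hypothetical sketch of the seam patched by tests.Patch(&openFile, ...):
// a Filer interface small enough for the mock file to satisfy, and an
// openFile variable that normally just defers to os.OpenFile.
type Filer interface {
	io.ReaderAt
	io.WriterAt
	io.Seeker
	io.Closer
}

var openFile = func(name string, flag int, perm os.FileMode) (Filer, error) {
	return os.OpenFile(name, flag, perm)
}
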
Example #7
func TestLogConcurrency(t *testing.T) {
	// Simple log
	blocks := uint32(240)
	bs := uint32(4096)
	blocks_per_segment := uint32(2)
	buffercache := uint32(4096 * 24)
	testcachefile := tests.Tempfile()
	tests.Assert(t, nil == tests.CreateFile(testcachefile, int64(blocks*4096)))
	defer os.Remove(testcachefile)
	l, logblocks, err := NewLog(testcachefile,
		bs,
		blocks_per_segment,
		buffercache,
		false)
	tests.Assert(t, err == nil)
	tests.Assert(t, l != nil)
	tests.Assert(t, blocks == logblocks)
	l.Start()

	here := make(chan *message.Message)

	// Fill the log
	for io := uint32(0); io < blocks; io++ {
		buf := make([]byte, 4096)
		buf[0] = byte(io)

		msg := message.NewMsgPut()
		msg.RetChan = here

		iopkt := msg.IoPkt()
		iopkt.Buffer = buf
		iopkt.LogBlock = io

		l.Msgchan <- msg
		<-here
	}

	var wgIo, wgRet sync.WaitGroup

	// Start up response server
	returnch := make(chan *message.Message, 100)
	quit := make(chan struct{})
	wgRet.Add(1)
	go logtest_response_handler(t, &wgRet, quit, returnch)

	// Create 100 readers
	for i := 0; i < 100; i++ {
		wgIo.Add(1)
		go func() {
			defer wgIo.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			// Each client sends 1k IOs
			for io := 0; io < 1000; io++ {
				msg := message.NewMsgGet()
				iopkt := msg.IoPkt()
				iopkt.Buffer = make([]byte, bs)

				// Pick a random block within the log
				iopkt.LogBlock = uint32(r.Int31n(int32(blocks)))
				msg.RetChan = returnch

				// Send request
				msg.TimeStart()
				l.Msgchan <- msg

				// Simulate waiting for more work by sleeping
				// anywhere from 100usecs to 10ms
				time.Sleep(time.Microsecond * time.Duration((r.Intn(10000) + 100)))
			}
		}()
	}

	// Write to the log while the readers are reading
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for wrap := 0; wrap < 30; wrap++ {
		for io := uint32(0); io < blocks; io++ {
			buf := make([]byte, 4096)
			buf[0] = byte(io)

			msg := message.NewMsgPut()
			msg.RetChan = returnch

			iopkt := msg.IoPkt()
			iopkt.Buffer = buf
			iopkt.LogBlock = io

			msg.TimeStart()
			l.Msgchan <- msg
			time.Sleep(time.Microsecond * time.Duration((r.Intn(1000) + 100)))
		}
	}

	// Wait for all clients to finish
	wgIo.Wait()

	// Send receiver a message that all clients have shut down
	close(quit)

	// Wait for receiver to finish emptying its channel
	wgRet.Wait()

	// Cleanup
	fmt.Print(l)
	l.Close()
	os.Remove(testcachefile)

}
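
logtest_response_handler is referenced by the test but not shown. Assuming it only needs to drain returnch, assert that no I/O failed, and exit once quit is closed (the real handler probably also records the latencies started with msg.TimeStart()), a minimal sketch could be:

// Minimal sketch of the response handler assumed by TestLogConcurrency:
// consume completed messages, check they carry no error, and stop after
// quit is closed and the channel has been drained.
func logtest_response_handler(t *testing.T, wg *sync.WaitGroup,
	quit chan struct{}, in chan *message.Message) {

	defer wg.Done()
	for {
		select {
		case msg := <-in:
			tests.Assert(t, msg.Err == nil)
		case <-quit:
			// Empty anything still queued before returning
			for len(in) > 0 {
				msg := <-in
				tests.Assert(t, msg.Err == nil)
			}
			return
		}
	}
}
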
Example #8
func TestReadCorrectness(t *testing.T) {
	// Simple log
	blocks := uint32(240)
	bs := uint32(4096)
	blocks_per_segment := uint32(2)
	buffercache := uint32(4096 * 10)
	testcachefile := tests.Tempfile()
	tests.Assert(t, nil == tests.CreateFile(testcachefile, int64(blocks*4096)))
	defer os.Remove(testcachefile)
	l, logblocks, err := NewLog(testcachefile,
		bs,
		blocks_per_segment,
		buffercache,
		false)
	tests.Assert(t, err == nil)
	tests.Assert(t, l != nil)
	tests.Assert(t, blocks == logblocks)
	l.Start()

	here := make(chan *message.Message)

	// Write enough blocks in the log to reach
	// the end.
	for io := uint32(0); io < blocks; io++ {
		buf := make([]byte, 4096)

		// Save the block number in the buffer
		// so that we can check it later.  For simplicity
		// we have made sure the block number is only
		// one byte.
		buf[0] = byte(io)

		msg := message.NewMsgPut()
		msg.RetChan = here

		iopkt := msg.IoPkt()
		iopkt.Buffer = buf
		iopkt.LogBlock = io

		l.Msgchan <- msg
		<-here
	}
	buf := make([]byte, 4096)
	msg := message.NewMsgGet()
	msg.RetChan = here

	iopkt := msg.IoPkt()
	iopkt.Buffer = buf
	iopkt.LogBlock = blocks - 1

	l.Msgchan <- msg
	<-here

	tests.Assert(t, buf[0] == uint8(blocks-1))

	for io := uint32(0); io < blocks; io++ {
		buf := make([]byte, 4096)
		msg := message.NewMsgGet()
		msg.RetChan = here

		iopkt := msg.IoPkt()
		iopkt.Buffer = buf
		iopkt.LogBlock = io
		l.Msgchan <- msg

		// Wait here for the response
		<-here

		// Check the block number is correct
		tests.Assert(t, buf[0] == uint8(io))
	}

	l.Close()
}
Example #9
func cacheio(t *testing.T, c *cache.CacheMap, log *cache.Log,
	actual_blocks, blocksize uint32) {
	var wgIo, wgRet sync.WaitGroup

	// Start up response server
	returnch := make(chan *message.Message, 100)
	wgRet.Add(1)
	go response_handler(t, &wgRet, returnch)

	// Create a parent message for all messages to notify
	// when they have been completed.
	messages := &message.Message{}
	messages_done := make(chan *message.Message)
	messages.RetChan = messages_done

	// Create 100 clients
	for i := 0; i < 100; i++ {
		wgIo.Add(1)
		go func() {
			defer wgIo.Done()
			// Maximum "disk" size is 10 times bigger than cache
			z := zipf.NewZipfWorkload(uint64(actual_blocks)*10, 60)
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			// Each client sends 5k IOs
			for io := 0; io < 5000; io++ {
				var msg *message.Message
				offset, isread := z.ZipfGenerate()

				if isread {
					msg = message.NewMsgGet()
				} else {
					// On a write the client would first
					// invalidate the block, write the data to the
					// storage device, then place it in the cache
					iopkt := &message.IoPkt{
						Address: offset,
						Blocks:  1,
					}
					c.Invalidate(iopkt)

					// Simulate waiting for storage device to write data
					time.Sleep(time.Microsecond * time.Duration((r.Intn(100))))

					// Now, we can do a put
					msg = message.NewMsgPut()
				}

				messages.Add(msg)
				iopkt := msg.IoPkt()
				iopkt.Buffer = make([]byte, blocksize)
				iopkt.Address = offset
				msg.RetChan = returnch

				msg.TimeStart()

				// Write the offset into the buffer so that we can
				// check it on reads.
				if !isread {
					bio := bufferio.NewBufferIO(iopkt.Buffer)
					bio.WriteDataLE(offset)
					c.Put(msg)
				} else {
					_, err := c.Get(msg)
					if err != nil {
						msg.Err = err
						msg.Done()
					}
				}

				// Simulate waiting for more work by sleeping
				time.Sleep(time.Microsecond * time.Duration((r.Intn(100))))
			}
		}()

	}

	// Wait for all clients to finish
	wgIo.Wait()

	// Wait for all messages to finish
	messages.Done()
	<-messages_done

	// Print stats
	fmt.Print(c)
	fmt.Print(log)

	// Close cache and log
	c.Close()
	log.Close()

	stats := log.Stats()
	Assert(t, stats.Seg_skipped == 0)

	// Send receiver a message that all clients have shut down
	close(returnch)

	// Wait for receiver to finish emptying its channel
	wgRet.Wait()

}
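
cacheio relies on a response_handler goroutine that is not part of the listing. Because the test closes returnch once every message has completed, a handler that simply ranges over the channel, checks for errors, and signals the wait group is enough for illustration. This is an assumption, not the project's actual handler, which likely also aggregates the latencies started with msg.TimeStart().

// Assumed shape of response_handler as called above: drain the return
// channel until it is closed and verify that no request failed.
func response_handler(t *testing.T, wg *sync.WaitGroup, in chan *message.Message) {
	defer wg.Done()
	for msg := range in {
		tests.Assert(t, msg.Err == nil)
	}
}
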
Example #10
func TestCacheMapSimple(t *testing.T) {
	mocklog := make(chan *message.Message)

	// This service will run in its own goroutine
	// and forward any messages it receives to mocklog
	pipeline := message.NewNullPipeline(mocklog)
	pipeline.Start()
	defer pipeline.Close()

	c := NewCacheMap(8, 4096, pipeline.In)
	tests.Assert(t, c != nil)

	here := make(chan *message.Message)
	buffer := make([]byte, 4096)
	m := message.NewMsgPut()
	m.RetChan = here
	io := m.IoPkt()
	io.Buffer = buffer
	io.Address = 1
	m.Priv = c

	// First Put
	err := c.Put(m)
	tests.Assert(t, err == nil)

	logmsg := <-mocklog
	logio := logmsg.IoPkt()
	tests.Assert(t, m.Type == logmsg.Type)
	tests.Assert(t, logmsg.Priv.(*CacheMap) == c)
	tests.Assert(t, io.Blocks == logio.Blocks)
	tests.Assert(t, io.Address == logio.Address)
	tests.Assert(t, logio.LogBlock == 0)
	logmsg.Done()

	returnedmsg := <-here
	rio := returnedmsg.IoPkt()
	tests.Assert(t, m.Type == returnedmsg.Type)
	tests.Assert(t, returnedmsg.Priv.(*CacheMap) == c)
	tests.Assert(t, io.Blocks == rio.Blocks)
	tests.Assert(t, io.Address == rio.Address)
	tests.Assert(t, c.stats.insertions == 1)
	tests.Assert(t, returnedmsg.Err == nil)

	val, ok := c.addressmap[io.Address]
	tests.Assert(t, val == 0)
	tests.Assert(t, ok == true)

	// Check that we cannot resend this message
	err = c.Put(m)
	tests.Assert(t, err == message.ErrMessageUsed)

	// Insert again.  Should allocate
	// next block
	m = message.NewMsgPut()
	m.RetChan = here
	io = m.IoPkt()
	io.Buffer = buffer
	io.Address = 1
	m.Priv = c
	err = c.Put(m)
	tests.Assert(t, err == nil)

	logmsg = <-mocklog
	logio = logmsg.IoPkt()
	tests.Assert(t, logio.LogBlock == 1)
	logmsg.Done()

	returnedmsg = <-here
	rio = returnedmsg.IoPkt()
	tests.Assert(t, returnedmsg.Err == nil)
	tests.Assert(t, c.stats.insertions == 2)

	val, ok = c.addressmap[io.Address]
	tests.Assert(t, val == 1)
	tests.Assert(t, ok == true)

	// Send a Get
	mg := message.NewMsgGet()
	io = mg.IoPkt()
	io.Address = 1
	io.Buffer = buffer
	mg.RetChan = here

	hitmap, err := c.Get(mg)
	tests.Assert(t, err == nil)
	tests.Assert(t, hitmap.Hits == 1)
	tests.Assert(t, hitmap.Hitmap[0] == true)
	tests.Assert(t, hitmap.Hits == io.Blocks)

	logmsg = <-mocklog
	logio = logmsg.IoPkt()
	tests.Assert(t, logio.LogBlock == 1)
	logmsg.Done()

	returnedmsg = <-here
	io = returnedmsg.IoPkt()
	tests.Assert(t, returnedmsg.Err == nil)
	tests.Assert(t, c.stats.insertions == 2)
	tests.Assert(t, c.stats.readhits == 1)
	tests.Assert(t, c.stats.reads == 1)

	// Test we cannot send the same message
	hitmap, err = c.Get(mg)
	tests.Assert(t, err == message.ErrMessageUsed)
	tests.Assert(t, hitmap == nil)

	// Send Invalidate
	iopkt := &message.IoPkt{}
	iopkt.Address = 1
	iopkt.Blocks = 1
	c.Invalidate(iopkt)
	tests.Assert(t, c.stats.insertions == 2)
	tests.Assert(t, c.stats.readhits == 1)
	tests.Assert(t, c.stats.reads == 1)
	tests.Assert(t, c.stats.invalidations == 1)
	tests.Assert(t, c.stats.invalidatehits == 1)

	// Send Invalidate
	iopkt = &message.IoPkt{}
	iopkt.Address = 1
	iopkt.Blocks = 1
	c.Invalidate(iopkt)
	tests.Assert(t, c.stats.insertions == 2)
	tests.Assert(t, c.stats.readhits == 1)
	tests.Assert(t, c.stats.reads == 1)
	tests.Assert(t, c.stats.invalidations == 2)
	tests.Assert(t, c.stats.invalidatehits == 1)

	// Send a Get again, but it should not be there
	mg = message.NewMsgGet()
	io = mg.IoPkt()
	io.Address = 1
	io.Buffer = buffer
	mg.RetChan = here
	hitmap, err = c.Get(mg)
	tests.Assert(t, err == ErrNotFound)
	tests.Assert(t, hitmap == nil)
	tests.Assert(t, c.stats.insertions == 2)
	tests.Assert(t, c.stats.readhits == 1)
	tests.Assert(t, c.stats.reads == 2)
	tests.Assert(t, c.stats.invalidations == 2)
	tests.Assert(t, c.stats.invalidatehits == 1)

	// Check the stats
	stats := c.Stats()
	tests.Assert(t, stats.Readhits == c.stats.readhits)
	tests.Assert(t, stats.Invalidatehits == c.stats.invalidatehits)
	tests.Assert(t, stats.Reads == c.stats.reads)
	tests.Assert(t, stats.Evictions == c.stats.evictions)
	tests.Assert(t, stats.Invalidations == c.stats.invalidations)
	tests.Assert(t, stats.Insertions == c.stats.insertions)

	// Clear the stats
	c.StatsClear()
	tests.Assert(t, 0 == c.stats.readhits)
	tests.Assert(t, 0 == c.stats.invalidatehits)
	tests.Assert(t, 0 == c.stats.reads)
	tests.Assert(t, 0 == c.stats.evictions)
	tests.Assert(t, 0 == c.stats.invalidations)
	tests.Assert(t, 0 == c.stats.insertions)

	c.Close()
}
Example #11
func TestCacheMapConcurrency(t *testing.T) {
	var wgIo, wgRet sync.WaitGroup

	nc := message.NewNullTerminator()
	nc.Start()
	defer nc.Close()

	c := NewCacheMap(300, 4096, nc.In)

	// Start up response server
	returnch := make(chan *message.Message, 100)
	quit := make(chan struct{})
	wgRet.Add(1)
	go response_handler(&wgRet, quit, returnch)

	// Create 100 clients
	for i := 0; i < 100; i++ {
		wgIo.Add(1)
		go func() {
			defer wgIo.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			// Each client sends 1k IOs
			for io := 0; io < 1000; io++ {
				var msg *message.Message
				switch r.Intn(2) {
				case 0:
					msg = message.NewMsgGet()
				case 1:
					msg = message.NewMsgPut()
				}
				iopkt := msg.IoPkt()
				iopkt.Buffer = make([]byte, 4096)

				// Maximum "disk" size is 10 times bigger than cache
				iopkt.Address = uint64(r.Int63n(3000))
				msg.RetChan = returnch

				// Send request
				msg.TimeStart()

				switch msg.Type {
				case message.MsgGet:
					c.Get(msg)
				case message.MsgPut:
					c.Invalidate(iopkt)
					c.Put(msg)
				}

				// Simulate waiting for more work by sleeping
				// anywhere from 100usecs to 10ms
				time.Sleep(time.Microsecond * time.Duration((r.Intn(10000) + 100)))
			}
		}()

	}

	// Wait for all clients to finish
	wgIo.Wait()

	// Send receiver a message that all clients have shut down
	fmt.Print(c)
	c.Close()
	close(quit)

	// Wait for receiver to finish emptying its channel
	wgRet.Wait()
}
Example #12
// This test will check that the cache tries to place
// as many contiguous blocks as possible. We initialize
// the 8-slot cache by filling its first four slots, then
// invalidate slots 1 and 2 to leave the following: [X__X____]
// When we put a message with 6 blocks, the cache should be
// populated as follows:  [X45X0123]
//
// At the end, check multiblock Get()
//
func TestCacheMapMultiblock(t *testing.T) {
	// This service will run in its own goroutine
	// and forward any messages it receives to mocklog
	mocklog := make(chan *message.Message)
	pipe := message.NewNullPipeline(mocklog)
	pipe.Start()
	defer pipe.Close()

	c := NewCacheMap(8, 4096, pipe.In)
	tests.Assert(t, c != nil)

	here := make(chan *message.Message)
	buffer := make([]byte, 4096)

	// Initialize data in cache
	for i := uint64(0); i < 4; i++ {
		m := message.NewMsgPut()
		m.RetChan = here
		io := m.IoPkt()
		io.Buffer = buffer
		io.Address = i

		// First Put
		err := c.Put(m)
		tests.Assert(t, err == nil)
		retmsg := <-mocklog
		retmsg.Done()
		<-here
	}

	c.Invalidate(&message.IoPkt{Address: 1, Blocks: 2})
	tests.Assert(t, c.stats.insertions == 4)
	tests.Assert(t, c.stats.invalidatehits == 2)
	tests.Assert(t, c.bda.bds[0].used == true)
	tests.Assert(t, c.bda.bds[0].key == 0)
	tests.Assert(t, c.bda.bds[1].used == false)
	tests.Assert(t, c.bda.bds[2].used == false)
	tests.Assert(t, c.bda.bds[3].used == true)
	tests.Assert(t, c.bda.bds[3].key == 3)

	// Set the clock so they do not get erased
	c.bda.bds[0].clock_set = true
	c.bda.bds[3].clock_set = true

	// Insert multiblock
	largebuffer := make([]byte, 6*4096)
	m := message.NewMsgPut()
	m.RetChan = here
	io := m.IoPkt()
	io.Buffer = largebuffer
	io.Address = 10
	io.Blocks = 6

	// First Put
	err := c.Put(m)
	tests.Assert(t, err == nil)
	for i := uint32(0); i < io.Blocks; i++ {
		// Put sends a message for each block
		retmsg := <-mocklog
		retmsg.Done()
	}
	<-here

	tests.Assert(t, c.stats.insertions == 10)
	tests.Assert(t, c.stats.invalidatehits == 2)

	// Check the two blocks left from before
	tests.Assert(t, c.bda.bds[0].used == true)
	tests.Assert(t, c.bda.bds[0].key == 0)
	tests.Assert(t, c.bda.bds[0].clock_set == false)

	tests.Assert(t, c.bda.bds[3].used == true)
	tests.Assert(t, c.bda.bds[3].key == 3)
	tests.Assert(t, c.bda.bds[3].clock_set == true)

	// Now check the blocks we inserted
	tests.Assert(t, c.bda.bds[4].used == true)
	tests.Assert(t, c.bda.bds[4].key == 10)
	tests.Assert(t, c.bda.bds[4].clock_set == false)

	tests.Assert(t, c.bda.bds[5].used == true)
	tests.Assert(t, c.bda.bds[5].key == 11)
	tests.Assert(t, c.bda.bds[5].clock_set == false)

	tests.Assert(t, c.bda.bds[6].used == true)
	tests.Assert(t, c.bda.bds[6].key == 12)
	tests.Assert(t, c.bda.bds[6].clock_set == false)

	tests.Assert(t, c.bda.bds[7].used == true)
	tests.Assert(t, c.bda.bds[7].key == 13)
	tests.Assert(t, c.bda.bds[7].clock_set == false)

	tests.Assert(t, c.bda.bds[1].used == true)
	tests.Assert(t, c.bda.bds[1].key == 14)
	tests.Assert(t, c.bda.bds[1].clock_set == false)

	tests.Assert(t, c.bda.bds[2].used == true)
	tests.Assert(t, c.bda.bds[2].key == 15)
	tests.Assert(t, c.bda.bds[2].clock_set == false)

	// Check for a block not in the cache
	m = message.NewMsgGet()
	m.RetChan = here
	io = m.IoPkt()
	io.Buffer = buffer
	io.Address = 20
	io.Blocks = 1
	hitmap, err := c.Get(m)
	tests.Assert(t, err == ErrNotFound)
	tests.Assert(t, hitmap == nil)
	tests.Assert(t, len(here) == 0)

	// Get offset 0, 4 blocks.  It should return
	// a bit map of [1001]
	buffer4 := make([]byte, 4*4096)
	m = message.NewMsgGet()
	m.RetChan = here
	io = m.IoPkt()
	io.Buffer = buffer4
	io.Address = 0
	io.Blocks = 4

	hitmap, err = c.Get(m)
	tests.Assert(t, err == nil)
	tests.Assert(t, hitmap.Hits == 2)
	tests.Assert(t, len(hitmap.Hitmap) == int(io.Blocks))
	tests.Assert(t, hitmap.Hitmap[0] == true)
	tests.Assert(t, hitmap.Hitmap[1] == false)
	tests.Assert(t, hitmap.Hitmap[2] == false)
	tests.Assert(t, hitmap.Hitmap[3] == true)
	for i := 0; i < 2; i++ {
		// Get sends a message to the log for each run of contiguous blocks
		retmsg := <-mocklog
		retmsg.Done()
	}
	<-here

	// Get the 6 blocks we inserted previously.  This
	// should show that there are two sets of contiguous
	// blocks
	m = message.NewMsgGet()
	m.RetChan = here
	io = m.IoPkt()
	io.Buffer = largebuffer
	io.Address = 10
	io.Blocks = 6

	hitmap, err = c.Get(m)
	tests.Assert(t, err == nil)
	tests.Assert(t, hitmap.Hits == 6)
	tests.Assert(t, len(hitmap.Hitmap) == int(io.Blocks))
	tests.Assert(t, hitmap.Hitmap[0] == true)
	tests.Assert(t, hitmap.Hitmap[1] == true)
	tests.Assert(t, hitmap.Hitmap[2] == true)
	tests.Assert(t, hitmap.Hitmap[3] == true)
	tests.Assert(t, hitmap.Hitmap[4] == true)
	tests.Assert(t, hitmap.Hitmap[5] == true)

	// The first message to the log
	retmsg := <-mocklog
	retio := retmsg.IoPkt()
	tests.Assert(t, retmsg.RetChan == nil)
	tests.Assert(t, retio.Address == 10)
	tests.Assert(t, retio.LogBlock == 4)
	tests.Assert(t, retio.Blocks == 4)
	retmsg.Done()

	// Second message will have the rest of the contiguous blocks
	retmsg = <-mocklog
	retio = retmsg.IoPkt()
	tests.Assert(t, retmsg.RetChan == nil)
	tests.Assert(t, retio.Address == 14)
	tests.Assert(t, retio.LogBlock == 1)
	tests.Assert(t, retio.Blocks == 2)
	retmsg.Done()

	<-here

	// Save the cache metadata
	save := tests.Tempfile()
	defer os.Remove(save)
	err = c.Save(save, nil)
	tests.Assert(t, err == nil)

	c.Close()
	c = NewCacheMap(8, 4096, pipe.In)
	tests.Assert(t, c != nil)

	err = c.Load(save, nil)
	tests.Assert(t, err == nil)

	// Get data again.
	m = message.NewMsgGet()
	m.RetChan = here
	io = m.IoPkt()
	io.Buffer = largebuffer
	io.Address = 10
	io.Blocks = 6

	hitmap, err = c.Get(m)
	tests.Assert(t, err == nil)
	tests.Assert(t, hitmap.Hits == 6)
	tests.Assert(t, len(hitmap.Hitmap) == int(io.Blocks))
	tests.Assert(t, hitmap.Hitmap[0] == true)
	tests.Assert(t, hitmap.Hitmap[1] == true)
	tests.Assert(t, hitmap.Hitmap[2] == true)
	tests.Assert(t, hitmap.Hitmap[3] == true)
	tests.Assert(t, hitmap.Hitmap[4] == true)
	tests.Assert(t, hitmap.Hitmap[5] == true)

	// The first message to the log
	retmsg = <-mocklog
	retio = retmsg.IoPkt()
	tests.Assert(t, retmsg.RetChan == nil)
	tests.Assert(t, retio.Address == 10)
	tests.Assert(t, retio.LogBlock == 4)
	tests.Assert(t, retio.Blocks == 4)
	retmsg.Done()

	// Second message will have the rest of the contiguous blocks
	retmsg = <-mocklog
	retio = retmsg.IoPkt()
	tests.Assert(t, retmsg.RetChan == nil)
	tests.Assert(t, retio.Address == 14)
	tests.Assert(t, retio.LogBlock == 1)
	tests.Assert(t, retio.Blocks == 2)
	retmsg.Done()

	<-here

	c.Close()
}