Example #1
func write(fp io.WriterAt,
	c *cache.CacheMap,
	devid, offset, blocks uint32,
	buffer []byte) {

	here := make(chan *message.Message, blocks)
	cacheoffset := cache.Address64(cache.Address{Devid: devid, Block: offset})

	// Send invalidates for each block
	iopkt := &message.IoPkt{
		Address: cacheoffset,
		Blocks:  blocks,
	}
	c.Invalidate(iopkt)

	// Write to storage back end
	_, err := fp.WriteAt(buffer, int64(offset)*4*KB)
	godbc.Check(err == nil, err)

	// Now write to cache
	msg := message.NewMsgPut()
	msg.RetChan = here
	iopkt = msg.IoPkt()
	iopkt.Blocks = blocks
	iopkt.Address = cacheoffset
	iopkt.Buffer = buffer
	c.Put(msg)

	// Wait for the cache put to complete
	<-here
}
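
The write helper above takes a *cache.CacheMap that has already been created elsewhere; none of these examples show its construction. As a minimal sketch of the calling convention only, the helper below is hypothetical (its name, file path, device id, and block offset are illustrative), and KB is assumed to be 1024, as implied by the 4*KB block size used throughout:

func writeTwoBlocks(c *cache.CacheMap, path string) error {
	// Hypothetical caller, not part of the library: write two 4 KB
	// blocks at block offset 100 on device 0. Assumes c was
	// initialized elsewhere and path names the backing-store file.
	f, err := os.OpenFile(path, os.O_RDWR, 0644)
	if err != nil {
		return err
	}
	defer f.Close()

	buf := make([]byte, 2*4*KB) // must cover all requested blocks
	write(f, c, 0, 100, 2, buf)
	return nil
}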
Example #2
func readandstore(fp io.ReaderAt,
	c *cache.CacheMap,
	devid, offset, blocks uint32,
	buffer []byte,
	retchan chan *message.Message) {

	// Read the requested blocks from the storage back end
	fp.ReadAt(buffer, int64(offset)*4*KB)

	// Put the data just read into the cache; completion is
	// signaled on retchan
	m := message.NewMsgPut()
	m.RetChan = retchan
	io := m.IoPkt()
	io.Address = cache.Address64(cache.Address{Devid: devid, Block: offset})
	io.Buffer = buffer
	io.Blocks = blocks

	c.Put(m)
}
Example #3
func read(fp io.ReaderAt,
	c *cache.CacheMap,
	devid, offset, blocks uint32,
	buffer []byte) {

	godbc.Require(len(buffer)%(4*KB) == 0)

	here := make(chan *message.Message, blocks)
	cacheoffset := cache.Address64(cache.Address{Devid: devid, Block: offset})
	msg := message.NewMsgGet()
	msg.RetChan = here
	iopkt := msg.IoPkt()
	iopkt.Buffer = buffer
	iopkt.Address = cacheoffset
	iopkt.Blocks = blocks

	msgs := 0
	hitpkt, err := c.Get(msg)
	if err != nil {
		//fmt.Printf("|blocks:%d::hits:0--", blocks)
		// None found
		// Read the whole thing from backend
		fp.ReadAt(buffer, int64(offset)*4*KB)

		m := message.NewMsgPut()
		m.RetChan = here

		io := m.IoPkt()
		io.Address = cacheoffset
		io.Buffer = buffer
		io.Blocks = blocks
		c.Put(m)
		msgs++

	} else if hitpkt.Hits != blocks {
		//fmt.Printf("|******blocks:%d::hits:%d--", blocks, hitpkt.Hits)
		// Read from storage the blocks that were not found in the
		// hit map, coalescing runs of contiguous misses into single
		// backend reads.
		var be_offset, be_block, be_blocks uint32
		var be_read_ready = false
		for block := uint32(0); block < blocks; block++ {
			if !hitpkt.Hitmap[int(block)] {
				if be_read_ready {
					be_blocks++
				} else {
					be_read_ready = true
					be_offset = offset + block
					be_block = block
					be_blocks++
				}
			} else {
				if be_read_ready {
					// Send read
					msgs++
					go readandstore(fp, c, devid,
						be_offset,
						be_blocks,
						cache.SubBlockBuffer(buffer, 4*KB, be_block, be_blocks),
						here)
					be_read_ready = false
					be_blocks = 0
					be_offset = 0
					be_block = 0
				}
			}
		}
		if be_read_ready {
			msgs++
			go readandstore(fp, c, devid,
				be_offset,
				be_blocks,
				cache.SubBlockBuffer(buffer, 4*KB, be_block, be_blocks),
				here)
		}

	} else {
		msgs = 1
	}

	// Wait for blocks to be returned
	for msg := range here {

		msgs--
		godbc.Check(msg.Err == nil, msg)
		godbc.Check(msgs >= 0, msgs)

		if msgs == 0 {
			return
		}
	}
}
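
From the caller's point of view, read fills the supplied buffer from the cache where possible and falls back to the backing file for the missing blocks, blocking until everything has arrived. A minimal sketch along the same lines as the one after Example #1 (again hypothetical, assuming an already-initialized CacheMap and a backing file large enough to cover the requested range):

func readTwoBlocks(c *cache.CacheMap, path string) ([]byte, error) {
	// Hypothetical caller, not part of the library: read two 4 KB
	// blocks starting at block 100 on device 0.
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// Length must be a multiple of 4 KB (see godbc.Require above).
	buf := make([]byte, 2*4*KB)
	read(f, c, 0, 100, 2, buf)
	return buf, nil
}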
Example #4
func cacheio(t *testing.T, c *cache.CacheMap, log *cache.Log,
	actual_blocks, blocksize uint32) {
	var wgIo, wgRet sync.WaitGroup

	// Start up response server
	returnch := make(chan *message.Message, 100)
	wgRet.Add(1)
	go response_handler(t, &wgRet, returnch)

	// Create a parent message for all messages to notify
	// when they have been completed.
	messages := &message.Message{}
	messages_done := make(chan *message.Message)
	messages.RetChan = messages_done

	// Create 100 clients
	for i := 0; i < 100; i++ {
		wgIo.Add(1)
		go func() {
			defer wgIo.Done()
			// Simulated "disk" is 10 times bigger than the cache
			z := zipf.NewZipfWorkload(uint64(actual_blocks)*10, 60)
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			// Each client to send 5k IOs
			for io := 0; io < 5000; io++ {
				var msg *message.Message
				offset, isread := z.ZipfGenerate()

				if isread {
					msg = message.NewMsgGet()
				} else {
					// On a write the client would first
					// invalidate the block, write the data to the
					// storage device, then place it in the cache
					iopkt := &message.IoPkt{
						Address: offset,
						Blocks:  1,
					}
					c.Invalidate(iopkt)

					// Simulate waiting for storage device to write data
					time.Sleep(time.Microsecond * time.Duration((r.Intn(100))))

					// Now, we can do a put
					msg = message.NewMsgPut()
				}

				messages.Add(msg)
				iopkt := msg.IoPkt()
				iopkt.Buffer = make([]byte, blocksize)
				iopkt.Address = offset
				msg.RetChan = returnch

				msg.TimeStart()

				// Write the offset into the buffer so that we can
				// check it on reads.
				if !isread {
					bio := bufferio.NewBufferIO(iopkt.Buffer)
					bio.WriteDataLE(offset)
					c.Put(msg)
				} else {
					_, err := c.Get(msg)
					if err != nil {
						msg.Err = err
						msg.Done()
					}
				}

				// Simulate waiting for more work by sleeping
				time.Sleep(time.Microsecond * time.Duration((r.Intn(100))))
			}
		}()

	}

	// Wait for all clients to finish
	wgIo.Wait()

	// Wait for all messages to finish
	messages.Done()
	<-messages_done

	// Print stats
	fmt.Print(c)
	fmt.Print(log)

	// Close cache and log
	c.Close()
	log.Close()

	stats := log.Stats()
	Assert(t, stats.Seg_skipped == 0)

	// Send receiver a message that all clients have shut down
	close(returnch)

	// Wait for receiver to finish emptying its channel
	wgRet.Wait()

}
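
Example #4 relies on a response_handler goroutine that is not shown here. A minimal sketch consistent with how it is called above (it drains returnch until the channel is closed and checks each returned message for errors; the real handler may also collect latency statistics):

func response_handler(t *testing.T, wg *sync.WaitGroup, ch chan *message.Message) {
	// Hypothetical sketch, not the library's actual handler: consume
	// completed messages until the channel is closed by the sender.
	defer wg.Done()
	for msg := range ch {
		if msg.Err != nil {
			t.Error(msg.Err)
		}
	}
}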