Example 1
func (c *Log) put(msg *message.Message) error {

	iopkt := msg.IoPkt()
	godbc.Require(iopkt.LogBlock < c.blocks)

	// Make sure the block number corresponds to the
	// current segment.  If not, c.sync() will place
	// the next available segment into c.segment
	for !c.inRange(iopkt.LogBlock, c.segment) {
		c.sync()
	}

	// get log offset
	offset := c.offset(iopkt.LogBlock)

	// Write to current buffer
	n, err := c.segment.data.WriteAt(iopkt.Buffer, offset-c.segment.offset)
	godbc.Check(n == len(iopkt.Buffer))
	godbc.Check(err == nil)

	c.segment.written = true

	// We have written the data, and we are done with the message
	msg.Done()

	return err
}
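
Neither c.offset() nor c.inRange() appears in these examples. The snippet below is a minimal, self-contained sketch of the arithmetic they presumably implement, assuming log blocks are laid out contiguously on the cache device and each segment buffers one fixed-size window of it. The logSketch and segment types and their field names are illustrative assumptions, not the library's actual definitions.

package main

import "fmt"

// segment buffers one fixed-size window of the cache device (assumed layout).
type segment struct {
	offset int64 // byte offset of this segment's window on the device
}

type logSketch struct {
	blocksize   uint32 // bytes per log block
	segmentsize int64  // bytes per segment window
}

// offset converts a log block index into a byte offset on the device.
func (l *logSketch) offset(index uint32) int64 {
	return int64(index) * int64(l.blocksize)
}

// inRange reports whether a log block falls inside a segment's window.
func (l *logSketch) inRange(index uint32, s *segment) bool {
	off := l.offset(index)
	return off >= s.offset && off < s.offset+l.segmentsize
}

func main() {
	l := &logSketch{blocksize: 4096, segmentsize: 4096 * 32}
	s := &segment{offset: 4096 * 32} // the second 32-block window
	fmt.Println(l.inRange(31, s), l.inRange(32, s), l.inRange(63, s), l.inRange(64, s))
	// Output: false true true false
}

Under that assumption, the loop in Example 1 simply advances c.segment (via c.sync()) until the target block's byte offset lands inside the current segment's window.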
Example 2
func (c *CacheMap) create_get_submsg(msg *message.Message,
	address uint64, logblock uint32,
	buffer []byte) *message.Message {

	m := message.NewMsgGet()
	msg.Add(m)

	// Set IoPkt
	mio := m.IoPkt()
	mio.Address = address
	mio.Buffer = buffer
	mio.LogBlock = logblock

	return m
}
Example 3
func (c *CacheMap) Put(msg *message.Message) error {

	err := msg.Check()
	if err != nil {
		return err
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	io := msg.IoPkt()
	if io.Blocks > 1 {
		// Have parent message wait for its children
		defer msg.Done()

		//
		// It does not matter that we send small blocks to the Log, since
		// it will buffer them before sending them out to the cache device
		//
		// We do need to send each one separately now so that the cache
		// policy hopefully aligns them one after the other.
		//
		for block := uint32(0); block < io.Blocks; block++ {
			child := message.NewMsgPut()
			msg.Add(child)

			child_io := child.IoPkt()
			child_io.Address = io.Address + uint64(block)
			child_io.Buffer = SubBlockBuffer(io.Buffer, c.blocksize, block, 1)
			child_io.LogBlock = c.put(child_io.Address)
			child_io.Blocks = 1

			// Send to next one in line
			c.pipeline <- child
		}
	} else {
		io.LogBlock = c.put(io.Address)
		c.pipeline <- msg
	}

	return nil
}
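
SubBlockBuffer() is used by Examples 3, 4 and 5 but is not itself shown. A minimal sketch of what it presumably does, assuming the buffer holds consecutive blocks of blocksize bytes and the helper returns the sub-slice that covers the requested number of blocks starting at the given block index:

func SubBlockBuffer(buffer []byte, blocksize, index, blocks uint32) []byte {
	// Sub-slice covering `blocks` consecutive blocks starting at block `index`
	return buffer[blocksize*index : blocksize*(index+blocks)]
}

Under that assumption, Example 3 can hand each single-block child message its own slice of the parent buffer without copying any data.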
Example 4
func (c *CacheMap) Get(msg *message.Message) (*HitmapPkt, error) {

	err := msg.Check()
	if err != nil {
		return nil, err
	}

	c.lock.Lock()
	defer c.lock.Unlock()

	io := msg.IoPkt()
	hitmap := make([]bool, io.Blocks)
	hits := uint32(0)

	// Sub-message being coalesced and the request block where it starts
	var m *message.Message
	var mblock uint32

	for block := uint32(0); block < io.Blocks; block++ {
		// Get
		current_address := io.Address + uint64(block)
		if index, ok := c.get(current_address); ok {
			hitmap[block] = true
			hits++

			// Check if we already have a message ready
			if m == nil {

				// This is the first message, so let's set it up
				m = c.create_get_submsg(msg,
					current_address,
					index,
					SubBlockBuffer(io.Buffer, c.blocksize, block, 1))
				mblock = block
			} else {
				// Number of blocks between this hit and the start of
				// the current sub-message
				numblocks := block - mblock

				// If this hit is contiguous with the current sub-message
				// both in the request and on the log, extend the
				// sub-message so a single, larger read can be issued.
				if m.IoPkt().LogBlock+numblocks == index && hitmap[block-1] {
					// It is the next in both the cache and storage device
					mio := m.IoPkt()
					mio.Blocks++
					mio.Buffer = SubBlockBuffer(io.Buffer, c.blocksize, mblock, mio.Blocks)
				} else {
					// Send the previous one
					c.pipeline <- m

					// Start a new sub-message for this hit
					m = c.create_get_submsg(msg,
						current_address,
						index,
						SubBlockBuffer(io.Buffer, c.blocksize, block, 1))
					mblock = block
				}

			}
		}
	}

	// Check if we have one more message
	if m != nil {
		c.pipeline <- m
	}
	if hits > 0 {
		hitmappkt := &HitmapPkt{
			Hitmap: hitmap,
			Hits:   hits,
		}
		msg.Done()
		return hitmappkt, nil
	} else {
		return nil, ErrNotFound
	}
}
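
The HitmapPkt returned by Get() tells the caller which blocks were satisfied from the cache; any block whose hitmap entry is false still has to be read from the backing device. The helper below is a hypothetical illustration of that pattern (readMisses and its readBlock callback are not part of the library):

// readMisses fills in the blocks that Get() could not satisfy from the cache.
// readBlock stands in for a read from the backing storage device.
func readMisses(hitmap []bool, address uint64, buffer []byte, blocksize uint32,
	readBlock func(addr uint64, buf []byte) error) error {

	for block, hit := range hitmap {
		if hit {
			// Already copied into the buffer by the cache
			continue
		}
		start := uint32(block) * blocksize
		if err := readBlock(address+uint64(block), buffer[start:start+blocksize]); err != nil {
			return err
		}
	}
	return nil
}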
Example 5
func (c *Log) get(msg *message.Message) error {

	var n int
	var err error

	defer msg.Done()
	iopkt := msg.IoPkt()

	var readmsg *message.Message
	var readmsg_block uint32
	for block := uint32(0); block < iopkt.Blocks; block++ {
		ramhit := false
		index := iopkt.LogBlock + block
		offset := c.offset(index)

		// Check if the data is in RAM.  Go through each buffered segment
		for i := 0; i < c.segmentbuffers; i++ {

			c.segments[i].lock.RLock()
			if c.inRange(index, &c.segments[i]) {

				ramhit = true
				n, err = c.segments[i].data.ReadAt(SubBlockBuffer(iopkt.Buffer, c.blocksize, block, 1),
					offset-c.segments[i].offset)

				godbc.Check(err == nil, err, block, offset, i)
				godbc.Check(uint32(n) == c.blocksize)
				c.stats.RamHit()
			}
			c.segments[i].lock.RUnlock()
		}

		// Not found in RAM, so build or extend a read message for the log device
		if !ramhit {
			if readmsg == nil {
				readmsg = message.NewMsgGet()
				msg.Add(readmsg)
				io := readmsg.IoPkt()
				io.LogBlock = index
				io.Blocks = 1
				readmsg_block = block
			} else {
				readmsg.IoPkt().Blocks++
			}

			io := readmsg.IoPkt()
			io.Buffer = SubBlockBuffer(iopkt.Buffer,
				c.blocksize,
				readmsg_block,
				io.Blocks)

		} else if readmsg != nil {
			// We have a pending message, but the
			// buffer block was not contiguous.
			c.logreaders <- readmsg
			readmsg = nil
		}
	}

	// Send pending read
	if readmsg != nil {
		c.logreaders <- readmsg
	}

	return nil
}
Example 6
func cacheio(t *testing.T, c *cache.CacheMap, log *cache.Log,
	actual_blocks, blocksize uint32) {
	var wgIo, wgRet sync.WaitGroup

	// Start up response server
	returnch := make(chan *message.Message, 100)
	wgRet.Add(1)
	go response_handler(t, &wgRet, returnch)

	// Create a parent message so we can wait for all
	// child messages to complete.
	messages := &message.Message{}
	messages_done := make(chan *message.Message)
	messages.RetChan = messages_done

	// Create 100 clients
	for i := 0; i < 100; i++ {
		wgIo.Add(1)
		go func() {
			defer wgIo.Done()
			// Maximum "disk" size is 10 times bigger than cache
			z := zipf.NewZipfWorkload(uint64(actual_blocks)*10, 60)
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			// Each client sends 5k IOs
			for io := 0; io < 5000; io++ {
				var msg *message.Message
				offset, isread := z.ZipfGenerate()

				if isread {
					msg = message.NewMsgGet()
				} else {
					// On a write the client would first
					// invalidate the block, write the data to the
					// storage device, then place it in the cache
					iopkt := &message.IoPkt{
						Address: offset,
						Blocks:  1,
					}
					c.Invalidate(iopkt)

					// Simulate waiting for storage device to write data
					time.Sleep(time.Microsecond * time.Duration((r.Intn(100))))

					// Now, we can do a put
					msg = message.NewMsgPut()
				}

				messages.Add(msg)
				iopkt := msg.IoPkt()
				iopkt.Buffer = make([]byte, blocksize)
				iopkt.Address = offset
				msg.RetChan = returnch

				msg.TimeStart()

				// Write the offset into the buffer so that we can
				// check it on reads.
				if !isread {
					bio := bufferio.NewBufferIO(iopkt.Buffer)
					bio.WriteDataLE(offset)
					c.Put(msg)
				} else {
					_, err := c.Get(msg)
					if err != nil {
						msg.Err = err
						msg.Done()
					}
				}

				// Simulate waiting for more work by sleeping
				time.Sleep(time.Microsecond * time.Duration((r.Intn(100))))
			}
		}()

	}

	// Wait for all clients to finish
	wgIo.Wait()

	// Wait for all messages to finish
	messages.Done()
	<-messages_done

	// Print stats
	fmt.Print(c)
	fmt.Print(log)

	// Close cache and log
	c.Close()
	log.Close()

	stats := log.Stats()
	Assert(t, stats.Seg_skipped == 0)

	// Signal the response handler that all clients have shut down
	close(returnch)

	// Wait for receiver to finish emptying its channel
	wgRet.Wait()

}
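
The response_handler() used by these tests is not shown. Below is a minimal sketch matching the signature used in Example 6, assuming the handler only drains the return channel until the test closes it and flags any message that completed with an error (the real handler presumably also records latency statistics). It assumes the surrounding test file's imports (testing, sync, and the message package); the variant in Example 7 takes a quit channel instead of the *testing.T.

func response_handler(t *testing.T, wg *sync.WaitGroup, returnch chan *message.Message) {
	defer wg.Done()

	// Drain completed messages until the test closes the channel
	for msg := range returnch {
		if msg.Err != nil {
			t.Error(msg.Err)
		}
	}
}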
Example 7
func TestCacheMapConcurrency(t *testing.T) {
	var wgIo, wgRet sync.WaitGroup

	nc := message.NewNullTerminator()
	nc.Start()
	defer nc.Close()

	c := NewCacheMap(300, 4096, nc.In)

	// Start up response server
	returnch := make(chan *message.Message, 100)
	quit := make(chan struct{})
	wgRet.Add(1)
	go response_handler(&wgRet, quit, returnch)

	// Create 100 clients
	for i := 0; i < 100; i++ {
		wgIo.Add(1)
		go func() {
			defer wgIo.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			// Each client sends 1k IOs
			for io := 0; io < 1000; io++ {
				var msg *message.Message
				switch r.Intn(2) {
				case 0:
					msg = message.NewMsgGet()
				case 1:
					msg = message.NewMsgPut()
				}
				iopkt := msg.IoPkt()
				iopkt.Buffer = make([]byte, 4096)

				// Maximum "disk" size is 10 times bigger than cache
				iopkt.Address = uint64(r.Int63n(3000))
				msg.RetChan = returnch

				// Send request
				msg.TimeStart()

				switch msg.Type {
				case message.MsgGet:
					c.Get(msg)
				case message.MsgPut:
					c.Invalidate(iopkt)
					c.Put(msg)
				}

				// Simulate waiting for more work by sleeping
				// anywhere from 100usecs to 10ms
				time.Sleep(time.Microsecond * time.Duration((r.Intn(10000) + 100)))
			}
		}()

	}

	// Wait for all clients to finish
	wgIo.Wait()

	// Print stats and close the cache
	fmt.Print(c)
	c.Close()

	// Signal the response handler that all clients have shut down
	close(quit)

	// Wait for receiver to finish emptying its channel
	wgRet.Wait()
}