Example #1
func response_handler(t *testing.T,
	wg *sync.WaitGroup,
	m chan *message.Message) {

	var (
		gethits, getmisses, puts int
		errors                   int
		tgh, tgm, tp             tm.TimeDuration
	)

	defer wg.Done()

	// Drain the incoming message channel
	for msg := range m {
		// Collect stats for each completed message
		switch msg.Type {
		case message.MsgGet:
			if msg.Err == nil {
				tgh.Add(msg.TimeElapsed())
				gethits++

				// Check correctness: a Put stores the block's
				// offset in its own buffer (see Example #3), so
				// a hit must read the same value back
				iopkt := msg.IoPkt()
				bio := bufferio.NewBufferIO(iopkt.Buffer)
				var offset_in_buffer uint64
				bio.ReadDataLE(&offset_in_buffer)

				if offset_in_buffer != iopkt.Address {
					errors++
				}

			} else {
				tgm.Add(msg.TimeElapsed())
				getmisses++
			}
		case message.MsgPut:
			puts++
			tp.Add(msg.TimeElapsed())
		}
	}

	fmt.Printf("ERRORS: %d\nGet H:%d M:%d, Puts:%d\n"+
		"Get Hit Rate: %.2f\n"+
		"Mean times in usecs:\n"+
		"Get H:%.2f M:%.2f, Puts:%.2f\n",
		errors, gethits, getmisses, puts,
		float64(gethits)/float64(gethits+getmisses),
		tgh.MeanTimeUsecs(), tgm.MeanTimeUsecs(),
		tp.MeanTimeUsecs())
	Assert(t, errors == 0)
}
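
A sketch of how this handler gets wired up, inferred from the test in Example #3 (the channel capacity of 100 comes from there; the rest is an assumption, not the project's own code):

func wireResponseHandler(t *testing.T) {
	var wgRet sync.WaitGroup
	returnch := make(chan *message.Message, 100)

	wgRet.Add(1)
	go response_handler(t, &wgRet, returnch)

	// ... clients set msg.RetChan = returnch so that completed
	// messages arrive at the handler ...

	// Closing the channel ends the handler's range loop; Wait blocks
	// until it has drained the channel and printed its stats.
	close(returnch)
	wgRet.Wait()
}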
Example #2
File: log.go Project: chenweicai/pblcache
func NewLog(logfile string,
	blocksize, blocks_per_segment, bcsize uint32,
	usedirectio bool) (*Log, uint32, error) {

	var err error

	// Initialize Log
	log := &Log{}
	log.stats = &logstats{}
	log.blocksize = blocksize
	log.blocks_per_segment = blocks_per_segment
	log.segmentsize = log.blocks_per_segment * log.blocksize

	// For DirectIO
	if usedirectio {
		log.fp, err = openFile(logfile, OSSYNC|os.O_RDWR|os.O_EXCL, os.ModePerm)
	} else {
		log.fp, err = openFile(logfile, os.O_RDWR|os.O_EXCL, os.ModePerm)
	}
	if err != nil {
		return nil, 0, err
	}

	// Determine cache size
	var size int64
	size, err = log.fp.Seek(0, os.SEEK_END) // os.SEEK_END is deprecated; prefer io.SeekEnd
	if err != nil {
		return nil, 0, err
	}
	if size == 0 {
		return nil, 0, ErrLogTooSmall
	}
	blocks := size / int64(blocksize)
	if logMaxBlocks <= blocks {
		return nil, 0, ErrLogTooLarge
	}

	// Make sure that the number of blocks requested
	// fits into the segments tracked by the log
	log.numsegments = uint32(blocks) / log.blocks_per_segment
	log.size = uint64(log.numsegments) * uint64(log.segmentsize)

	// Number of blocks, aligned down to whole segments
	log.blocks = log.numsegments * log.blocks_per_segment

	// Adjust the number of segment buffers
	if log.numsegments < NumberSegmentBuffers {
		log.segmentbuffers = int(log.numsegments)
	} else {
		log.segmentbuffers = NumberSegmentBuffers
	}

	godbc.Check(log.numsegments != 0,
		fmt.Sprintf("bs:%v ssize:%v sbuffers:%v blocks:%v max:%v ns:%v size:%v\n",
			log.blocksize, log.segmentsize, log.segmentbuffers, log.blocks,
			log.blocks_per_segment, log.numsegments, log.size))

	// Incoming message channel
	log.Msgchan = make(chan *message.Message, 32)
	log.quitchan = make(chan struct{})
	log.logreaders = make(chan *message.Message, 32)

	// Segment channel state machine:
	// 		-> Client writes available segment
	// 		-> Segment written to storage
	// 		-> Segment read from storage
	// 		-> Segment available
	log.chwriting = make(chan *IoSegment, log.segmentbuffers)
	log.chavailable = make(chan *IoSegment, log.segmentbuffers)
	log.chreader = make(chan *IoSegment, log.segmentbuffers)

	// Set up each of the segments
	log.segments = make([]IoSegment, log.segmentbuffers)
	for i := 0; i < log.segmentbuffers; i++ {
		log.segments[i].segmentbuf = make([]byte, log.segmentsize)
		log.segments[i].data = bufferio.NewBufferIO(log.segments[i].segmentbuf)

		// Prime chreader with all the segment buffers
		log.chreader <- &log.segments[i]
	}

	godbc.Ensure(log.size != 0)
	godbc.Ensure(log.blocksize == blocksize)
	godbc.Ensure(log.Msgchan != nil)
	godbc.Ensure(log.chwriting != nil)
	godbc.Ensure(log.chavailable != nil)
	godbc.Ensure(log.chreader != nil)
	godbc.Ensure(log.segmentbuffers == len(log.segments))
	godbc.Ensure(log.segmentbuffers == len(log.chreader))
	godbc.Ensure(0 == len(log.chavailable))
	godbc.Ensure(0 == len(log.chwriting))

	// Return the log object to the caller.
	// Also return the number of blocks actually usable, which may be
	// less than the caller asked for: the log rounds the size down
	// to a whole number of segments.
	return log, log.blocks, nil
}
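
A hypothetical call site, assuming only the signature above. The file name and geometry are illustrative values, not taken from the project:

func openCacheLog() (*Log, error) {
	// 4 KiB blocks, 1024 blocks per segment; bcsize is not used in
	// the snippet above, so 0 is passed as a placeholder. DirectIO off.
	log, blocks, err := NewLog("/tmp/cache.log", 4096, 1024, 0, false)
	if err != nil {
		return nil, err
	}

	// blocks is the usable block count after rounding down to whole
	// segments, which may be less than the file could hold.
	fmt.Println("log ready, usable blocks:", blocks)
	return log, nil
}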
Example #3
func cacheio(t *testing.T, c *cache.CacheMap, log *cache.Log,
	actual_blocks, blocksize uint32) {
	var wgIo, wgRet sync.WaitGroup

	// Start up response server
	returnch := make(chan *message.Message, 100)
	wgRet.Add(1)
	go response_handler(t, &wgRet, returnch)

	// Create a parent message for all messages to notify
	// when they have been completed.
	messages := &message.Message{}
	messages_done := make(chan *message.Message)
	messages.RetChan = messages_done

	// Create 100 clients
	for i := 0; i < 100; i++ {
		wgIo.Add(1)
		go func() {
			defer wgIo.Done()
			// Zipf workload over a simulated "disk" ten times
			// larger than the cache; ZipfGenerate returns an
			// offset and whether the I/O is a read
			z := zipf.NewZipfWorkload(uint64(actual_blocks)*10, 60)
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			// Each client sends 5k IOs
			for io := 0; io < 5000; io++ {
				var msg *message.Message
				offset, isread := z.ZipfGenerate()

				if isread {
					msg = message.NewMsgGet()
				} else {
					// On a write the client would first
					// invalidate the block, write the data to the
					// storage device, then place it in the cache
					iopkt := &message.IoPkt{
						Address: offset,
						Blocks:  1,
					}
					c.Invalidate(iopkt)

					// Simulate waiting for storage device to write data
					time.Sleep(time.Microsecond * time.Duration(r.Intn(100)))

					// Now, we can do a put
					msg = message.NewMsgPut()
				}

				messages.Add(msg)
				iopkt := msg.IoPkt()
				iopkt.Buffer = make([]byte, blocksize)
				iopkt.Address = offset
				msg.RetChan = returnch

				msg.TimeStart()

				// Write the offset into the buffer so that we can
				// check it on reads.
				if !isread {
					bio := bufferio.NewBufferIO(iopkt.Buffer)
					bio.WriteDataLE(offset)
					c.Put(msg)
				} else {
					_, err := c.Get(msg)
					if err != nil {
						msg.Err = err
						msg.Done()
					}
				}

				// Maximum "disk" size is 10 times bigger than cache

				// Send request

				// Simulate waiting for more work by sleeping
				time.Sleep(time.Microsecond * time.Duration((r.Intn(100))))
			}
		}()
	}

	// Wait for all clients to finish
	wgIo.Wait()

	// Wait for all messages to finish
	messages.Done()
	<-messages_done

	// Print stats
	fmt.Print(c)
	fmt.Print(log)

	// Close cache and log
	c.Close()
	log.Close()

	stats := log.Stats()
	Assert(t, stats.Seg_skipped == 0)

	// Closing the channel signals the response handler that all
	// clients have shut down
	close(returnch)

	// Wait for receiver to finish emptying its channel
	wgRet.Wait()
}
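
Isolated from the client loop above, the write path is what gives the cache verifiable contents: the block's own offset is written into its buffer little-endian, which is exactly what response_handler in Example #1 reads back on a cache hit. A minimal sketch, reusing only the calls that appear in these examples:

func writeBlock(c *cache.CacheMap, returnch chan *message.Message,
	offset uint64, blocksize uint32) {

	// A write first invalidates any cached copy of the block ...
	c.Invalidate(&message.IoPkt{Address: offset, Blocks: 1})

	// ... then, once the backing store write would have completed,
	// puts the new data into the cache.
	msg := message.NewMsgPut()
	iopkt := msg.IoPkt()
	iopkt.Buffer = make([]byte, blocksize)
	iopkt.Address = offset
	msg.RetChan = returnch

	// Store the offset in the block itself so a later Get can verify it.
	bio := bufferio.NewBufferIO(iopkt.Buffer)
	bio.WriteDataLE(offset)

	c.Put(msg)
}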