Example #1 — cacheio: 100 concurrent clients drive a cache.CacheMap with a Zipf-distributed read/write mix, wait for every message to complete, and assert that the log skipped no segments.
func cacheio(t *testing.T, c *cache.CacheMap, log *cache.Log,
	actual_blocks, blocksize uint32) {
	var wgIo, wgRet sync.WaitGroup

	// Start up response server
	returnch := make(chan *message.Message, 100)
	wgRet.Add(1)
	go response_handler(t, &wgRet, returnch)

	// Create a parent message for all messages to notify
	// when they have been completed.
	messages := &message.Message{}
	messages_done := make(chan *message.Message)
	messages.RetChan = messages_done

	// Create 100 clients
	for i := 0; i < 100; i++ {
		wgIo.Add(1)
		go func() {
			defer wgIo.Done()
			// Maximum "disk" size is 10 times bigger than the cache
			z := zipf.NewZipfWorkload(uint64(actual_blocks)*10, 60)
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			// Each client to send 5k IOs
			for io := 0; io < 5000; io++ {
				var msg *message.Message
				offset, isread := z.ZipfGenerate()

				if isread {
					msg = message.NewMsgGet()
				} else {
					// On a write the client would first
					// invalidate the block, write the data to the
					// storage device, then place it in the cache
					iopkt := &message.IoPkt{
						Address: offset,
						Blocks:  1,
					}
					c.Invalidate(iopkt)

					// Simulate waiting for storage device to write data
					time.Sleep(time.Microsecond * time.Duration(r.Intn(100)))

					// Now, we can do a put
					msg = message.NewMsgPut()
				}

				messages.Add(msg)
				iopkt := msg.IoPkt()
				iopkt.Buffer = make([]byte, blocksize)
				iopkt.Address = offset
				msg.RetChan = returnch

				msg.TimeStart()

				// Write the offset into the buffer so that we can
				// check it on reads.
				if !isread {
					bio := bufferio.NewBufferIO(iopkt.Buffer)
					bio.WriteDataLE(offset)
					c.Put(msg)
				} else {
					_, err := c.Get(msg)
					if err != nil {
						msg.Err = err
						msg.Done()
					}
				}

				// Simulate waiting for more work by sleeping
				time.Sleep(time.Microsecond * time.Duration(r.Intn(100)))
			}
		}()
	}

	// Wait for all clients to finish
	wgIo.Wait()

	// Wait for all messages to finish
	messages.Done()
	<-messages_done

	// Print stats
	fmt.Print(c)
	fmt.Print(log)

	// Close cache and log
	c.Close()
	log.Close()

	stats := log.Stats()
	Assert(t, stats.Seg_skipped == 0)

	// Send receiver a message that all clients have shut down
	close(returnch)

	// Wait for receiver to finish emptying its channel
	wgRet.Wait()
}
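A note on the write path above: each write stamps the block's offset into its buffer via bufferio.WriteDataLE, so a later read can detect wrong or stale data coming back from the cache. That helper is not part of this listing; the stdlib-only sketch below reproduces the idea, assuming a little-endian uint64 encoding (writeOffset and verifyOffset are hypothetical names, not the bufferio API).

package main

import (
	"encoding/binary"
	"fmt"
)

// writeOffset stamps a block's address into the head of its buffer,
// little-endian. Hypothetical stand-in for how bufferio.WriteDataLE is
// used in the test above, not the actual implementation.
func writeOffset(buf []byte, offset uint64) {
	binary.LittleEndian.PutUint64(buf, offset)
}

// verifyOffset reports whether a block read back from the cache still
// carries the address it was written with. (Hypothetical helper.)
func verifyOffset(buf []byte, offset uint64) bool {
	return binary.LittleEndian.Uint64(buf) == offset
}

func main() {
	buf := make([]byte, 4096)
	writeOffset(buf, 1234)
	fmt.Println(verifyOffset(buf, 1234)) // true
	fmt.Println(verifyOffset(buf, 99))   // false
}

In the test itself, the read-side check would presumably live in response_handler, which receives every completed message on returnch.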
Example #2 — TestCacheMapConcurrency: 100 clients issue random gets and puts against a small CacheMap to shake out races, while a response handler drains completions until the quit channel closes.
func TestCacheMapConcurrency(t *testing.T) {
	var wgIo, wgRet sync.WaitGroup

	nc := message.NewNullTerminator()
	nc.Start()
	defer nc.Close()

	c := NewCacheMap(300, 4096, nc.In)

	// Start up response server
	returnch := make(chan *message.Message, 100)
	quit := make(chan struct{})
	wgRet.Add(1)
	go response_handler(&wgRet, quit, returnch)

	// Create 100 clients
	for i := 0; i < 100; i++ {
		wgIo.Add(1)
		go func() {
			defer wgIo.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			// Each client to send 1k IOs
			for io := 0; io < 1000; io++ {
				var msg *message.Message
				switch r.Intn(2) {
				case 0:
					msg = message.NewMsgGet()
				case 1:
					msg = message.NewMsgPut()
				}
				iopkt := msg.IoPkt()
				iopkt.Buffer = make([]byte, 4096)

				// Maximum "disk" size is 10 times bigger than cache
				iopkt.Address = uint64(r.Int63n(3000))
				msg.RetChan = returnch

				// Send request
				msg.TimeStart()

				switch msg.Type {
				case message.MsgGet:
					// Match Example #1: if the Get fails, mark the message
					// done so its completion still reaches the handler.
					if _, err := c.Get(msg); err != nil {
						msg.Err = err
						msg.Done()
					}
				case message.MsgPut:
					c.Invalidate(iopkt)
					c.Put(msg)
				}

				// Simulate waiting for more work by sleeping
				// anywhere from 100usecs to 10ms
				time.Sleep(time.Microsecond * time.Duration(r.Intn(10000)+100))
			}
		}()
	}

	// Wait for all clients to finish
	wgIo.Wait()

	// Send receiver a message that all clients have shut down
	fmt.Print(c)
	c.Close()
	close(quit)

	// Wait for receiver to finish emptying its channel
	wgRet.Wait()
}
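Both examples delegate completion accounting to a response_handler goroutine that this listing does not include. The sketch below shows one way the quit-channel variant from Example #2 could work, using only the standard library; the msg type and the counters are illustrative assumptions, not the real helper.

package main

import (
	"fmt"
	"sync"
)

// msg models only what the handler needs from *message.Message.
// (Hypothetical type for illustration.)
type msg struct {
	err error
}

// responseHandler drains returnch until quit is closed, then empties
// whatever is still buffered before returning, so no completion is lost.
// A sketch of the role response_handler plays above, not the real helper.
func responseHandler(wg *sync.WaitGroup, quit <-chan struct{}, returnch <-chan *msg) {
	defer wg.Done()
	ios, errs := 0, 0
	defer func() { fmt.Printf("completed=%d errors=%d\n", ios, errs) }()
	for {
		select {
		case m := <-returnch:
			ios++
			if m.err != nil {
				errs++
			}
		case <-quit:
			// quit is closed; empty anything still buffered, then exit.
			for {
				select {
				case m := <-returnch:
					ios++
					if m.err != nil {
						errs++
					}
				default:
					return
				}
			}
		}
	}
}

func main() {
	var wg sync.WaitGroup
	returnch := make(chan *msg, 100)
	quit := make(chan struct{})

	wg.Add(1)
	go responseHandler(&wg, quit, returnch)

	for i := 0; i < 10; i++ {
		returnch <- &msg{}
	}
	close(quit) // signal: all clients have shut down
	wg.Wait()   // handler has finished emptying its channel
}

Draining returnch once more after quit closes is what lets wgRet.Wait() in the tests mean "the receiver finished emptying its channel."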