func write(fp io.WriterAt,
	c *cache.CacheMap,
	devid, offset, blocks uint32,
	buffer []byte) {

	here := make(chan *message.Message, blocks)
	cacheoffset := cache.Address64(cache.Address{Devid: devid, Block: offset})

	// Send invalidates for each block
	iopkt := &message.IoPkt{
		Address: cacheoffset,
		Blocks:  blocks,
	}
	c.Invalidate(iopkt)

	// Write to storage back end
	// :TODO: check return status
	fp.WriteAt(buffer, int64(offset)*4*KB)

	// Now write to cache
	msg := message.NewMsgPut()
	msg.RetChan = here

	iopkt = msg.IoPkt()
	iopkt.Blocks = blocks
	iopkt.Address = cacheoffset
	iopkt.Buffer = buffer
	c.Put(msg)

	<-here
}
func readandstore(fp io.ReaderAt,
	c *cache.CacheMap,
	devid, offset, blocks uint32,
	buffer []byte,
	retchan chan *message.Message) {

	fp.ReadAt(buffer, int64(offset)*4*KB)

	m := message.NewMsgPut()
	m.RetChan = retchan
	io := m.IoPkt()
	io.Address = cache.Address64(cache.Address{Devid: devid, Block: offset})
	io.Buffer = buffer
	io.Blocks = blocks

	c.Put(m)
}
func read(fp io.ReaderAt,
	c *cache.CacheMap,
	devid, offset, blocks uint32,
	buffer []byte) {

	godbc.Require(len(buffer)%(4*KB) == 0)

	here := make(chan *message.Message, blocks)
	cacheoffset := cache.Address64(cache.Address{Devid: devid, Block: offset})
	msg := message.NewMsgGet()
	msg.RetChan = here

	iopkt := msg.IoPkt()
	iopkt.Buffer = buffer
	iopkt.Address = cacheoffset
	iopkt.Blocks = blocks

	msgs := 0
	hitpkt, err := c.Get(msg)
	if err != nil {
		//fmt.Printf("|blocks:%d::hits:0--", blocks)
		// None found
		// Read the whole thing from backend
		fp.ReadAt(buffer, int64(offset)*4*KB)

		m := message.NewMsgPut()
		m.RetChan = here

		io := m.IoPkt()
		io.Address = cacheoffset
		io.Buffer = buffer
		io.Blocks = blocks
		c.Put(m)
		msgs++

	} else if hitpkt.Hits != blocks {
		//fmt.Printf("|******blocks:%d::hits:%d--", blocks, hitpkt.Hits)
		// Read from storage the blocks that were not
		// found in the hit map.
		var be_offset, be_block, be_blocks uint32
		var be_read_ready = false

		for block := uint32(0); block < blocks; block++ {
			if !hitpkt.Hitmap[int(block)] {
				if be_read_ready {
					be_blocks++
				} else {
					be_read_ready = true
					be_offset = offset + block
					be_block = block
					be_blocks++
				}
			} else {
				if be_read_ready {
					// Send read
					msgs++
					go readandstore(fp, c, devid,
						be_offset,
						be_blocks,
						cache.SubBlockBuffer(buffer, 4*KB, be_block, be_blocks),
						here)

					be_read_ready = false
					be_blocks = 0
					be_offset = 0
					be_block = 0
				}
			}
		}

		if be_read_ready {
			msgs++
			go readandstore(fp, c, devid,
				be_offset,
				be_blocks,
				cache.SubBlockBuffer(buffer, 4*KB, be_block, be_blocks),
				here)
		}

	} else {
		msgs = 1
	}

	// Wait for blocks to be returned
	for msg := range here {

		msgs--
		godbc.Check(msg.Err == nil, msg)
		godbc.Check(msgs >= 0, msgs)

		if msgs == 0 {
			return
		}
	}
}
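// Illustrative only (not from the original source): a minimal sketch of how
// the write and read helpers above might be driven for a single 32 KB
// transfer. It assumes an *os.File opened on the backing store (os.File
// satisfies both io.ReaderAt and io.WriterAt), a cache created as in main()
// below, and the package constant KB; the function name exampleReadWrite and
// the parameter values are hypothetical.
func exampleReadWrite(fp *os.File, c *cache.CacheMap) {
	devid := uint32(0)   // device id used to form the cache address
	offset := uint32(64) // starting block on the device
	blocks := uint32(8)  // 8 blocks * 4 KB = 32 KB

	// read() requires the buffer length to be a multiple of 4 KB
	buffer := make([]byte, blocks*4*KB)

	// write() invalidates the cached blocks, writes to the back end,
	// then repopulates the cache with the new data.
	write(fp, c, devid, offset, blocks, buffer)

	// read() serves cache hits directly and fills the missing block
	// ranges from the back end via readandstore().
	read(fp, c, devid, offset, blocks, buffer)
}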
func main() {
	// Gather command line arguments
	flag.Parse()

	// According to spc.h, this needs to be set to
	// contexts = (b+99)/100 for SPC workload generator
	// conformance
	contexts = int((bsu + 99) / 100)

	if asu1 == "" ||
		asu2 == "" ||
		asu3 == "" {
		fmt.Print("ASU files must be set\n")
		return
	}

	// Open stats file
	fp, err := os.Create(pbliodata)
	if err != nil {
		fmt.Print(err)
		return
	}
	metrics := bufio.NewWriter(fp)
	defer fp.Close()

	// Setup number of blocks
	blocksize_bytes := uint32(blocksize * KB)

	// Open cache
	var c *cache.CacheMap
	var log *cache.Log
	var logblocks uint32

	// Show banner
	fmt.Println("-----")
	fmt.Println("pblio")
	fmt.Println("-----")

	// Determine if we need to use the cache
	if cachefilename != "" {
		// Create log
		log, logblocks, err = cache.NewLog(cachefilename,
			blocksize_bytes,
			(512*KB)/blocksize_bytes,
			0,    // buffer cache has been removed for now
			true, // Use DirectIO to SSD
		)
		if err != nil {
			fmt.Println(err)
			return
		}

		// Connect cache metadata with log
		c = cache.NewCacheMap(logblocks, blocksize_bytes, log.Msgchan)
		cache_state := "New"
		if _, err = os.Stat(cachesavefile); err == nil {
			err = c.Load(cachesavefile, log)
			if err != nil {
				fmt.Printf("Unable to load metadata: %s", err)
				return
			}
			cache_state = "Loaded"
		}

		// Start log goroutines
		log.Start()

		// Print banner
		fmt.Printf("Cache : %s (%s)\n"+
			"C Size : %.2f GB\n",
			cachefilename,
			cache_state,
			float64(logblocks*blocksize_bytes)/GB)
	} else {
		fmt.Println("Cache : None")
	}

	// Initialize spc1info
	spcinfo := spc.NewSpcInfo(c, usedirectio, blocksize)

	// Open asus
	for _, v := range strings.Split(asu1, ",") {
		err = spcinfo.Open(1, v)
		if err != nil {
			fmt.Print(err)
			return
		}
	}
	for _, v := range strings.Split(asu2, ",") {
		err = spcinfo.Open(2, v)
		if err != nil {
			fmt.Print(err)
			return
		}
	}
	for _, v := range strings.Split(asu3, ",") {
		err = spcinfo.Open(3, v)
		if err != nil {
			fmt.Print(err)
			return
		}
	}
	defer spcinfo.Close()

	// Start cpu profiling
	if cpuprofile {
		f, _ := os.Create("cpuprofile")
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	// Initialize Spc1 workload
	err = spcinfo.Spc1Init(bsu, contexts)
	if err != nil {
		fmt.Print(err)
		return
	}

	// This channel will be used for the io to return
	// the latency
	iotime := make(chan *spc.IoStats, 1024)

	// Before starting, let's print out the sizes
	// and test information
	fmt.Printf("ASU1 : %.2f GB\n"+
		"ASU2 : %.2f GB\n"+
		"ASU3 : %.2f GB\n"+
		"BSUs : %v\n"+
		"Contexts: %v\n"+
		"Run time: %v s\n",
		spcinfo.Size(1),
		spcinfo.Size(2),
		spcinfo.Size(3),
		bsu,
		contexts,
		runlen)
	fmt.Println("-----")

	// Shutdown on signal
	quit := make(chan struct{})
	signalch := make(chan os.Signal, 1)
	signal.Notify(signalch, os.Interrupt)
	go func() {
		select {
		case <-signalch:
			close(quit)
			return
		}
	}()

	// Spawn contexts coroutines
	var wg sync.WaitGroup
	for context := 0; context < contexts; context++ {
		wg.Add(1)
		go spcinfo.Context(&wg, iotime, quit, runlen, context)
	}

	// Used to collect all the stats
	spcstats := spc.NewSpcStats()
	prev_spcstats := spcstats.Copy()
	pbliostats := &PblioStats{}
	pbliostats.Spc = spcstats

	// This goroutine will be used to collect the data
	// from the io routines and print out to the console
	// every few seconds
	var outputwg sync.WaitGroup
	outputwg.Add(1)
	go func() {
		defer outputwg.Done()

		start := time.Now()
		totaltime := start
		totalios := uint64(0)
		print_iops := time.After(time.Second * time.Duration(dataperiod))

		for iostat := range iotime {
			// Save stats
			spcstats.Collect(iostat)
			totalios += 1

			// Do this every few seconds
			select {
			case <-print_iops:
				end := time.Now()
				ios := spcstats.IosDelta(prev_spcstats)
				iops := float64(ios) / end.Sub(start).Seconds()
				fmt.Printf("ios:%v IOPS:%.2f Latency:%.4f ms"+
					" \r",
					ios, iops,
					spcstats.MeanLatencyDeltaUsecs(prev_spcstats)/1000)

				// Get stats from the cache
				if c != nil {
					pbliostats.Cache = c.Stats()
					pbliostats.Log = log.Stats()
				}

				// Save stats
				pbliostats.Timestamp = time.Now().Unix()
				jsonstats, err := json.Marshal(pbliostats)
				if err != nil {
					fmt.Println(err)
				} else {
					metrics.WriteString(string(jsonstats) + "\n")
				}

				// Reset counters
				start = time.Now()
				prev_spcstats = spcstats.Copy()

				// Set the timer for the next time
				print_iops = time.After(time.Second * time.Duration(dataperiod))
			default:
			}
		}

		end := time.Now()
		iops := float64(totalios) / end.Sub(totaltime).Seconds()

		// Print final info
		fmt.Printf("Avg IOPS:%.2f Avg Latency:%.4f ms"+
			" \n",
			iops,
			spcstats.MeanLatencyUsecs()/1000)
		fmt.Print("\n")
	}()

	// Wait here for all the context goroutines to finish
	wg.Wait()

	// Now we can close the output goroutine
	close(iotime)
	outputwg.Wait()

	// Print cache stats
	if c != nil {
		c.Close()
		log.Close()
		err = c.Save(cachesavefile, log)
		if err != nil {
			fmt.Printf("Unable to save metadata: %s\n", err)
			os.Remove(cachesavefile)
		}
		fmt.Print(c)
		fmt.Print(log)
	}
	metrics.Flush()
}
func cacheio(t *testing.T, c *cache.CacheMap, log *cache.Log,
	actual_blocks, blocksize uint32) {

	var wgIo, wgRet sync.WaitGroup

	// Start up response server
	returnch := make(chan *message.Message, 100)
	wgRet.Add(1)
	go response_handler(t, &wgRet, returnch)

	// Create a parent message for all messages to notify
	// when they have been completed.
	messages := &message.Message{}
	messages_done := make(chan *message.Message)
	messages.RetChan = messages_done

	// Create 100 clients
	for i := 0; i < 100; i++ {
		wgIo.Add(1)
		go func() {
			defer wgIo.Done()

			z := zipf.NewZipfWorkload(uint64(actual_blocks)*10, 60)
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			// Each client to send 5k IOs
			for io := 0; io < 5000; io++ {
				var msg *message.Message
				offset, isread := z.ZipfGenerate()

				if isread {
					msg = message.NewMsgGet()
				} else {
					// On a write the client would first
					// invalidate the block, write the data to the
					// storage device, then place it in the cache
					iopkt := &message.IoPkt{
						Address: offset,
						Blocks:  1,
					}
					c.Invalidate(iopkt)

					// Simulate waiting for storage device to write data
					time.Sleep(time.Microsecond * time.Duration((r.Intn(100))))

					// Now, we can do a put
					msg = message.NewMsgPut()
				}

				messages.Add(msg)
				iopkt := msg.IoPkt()
				iopkt.Buffer = make([]byte, blocksize)
				iopkt.Address = offset
				msg.RetChan = returnch

				msg.TimeStart()

				// Write the offset into the buffer so that we can
				// check it on reads.
				if !isread {
					bio := bufferio.NewBufferIO(iopkt.Buffer)
					bio.WriteDataLE(offset)
					c.Put(msg)
				} else {
					_, err := c.Get(msg)
					if err != nil {
						msg.Err = err
						msg.Done()
					}
				}

				// Maximum "disk" size is 10 times bigger than cache

				// Send request

				// Simulate waiting for more work by sleeping
				time.Sleep(time.Microsecond * time.Duration((r.Intn(100))))
			}
		}()
	}

	// Wait for all clients to finish
	wgIo.Wait()

	// Wait for all messages to finish
	messages.Done()
	<-messages_done

	// Print stats
	fmt.Print(c)
	fmt.Print(log)

	// Close cache and log
	c.Close()
	log.Close()

	stats := log.Stats()
	Assert(t, stats.Seg_skipped == 0)

	// Send receiver a message that all clients have shut down
	close(returnch)

	// Wait for receiver to finish emptying its channel
	wgRet.Wait()
}
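// Illustrative only (not from the original source): a hypothetical test that
// drives cacheio() above with a small log-backed cache, reusing the same
// cache.NewLog / cache.NewCacheMap call pattern shown in main(). The test
// name, the "cache.test" file name, and the sizes are assumptions, and the
// sketch assumes the surrounding test file's imports plus os.
func TestCacheIoExample(t *testing.T) {
	blocksize := uint32(4 * KB)

	// Create a log file to back the cache
	log, logblocks, err := cache.NewLog("cache.test",
		blocksize,
		(512*KB)/blocksize,
		0,     // buffer cache has been removed for now
		false, // no DirectIO needed for a plain test file
	)
	Assert(t, err == nil)
	defer os.Remove("cache.test")

	// Connect cache metadata with log and start the log goroutines
	c := cache.NewCacheMap(logblocks, blocksize, log.Msgchan)
	log.Start()

	// cacheio() closes both the cache and the log when it finishes
	cacheio(t, c, log, logblocks, blocksize)
}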