func writeBatch(db storage.Engine, c *Config) int {
	ws := c.MakeBatch()
	if err := db.BatchPut(ws); err != nil {
		panic(err)
	}
	return len(ws)
}
// queryAndDelete scans every series, deletes the middle half of its points
// (from roughly the 25th to the 75th percentile), then compacts the database,
// timing both the deletes and the compaction.
func queryAndDelete(db storage.Engine, points, series int) {
	// Per-series indexes of the first and last point to delete.
	startCount := points / series / 4
	endCount := points * 3 / series / 4
	total := 0
	var d time.Duration
	for series--; series >= 0; series-- {
		count := 0
		var delStart []byte
		var delEnd []byte
		// Query the series to find the keys bounding the deletion range.
		query(db, int64(series), func(itr storage.Iterator) {
			count++
			if count == startCount {
				delStart = itr.Key()
			}
			if count == endCount-1 {
				delEnd = itr.Key()
				total += endCount - startCount
			}
		})
		start := time.Now()
		if err := db.Del(delStart, delEnd); err != nil {
			panic(err)
		}
		d += time.Since(start)
	}
	fmt.Printf("Took %s to delete %d points\n", d, total)

	start := time.Now()
	db.Compact()
	fmt.Printf("Took %s to compact\n", time.Since(start))
}
// benchmarkDbCommon runs the shared benchmark sequence against an engine:
// bulk writes, queries, a delete pass followed by compaction, more queries,
// and a second round of writes, reporting timings and on-disk size as it goes.
func benchmarkDbCommon(db storage.Engine, c Config) {
	fmt.Printf("################ Benchmarking: %s\n", db.Name())

	start := time.Now()
	count := benchmarkWrites(db, &c, c.points)
	d := time.Since(start)
	fmt.Printf("Writing %d points in batches of %d points took %s (%f microseconds per point)\n",
		count, c.batch, d, float64(d.Nanoseconds())/1000.0/float64(count),
	)

	timeQuerying(db, c.series)
	fmt.Printf("Size: %s\n", getSize(db.Path()))

	queryAndDelete(db, c.points, c.series)

	timeQuerying(db, c.series)
	fmt.Printf("Size: %s\n", getSize(db.Path()))

	// Write another half of the original point count to measure write
	// performance after deletes and compaction.
	start = time.Now()
	count = benchmarkWrites(db, &c, c.points/2)
	d = time.Since(start)
	fmt.Printf("Writing %d points in batches of %d points took %s (%f microseconds per point)\n",
		count, c.batch, d, float64(d.Nanoseconds())/1000.0/float64(count),
	)
	fmt.Printf("Size: %s\n", getSize(db.Path()))
}
// benchmarkWrites writes approximately `points` points to the engine using
// c.threads concurrent writers. A single producer goroutine generates batches
// and hands them out round-robin over one channel per writer; the number of
// points actually generated is returned.
func benchmarkWrites(db storage.Engine, c *Config, points int) int {
	writesChan := make([]chan []storage.Write, c.threads)
	for i := range writesChan {
		writesChan[i] = make(chan []storage.Write, 1)
	}

	count := 0
	p := points
	go func() {
		// Close every channel once the producer is done so the writers exit.
		defer func() {
			for _, ch := range writesChan {
				close(ch)
			}
		}()
		for {
			for _, ch := range writesChan {
				w := c.MakeBatch()
				ch <- w
				l := len(w)
				count += l
				p -= l
				if p <= 0 {
					return
				}
			}
		}
	}()

	wg := sync.WaitGroup{}
	for i := 0; i < c.threads; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			for w := range writesChan[idx] {
				if err := db.BatchPut(w); err != nil {
					panic(err)
				}
				fmt.Print(".")
			}
		}(i)
	}
	wg.Wait()
	fmt.Printf("\n")
	return count
}
// query iterates over every key belonging to series s, calling yield for each
// entry. The range is bounded by two composed big-endian keys: [s, 0, 0] is
// the smallest possible key for the series and [s, -1, -1] the largest, since
// int64(-1) encodes as all 0xff bytes and therefore sorts last.
func query(db storage.Engine, s int64, yield func(storage.Iterator)) {
	sb := bytes.NewBuffer(nil)
	binary.Write(sb, binary.BigEndian, s)
	binary.Write(sb, binary.BigEndian, int64(0))
	binary.Write(sb, binary.BigEndian, int64(0))

	eb := bytes.NewBuffer(nil)
	binary.Write(eb, binary.BigEndian, s)
	binary.Write(eb, binary.BigEndian, int64(-1))
	binary.Write(eb, binary.BigEndian, int64(-1))

	itr := db.Iterator()
	defer itr.Close()

	for itr.Seek(sb.Bytes()); itr.Valid(); itr.Next() {
		key := itr.Key()
		if bytes.Compare(key, eb.Bytes()) > 0 {
			break
		}
		yield(itr)
	}
}
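// encodeSeriesKey is not part of the original benchmark; it is a minimal
// illustrative sketch of the 24-byte key layout that query assumes: three
// big-endian int64 fields, the first being the series id. The names given to
// the remaining two fields (timestamp and sequence number) are assumptions
// based on how the keys are ordered and scanned above.
func encodeSeriesKey(series, timestamp, sequence int64) []byte {
	b := bytes.NewBuffer(nil)
	binary.Write(b, binary.BigEndian, series)    // groups all keys of one series together
	binary.Write(b, binary.BigEndian, timestamp) // orders points within the series
	binary.Write(b, binary.BigEndian, sequence)  // breaks ties between points with equal timestamps
	return b.Bytes()
}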