Beispiel #1
0
// worker performs numops randomized set operations against the server at
// f.Host:f.Port using the given protocol. On a set error it logs, drops the
// connection, and reconnects. It signals wg when finished.
func worker(numops int, prot common.Prot, wg *sync.WaitGroup) {
	defer wg.Done()

	conn, err := common.Connect(f.Host, f.Port)
	if err != nil {
		panic(err.Error())
	}
	// Close whichever connection is current when the worker returns. A
	// closure is required because conn is reassigned on reconnect; a plain
	// `defer conn.Close()` would capture the original connection only.
	defer func() { conn.Close() }()

	rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	for i := 0; i < numops; i++ {
		// opCount is shared across workers; log progress every 10k ops.
		curOpCount := atomic.AddUint64(opCount, 1)
		if curOpCount%10000 == 0 {
			log.Println(curOpCount)
		}

		// Generate key anew each time
		key := common.RandData(r, f.KeyLength, false)

		// Value size between 5k and 20k
		valLen := r.Intn(15*1024) + 5*1024
		value := common.RandData(nil, valLen, true)

		// continue on even if there's errors here; a failed set just
		// triggers a reconnect and the loop keeps going.
		if err := prot.Set(rw, key, value); err != nil {
			log.Println("Error during set:", err.Error())
			conn.Close()

			conn, err = common.Connect(f.Host, f.Port)
			if err != nil {
				panic(err.Error())
			}

			rw = bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
		}
	}
}
Beispiel #2
0
// main drives f.NumWorkers goroutines that each set and then get back values
// of sizes 0..100KiB against a server on localhost:f.Port, exercising the
// selected protocol (binary or text).
func main() {
	// Select the wire protocol implementation based on flags.
	var prot common.Prot
	if f.Binary {
		var b binprot.BinProt
		prot = b
	} else {
		var t textprot.TextProt
		prot = t
	}

	wg := &sync.WaitGroup{}
	wg.Add(f.NumWorkers)

	for i := 0; i < f.NumWorkers; i++ {
		go func(prot common.Prot, wg *sync.WaitGroup) {
			defer wg.Done()

			conn, err := common.Connect("localhost", f.Port)
			if err != nil {
				// Include the underlying error so connect failures are
				// diagnosable instead of the bare "Couldn't connect".
				panic("Couldn't connect: " + err.Error())
			}
			defer conn.Close()

			r := rand.New(rand.NewSource(common.RandSeed()))
			rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))

			// 0 to 100k data
			for i := 0; i < 102400; i++ {
				key := common.RandData(r, f.KeyLength, false)
				value := common.RandData(nil, i, true)

				// NOTE(review): Set/Get errors are deliberately ignored so
				// the loop keeps driving traffic regardless of individual
				// failures — confirm this best-effort behavior is intended.
				prot.Set(rw, key, value)
				prot.Get(rw, key)
			}

			fmt.Println("Done.")
		}(prot, wg)
	}

	wg.Wait()
}
Beispiel #3
0
// main pre-generates keys into one buffered channel per worker, spawns
// f.NumWorkers workers each with its own server connection, and logs the
// total communication time once every worker has drained its channel.
func main() {
	var prot common.Prot
	if f.Binary {
		prot = binprot.BinProt{}
	} else {
		prot = textprot.TextProt{}
	}

	wg := &sync.WaitGroup{}
	wg.Add(f.NumWorkers)

	// Split f.NumOps keys as evenly as possible across the worker channels;
	// each channel is buffered to hold its full share up front.
	keysPerWorker := int(math.Ceil(float64(f.NumOps) / float64(f.NumWorkers)))
	chans := make([]chan []byte, f.NumWorkers)
	for i := 0; i < len(chans); i++ {
		chans[i] = make(chan []byte, keysPerWorker)
	}
	fillKeys(chans)

	log.Println("Done generating keys")

	start := time.Now()
	// One connection per worker goroutine.
	for i := 0; i < f.NumWorkers; i++ {
		conn, err := common.Connect(f.Host, f.Port)
		if err != nil {
			panic(err.Error())
		}
		rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
		go worker(prot, rw, chans[i], wg)
	}

	wg.Wait()

	log.Println("Total comm time:", time.Since(start))
}
Beispiel #4
0
// main generates a randomized mix of cache commands, pushes them through
// f.NumWorkers server connections, and prints per-operation latency
// statistics (hits and misses) once all traffic has completed. Optionally
// writes a CPU profile when f.Pprof is set.
func main() {
	if f.Pprof != "" {
		// Named pf (not f) so the package-level flag struct f is not
		// shadowed inside this block — the original shadowing was a hazard.
		pf, err := os.Create(f.Pprof)
		if err != nil {
			panic(err.Error())
		}
		// StartCPUProfile can fail (e.g. profiling already active); don't
		// silently drop the error.
		if err := pprof.StartCPUProfile(pf); err != nil {
			panic(err.Error())
		}
		defer pprof.StopCPUProfile()
	}

	var prot common.Prot
	var numCmds int
	var usedCmds string
	var protString string

	// The binary protocol supports one extra command (get-and-touch).
	if f.Binary {
		var b binprot.BinProt
		prot = b
		numCmds = 10
		usedCmds = "get, batch get, get and touch, set, add, replace, append, prepend, touch, delete"
		protString = "binary"
	} else {
		var t textprot.TextProt
		prot = t
		numCmds = 9
		usedCmds = "get, batch get, set, add, replace, append, prepend, touch, delete"
		protString = "text"
	}

	fmt.Printf("Performing %v operations total with:\n"+
		"\t%v communication goroutines\n"+
		"\tcommands %v\n"+
		"\tover the %v protocol\n\n",
		f.NumOps, f.NumWorkers, usedCmds, protString)

	tasks := make(chan *common.Task)
	taskGens := new(sync.WaitGroup)
	comms := new(sync.WaitGroup)

	// TODO: Better math
	opsPerTask := f.NumOps / numCmds / f.NumWorkers

	// HUGE channel so the comm threads never block
	metrics := make(chan metric, f.NumOps)

	// spawn task generators, one per command per worker
	for i := 0; i < f.NumWorkers; i++ {
		taskGens.Add(numCmds)
		go cmdGenerator(tasks, taskGens, opsPerTask, common.Set)
		go cmdGenerator(tasks, taskGens, opsPerTask, common.Add)
		go cmdGenerator(tasks, taskGens, opsPerTask, common.Replace)
		go cmdGenerator(tasks, taskGens, opsPerTask, common.Append)
		go cmdGenerator(tasks, taskGens, opsPerTask, common.Prepend)
		go cmdGenerator(tasks, taskGens, opsPerTask, common.Get)
		go cmdGenerator(tasks, taskGens, opsPerTask, common.Bget)
		go cmdGenerator(tasks, taskGens, opsPerTask, common.Delete)
		go cmdGenerator(tasks, taskGens, opsPerTask, common.Touch)

		// Get-and-touch only exists in the binary protocol.
		if f.Binary {
			go cmdGenerator(tasks, taskGens, opsPerTask, common.Gat)
		}
	}

	// spawn communicators
	for i := 0; i < f.NumWorkers; i++ {
		comms.Add(1)
		conn, err := common.Connect(f.Host, f.Port)

		if err != nil {
			// NOTE(review): this retries the same worker slot forever with
			// no backoff; if the server is unreachable the loop spins
			// indefinitely — confirm that is intended.
			i--
			comms.Add(-1)
			continue
		}

		go communicator(prot, conn, tasks, metrics, comms)
	}

	// Consolidate metrics in the background: bucket timings by operation and
	// hit/miss, then print min/max/avg/percentiles plus a histogram.
	summaries := &sync.WaitGroup{}
	summaries.Add(1)
	go func() {
		defer summaries.Done()

		hits := make(map[common.Op][]int)
		misses := make(map[common.Op][]int)

		for m := range metrics {
			// append on a missing map key starts from a nil slice, so no
			// explicit existence check is needed.
			if m.miss {
				misses[m.op] = append(misses[m.op], int(m.d))
			} else {
				hits[m.op] = append(hits[m.op], int(m.d))
			}

			metricPool.Put(m)
		}

		for _, op := range common.AllOps {
			// The text protocol has no get-and-touch, so skip its bucket.
			if f.Text && op == common.Gat {
				continue
			}

			printTimings(op.String()+" hits", hits[op])

			times := misses[op]
			if len(times) == 0 {
				fmt.Printf("\nNo %s misses\n\n", op.String())
				continue
			}

			printTimings(op.String()+" misses", times)
		}
	}()

	// First wait for all the tasks to be generated,
	// then close the channel so the comm threads complete
	fmt.Println("Waiting for taskGens.")
	taskGens.Wait()

	fmt.Println("Task gens done.")
	close(tasks)

	fmt.Println("Tasks closed, waiting on comms.")
	comms.Wait()

	fmt.Println("Comms done.")
	close(metrics)

	summaries.Wait()
}

// printTimings sorts times in place, then prints summary statistics
// (min/max/avg and percentiles, in ms) followed by a histogram, under the
// given label.
func printTimings(label string, times []int) {
	sort.Ints(times)
	s := stats.Get(times)

	fmt.Println()
	fmt.Printf("%s (n = %d)\n", label, len(times))
	fmt.Printf("Min: %fms\n", s.Min)
	fmt.Printf("Max: %fms\n", s.Max)
	fmt.Printf("Avg: %fms\n", s.Avg)
	fmt.Printf("p50: %fms\n", s.P50)
	fmt.Printf("p75: %fms\n", s.P75)
	fmt.Printf("p90: %fms\n", s.P90)
	fmt.Printf("p95: %fms\n", s.P95)
	fmt.Printf("p99: %fms\n", s.P99)
	fmt.Println()

	stats.PrintHist(times)
}