// ZipfRandom emits a Zipf-distributed random number // notation follows the wikipedia page http://en.wikipedia.org/wiki/Zipf%E2%80%93Mandelbrot_law not the golang Zipf parameters func ZipfRandom() Spec { return Spec{ Name: "Zipf", Inputs: []Pin{ Pin{"q", NUMBER}, Pin{"s", NUMBER}, Pin{"N", NUMBER}}, Outputs: []Pin{Pin{"draw", NUMBER}}, Kernel: func(in, out, internal MessageMap, ss Source, i chan Interrupt) Interrupt { q, ok := in[0].(float64) if !ok { out[0] = NewError("q must be a number") return nil } s, ok := in[1].(float64) if !ok { out[0] = NewError("s must be a number") return nil } N, ok := in[2].(float64) if !ok { out[0] = NewError("N must be a number") return nil } z := rand.NewZipf(RAND, s, q, uint64(N)) out[0] = z.Uint64() return nil }, } }
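// A minimal sketch (not part of the kernel above) of how the Wikipedia Zipf-Mandelbrot
// parameters map onto Go's rand.NewZipf(r, s, v, imax): Go draws k in [0, imax] with
// P(k) proportional to 1/(v+k)^s, so the wiki exponent s is Go's s, the wiki offset q is
// Go's v, and N is passed as imax. The constants below are illustrative only.
package main

import (
	"fmt"
	"math"
	"math/rand"
)

func main() {
	const (
		q     = 5.0   // wiki offset, passed as Go's v
		s     = 2.0   // wiki exponent, passed as Go's s
		N     = 100.0 // passed as imax, so draws fall in [0, N]
		draws = 1000000
	)
	r := rand.New(rand.NewSource(1))
	z := rand.NewZipf(r, s, q, uint64(N)) // same argument order as the kernel above

	counts := make([]int, int(N)+1)
	for i := 0; i < draws; i++ {
		counts[z.Uint64()]++
	}

	// Compare empirical frequencies against P(k) proportional to 1/(q+k)^s.
	var norm float64
	for k := 0; k <= int(N); k++ {
		norm += 1 / math.Pow(q+float64(k), s)
	}
	for k := 0; k < 3; k++ {
		want := 1 / math.Pow(q+float64(k), s) / norm
		got := float64(counts[k]) / draws
		fmt.Printf("k=%d  empirical=%.4f  theoretical=%.4f\n", k, got, want)
	}
}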
func (d *Zipf) Emit() (interface{}, error) { if d.zipf == nil { r := rand.New(rand.NewSource(42)) d.zipf = rand.NewZipf(r, d.S, d.V, d.Imax) } return d.zipf.Uint64(), nil }
func newZipfValues() *zipfValues { r := rand.New(rand.NewSource(rngSeed)) z := rand.NewZipf(r, 1.2, 1, 1024*1024) return &zipfValues{ z: z, } }
func TestSWilk(t *testing.T) { const l = 1000 r := rand.New(rand.NewSource(1)) zr := rand.NewZipf(r, 1.01, 1, 10000) zipf := make([]float64, l) for i := 0; i < l; i++ { zipf[i] = float64(zr.Uint64()) } var w, pw float64 var err error w, pw, err = SWilk(zipf) t.Logf("zipf: w=%f pw=%f err=%v", w, pw, err) // fly wing lengths in mm are normally distributed // via http://www.seattlecentral.edu/qelp/sets/057/057.html var wings = []float64{ 43, 48, 45, 48, 45, 39, 47, 43, 37, 46, 38, 47, 53, 43, 42, 44, 51, 42, 48, 42, 36, 46, 44, 41, 50, 47, 47, 44, 45, 46, 46, 40, 49, 40, 42, 45, 41, 51, 45, 44, 38, 50, 51, 41, 46, 49, 48, 47, 40, 42, 44, 45, 47, 42, 45, 46, 47, 42, 46, 47, 39, 45, 40, 50, 49, 52, 48, 45, 45, 54, 50, 41, 46, 48, 43, 43, 53, 41, 51, 46, 41, 48, 43, 47, 43, 48, 43, 44, 50, 44, 52, 49, 44, 46, 55, 50, 49, 44, 49, 49, } w, pw, err = SWilk(wings) t.Logf("wings: w=%f pw=%f err=%v", w, pw, err) }
// Run is the block's main loop. Here we listen on the different channels we set up. // This is actually the Zipf-Mandelbrot "law". // http://en.wikipedia.org/wiki/Zipf%E2%80%93Mandelbrot_law // The parameter `v` is denoted `q` on wikipedia. func (b *Zipf) Run() { var err error var s, v, imax float64 s = 2.0 v = 5.0 imax = 99.0 r := rand.New(rand.NewSource(12345)) sampler := rand.NewZipf(r, s, v, uint64(imax)) for { select { case ruleI := <-b.inrule: // set a parameter of the block rule, ok := ruleI.(map[string]interface{}) if !ok { b.Error(errors.New("couldn't assert rule to map")) } s, err = util.ParseFloat(rule, "s") if err != nil { b.Error(err) } v, err = util.ParseFloat(rule, "v") if err != nil { b.Error(err) } imax, err = util.ParseFloat(rule, "N") if err != nil { b.Error(err) } sampler = rand.NewZipf(r, s, v, uint64(imax)) case <-b.quit: // quit the block return case <-b.inpoll: // deal with a poll request b.out <- map[string]interface{}{ "sample": float64(sampler.Uint64()), } case c := <-b.queryrule: // deal with a query request c <- map[string]interface{}{ "s": s, "v": v, "N": imax, } } } }
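// Note (an observation, not part of the block above): rand.NewZipf returns nil when
// s <= 1 or v < 1, so a rule that sets, say, s = 1.0 would leave sampler nil and the
// next poll would panic on sampler.Uint64(). A minimal validation helper, sketched with
// hypothetical names, might look like this.
package main

import (
	"errors"
	"fmt"
	"math/rand"
)

// newZipfSampler checks the rule values before handing them to rand.NewZipf,
// which silently returns nil for s <= 1 or v < 1. A negative N would also wrap
// around when converted to uint64, so it is rejected here as well.
func newZipfSampler(r *rand.Rand, s, v, imax float64) (*rand.Zipf, error) {
	if s <= 1 || v < 1 || imax < 0 {
		return nil, errors.New("zipf: need s > 1, v >= 1 and N >= 0")
	}
	return rand.NewZipf(r, s, v, uint64(imax)), nil
}

func main() {
	r := rand.New(rand.NewSource(12345))
	if _, err := newZipfSampler(r, 1.0, 5.0, 99.0); err != nil {
		fmt.Println(err) // s = 1.0 is rejected instead of panicking on a later draw
	}
	z, err := newZipfSampler(r, 2.0, 5.0, 99.0)
	if err != nil {
		panic(err)
	}
	fmt.Println(z.Uint64())
}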
// Benchmark inserting distinct rows in batches where the min and max rows in // separate batches overlap. This stresses the command queue implementation and // verifies that we're allowing parallel execution of commands where possible. func runBenchmarkInsertDistinct(b *testing.B, db *gosql.DB, numUsers int) { if _, err := db.Exec(`DROP TABLE IF EXISTS bench.insert_distinct`); err != nil { b.Fatal(err) } const schema = ` CREATE TABLE bench.insert_distinct ( articleID INT, userID INT, uniqueID INT DEFAULT unique_rowid(), PRIMARY KEY (articleID, userID, uniqueID)) ` if _, err := db.Exec(schema); err != nil { b.Fatal(err) } b.ResetTimer() var wg sync.WaitGroup wg.Add(numUsers) var count int64 for i := 0; i < numUsers; i++ { go func(i int) { defer wg.Done() var buf bytes.Buffer rnd := rand.New(rand.NewSource(int64(i))) // Article IDs are chosen from a zipf distribution. These values select // articleIDs that are mostly <10000. The parameters were experimentally // determined, but somewhat arbitrary. zipf := rand.NewZipf(rnd, 2, 10000, 100000) for { n := atomic.AddInt64(&count, 1) if int(n) >= b.N { return } // Insert between [1,100] articles in a batch. numArticles := 1 + rnd.Intn(100) buf.Reset() buf.WriteString(`INSERT INTO bench.insert_distinct VALUES `) for j := 0; j < numArticles; j++ { if j > 0 { buf.WriteString(", ") } fmt.Fprintf(&buf, "(%d, %d)", zipf.Uint64(), n) } if _, err := db.Exec(buf.String()); err != nil { b.Fatal(err) } } }(i) } wg.Wait() b.StopTimer() }
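// A small hedged check, separate from the benchmark above, of the comment that
// rand.NewZipf(rnd, 2, 10000, 100000) selects article IDs that are mostly below 10000:
// it prints the empirical fraction for one seed; the exact figure is seed-dependent.
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	rnd := rand.New(rand.NewSource(1))
	zipf := rand.NewZipf(rnd, 2, 10000, 100000) // same parameters as the benchmark

	const draws = 1000000
	below := 0
	for i := 0; i < draws; i++ {
		if zipf.Uint64() < 10000 {
			below++
		}
	}
	fmt.Printf("%.1f%% of article IDs are < 10000\n", 100*float64(below)/draws)
}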
func (f *protoFuzzer) fuzzyInt() int64 { i := int64(rand.NewZipf(f.r, 3, 1, 200).Uint64() + 1) if rand.Intn(2) == 0 { i = -i } fmt.Printf("Changing int by %d\n", i) return i }
//makeZipfer: Initialize the stream of random elements for the tests. func makeZipfer(r *rand.Rand) *rand.Zipf { //Make the zipf distribution of random input var s, v float64 var imax uint64 s = 1.2 v = 1.0 imax = 2 << 10 zipfer := rand.NewZipf(r, s, v, imax) return zipfer }
// Index of partition starts from 0 // Integer Key starts from 0 also func NewZipfKey(partIndex int, nKeys int64, nParts int, pKeysArray []int64, s float64, hp *HashPartitioner) *ZipfKey { zk := &ZipfKey{ partIndex: partIndex, nParts: nParts, nKeys: nKeys, pKeysArray: pKeysArray, hp: hp, } zk.isPartition = (*SysType == PARTITION) || *PhyPart zk.wholeUniform = rand.New(rand.NewSource(time.Now().Unix() / int64(partIndex+1))) if zk.isPartition { zk.partUniform = make([]*rand.Rand, nParts) for i := 0; i < nParts; i++ { zk.partUniform[i] = rand.New(rand.NewSource(time.Now().Unix() / int64(partIndex*13+i*7+1))) } } // Uniform distribution if s == 1 { zk.isZipf = false } else { zk.isZipf = true // Generate Zipf for whole store zk.wholeZipf = rand.NewZipf(zk.wholeUniform, s, 1, uint64(nKeys-1)) if zk.isPartition { // Generate Zipf for for each part zk.partZipf = make([]*rand.Zipf, nParts) for i := 0; i < nParts; i++ { zk.partZipf[i] = rand.NewZipf(zk.partUniform[i], s, 1, uint64(pKeysArray[i]-1)) } } } return zk }
func main() { //Handling command line parameters log.Printf("starting main\n") src := rand.NewSource(0) r := rand.New(src) var Depth, Width, efactor, numElements int64 Depth = *depthPtr Width = *widthPtr efactor = *efactorPtr numElements = Depth * Width * efactor log.Printf("params:Depth:%d\n", Depth) log.Printf("params:Width:%d\n", Width) log.Printf("params:efactor:%d\n", efactor) log.Printf("params:numElements:%d\n", numElements) //Initialize Data Structures hslice := RandomHashes(r, Depth) cms := NewCMSketch(Depth, Width) cms.Hash = hslice //Make the zipf distribution of random input var j, z int64 var s, v float64 var imax uint64 s = 1.2 v = 1.0 imax = 2 << 10 zipfer := rand.NewZipf(r, s, v, imax) //Use set to store the exact answers set := make(map[int64]int64) log.Printf("Inserting\n") ts := time.Now() for j = 0; j < numElements; j++ { z = int64(zipfer.Uint64()) //set[z] += 1 //fmt.Println(z) cms.UpdateSerial(z, 1) } te := time.Now().Sub(ts) fmt.Printf("time: %v\n", te) fmt.Printf("%s\n", cms.Counter.String()) var qj int64 //approximate answers var totalLoss float64 loss := func(cj, qj float64) float64 { return (cj - qj) * (cj - qj) } for j, cj := range set { qj = cms.PointQuery(j) //fmt.Printf("results:%d %d %d %f\n", j, qj, cj, float64(qj)/float64(cj)) totalLoss += loss(float64(cj), float64(qj)) } fmt.Printf("Total Loss: %f/%d\n", totalLoss, numElements) }
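// Observation about the snippet above (left unchanged): with `set[z] += 1` commented
// out, `set` stays empty, so the loss loop never runs and Total Loss is always 0. A
// self-contained sketch of the bookkeeping the comparison needs; pointQuery here is a
// hypothetical stand-in for cms.PointQuery, used only so the loss loop has something
// to compare against.
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	r := rand.New(rand.NewSource(0))
	zipfer := rand.NewZipf(r, 1.2, 1.0, 2<<10)

	// Exact counts; this is what the commented-out set[z] += 1 records.
	set := make(map[int64]int64)
	const numElements = 1 << 20
	for j := 0; j < numElements; j++ {
		set[int64(zipfer.Uint64())]++
	}

	// Hypothetical estimator that over-counts by one, standing in for the sketch query.
	pointQuery := func(k int64) int64 { return set[k] + 1 }

	var totalLoss float64
	loss := func(cj, qj float64) float64 { return (cj - qj) * (cj - qj) }
	for j, cj := range set {
		totalLoss += loss(float64(cj), float64(pointQuery(j)))
	}
	fmt.Printf("Total Loss: %f/%d\n", totalLoss, numElements)
}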
func New(seed int64, n int) func() int { src := rand.NewSource(seed) r := rand.New(src) z := rand.NewZipf(r, 2, 1, uint64(n-1)) c := make(chan int, 32) go func() { for { c <- int(z.Uint64()) } }() return func() int { return <-c } }
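// A short usage sketch for the generator above, assuming the New function is in the
// same package. Funnelling draws through the channel is what makes the returned
// function safe to call from multiple goroutines, since *rand.Zipf itself is not safe
// for concurrent use.
package main

import "fmt"

func main() {
	next := New(42, 1000) // seed 42, values in [0, 999]
	counts := make(map[int]int)
	for i := 0; i < 10000; i++ {
		counts[next()]++
	}
	fmt.Println("draws of 0:", counts[0]) // rank 0 dominates, as expected for s = 2
}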
func BenchmarkCounter(b *testing.B) { once.Do(func() { var buf []byte buf, err = ioutil.ReadFile("/usr/share/dict/connectives") if err != nil { return } for _, b := range bytes.Fields(buf) { words = append(words, string(b)) } }) if err != nil { b.Skipf("could not open dictionary: %v", err) } c := NewCounter() r := rand.New(rand.NewSource(1234)) z := rand.NewZipf(r, 2, 1, uint64(len(words)-1)) var seq [1024]int for i := 0; i < len(seq); i++ { // seq[i] = rand.Intn(len(words)) seq[i] = int(z.Uint64()) } _ = z b.ResetTimer() // for i := 0; i < b.N; i++ { // c.Add(words[b.N%len(words)]) // c.Add(words[rand.Intn(len(words))]) // } b.RunParallel(func(pb *testing.PB) { var i int for pb.Next() { // c.Add(words[rand.Intn(len(words))]) // c.Add(words[int(z.Uint64())]) c.Add(words[seq[i]]) i = (i + 1) % 1024 } }) b.StopTimer() if got := c.Sum(); got != b.N { b.Errorf("Sum=%d want %d", got, b.N) } }
func benchmarkObserve(b *testing.B, capacity int, distinct uint64) { r := rand.New(rand.NewSource(1)) zipf := rand.NewZipf(r, 1.5, 5, distinct) items := make(chan string, b.N) for i := 0; i < b.N; i++ { items <- strconv.FormatUint(zipf.Uint64(), 10) } summary := NewSummary(capacity) b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { summary.Observe(<-items) } }) }
func main() { words := data.GetData() // shuffle rand.Seed(time.Now().UnixNano()) for i := range words { j := rand.Intn(i + 1) words[i], words[j] = words[j], words[i] } client, err := skizze.Dial("127.0.0.1:3596", skizze.Options{Insecure: true}) if err != nil { fmt.Printf("Error connecting to Skizze: %s\n", err) return } domainName := "skizze_stress" if _, err := client.CreateDomain(domainName); err != nil { fmt.Println(err) } end := time.Duration(0) r := rand.New(rand.NewSource(time.Now().UnixNano())) zipf := rand.NewZipf(r, 1.1, 1.1, uint64(len(words)-1)) totalAdds := 0 for i := 0; i < 100000; i++ { fill := make([]string, 1000, 1000) for j := 0; j < len(fill); j++ { k := zipf.Uint64() fill[j] = words[k] } totalAdds += len(fill) t := time.Now() if err := client.AddToDomain(domainName, fill...); err != nil { fmt.Println(err) return } end += time.Since(t) if end.Seconds() > 0 { fmt.Printf("Added %d values (%d unique) in %ds (avg. %d v/s)\n", totalAdds, len(words), int(end.Seconds()), totalAdds/int(end.Seconds()+1)) } } client.Close() fmt.Printf("Added %d values (%d unique) in %ds (avg. %d v/s)\n", totalAdds, len(words), int(end.Seconds()), totalAdds/int(end.Seconds()+1)) }
func TestGenZipfGo(t *testing.T) { r := rand.New(rand.NewSource(time.Now().UnixNano())) z := rand.NewZipf(r, 1.000001, 1, uint64(100)) n := 1000 x := make([]uint64, n) for i := 0; i < n; i++ { x[i] = z.Uint64() } first := 0 second := 0 third := 0 for i := 0; i < len(x); i++ { if x[i] == uint64(0) { first++ } else if x[i] == uint64(1) { second++ } else if x[i] == 2 { third++ } } fmt.Printf("go 1.000001: first: %v, second: %v, third: %v\n", first, second, third) }
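// For reference (derived from the distribution, not from the test above): with s just
// above 1 and v = 1, P(k) is proportional to 1/(k+1)^s, so over the support [0, 100]
// the first three values should appear roughly in the ratio 1 : 1/2 : 1/3. This sketch
// prints the proportions the printed counts can be compared against.
package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		s    = 1.000001
		v    = 1.0
		imax = 100
	)
	var norm float64
	for k := 0; k <= imax; k++ {
		norm += 1 / math.Pow(v+float64(k), s)
	}
	for k := 0; k < 3; k++ {
		p := 1 / math.Pow(v+float64(k), s) / norm
		fmt.Printf("P(%d) = %.3f (expected count out of 1000 draws: %.0f)\n", k, p, 1000*p)
	}
}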
func BenchmarkEpsilonGreedy(b *testing.B) { b.StopTimer() // Make up some response times zipfDist := rand.NewZipf(rand.New(rand.NewSource(0)), 1.1, 5, 5000) timings := make([]uint64, b.N) for i := 0; i < b.N; i++ { timings[i] = zipfDist.Uint64() } // Make the hostpool with a few hosts p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool) b.StartTimer() for i := 0; i < b.N; i++ { if i != 0 && i%100 == 0 { p.performEpsilonGreedyDecay() } hostR := p.Get() p.timer = &mockTimer{t: int(timings[i])} hostR.Mark(nil) } }
func (g *generator) Flow(spread float64) (matrix.Matrix, error) { if spread < 0 || spread >= 1 { return nil, fmt.Errorf("Error: spread must be between 0 and 1.") } // Create Zipf generator scale := 1000 r := rand.New(rand.NewSource(0)) zipf := rand.NewZipf(r, 1.01, float64(g.n), uint64(scale)) // Populate frequencies of unigrams k := make([]float64, g.n) for i := 0; i < g.n; i++ { k[i] = float64(zipf.Uint64()) } // Populate ideal bigram matrix m := matrix.Matrix(make([][]matrix.Element, g.n)) for i := 0; i < g.n; i++ { m[i] = make([]matrix.Element, g.n) for j := 0; j < g.n; j++ { e := (rand.Float64() - 0.5) * spread m[i][j] = matrix.Element((k[i] * k[j]) * (1 + e)) } } // Scale back to 100,000 total freq totalF := m.Sum() s := g.fscale / totalF for i := 0; i < g.n; i++ { for j := 0; j < g.n; j++ { m[i][j] = matrix.Element(math.Floor(float64(m[i][j]) * s)) } } return m, nil }
func main() { hosts := flag.String("hosts", "http://localhost:2379", "comma separated etcd hosts to spew at") flag.Parse() numCPU := runtime.NumCPU() runtime.GOMAXPROCS(numCPU) ms := loghisto.NewMetricSystem(time.Second, false) ms.Start() metricStream := make(chan *loghisto.ProcessedMetricSet, 2) ms.SubscribeToProcessedMetrics(metricStream) defer ms.UnsubscribeFromProcessedMetrics(metricStream) machines := strings.Split(*hosts, ",") // use zipfian distribution r := rand.New(rand.NewSource(time.Now().UnixNano())) zipf := rand.NewZipf(r, 3.14, 2.72, 500000) go reporter(metricStream) for i := 0; i < 5; i++ { go func() { client := etcd.NewClient(machines) for i := 0; i < 3; i++ { go func() { for { rando := rand.Float64() valLen := int32(math.Max(float64(zipf.Uint64()), 1)) if rando > 0.8 { t := ms.StartTimer("PutLat") if _, err := client.Set("/"+RandString(1), RandString(500), 0); err != nil { log.Fatal(err) } t.Stop() ms.Histogram("PutSz", float64(valLen)) ms.Counter("Put", 1) } else if rando > 0.7 { t := ms.StartTimer("DeleteLat") client.Delete("/"+RandString(1), true) t.Stop() ms.Counter("Delete", 1) } else if rando > 0.65 { t := ms.StartTimer("AddChildLat") client.AddChild("/"+RandString(2), RandString(valLen), 0) t.Stop() ms.Counter("AddChild", 1) } else { t := ms.StartTimer("GetLat") r, err := client.Get("/"+RandString(1), false, false) if err == nil { ms.Histogram("GetSz", float64(len(r.Node.Value))) } t.Stop() ms.Counter("Get", 1) } } }() } }() } <-make(chan struct{}) }
func simulatedClient(rlReply *masterproto.GetReplicaListReply, leader int, readings chan float64, done chan bool) { N := len(rlReply.ReplicaList) servers := make([]net.Conn, N) readers := make([]*bufio.Reader, N) writers := make([]*bufio.Writer, N) rarray := make([]int, *reqsNb) karray := make([]int64, *reqsNb) perReplicaCount := make([]int, N) M := N if *barOne { M = N - 1 } randObj := rand.New(rand.NewSource(42)) zipf := rand.NewZipf(randObj, *s, *v, uint64(*reqsNb)) for i := 0; i < len(rarray); i++ { r := rand.Intn(M) rarray[i] = r perReplicaCount[r]++ if *conflicts >= 0 { r = rand.Intn(100) if r < *conflicts { karray[i] = 42 } else { karray[i] = int64(*startRange + 43 + i) } } else { karray[i] = int64(zipf.Uint64()) } } repliesChan := make(chan int32, *reqsNb*N) for i := 0; i < N; i++ { var err error servers[i], err = net.Dial("tcp", rlReply.ReplicaList[i]) if err != nil { log.Printf("Error connecting to replica %d\n", i) } readers[i] = bufio.NewReader(servers[i]) if *fast { //wait for replies from every replica go waitForReplies(readers[i], repliesChan) } writers[i] = bufio.NewWriter(servers[i]) } id := int32(*idStart) args := genericsmrproto.Propose{id, state.Command{state.PUT, 0, 0}} n := *reqsNb for i := 0; i < n; i++ { before := time.Now() args.ClientId = id args.Command.K = state.Key(karray[i]) if !*fast { if *noLeader { leader = rarray[i] } writers[leader].WriteByte(genericsmrproto.PROPOSE) args.Marshal(writers[leader]) writers[leader].Flush() } else { //send to everyone for rep := 0; rep < N; rep++ { writers[rep].WriteByte(genericsmrproto.PROPOSE) args.Marshal(writers[rep]) writers[rep].Flush() } } for true { rid := <-repliesChan if rid == id { break } } after := time.Now() id++ readings <- (after.Sub(before)).Seconds() * 1000 if *sleep > 0 { time.Sleep(100 * 1000 * 1000) } } for _, client := range servers { if client != nil { client.Close() } } done <- true }
func main() { flag.Parse() runtime.GOMAXPROCS(*procs) randObj := rand.New(rand.NewSource(42)) zipf := rand.NewZipf(randObj, *s, *v, uint64(*reqsNb / *rounds + *eps)) if *conflicts > 100 { log.Fatalf("Conflicts percentage must be between 0 and 100.\n") } master, err := rpc.DialHTTP("tcp", fmt.Sprintf("%s:%d", *masterAddr, *masterPort)) if err != nil { log.Fatalf("Error connecting to master\n") } rlReply := new(masterproto.GetReplicaListReply) err = master.Call("Master.GetReplicaList", new(masterproto.GetReplicaListArgs), rlReply) if err != nil { log.Fatalf("Error making the GetReplicaList RPC") } N = len(rlReply.ReplicaList) servers := make([]net.Conn, N) readers := make([]*bufio.Reader, N) writers := make([]*bufio.Writer, N) rarray = make([]int, *reqsNb / *rounds + *eps) karray := make([]int64, *reqsNb / *rounds + *eps) perReplicaCount := make([]int, N) test := make([]int, *reqsNb / *rounds + *eps) for i := 0; i < len(rarray); i++ { r := rand.Intn(N) rarray[i] = r if i < *reqsNb / *rounds { perReplicaCount[r]++ } if *conflicts >= 0 { r = rand.Intn(100) if r < *conflicts { karray[i] = 42 } else { karray[i] = int64(43 + i) } } else { karray[i] = int64(zipf.Uint64()) test[karray[i]]++ } } if *conflicts >= 0 { fmt.Println("Uniform distribution") } else { fmt.Println("Zipfian distribution:") //fmt.Println(test[0:100]) } for i := 0; i < N; i++ { var err error servers[i], err = net.Dial("tcp", rlReply.ReplicaList[i]) if err != nil { log.Printf("Error connecting to replica %d\n", i) } readers[i] = bufio.NewReader(servers[i]) writers[i] = bufio.NewWriter(servers[i]) } successful = make([]int, N) leader := 0 if *noLeader == false { reply := new(masterproto.GetLeaderReply) if err = master.Call("Master.GetLeader", new(masterproto.GetLeaderArgs), reply); err != nil { log.Fatalf("Error making the GetLeader RPC\n") } leader = reply.LeaderId log.Printf("The leader is replica %d\n", leader) } var id int32 = 0 done := make(chan bool, N) args := genericsmrproto.Propose{id, state.Command{state.PUT, 0, 0}} before_total := time.Now() for j := 0; j < *rounds; j++ { n := *reqsNb / *rounds if *check { rsp = make([]bool, n) for j := 0; j < n; j++ { rsp[j] = false } } donePrinting := make(chan bool) readings := make(chan int64, n) go printer(readings, donePrinting) if *noLeader { for i := 0; i < N; i++ { go waitReplies(readers, i, perReplicaCount[i], done, readings) } } else { go waitReplies(readers, leader, n, done, readings) } before := time.Now() for i := 0; i < n+*eps; i++ { dlog.Printf("Sending proposal %d\n", id) args.ClientId = id args.Command.K = state.Key(karray[i]) args.Command.V = state.Value(time.Now().UnixNano()) if !*fast { if *noLeader { leader = rarray[i] } writers[leader].WriteByte(genericsmrproto.PROPOSE) args.Marshal(writers[leader]) } else { //send to everyone for rep := 0; rep < N; rep++ { writers[rep].WriteByte(genericsmrproto.PROPOSE) args.Marshal(writers[rep]) writers[rep].Flush() } } //fmt.Println("Sent", id) id++ if i%*batch == 0 { for i := 0; i < N; i++ { writers[i].Flush() } if *nanosleep > 0 { time.Sleep(time.Duration(*nanosleep)) } } } for i := 0; i < N; i++ { writers[i].Flush() } err := false if *noLeader { for i := 0; i < N; i++ { e := <-done err = e || err } } else { err = <-done } after := time.Now() <-donePrinting fmt.Printf("Round took %v\n", after.Sub(before)) if *check { for j := 0; j < n; j++ { if !rsp[j] { fmt.Println("Didn't receive", j) } } } if err { if *noLeader { N = N - 1 } else { reply := new(masterproto.GetLeaderReply) master.Call("Master.GetLeader", 
new(masterproto.GetLeaderArgs), reply) leader = reply.LeaderId log.Printf("New leader is replica %d\n", leader) } } } after_total := time.Now() fmt.Printf("Test took %v\n", after_total.Sub(before_total)) s := 0 for _, succ := range successful { s += succ } fmt.Printf("Successful: %d\n", s) for _, client := range servers { if client != nil { client.Close() } } master.Close() }
func doBenchmark(numOps int, b *testing.B, gomaxprocs int, numStripes int, numGoRoutines int, maxSize int64, maxAge time.Duration, fakeDbMax int64, simLatency time.Duration) { if b != nil { b.StopTimer() } origGomaxprocs := runtime.GOMAXPROCS(-1) runtime.GOMAXPROCS(gomaxprocs) db := &FakeDatabase{fakeDbMax, simLatency} loader := func(key string) (interface{}, error) { return db.Get(key) } sizer := func(x interface{}) int64 { return 1 } randMax := int64(1000000) opsForGoroutine := make([]int, numGoRoutines) for i := 0; i < numGoRoutines; i++ { opsForGoroutine[i] = numOps / numGoRoutines } remainder := numOps - (numGoRoutines * (numOps / numGoRoutines)) for i := 0; i < remainder; i++ { opsForGoroutine[i] += 1 } initWg := new(sync.WaitGroup) finishedWg := new(sync.WaitGroup) initWg.Add(1) finishedWg.Add(numGoRoutines) cache := NewCache(numStripes, loader, sizer) for i := 0; i < numGoRoutines; i++ { go func(numOpsForThread int, seed int) { // fmt.Printf("numops: %d\n", numOpsForThread) rng := rand.New(rand.NewSource(int64(seed))) // We use a zipfian distribution of key lookup frequency to simulate the fact // that different cache keys are more popular than others. // The params to zipf are totally hacked, I just eyeballed the output values and // the distribution looks kind of OK-ish. // We probably shouldn't hardcode these, and instead take them as arguments. zipf := rand.NewZipf(rng, 1.1, 10, uint64(randMax)) initWg.Wait() // Block until all goroutines are ready for j := 0; j < numOpsForThread; j++ { r := zipf.Uint64() // fmt.Printf("zipf: %d\t", r) result, err := cache.GetOrLoad(strconv.FormatInt(int64(r), 10)) cache.Expire(maxSize, maxAge) // Enforce cache size and max age constraints if err != nil { panic("Cache lookup error") } if r <= uint64(fakeDbMax) && result != 10*int(r) { panic(fmt.Sprintf("Unexpected result %T:%d looking up %T:%d", result, result, r, r)) } else if r > uint64(fakeDbMax) && result != nil { panic("Result should have been nil") } } finishedWg.Done() }(opsForGoroutine[i], i) } if b != nil { b.StartTimer() } initWg.Done() // Unblock worker goroutines finishedWg.Wait() // Wait for workers to finish runtime.GOMAXPROCS(origGomaxprocs) }
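// The zipf parameters above are described as eyeballed; a small hedged sketch that
// prints a few quantiles of rand.NewZipf(rng, 1.1, 10, 1000000) makes the resulting
// key-popularity skew easy to inspect and tune. Parameter values mirror the benchmark;
// everything else is illustrative.
package main

import (
	"fmt"
	"math/rand"
	"sort"
)

func main() {
	rng := rand.New(rand.NewSource(1))
	zipf := rand.NewZipf(rng, 1.1, 10, 1000000) // same shape as the benchmark's generator

	const n = 100000
	draws := make([]uint64, n)
	for i := range draws {
		draws[i] = zipf.Uint64()
	}
	sort.Slice(draws, func(i, j int) bool { return draws[i] < draws[j] })

	for _, q := range []float64{0.5, 0.9, 0.99, 0.999} {
		idx := int(q * float64(n-1))
		fmt.Printf("p%g of keys: %d\n", 100*q, draws[idx])
	}
}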
func main() { flag.Parse() runtime.GOMAXPROCS(*procs) randObj := rand.New(rand.NewSource(42)) zipf := rand.NewZipf(randObj, *s, *v, uint64(*reqsNb)) //uint64(*reqsNb / *rounds + *eps)) if *conflicts > 100 { log.Fatalf("Conflicts percentage must be between 0 and 100.\n") } master, err := rpc.DialHTTP("tcp", fmt.Sprintf("%s:%d", *masterAddr, *masterPort)) if err != nil { log.Fatalf("Error connecting to master\n") } rlReply := new(masterproto.GetReplicaListReply) err = master.Call("Master.GetReplicaList", new(masterproto.GetReplicaListArgs), rlReply) if err != nil { log.Fatalf("Error making the GetReplicaList RPC") } N = len(rlReply.ReplicaList) servers := make([]net.Conn, N) readers := make([]*bufio.Reader, N) writers := make([]*bufio.Writer, N) rarray = make([]int, *reqsNb / *rounds + *eps) karray := make([]int64, *reqsNb / *rounds + *eps) perReplicaCount := make([]int, N) //test := make([]int, *reqsNb / *rounds + *eps) M := N if *barOne { M = N - 1 } for i := 0; i < len(rarray); i++ { r := rand.Intn(M) rarray[i] = r if i < *reqsNb / *rounds { perReplicaCount[r]++ } if *conflicts >= 0 { r = rand.Intn(100) if r < *conflicts { karray[i] = 42 } else { karray[i] = int64(43 + i) } } else { karray[i] = int64(zipf.Uint64()) //test[karray[i]]++ } } if *conflicts >= 0 { //fmt.Println("Uniform distribution") } else { /*fmt.Println("Zipfian distribution:") sum := 0 for _, val := range test[0:2000] { sum += val } fmt.Println(test[0:100]) fmt.Println(sum)*/ } for i := 0; i < N; i++ { var err error servers[i], err = net.Dial("tcp", rlReply.ReplicaList[i]) if err != nil { log.Printf("Error connecting to replica %d\n", i) N = N - 1 } readers[i] = bufio.NewReader(servers[i]) writers[i] = bufio.NewWriter(servers[i]) } successful = make([]int, N) leader := 0 if *noLeader == false { reply := new(masterproto.GetLeaderReply) if err = master.Call("Master.GetLeader", new(masterproto.GetLeaderArgs), reply); err != nil { log.Fatalf("Error making the GetLeader RPC\n") } leader = reply.LeaderId //log.Printf("The leader is replica %d\n", leader) } var id int32 = 0 done := make(chan bool, N) args := genericsmrproto.Propose{id, state.Command{state.PUT, 0, 0}} //make([]int64, state.VALUE_SIZE)}} pdone := make(chan bool) go printer(pdone) before_total := time.Now() for j := 0; j < *rounds; j++ { n := *reqsNb / *rounds if *check { rsp = make([]bool, n) for j := 0; j < n; j++ { rsp[j] = false } } if *noLeader { for i := 0; i < N; i++ { go waitReplies(readers, i, perReplicaCount[i], done) } } else { go waitReplies(readers, leader, n, done) // go waitReplies(readers, 2, n, done) } // before := time.Now() for i := 0; i < n+*eps; i++ { //dlog.Printf("Sending proposal %d\n", id) if *noLeader { leader = rarray[i] if leader >= N { continue } } args.ClientId = id args.Command.K = state.Key(karray[i]) writers[leader].WriteByte(genericsmrproto.PROPOSE) args.Marshal(writers[leader]) writers[leader].Flush() //fmt.Println("Sent", id) id++ if i%100 == 0 { for i := 0; i < N; i++ { writers[i].Flush() } } } for i := 0; i < N; i++ { writers[i].Flush() } err := false if *noLeader { W := N if *waitLess { W = N - 1 } for i := 0; i < W; i++ { e := <-done err = e || err } } else { err = <-done } // after := time.Now() // fmt.Printf("Round took %v\n", after.Sub(before)) if *check { for j := 0; j < n; j++ { if !rsp[j] { fmt.Println("Didn't receive", j) } } } if err { if *noLeader { N = N - 1 } else { reply := new(masterproto.GetLeaderReply) master.Call("Master.GetLeader", new(masterproto.GetLeaderArgs), reply) leader = reply.LeaderId log.Printf("New 
leader is replica %d\n", leader) } } } after_total := time.Now() //fmt.Printf("Test took %v\n", after_total.Sub(before_total)) //fmt.Printf("%v\n", (after_total.Sub(before_total)).Seconds()) s := 0 for _, succ := range successful { s += succ } fmt.Printf("Successful: %d\n", s) fmt.Printf("%v\n", float64(s)/(after_total.Sub(before_total)).Seconds()) for _, client := range servers { if client != nil { client.Close() } } master.Close() }
func simulatedClient(rlReply *masterproto.GetReplicaListReply, leader int, readings chan float64, done chan bool) { N := len(rlReply.ReplicaList) servers := make([]net.Conn, N) readers := make([]*bufio.Reader, N) writers := make([]*bufio.Writer, N) rarray := make([]int, *reqsNb) karray := make([]int64, *reqsNb) perReplicaCount := make([]int, N) M := N if *barOne { M = N - 1 } randObj := rand.New(rand.NewSource(42)) zipf := rand.NewZipf(randObj, *s, *v, uint64(*reqsNb)) for i := 0; i < len(rarray); i++ { r := rand.Intn(M) rarray[i] = r perReplicaCount[r]++ if *conflicts >= 0 { r = rand.Intn(100) if r < *conflicts { karray[i] = 42 } else { karray[i] = int64(*startRange + 43 + i) } } else { karray[i] = int64(zipf.Uint64()) } } for i := 0; i < N; i++ { var err error servers[i], err = net.Dial("tcp", rlReply.ReplicaList[i]) if err != nil { log.Printf("Error connecting to replica %d\n", i) } readers[i] = bufio.NewReader(servers[i]) writers[i] = bufio.NewWriter(servers[i]) } var id int32 = 0 args := genericsmrproto.Propose{id, state.Command{state.PUT, 0, 0}} var reply genericsmrproto.ProposeReply n := *reqsNb for i := 0; i < n; i++ { if *noLeader { leader = rarray[i] } args.ClientId = id args.Command.K = state.Key(karray[i]) writers[leader].WriteByte(genericsmrproto.PROPOSE) before := time.Now() args.Marshal(writers[leader]) writers[leader].Flush() if err := reply.Unmarshal(readers[leader]); err != nil || reply.OK == 0 { fmt.Println("Error when reading:", err) continue } after := time.Now() id++ readings <- (after.Sub(before)).Seconds() * 1000 if *sleep > 0 { time.Sleep(100 * 1000 * 1000) } } for _, client := range servers { if client != nil { client.Close() } } done <- true }
backend_latency_ms = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "backend_latency_ms", Help: "request latency in milliseconds", Buckets: prometheus.ExponentialBuckets(1, 2, 20)}) ) func init() { prometheus.MustRegister(requests) prometheus.MustRegister(errors) prometheus.MustRegister(latency_ms) prometheus.MustRegister(backend_latency_ms) } var ( randLock sync.Mutex zipf = rand.NewZipf(rand.New(rand.NewSource(0)), 1.1, 1, 1000) ) func handleHi(w http.ResponseWriter, r *http.Request) { start := time.Now() requests.Add(1) // COUNTER // Perform a "database" "lookup". backend_start := time.Now() randLock.Lock() // golang issue 3611 time.Sleep(time.Duration(zipf.Uint64()) * time.Millisecond) randLock.Unlock() backend_latency_ms.Observe(float64(time.Since(backend_start).Nanoseconds() / 1e6)) // HISTOGRAM // Fail sometimes. switch v := rand.Intn(100); {
func (z *zeroSum) accountDistribution(r *rand.Rand) *rand.Zipf { // We use a Zipf distribution for selecting accounts. return rand.NewZipf(r, 1.1, float64(z.numAccounts/10), uint64(z.numAccounts-1)) }