Example #1
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*nprocs)

	if *clientGoRoutines == 0 {
		*clientGoRoutines = *nprocs
	}
	if *nworkers == 0 {
		*nworkers = *nprocs
	}
	if *prob == -1 && *ZipfDist < 0 {
		log.Fatalf("Zipf distribution must be positive")
	}
	if *ZipfDist >= 0 && *prob > -1 {
		log.Fatalf("Set contention to -1 to use Zipf distribution of keys")
	}
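	// Create the store and populate one SUM record per product key; sp is the per-worker key partition size.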
	s := ddtxn.NewStore()
	sp := uint32(*nbidders / *nworkers)
	for i := 0; i < *nbidders; i++ {
		k := ddtxn.ProductKey(i)
		s.CreateKey(k, int32(0), ddtxn.SUM)
	}
	dlog.Printf("Done with Populate")

	coord := ddtxn.NewCoordinator(*nworkers, s)

	if *ddtxn.CountKeys {
		for i := 0; i < *nworkers; i++ {
			w := coord.Workers[i]
			w.NKeyAccesses = make([]int64, *nbidders)
		}
	}

	dlog.Printf("Done initializing single\n")

	p := prof.StartProfile()
	start := time.Now()
	var wg sync.WaitGroup
	pkey := int(sp - 1)
	dlog.Printf("Partition size: %v; Contended key %v\n", sp/2, pkey)
	gave_up := make([]int64, *clientGoRoutines)

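	// When *prob is -1 (Zipf mode), each client goroutine gets its own deterministically seeded Zipf generator, so key skew is repeatable across runs.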
	goZipf := make([]*ddtxn.Zipf, *clientGoRoutines)
	if *prob == -1 && *ZipfDist >= 0 {
		for i := 0; i < *clientGoRoutines; i++ {
			rnd := rand.New(rand.NewSource(int64(i * 12467)))
			goZipf[i] = ddtxn.NewZipf(rnd, *ZipfDist, 1, uint64(*nbidders)-1)
			if goZipf[i] == nil {
				panic("nil zipf")
			}
		}
	}

	for i := 0; i < *clientGoRoutines; i++ {
		wg.Add(1)
		go func(n int) {
			exp := ddtxn.MakeExp(50)
			retries := make(ddtxn.RetryHeap, 0)
			heap.Init(&retries)
			var local_seed uint32 = uint32(rand.Intn(10000000))
			wi := n % (*nworkers)
			w := coord.Workers[wi]
			top := (wi + 1) * int(sp)
			bottom := wi * int(sp)
			dlog.Printf("%v: Noncontended section: %v to %v\n", n, bottom, top)
			end_time := time.Now().Add(time.Duration(*nsec) * time.Second)
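			// Issue transactions until end_time, preferring any retry whose scheduled time has passed over generating new work.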
			for {
				tm := time.Now()
				if !end_time.After(tm) {
					break
				}
				var t ddtxn.Query
				if len(retries) > 0 && retries[0].TS.Before(tm) {
					t = heap.Pop(&retries).(ddtxn.Query)
				} else {
					x := float64(ddtxn.RandN(&local_seed, 100))
					if *prob == -1 {
						x := goZipf[n].Uint64()
						if x >= uint64(*nbidders) {
							log.Fatalf("x not in bounds: %v\n", x)
						}
						t.K1 = ddtxn.ProductKey(int(x))
					} else if x < *prob {
						// contended txn
						t.K1 = ddtxn.ProductKey(pkey)
					} else {
						// uncontended
						k := pkey
						for k == pkey {
							if *partition {
								rnd := ddtxn.RandN(&local_seed, sp-1)
								lb := int(rnd)
								k = lb + wi*int(sp) + 1
								if k < bottom || k >= top+1 {
									log.Fatalf("%v: outside my range %v [%v-%v]\n", n, k, bottom, top)
								}
							} else {
								k = int(ddtxn.RandN(&local_seed, uint32(*nbidders)))
							}
						}
						t.K1 = ddtxn.ProductKey(k)
					}
					t.TXN = ddtxn.D_INCR_ONE
					if *atomicIncr {
						t.TXN = ddtxn.D_ATOMIC_INCR_ONE
					}
					y := int(ddtxn.RandN(&local_seed, 100))
					if y < *readrate {
						t.TXN = ddtxn.D_READ_ONE
					}
				}
				committed := false
				_, err := w.One(t)
				if err == ddtxn.EABORT {
					committed = false
				} else {
					committed = true
				}
				t.I++
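				// On abort, back off a random number of microseconds drawn from an exponentially growing window, then requeue; give up if the retry would land after end_time.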
				if !committed {
					e := exp.Exp(t.I)
					if e <= 0 {
						e = 1
					}
					rnd := ddtxn.RandN(&local_seed, e)
					if rnd <= 0 {
						rnd = 1
					}
					t.TS = tm.Add(time.Duration(rnd) * time.Microsecond)
					if t.TS.Before(end_time) {
						heap.Push(&retries, t)
					} else {
						gave_up[n]++
					}
				}
			}
			w.Finished()
			wg.Done()
			if len(retries) > 0 {
				dlog.Printf("[%v] Length of retry queue on exit: %v\n", n, len(retries))
			}
			gave_up[n] = gave_up[n] + int64(len(retries))
		}(i)
	}
	wg.Wait()
	coord.Finish()
	end := time.Since(start)
	p.Stop()

	stats := make([]int64, ddtxn.LAST_STAT)
	nitr, nwait, _, _, _, _, _ := ddtxn.CollectCounts(coord, stats)

	for i := 1; i < *clientGoRoutines; i++ {
		gave_up[0] = gave_up[0] + gave_up[i]
	}

	// nitr + NABORTS + ENOKEY is how many requests were issued.  A
	// stashed transaction eventually executes and contributes to
	// nitr.
	out := fmt.Sprintf(" nworkers: %v, nwmoved: %v, nrmoved: %v, sys: %v, total/sec: %v, abortrate: %.2f, stashrate: %.2f, rr: %v, nkeys: %v, contention: %v, zipf: %v, done: %v, actual time: %v, nreads: %v, nincrs: %v, epoch changes: %v, throughput ns/txn: %v, naborts: %v, coord time: %v, coord stats time: %v, total worker time transitioning: %v, nstashed: %v, rlock: %v, wrratio: %v, nsamples: %v, getkeys: %v, ddwrites: %v, nolock: %v, failv: %v, nlocked: %v, stashdone: %v, nfast: %v, gaveup: %v, potential: %v ", *nworkers, ddtxn.WMoved, ddtxn.RMoved, *ddtxn.SysType, float64(nitr)/end.Seconds(), 100*float64(stats[ddtxn.NABORTS])/float64(nitr+stats[ddtxn.NABORTS]), 100*float64(stats[ddtxn.NSTASHED])/float64(nitr+stats[ddtxn.NABORTS]), *readrate, *nbidders, *prob, *ZipfDist, nitr, end, stats[ddtxn.D_READ_ONE], stats[ddtxn.D_INCR_ONE], ddtxn.NextEpoch, end.Nanoseconds()/nitr, stats[ddtxn.NABORTS], ddtxn.Time_in_IE, ddtxn.Time_in_IE1, nwait, stats[ddtxn.NSTASHED], *ddtxn.UseRLocks, *ddtxn.WRRatio, stats[ddtxn.NSAMPLES], stats[ddtxn.NGETKEYCALLS], stats[ddtxn.NDDWRITES], stats[ddtxn.NO_LOCK], stats[ddtxn.NFAIL_VERIFY], stats[ddtxn.NLOCKED], stats[ddtxn.NDIDSTASHED], ddtxn.Nfast, gave_up[0], coord.PotentialPhaseChanges)
	fmt.Println(out)

	f, err := os.OpenFile(*dataFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	ddtxn.PrintStats(out, stats, f, coord, s, *nbidders)
}
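Note: the function above relies on package-level flags and helpers (dlog, prof, ddtxn) defined elsewhere in its file. Below is a minimal sketch of the flag declarations it appears to assume; the flag names, types, and defaults are inferred from usage and purely illustrative, not the project's actual definitions.

package main

import (
	"flag"
	"fmt"
)

// Hypothetical flag declarations; variable names match how the benchmark
// dereferences them, everything else is a guess.
var (
	nprocs           = flag.Int("nprocs", 1, "value passed to runtime.GOMAXPROCS")
	nworkers         = flag.Int("nw", 0, "number of workers; 0 means use nprocs")
	clientGoRoutines = flag.Int("ngo", 0, "number of client goroutines; 0 means use nprocs")
	nbidders         = flag.Int("nb", 1000000, "number of keys to populate")
	nsec             = flag.Int("nsec", 10, "seconds to run the benchmark")
	readrate         = flag.Int("rr", 0, "percentage of transactions that are reads")
	prob             = flag.Float64("contention", -1, "percent chance of hitting the contended key; -1 selects Zipf")
	ZipfDist         = flag.Float64("zipf", -1, "Zipf skew parameter; negative disables")
	partition        = flag.Bool("partition", false, "restrict each client to its worker's key range")
	atomicIncr       = flag.Bool("atomic", false, "issue atomic-increment transactions")
	dataFile         = flag.String("out", "data.out", "file to append the stats line to")
)

func main() {
	flag.Parse()
	if *nworkers == 0 {
		*nworkers = *nprocs
	}
	if *clientGoRoutines == 0 {
		*clientGoRoutines = *nprocs
	}
	fmt.Printf("nworkers=%d clients=%d keys=%d readrate=%d%% zipf=%v contention=%v partition=%v atomic=%v nsec=%d out=%s\n",
		*nworkers, *clientGoRoutines, *nbidders, *readrate, *ZipfDist, *prob, *partition, *atomicIncr, *nsec, *dataFile)
}

Examples #2 through #4 below use a similar set of flags, plus contention, doValidate, notcontended_readrate, and rounds.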
Example #2
File: big.go Project: ngaut/ddtxn
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*nprocs)

	if *clientGoRoutines == 0 {
		*clientGoRoutines = *nprocs
	}
	if *nworkers == 0 {
		*nworkers = *nprocs
	}

	nproducts := *nbidders / *contention
	if *doValidate {
		if !*ddtxn.Allocate {
			log.Fatalf("Cannot correctly validate without waiting for results; add -allocate\n")
		}
	}
	s := ddtxn.NewStore()
	coord := ddtxn.NewCoordinator(*nworkers, s)

	if *ddtxn.CountKeys {
		for i := 0; i < *nworkers; i++ {
			w := coord.Workers[i]
			w.NKeyAccesses = make([]int64, *nbidders)
		}
	}

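	// Set up the Big benchmark application and populate the store via coord.Workers[0].E.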
	big_app := &apps.Big{}
	big_app.Init(*nbidders, nproducts, *nworkers, *readrate, *clientGoRoutines, *notcontended_readrate)
	big_app.Populate(s, coord.Workers[0].E)

	dlog.Printf("Done initializing buy\n")

	p := prof.StartProfile()
	start := time.Now()

	var wg sync.WaitGroup
	for i := 0; i < *clientGoRoutines; i++ {
		wg.Add(1)
		go func(n int) {
			duration := time.Now().Add(time.Duration(*nsec) * time.Second)
			var local_seed uint32 = uint32(rand.Intn(10000000))
			wi := n % (*nworkers)
			w := coord.Workers[wi]
			// It's ok to reuse t because it gets copied in
			// w.One(), and if we're actually reading from t later
			// we pause and don't re-write it until it's done.
			var t ddtxn.Query
			for duration.After(time.Now()) {
				big_app.MakeOne(w.ID, &local_seed, &t)
				if *apps.Latency || *doValidate {
					t.W = make(chan struct {
						R *ddtxn.Result
						E error
					})
					txn_start := time.Now()
					_, err := w.One(t)
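					// ESTASH: the transaction was stashed to finish later; block on t.W for its eventual result.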
					if err == ddtxn.ESTASH {
						x := <-t.W
						err = x.E
					}
					txn_end := time.Since(txn_start)
					if *apps.Latency {
						big_app.Time(&t, txn_end, n)
					}
					if *doValidate {
						if err == nil {
							big_app.Add(t)
						}
					}
				} else {
					w.One(t)
				}
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	coord.Finish()
	end := time.Since(start)
	p.Stop()

	stats := make([]int64, ddtxn.LAST_STAT)
	nitr, nwait, nwait2 := ddtxn.CollectCounts(coord, stats)

	if *doValidate {
		big_app.Validate(s, int(nitr))
	}

	out := fmt.Sprintf(" sys: %v, contention: %v, nworkers: %v, rr: %v, ncrr: %v, nusers: %v, done: %v, actual time: %v,  epoch changes: %v, total/sec: %v, throughput ns/txn: %v, naborts: %v, nwmoved: %v, nrmoved: %v, ietime: %v, ietime1: %v, etime: %v, etime2: %v, nstashed: %v, rlock: %v, wrratio: %v, nsamples: %v ", *ddtxn.SysType, *contention, *nworkers, *readrate, *notcontended_readrate*float64(*readrate), *nbidders, nitr, end, ddtxn.NextEpoch, float64(nitr)/end.Seconds(), end.Nanoseconds()/nitr, stats[ddtxn.NABORTS], ddtxn.WMoved, ddtxn.RMoved, ddtxn.Time_in_IE.Seconds(), ddtxn.Time_in_IE1.Seconds(), nwait.Seconds()/float64(*nworkers), nwait2.Seconds()/float64(*nworkers), stats[ddtxn.NSTASHED], *ddtxn.UseRLocks, *ddtxn.WRRatio, stats[ddtxn.NSAMPLES])
	fmt.Println(out)
	f, err := os.OpenFile(*dataFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	ddtxn.PrintStats(out, stats, f, coord, s, *nbidders)

	x, y := big_app.LatencyString()
	f.WriteString(x)
	f.WriteString(y)
	f.WriteString("\n")
}
Example #3
File: bid.go Project: ngaut/ddtxn
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*nprocs)

	if *clientGoRoutines == 0 {
		*clientGoRoutines = *nprocs
	}
	if *nworkers == 0 {
		*nworkers = *nprocs
	}

	if *doValidate {
		if !*ddtxn.Allocate {
			log.Fatalf("Cannot correctly validate without waiting for results; add -allocate\n")
		}
	}

	var nproducts int
	if *contention > 0 {
		nproducts = *nbidders / int(*contention)
	} else {
		nproducts = ddtxn.NUM_ITEMS
	}
	s := ddtxn.NewStore()
	coord := ddtxn.NewCoordinator(*nworkers, s)

	if *ddtxn.CountKeys {
		for i := 0; i < *nworkers; i++ {
			w := coord.Workers[i]
			w.NKeyAccesses = make([]int64, *nbidders)
		}
	}

	rubis := &apps.Rubis{}
	rubis.Init(nproducts, *nbidders, *nworkers, *clientGoRoutines, *ZipfDist, 0)
	rubis.PopulateBids(s, coord) // Just creates items to bid on
	fmt.Printf("Done populating bids\n")

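	// Without -allocate, preallocate each worker's Rubis keys before the timed run, temporarily turning UseRLocks on while doing so.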
	if !*ddtxn.Allocate {
		prealloc := time.Now()
		tmp := *ddtxn.UseRLocks
		*ddtxn.UseRLocks = true
		// Preallocate keys

		bids_per_worker := 200000.0
		if *nworkers == 20 {
			bids_per_worker *= 20
		}

		if *rounds {
			parallelism := 10
			rounds := *nworkers / parallelism
			if rounds == 0 {
				rounds = 1
			}
			for j := 0; j < rounds; j++ {
				fmt.Printf("Doing round %v\n", j)
				var wg sync.WaitGroup
				for i := j * parallelism; i < (j+1)*parallelism; i++ {
					if i >= *nworkers {
						break
					}
					wg.Add(1)
					go func(i int) {
						coord.Workers[i].PreallocateRubis(0, int(bids_per_worker), ddtxn.NUM_ITEMS)
						wg.Done()
					}(i)
				}
				wg.Wait()
			}
		} else {
			var wg sync.WaitGroup
			for i := 0; i < *nworkers; i++ {
				wg.Add(1)
				go func(i int) {
					coord.Workers[i].PreallocateRubis(0, int(bids_per_worker), ddtxn.NUM_ITEMS)
					wg.Done()
				}(i)
			}
			wg.Wait()
		}
		*ddtxn.UseRLocks = tmp
		fmt.Printf("Allocation took %v\n", time.Since(prealloc))
	}
	fmt.Printf("Done initializing rubis\n")

	p := prof.StartProfile()
	start := time.Now()
	gave_up := make([]int64, *clientGoRoutines)
	var wg sync.WaitGroup
	for i := 0; i < *clientGoRoutines; i++ {
		exp := ddtxn.MakeExp(30)
		wg.Add(1)
		go func(n int) {
			retries := make(ddtxn.RetryHeap, 0)
			heap.Init(&retries)
			end_time := time.Now().Add(time.Duration(*nsec) * time.Second)
			var local_seed uint32 = uint32(rand.Intn(1000000))
			wi := n % (*nworkers)
			w := coord.Workers[wi]
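			// Generate bid transactions until end_time, draining any due retries from the heap first.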
			for {
				tm := time.Now()
				if !end_time.After(tm) {
					break
				}
				var t ddtxn.Query
				if len(retries) > 0 && retries[0].TS.Before(tm) {
					t = heap.Pop(&retries).(ddtxn.Query)
				} else {
					rubis.MakeBid(w.ID, &local_seed, &t)
					if *ddtxn.Latency {
						t.S = time.Now()
					}
				}
				if *doValidate {
					t.W = make(chan struct {
						R *ddtxn.Result
						E error
					})
				}
				committed := false
				_, err := w.One(t)
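				// A stashed transaction (ESTASH) is finished later by the worker, so treat it as committed here.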
				if err == ddtxn.ESTASH {
					if *doValidate {
						x := <-t.W
						err = x.E
					}
					committed = true
				} else if err == ddtxn.EABORT {
					committed = false
				} else {
					committed = true
				}
				t.I++
				if !committed {
					t.TS = tm.Add(time.Duration(ddtxn.RandN(&local_seed, exp.Exp(t.I))) * time.Microsecond)
					if t.TS.Before(end_time) {
						heap.Push(&retries, t)
					} else {
						gave_up[n]++
					}
				}

				if committed && *doValidate {
					rubis.Add(t)
				}
			}
			wg.Done()
			if len(retries) > 0 {
				dlog.Printf("[%v] Length of retry queue on exit: %v\n", n, len(retries))
			}
			gave_up[n] = gave_up[n] + int64(len(retries))
		}(i)
	}
	wg.Wait()
	coord.Finish()
	end := time.Since(start)
	p.Stop()
	stats := make([]int64, ddtxn.LAST_STAT)
	nitr, nwait, nwait2 := ddtxn.CollectCounts(coord, stats)
	_ = nwait2

	if *doValidate {
		rubis.Validate(s, int(nitr))
	}

	for i := 1; i < *clientGoRoutines; i++ {
		gave_up[0] = gave_up[0] + gave_up[i]
	}

	if !*ddtxn.Allocate {
		keys := []rune{'b', 'c', 'd', 'i', 'k', 'u'}
		for i := 0; i < *nworkers; i++ {
			dlog.Printf("w: %v ", i)
			for _, k := range keys {
				dlog.Printf("%v %v/%v \t", strconv.QuoteRuneToASCII(k), coord.Workers[i].CurrKey[k], coord.Workers[i].LastKey[k])
			}
			dlog.Printf("\n")
		}
	}

	out := fmt.Sprintf("  nworkers: %v, nwmoved: %v, nrmoved: %v, sys: %v, total/sec: %v, abortrate: %.2f, stashrate: %.2f, nbidders: %v, nitems: %v, contention: %v, done: %v, actual time: %v, throughput: ns/txn: %v, naborts: %v, coord time: %v, coord stats time: %v, total worker time transitioning: %v, nstashed: %v, rlock: %v, wrratio: %v, nsamples: %v, getkeys: %v, ddwrites: %v, nolock: %v, failv: %v, stashdone: %v, nfast: %v, gaveup: %v,  epoch changes: %v, potential: %v, coordtotaltime %v, mergetime: %v, readtime: %v, gotime: %v ", *nworkers, ddtxn.WMoved, ddtxn.RMoved, *ddtxn.SysType, float64(nitr)/end.Seconds(), 100*float64(stats[ddtxn.NABORTS])/float64(nitr+stats[ddtxn.NABORTS]), 100*float64(stats[ddtxn.NSTASHED])/float64(nitr+stats[ddtxn.NABORTS]), *nbidders, nproducts, *contention, nitr, end, end.Nanoseconds()/nitr, stats[ddtxn.NABORTS], ddtxn.Time_in_IE, ddtxn.Time_in_IE1, nwait, stats[ddtxn.NSTASHED], *ddtxn.UseRLocks, *ddtxn.WRRatio, stats[ddtxn.NSAMPLES], stats[ddtxn.NGETKEYCALLS], stats[ddtxn.NDDWRITES], stats[ddtxn.NO_LOCK], stats[ddtxn.NFAIL_VERIFY], stats[ddtxn.NDIDSTASHED], ddtxn.Nfast, gave_up[0], ddtxn.NextEpoch, coord.PotentialPhaseChanges, coord.TotalCoordTime, coord.MergeTime, coord.ReadTime, coord.GoTime)
	fmt.Println(out)

	fmt.Printf("DD: %v\n", coord.Workers[0].Store().DD())
	f, err := os.OpenFile(*dataFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	ddtxn.PrintStats(out, stats, f, coord, s, *nbidders)

	x, y := coord.Latency()
	f.WriteString(x)
	f.WriteString(y)
	f.WriteString("\n")
}
Example #4
File: buy.go Project: ngaut/ddtxn
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*nprocs)
	if *clientGoRoutines == 0 {
		*clientGoRoutines = *nprocs
	}
	if *nworkers == 0 {
		*nworkers = *nprocs
	}

	if *doValidate {
		if !*ddtxn.Allocate {
			log.Fatalf("Cannot correctly validate without waiting for results; add -allocate\n")
		}
	}
	if *contention == -1.0 && *ZipfDist == -1.0 {
		log.Fatalf("Should use zipf or contention")
	}
	var nproducts int
	if *contention > 0 {
		nproducts = *nbidders / int(*contention)
	} else {
		nproducts = *nbidders
	}
	s := ddtxn.NewStore()
	buy_app := &apps.Buy{}
	buy_app.Init(nproducts, *nbidders, *nworkers, *readrate, *clientGoRoutines, *notcontended_readrate, *ZipfDist)
	dlog.Printf("Starting to initialize buy\n")
	buy_app.Populate(s, nil)

	coord := ddtxn.NewCoordinator(*nworkers, s)

	if *ddtxn.CountKeys {
		for i := 0; i < *nworkers; i++ {
			w := coord.Workers[i]
			w.NKeyAccesses = make([]int64, *nbidders)
		}
	}

	dlog.Printf("Done initializing buy\n")

	p := prof.StartProfile()
	start := time.Now()

	var wg sync.WaitGroup

	gave_upr := make([]int64, *clientGoRoutines)
	gave_upw := make([]int64, *clientGoRoutines)
	var ending_retries int64
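	// gave_upr/gave_upw count reads and writes whose next retry would fall past the deadline; ending_retries counts work still queued when a client exits.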
	for i := 0; i < *clientGoRoutines; i++ {
		wg.Add(1)
		go func(n int) {
			exp := ddtxn.MakeExp(50)
			retries := make(ddtxn.RetryHeap, 0)
			heap.Init(&retries)
			end_time := time.Now().Add(time.Duration(*nsec) * time.Second)
			var local_seed uint32 = uint32(rand.Intn(10000000))
			var sp uint32 = uint32(*nbidders / *clientGoRoutines)
			w := coord.Workers[n%(*nworkers)]
			var tm time.Time
			for {
				tm = time.Now()
				if !end_time.After(tm) {
					break
				}
				var t ddtxn.Query
				if len(retries) > 0 && retries[0].TS.Before(tm) {
					t = heap.Pop(&retries).(ddtxn.Query)
				} else {
					buy_app.MakeOne(w.ID, &local_seed, sp, &t)
					if *ddtxn.Latency {
						t.S = time.Now()
					}
				}
				if *doValidate {
					t.W = make(chan struct {
						R *ddtxn.Result
						E error
					}, 1)
				}
				committed := false
				_, err := w.One(t)
				if err == ddtxn.ESTASH {
					if *doValidate {
						x := <-t.W
						err = x.E
						if err == ddtxn.EABORT {
							log.Fatalf("Should be run until commitment!\n")
						}
					}
					committed = true // The worker stash code will retry
				} else if err == ddtxn.EABORT {
					committed = false
				} else {
					committed = true
				}
				t.I++
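				// Exponential backoff before retrying an aborted transaction; drop it if the retry would land after end_time.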
				if !committed {
					e := uint32(exp.Exp(t.I))
					if e < 1 {
						e = 1
					}
					if local_seed < 1 {
						local_seed = 1
					}
					rnd := ddtxn.RandN(&local_seed, e)
					if rnd <= 2 {
						rnd = 2
					}
					t.TS = tm.Add(time.Duration(rnd) * time.Microsecond)
					if t.TS.Before(end_time) {
						heap.Push(&retries, t)
					} else {
						if ddtxn.IsRead(t.TXN) {
							gave_upr[n]++
						} else {
							gave_upw[n]++
						}
					}
				}
				if committed && *doValidate {
					buy_app.Add(t)
				}
			}
			w.Finished()
			wg.Done()
			if len(retries) > 0 {
				dlog.Printf("[%v] Length of retry queue on exit: %v\n", n, len(retries))
			}
			atomic.AddInt64(&ending_retries, int64(len(retries)))
		}(i)
	}
	wg.Wait()
	coord.Finish()
	end := time.Since(start)
	p.Stop()

	stats := make([]int64, ddtxn.LAST_STAT)
	nitr, nwait, nnoticed, nmerge, nmergewait, njoin, njoinwait := ddtxn.CollectCounts(coord, stats)

	if *doValidate {
		buy_app.Validate(s, int(nitr))
	}

	for i := 1; i < *clientGoRoutines; i++ {
		gave_upr[0] = gave_upr[0] + gave_upr[i]
		gave_upw[0] = gave_upw[0] + gave_upw[i]
	}

	if ddtxn.NextEpoch == 0 {
		ddtxn.NextEpoch = 1
	}
	// nitr + NABORTS + ENOKEY is how many requests were issued.  A
	// stashed transaction eventually executes and contributes to
	// nitr.
	out := fmt.Sprintf(" nworkers: %v, nwmoved: %v, nrmoved: %v, sys: %v, total/sec: %v, abortrate: %.2f, stashrate: %.2f, rr: %v, nbids: %v, nproducts: %v, contention: %v, done: %v, actual time: %v, nreads: %v, nbuys: %v, epoch changes: %v, throughput ns/txn: %v, naborts: %v, coord time: %v, coord stats time: %v, nstashed: %v, rlock: %v, wrratio: %v, nsamples: %v, getkeys: %v, ddwrites: %v, nolock: %v, failv: %v, stashdone: %v, nfast: %v, gaveup_reads: %v, gaveup_writes: %v, lenretries: %v, potential: %v, coordtotaltime %v, mergetime: %v, readtime: %v, gotime: %v,  workertransitiontime: %v, workernoticetime: %v, workermergetime: %v, workermergewaittime: %v, workerjointime: %v, workerjoinwaittime: %v, readaborts: %v  ", *nworkers, ddtxn.WMoved, ddtxn.RMoved, *ddtxn.SysType, float64(nitr)/end.Seconds(), 100*float64(stats[ddtxn.NABORTS])/float64(nitr+stats[ddtxn.NABORTS]), 100*float64(stats[ddtxn.NSTASHED])/float64(nitr+stats[ddtxn.NABORTS]), *readrate, *nbidders, nproducts, *contention, nitr, end, stats[ddtxn.D_READ_TWO], stats[ddtxn.D_BUY], ddtxn.NextEpoch, end.Nanoseconds()/nitr, stats[ddtxn.NABORTS], ddtxn.Time_in_IE, ddtxn.Time_in_IE1, stats[ddtxn.NSTASHED], *ddtxn.UseRLocks, *ddtxn.WRRatio, stats[ddtxn.NSAMPLES], stats[ddtxn.NGETKEYCALLS], stats[ddtxn.NDDWRITES], stats[ddtxn.NO_LOCK], stats[ddtxn.NFAIL_VERIFY], stats[ddtxn.NDIDSTASHED], ddtxn.Nfast, gave_upr[0], gave_upw[0], ending_retries, coord.PotentialPhaseChanges, coord.TotalCoordTime, coord.MergeTime, coord.ReadTime, coord.GoTime, nwait, nnoticed, nmerge, nmergewait, njoin, njoinwait, stats[ddtxn.NREADABORTS])
	fmt.Println(out)
	f, err := os.OpenFile(*dataFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Printf("DD: %v\n", coord.Workers[0].Store().DD())
	ddtxn.PrintStats(out, stats, f, coord, s, *nbidders)

	x, y := coord.Latency()
	f.WriteString(x)
	f.WriteString(y)
	f.WriteString("\n")
}
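Note: Examples #1, #3, and #4 all drain a ddtxn.RetryHeap of aborted transactions before issuing new work, popping entries once their retry timestamp has passed. The following is a self-contained sketch of the min-heap behavior those loops rely on; the Query fields and RetryHeap implementation are simplified stand-ins, not the real ddtxn types.

package main

import (
	"container/heap"
	"fmt"
	"time"
)

// Query is a simplified stand-in for ddtxn.Query, keeping only the fields
// the retry loops above actually touch.
type Query struct {
	TS time.Time // earliest time at which to retry this transaction
	I  int       // number of attempts so far
}

// RetryHeap is a min-heap of queries ordered by retry time, so retries[0]
// is always the next transaction due, as the benchmark loops assume.
type RetryHeap []Query

func (h RetryHeap) Len() int           { return len(h) }
func (h RetryHeap) Less(i, j int) bool { return h[i].TS.Before(h[j].TS) }
func (h RetryHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *RetryHeap) Push(x interface{}) { *h = append(*h, x.(Query)) }
func (h *RetryHeap) Pop() interface{} {
	old := *h
	n := len(old)
	q := old[n-1]
	*h = old[:n-1]
	return q
}

func main() {
	retries := make(RetryHeap, 0)
	heap.Init(&retries)
	now := time.Now()
	heap.Push(&retries, Query{TS: now.Add(200 * time.Microsecond), I: 2})
	heap.Push(&retries, Query{TS: now.Add(50 * time.Microsecond), I: 1})

	// Pop retries whose scheduled time has passed, earliest first, the same
	// way the benchmark loops drain their retry queues each iteration.
	deadline := now.Add(time.Millisecond)
	for len(retries) > 0 && retries[0].TS.Before(deadline) {
		q := heap.Pop(&retries).(Query)
		fmt.Printf("retrying attempt %d scheduled at +%v\n", q.I, q.TS.Sub(now))
	}
}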