func ViewUserInfoTxn(t Query, tx ETransaction) (*Result, error) { uk := UserKey(t.U1) urec, err := tx.Read(uk) if err != nil { if err == ESTASH { dlog.Printf("User %v stashed\n", t.U1) return nil, ESTASH } else if err == EABORT { return nil, EABORT } else if err == ENOKEY { dlog.Printf("No user? %v\n", t.U1) if tx.Commit() == 0 { return nil, EABORT } else { return nil, ENORETRY } } else { log.Fatalf("view user err: %v\n", err) } } _ = urec.Value().(*User) if tx.Commit() == 0 { return nil, EABORT } var r *Result = nil if *Allocate { r = &Result{urec.Value()} } return r, nil }
// BenchmarkRead drives a mixed buy/read workload (read_rate percent
// reads) from 4 goroutines against a freshly loaded store, then
// validates the summed product counters against the committed buys.
func BenchmarkRead(b *testing.B) {
	runtime.GOMAXPROCS(4)
	b.StopTimer()
	nb := 10000 // number of user (bidder) keys
	np := 100   // number of product keys
	n := 4      // worker goroutines
	s := NewStore()
	// Load
	for i := 0; i < np; i++ {
		s.CreateKey(ProductKey(i), int32(0), SUM)
	}
	for i := 0; i < nb; i++ {
		s.CreateKey(UserKey(uint64(i)), "x", WRITE)
	}
	c := NewCoordinator(n, s)
	// Expected per-product totals, updated only for committed buys.
	val := make([]int32, np)
	read_rate := 50
	var wg sync.WaitGroup
	b.StartTimer()
	for p := 0; p < n; p++ {
		wg.Add(1)
		go func(id int) {
			w := c.Workers[id]
			for i := 0; i < b.N/3; i++ {
				p := ProductKey(i % np)
				u := UserKey(uint64(i % nb))
				amt := int32(rand.Intn(100))
				var tx Query
				rr := rand.Intn(100)
				if rr >= read_rate {
					tx = Query{TXN: D_BUY, K1: u, K2: p, A: amt, W: nil, T: 0}
					_, err := w.One(tx)
					if err == nil {
						// Committed: fold the purchase into expected totals.
						atomic.AddInt32(&val[i%np], amt)
					}
				} else {
					tx = Query{TXN: D_READ_ONE, K1: p, W: make(chan struct {
						R *Result
						E error
					}), T: 0}
					_, err := w.One(tx)
					if err == ESTASH {
						// The read was stashed; block until its result is
						// delivered on the reply channel.
						dlog.Printf("client [%v] waiting for %v; epoch %v\n", w.ID, i%np, w.epoch)
						<-tx.W
					}
				}
			}
			wg.Done()
		}(p)
	}
	dlog.Printf("Waiting on outer\n")
	wg.Wait()
	b.StopTimer()
	c.Finish()
	Validate(c, s, nb, np, val, b.N)
}
// PutCommentTxn reads the target user (t.U1) and the item (t.U2) so a
// comment form can be rendered with the user's nickname and the item's
// name. Read-only: every path commits or returns an error without
// writing.
func PutCommentTxn(t Query, tx ETransaction) (*Result, error) {
	var r *Result = nil
	touser := t.U1
	item := t.U2
	tok := UserKey(touser)
	torec, err := tx.Read(tok)
	if err != nil {
		if err == ESTASH {
			dlog.Printf("User key for user %v stashed\n", touser)
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("No user? %v\n", touser)
			// Missing key: commit the read-only txn, then report a
			// permanent (no-retry) failure.
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, ENORETRY
			}
		} else {
			log.Fatalf("err %v\n", err)
		}
	}
	nickname := torec.Value().(*User).Nickname
	ik := ItemKey(item)
	irec, err := tx.Read(ik)
	if err != nil {
		if err == ESTASH {
			dlog.Printf("Item key %v stashed\n", item)
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("PutCommentTxn: No item? %v\n", item)
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, ENORETRY
			}
		} else {
			log.Fatalf("err %v\n", err)
		}
	}
	itemname := irec.Value().(*Item).Name
	if tx.Commit() == 0 {
		return r, EABORT
	}
	if *Allocate {
		// Bundle the two strings into an anonymous struct result.
		r = &Result{
			&struct {
				nick  string
				iname string
			}{nickname, itemname},
		}
	}
	return r, nil
}
func (b *Buy) Populate(s *ddtxn.Store, ex *ddtxn.ETransaction) { for i := 0; i < b.nbidders; i++ { k := ddtxn.ProductKey(i) s.CreateKey(k, int32(0), ddtxn.SUM) } dlog.Printf("Created %v products; np: %v\n", b.nbidders, b.nproducts) for i := 0; i < b.nbidders; i++ { k := ddtxn.UserKey(uint64(i)) s.CreateKey(k, "x", ddtxn.WRITE) } dlog.Printf("Created %v bidders\n", b.nbidders) dlog.Printf("Done with Populate") }
// RegisterUserTxn creates a new user row and claims a nickname key.
// When *Allocate is off or no nickname was supplied (t.U2 == 0), both
// the user ID and the nickname key are drawn from the transaction's UID
// generators; otherwise t.U2 serves as both. Fails with ENORETRY if the
// nickname is already taken.
func RegisterUserTxn(t Query, tx ETransaction) (*Result, error) {
	region := t.U1
	nickname := t.U2
	var r *Result = nil
	var n uint64
	var nick Key
	if !*Allocate || nickname == 0 {
		n = tx.UID('u')
		nick = NicknameKey(tx.UID('d'))
	} else {
		n = nickname
		nick = NicknameKey(n)
	}
	u := UserKey(n)
	user := &User{
		ID:       n,
		Name:     "xxxxxxx", // placeholder name — presumably filled by the app elsewhere; confirm
		Nickname: string(nickname),
		Region:   region,
	}
	// Declare write intent on the nickname key before reading it, then
	// check whether some other user already holds it.
	tx.MaybeWrite(nick)
	br, err := tx.Read(nick)
	var val uint64 = 0
	if br != nil && br.exists {
		val = br.Value().(uint64)
	}
	if err != ENOKEY && val != 0 {
		// Someone else is using this nickname
		dlog.Printf("Nickname taken %v %v\n", nickname, nick)
		tx.Abort()
		return nil, ENORETRY
	}
	tx.Write(u, user, WRITE)
	tx.Write(nick, nickname, WRITE)
	if tx.Commit() == 0 {
		dlog.Printf("RegisterUser() Abort\n")
		return nil, EABORT
	}
	if *Allocate {
		r = &Result{uint64(n)}
		// dlog.Printf("Registered user %v %v\n", nickname, n)
	}
	return r, nil
}
// Used during "merge" phase, along with br.mu func (br *BRecord) Apply(val Value) { if br == nil { dlog.Printf("Nil record %v %v\n", val, br) } switch br.key_type { case SUM: delta := val.(int32) atomic.AddInt32(&br.int_value, delta) case MAX: delta := val.(int32) br.mu.Lock() defer br.mu.Unlock() if br.int_value < delta { br.int_value = delta } case WRITE: br.mu.Lock() defer br.mu.Unlock() br.value = val case LIST: br.mu.Lock() defer br.mu.Unlock() entries := val.([]Entry) br.listApply(entries) case OOWRITE: br.mu.Lock() defer br.mu.Unlock() x := val.(Overwrite) if br.int_value < x.i { br.int_value = x.i br.value = x.v } } }
func NewCoordinator(n int, s *Store) *Coordinator { c := &Coordinator{ n: n, Workers: make([]*Worker, n), epochTID: EPOCH_INCR, wepoch: make([]chan TID, n), wsafe: make([]chan TID, n), wgo: make([]chan TID, n), wdone: make([]chan TID, n), Done: make(chan chan bool), Accelerate: make(chan bool), Coordinate: false, PotentialPhaseChanges: 0, to_remove: make(map[Key]bool), Finished: make([]bool, n), } for i := 0; i < n; i++ { c.wepoch[i] = make(chan TID) c.wsafe[i] = make(chan TID) c.wgo[i] = make(chan TID) c.wdone[i] = make(chan TID) c.Finished[i] = false c.Workers[i] = NewWorker(i, s, c) } c.Finished = make([]bool, n) dlog.Printf("[coordinator] %v workers\n", n) go c.Process() return c }
func (b *Buy) Validate(s *ddtxn.Store, nitr int) bool { good := true zero_cnt := 0 for j := 0; j < b.nproducts; j++ { var x int32 k := ddtxn.ProductKey(j) v, err := s.Get(k) if err != nil { if b.validate[j] != 0 { fmt.Printf("Validating key %v failed; store: none should have: %v\n", k, b.validate[j]) good = false } continue } x = v.Value().(int32) if x != b.validate[j] { fmt.Printf("Validating key %v failed; store: %v should have: %v\n", k, x, b.validate[j]) good = false } if x == 0 { zero_cnt++ } } if zero_cnt == b.nproducts && nitr > 10 { fmt.Printf("Bad: all zeroes!\n") dlog.Printf("Bad: all zeroes!\n") good = false } return good }
// BenchmarkBuy drives a D_BUY-only workload from 8 goroutines against a
// preloaded store and validates the per-product totals afterwards.
func BenchmarkBuy(b *testing.B) {
	runtime.GOMAXPROCS(8)
	b.StopTimer()
	nb := 10000 // user keys
	np := 100   // product keys
	n := 8      // worker goroutines
	s := NewStore()
	// Load
	for i := 0; i < np; i++ {
		// NOTE(review): products are created MAX-typed here, while val[]
		// below accumulates sums and the sibling BenchmarkRead uses SUM —
		// confirm MAX is intentional.
		s.CreateKey(ProductKey(i), int32(0), MAX)
	}
	for i := 0; i < nb; i++ {
		s.CreateKey(UserKey(uint64(i)), "x", WRITE)
	}
	c := NewCoordinator(n, s)
	// Expected per-product totals, updated only for committed buys.
	val := make([]int32, np)
	var wg sync.WaitGroup
	b.StartTimer()
	for p := 0; p < n; p++ {
		wg.Add(1)
		go func(id int) {
			w := c.Workers[id]
			for i := 0; i < b.N/3; i++ {
				p := ProductKey(i % np)
				u := UserKey(uint64(i % nb))
				amt := int32(rand.Intn(100))
				tx := Query{TXN: D_BUY, K1: u, A: amt, K2: p, W: nil, T: 0}
				_, err := w.One(tx)
				if err == nil {
					atomic.AddInt32(&val[i%np], amt)
				}
			}
			wg.Done()
		}(p)
	}
	dlog.Printf("Waiting on outer\n")
	wg.Wait()
	dlog.Printf("done\n")
	b.StopTimer()
	c.Finish()
	Validate(c, s, nb, np, val, b.N)
	//PrintLockCounts(s, nb, np, false)
}
func TestRandN(t *testing.T) { var seed uint32 = uint32(1) dlog.Printf("seed %v\n", seed) for i := 0; i < 1000; i++ { x := RandN(&seed, 10) // No idea how to test a random number generator, just look at the results for now. dlog.Println(x, seed) _ = x } }
// StoreCommentTxn writes a new comment from user t.U2 about user t.U1
// concerning item t.U3, and folds the rating t.U4 into the target
// user's running rating sum. Returns the new comment ID when *Allocate
// is set.
func StoreCommentTxn(t Query, tx ETransaction) (*Result, error) {
	touser := t.U1
	fromuser := t.U2
	item := t.U3
	comment_s := t.S1
	rating := t.U4
	n := tx.UID('c')
	com := CommentKey(n)
	comment := &Comment{
		ID:      n,
		From:    fromuser,
		To:      touser,
		Rating:  rating,
		Comment: comment_s,
		Item:    item,
		Date:    11, // placeholder date constant — TODO confirm real clock is plumbed in elsewhere
	}
	tx.Write(com, comment, WRITE)
	// Accumulate the rating on the recipient's SUM-typed rating key.
	rkey := RatingKey(touser)
	err := tx.WriteInt32(rkey, int32(rating), SUM)
	if err != nil {
		dlog.Printf("Comment abort %v\n", t)
		tx.Abort()
		return nil, err
	}
	if tx.Commit() == 0 {
		dlog.Printf("Comment abort %v\n", t)
		return nil, EABORT
	}
	var r *Result = nil
	if *Allocate {
		r = &Result{uint64(n)}
		dlog.Printf("%v Comment %v %v\n", touser, fromuser, item)
	}
	return r, nil
}
func Test2RandN(t *testing.T) { n := 0 var local_seed uint32 = uint32(n + 1) portion_sz := 100 dlog.Printf("LOCAL: %v\n", local_seed) j := 0 for { select { default: var bidder int rand := RandN(&local_seed, uint32(portion_sz)) lb := int(rand) bidder = lb + n*portion_sz amt := int(RandN(&local_seed, 10)) dlog.Printf("%v rand: %v bidder: %v local: %v amt: %v\n", n, rand, bidder, local_seed, amt) j++ if j > 100 { return } } } }
// CreateMuRLockedKey inserts a fresh BRecord for k with its read-lock
// (SRLock) already held, returning EEXISTS if the key is present. The
// caller is responsible for releasing the lock on the returned record.
func (s *Store) CreateMuRLockedKey(k Key, kt KeyType) (*BRecord, error) {
	br := MakeBR(k, nil, kt)
	br.SRLock()
	if *GStore {
		// Lock-free global map path.
		ok := s.gstore.PutIfMissing(gotomic.Key(k), unsafe.Pointer(br))
		if !ok {
			// NOTE(review): the freshly made record is discarded here while
			// still SRLocked — presumably harmless since no one else can see
			// it, but confirm SRLock holds no external resource.
			dlog.Printf("Key already exists %v\n", k)
			return nil, EEXISTS
		}
	} else {
		// Chunked map path: chunks are sharded by the key's first byte.
		chunk := s.store[k[0]]
		chunk.Lock()
		_, ok := chunk.rows[k]
		if ok {
			chunk.Unlock()
			dlog.Printf("Key already exists %v\n", k)
			return nil, EEXISTS
		}
		chunk.rows[k] = br
		chunk.Unlock()
	}
	return br, nil
}
func Validate(c *Coordinator, s *Store, nkeys int, nproducts int, val []int32, n int) bool { good := true dlog.Printf("Validate start, store at %x\n", c.GetEpoch()) zero_cnt := 0 for j := 0; j < nproducts; j++ { var x int32 k := ProductKey(j) v, err := s.getKey(k, nil) if err != nil { if val[j] != 0 { fmt.Printf("Validating key %v failed; store: none should have: %v\n", k, val[j]) dlog.Printf("Validating key %v failed; store: none should have: %v\n", k, val[j]) good = false } continue } x = v.Value().(int32) if x != val[j] { dlog.Printf("Validating key %v failed; store: %v should have: %v\n", k, x, val[j]) good = false } if x == 0 { //dlog.Printf("Saying x is zero %v %v\n", x, zero_cnt) zero_cnt++ } } if zero_cnt == nproducts && n > 10 { fmt.Printf("Bad: all zeroes!\n") dlog.Printf("Bad: all zeroes!\n") good = false } dlog.Printf("Done validating\n") if !good { fmt.Printf("Validating failed\n") } return good }
func compute(w *Worker, txn int) (int64, int64) { var total int64 var sum int64 var i int64 for i = 0; i < TIMES; i++ { total = total + w.times[txn][i] sum = sum + (w.times[txn][i] * i) } total = total + w.tooLong[txn] sum = sum + w.tooLong[txn]*10000000 var x99 int64 = int64(float64(total) * .99) var y99 int64 var v99 int64 var buckets [TIMES / 1000]int64 for i = 0; i < TIMES; i++ { buckets[i/1000] += w.times[txn][i] y99 = y99 + w.times[txn][i] if y99 >= x99 { v99 = i break } } if total == 0 { log.Fatalf("No latency recorded\n") } dlog.Printf("%v avg: %v us; 99: %v us, x99: %v, sum: %v, total: %v \n", txn, sum/total, v99, x99, sum, total) var one int64 var ten int64 var hundred int64 var more int64 for i = 0; i < TIMES/1000; i++ { if i == 0 { one += buckets[i] } else if i < 10 { ten += buckets[i] } else if i < 100 { hundred += buckets[i] } else { more += buckets[i] } } fmt.Printf("Txn %v\n Less than 1ms: %v\n 1-10ms: %v\n 10-100ms: %v\n 100ms-10s: %v\n Greater than 10s: %v\n", txn, one, ten, hundred, more, w.tooLong[txn]) total_time_in_ms := (one/2 + 5*ten + 55*100 + 15000*more) fmt.Printf("Rough total time in ms: %v\n", total_time_in_ms) return sum / total, v99 }
func (b *Buy) Init(np, nb, nw, rr, ngo int, ncrr, zipfd float64) { b.nproducts = np b.nbidders = nb b.nworkers = nw b.ngo = ngo b.read_rate = rr b.ncontended_rate = int(ncrr * float64(rr)) b.validate = make([]int32, nb) b.sp = uint32(nb / nw) if zipfd != -1 { b.z = make([]*ddtxn.Zipf, nw) for i := 0; i < nw; i++ { r := rand.New(rand.NewSource(int64(i * 38748767))) b.z[i] = ddtxn.NewZipf(r, zipfd, 1, uint64(b.nbidders-1)) } } b.zipfd = zipfd dlog.Printf("Read rate %v, not contended: %v\n", b.read_rate, b.ncontended_rate) }
// getKey fetches the BRecord for k. With *GStore it consults the
// lock-free global map (using a precomputed hash code when one is
// cached, and ld for per-thread lookup state); otherwise it uses either
// the static path (when *UseRLocks is off) or the chunked, RW-locked
// map sharded by the key's first byte. Returns ENOKEY when absent.
func (s *Store) getKey(k Key, ld *gotomic.LocalData) (*BRecord, error) {
	if len(k) == 0 {
		debug.PrintStack()
		log.Fatalf("[store] getKey(): Empty key\n")
	}
	if *GStore {
		var x unsafe.Pointer
		var ok bool
		// Use a cached hash code for this key if we have one.
		hc, present := s.hash_codes[k]
		if ld == nil {
			x, ok = s.gstore.Get(gotomic.Key(k))
		} else if !present {
			x, ok = s.gstore.GetHC(gotomic.Key(k).HashCode(), gotomic.Key(k), ld)
		} else {
			x, ok = s.gstore.GetHC(hc, gotomic.Key(k), ld)
		}
		if !ok {
			dlog.Printf("Not in hash map. %v %v %v\n", k, x, ok)
			return nil, ENOKEY
		} else {
			if x == nil {
				fmt.Printf("Nil brecord! %v\n", k)
			}
			return (*BRecord)(x), nil
		}
	}
	if !*UseRLocks {
		x, err := s.getKeyStatic(k)
		return x, err
	}
	// Chunked path: the chunk is selected by the key's first byte.
	chunk := s.store[k[0]]
	if chunk == nil {
		log.Fatalf("[store] Didn't initialize chunk for key %v byte %v\n", k, k[0])
	}
	chunk.RLock()
	vr, ok := chunk.rows[k]
	if !ok || vr == nil {
		chunk.RUnlock()
		return vr, ENOKEY
	}
	chunk.RUnlock()
	return vr, nil
}
func (c *Coordinator) Process() { tm := time.NewTicker(time.Duration(*PhaseLength) * time.Millisecond).C // More frequently, check if the workers are demanding a phase // change due to long stashed queue lengths. check_trigger := time.NewTicker(time.Duration(*PhaseLength) * time.Microsecond * 10).C for { select { case x := <-c.Done: if *SysType == DOPPEL && c.n > 1 && c.Workers[0].store.any_dd { c.IncrementEpoch(true) } for i := 0; i < c.n; i++ { c.Workers[i].done <- true } x <- true return case <-tm: if *SysType == DOPPEL && c.n > 1 { c.IncrementEpoch(false) } case <-check_trigger: if *SysType == DOPPEL && c.n > 1 { x := atomic.LoadInt32(&c.trigger) if x == int32(c.n) { Nfast++ atomic.StoreInt32(&c.trigger, 0) c.IncrementEpoch(true) } } case <-c.Accelerate: if *SysType == DOPPEL && c.n > 1 { dlog.Printf("Accelerating\n") c.IncrementEpoch(true) } } } }
func TestBasic(t *testing.T) { s := NewStore() c := NewCoordinator(1, s) w := c.Workers[0] s.CreateKey(ProductKey(4), int32(0), SUM) s.CreateKey(ProductKey(5), int32(0), WRITE) s.CreateKey(UserKey(1), "u1", WRITE) s.CreateKey(UserKey(2), "u2", WRITE) s.CreateKey(UserKey(3), "u3", WRITE) tx := Query{TXN: D_BUY, K1: UserKey(1), A: int32(5), K2: ProductKey(4), W: nil, T: 0} r, err := w.One(tx) _ = err // Fresh read test tx = Query{TXN: D_READ_ONE, K1: ProductKey(4), W: make(chan struct { R *Result E error }), T: 0} r, err = w.One(tx) dlog.Printf("[test] Returned from one\n") if r.V.(int32) != 5 { t.Errorf("Wrong answer %v\n", r) } }
// TODO: Check and see if I need more tx.MaybeWrite()s

// StoreBidTxn inserts a new bid on item t.U2 by user t.U1 at price
// t.U3 and updates the item's derived records: bid count (SUM), max
// bid (MAX), max bidder (OOWRITE versioned by price), and the per-item
// bid list (LIST). Any failed write relinquishes the allocated bid ID
// and aborts.
func StoreBidTxn(t Query, tx ETransaction) (*Result, error) {
	var r *Result = nil
	user := t.U1
	item := t.U2
	price := int32(t.U3)
	if price < 0 {
		log.Fatalf("price %v %v", price, t.U3)
	}
	// insert bid
	n := tx.UID('b')
	bid_key := BidKey(n)
	bid := &Bid{
		ID:     uint64(n),
		Item:   item,
		Bidder: user,
		Price:  price,
	}
	tx.Write(bid_key, bid, WRITE)
	// update # bids per item
	err := tx.WriteInt32(NumBidsKey(item), 1, SUM)
	if err != nil {
		// Return the unused bid ID before aborting.
		tx.RelinquishKey(n, 'b')
		tx.Abort()
		dlog.Printf("StoreBidTxn(): Couldn't write numbids for item %v; %v\n", item, err)
		return nil, err
	}
	// update max bid?
	high := MaxBidKey(item)
	tx.MaybeWrite(high)
	err = tx.WriteInt32(high, price, MAX)
	if err != nil {
		tx.RelinquishKey(n, 'b')
		dlog.Println("Aborting because of max")
		tx.Abort()
		dlog.Printf("StoreBidTxn(): Couldn't write maxbid for item %v; %v\n", item, err)
		return nil, err
	}
	bidder := MaxBidBidderKey(item)
	// Overwrite-if-newer keyed by price: records who holds the max bid.
	err = tx.WriteOO(bidder, price, user, OOWRITE)
	if err != nil {
		tx.RelinquishKey(n, 'b')
		dlog.Println("Aborting because of max oowrite")
		tx.Abort()
		dlog.Printf("StoreBidTxn(): Couldn't write maxbidder for item %v; %v\n", item, err)
		return nil, err
	}
	// add to item's bid list
	e := Entry{int(bid.Price), bid_key, 0}
	err = tx.WriteList(BidsPerItemKey(item), e, LIST)
	if err != nil {
		tx.RelinquishKey(n, 'b')
		tx.Abort()
		dlog.Printf("StoreBidTxn(): Error adding to bids per item key %v! %v\n", item, err)
		return nil, err
	}
	if tx.Commit() == 0 {
		tx.RelinquishKey(n, 'b')
		dlog.Printf("StoreBidTxn(): Abort item %v\n", item)
		return r, EABORT
	}
	if *Allocate {
		r = &Result{uint64(n)}
		// dlog.Printf("User %v Bid on item %v for %v dollars\n", user, item, price)
	}
	return r, nil
}
// SearchItemsRegionTxn reads the items-by-(region, category) index for
// (t.U1, t.U2) and, for each listed item, reads the item record, its
// bid count, and its max bid. At most 10 results are supported.
// Read-only: commits or returns an error without writing.
func SearchItemsRegionTxn(t Query, tx ETransaction) (*Result, error) {
	region := t.U1
	categ := t.U2
	num := t.U3
	var r *Result = nil
	if num > 10 {
		log.Fatalf("Only 10 search items are currently supported.\n")
	}
	ibrk := ItemsByRegKey(region, categ)
	ibrrec, err := tx.Read(ibrk)
	if err != nil {
		if err == ESTASH {
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("No index for cat/region %v/%v %v\n", region, categ, ibrk)
			// Missing index: commit the read-only txn, then permanent failure.
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, ENORETRY
			}
		} else {
			log.Fatalf("err: %v\n", err)
		}
	}
	listy := ibrrec.entries
	_ = listy
	if len(listy) > 10 {
		dlog.Printf("Only 10 search items are currently supported %v %v\n", len(listy), listy)
	}
	// Result slices are only materialized when allocation is on.
	var ret []*Item
	var maxb []int32
	var numb []int32
	if *Allocate {
		ret = make([]*Item, len(listy))
		maxb = make([]int32, len(listy))
		numb = make([]int32, len(listy))
	}
	var br *BRecord
	for i := 0; i < len(listy); i++ {
		// Each index entry's "top" field holds the item ID.
		k := uint64(listy[i].top)
		br, err = tx.Read(ItemKey(k))
		if err != nil {
			if err == ESTASH {
				return nil, ESTASH
			}
			if err == EABORT {
				return nil, EABORT
			}
			if err == ENOKEY {
				// Dangling index entry; skip this item.
				dlog.Printf("Item in list doesn't exist %v; %v\n", k, listy[i])
				continue
			} else {
				log.Fatalf("err: %v\n", err)
			}
		} else {
			val2 := br.Value().(*Item)
			_ = *val2
			if *Allocate {
				ret[i] = val2
			}
		}
		br, err = tx.Read(NumBidsKey(k))
		if err != nil {
			if err == ESTASH {
				return nil, ESTASH
			} else if err == EABORT {
				return nil, EABORT
			} else if err == ENOKEY {
				// An item with no bids yet simply has no count key.
				dlog.Printf("No number of bids key %v\n", k)
			} else {
				log.Fatalf("err: %v\n", err)
			}
		} else {
			val4 := br.int_value
			_ = val4
			if *Allocate {
				numb[i] = val4
			}
		}
		br, err = tx.Read(MaxBidKey(k))
		if err != nil {
			if err == ESTASH {
				return nil, ESTASH
			} else if err == EABORT {
				return nil, EABORT
			} else if err == ENOKEY {
				dlog.Printf("No max bid key %v\n", k)
			} else {
				log.Fatalf("err: %v\n", err)
			}
		} else {
			val3 := br.int_value
			_ = val3
			if *Allocate {
				maxb[i] = val3
			}
		}
	}
	if tx.Commit() == 0 {
		return r, EABORT
	}
	if *Allocate {
		r = &Result{
			&struct {
				items   []*Item
				maxbids []int32
				numbids []int32
			}{ret, maxb, numb},
		}
	}
	return r, nil
}
// ViewBidHistoryTxn reads item t.U1, its per-item bid list, and — for
// every listed bid — the bid record plus the bidder's nickname.
// Read-only. Having no bids at all is not an error (returns nil, nil).
func ViewBidHistoryTxn(t Query, tx ETransaction) (*Result, error) {
	item := t.U1
	ik := ItemKey(item)
	br, err := tx.Read(ik)
	if err != nil {
		if err == ESTASH {
			dlog.Printf("Item key %v stashed\n", item)
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("ViewBidTxn: No item? %v err: %v\n", item, err)
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, ENORETRY
			}
		} else {
			log.Fatalf("view bid err %v\n", err)
		}
	}
	// Type-check only; the item's fields are not used here.
	_ = br.Value().(*Item)
	bids := BidsPerItemKey(item)
	brec, err := tx.Read(bids)
	if err != nil {
		if err == ESTASH {
			dlog.Printf("BidsPerItem key %v stashed\n", item)
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("No bids for item %v\n", item)
			// An item without bids is valid: commit and return empty.
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, nil
			}
		} else {
			log.Fatalf("err %v\n", err)
		}
	}
	listy := brec.entries
	var rbids []Bid
	var rnn []string
	if *Allocate {
		rbids = make([]Bid, len(listy))
		rnn = make([]string, len(listy))
	}
	for i := 0; i < len(listy); i++ {
		// Each list entry's key field references a bid record.
		b, err := tx.Read(listy[i].key)
		if err != nil {
			if err == ESTASH {
				dlog.Printf("ViewBidHist() key stashed %v\n", listy[i].key)
				return nil, ESTASH
			} else if err == EABORT {
				return nil, EABORT
			} else if err == ENOKEY {
				dlog.Printf("ViewBidHist() No such key %v\n", listy[i].key)
				if tx.Commit() == 0 {
					return nil, EABORT
				} else {
					return nil, ENORETRY
				}
			} else {
				log.Fatalf("err %v\n", err)
			}
		}
		bid := b.Value().(*Bid)
		if *Allocate {
			rbids[i] = *bid
		}
		uk := UserKey(bid.Bidder)
		u, err := tx.Read(uk)
		if err != nil {
			if err == ESTASH {
				dlog.Printf("ViewBidHist() user stashed %v\n", uk)
				return nil, ESTASH
			} else if err == EABORT {
				return nil, EABORT
			} else if err == ENOKEY {
				dlog.Printf("ViewBidHist() Viewing bid %v and user doesn't exist?! %v\n", listy[i].key, uk)
				if tx.Commit() == 0 {
					return nil, EABORT
				} else {
					return nil, ENORETRY
				}
			} else {
				log.Fatalf("err %v\n", err)
			}
		}
		if *Allocate {
			rnn[i] = u.Value().(*User).Nickname
		}
	}
	if tx.Commit() == 0 {
		return nil, EABORT
	}
	var r *Result = nil
	if *Allocate {
		r = &Result{
			&struct {
				bids []Bid
				nns  []string
			}{rbids, rnn}}
	}
	return r, nil
}
// StoreBuyNowTxn records an immediate purchase of t.U3 units of item
// t.U2 by user t.U1, writes a BuyNow record, decrements the item's
// quantity, and closes the auction (Enddate = now) when stock reaches
// zero. Insufficient stock aborts with ENORETRY.
func StoreBuyNowTxn(t Query, tx ETransaction) (*Result, error) {
	now := 1 // placeholder timestamp — TODO confirm a real clock is intended here
	user := t.U1
	item := t.U2
	qty := t.U3
	bnrec := &BuyNow{
		BuyerID: user,
		ItemID:  item,
		Qty:     qty,
		Date:    now,
	}
	uk := UserKey(t.U1)
	br, err := tx.Read(uk)
	if err != nil {
		if err == ESTASH {
			dlog.Printf("User %v stashed\n", t.U1)
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("StoreBuyNowTxn(): No user? %v\n", t.U1)
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, ENORETRY
			}
		} else {
			log.Fatalf("err: %v\n", err)
		}
	}
	// Type-check only; the user's fields are not needed.
	_ = br.Value().(*User)
	ik := ItemKey(item)
	// The item record is re-written below, so declare write intent
	// before reading it.
	tx.MaybeWrite(ik)
	irec, err := tx.Read(ik)
	if err != nil {
		if err == ESTASH {
			dlog.Printf("StoreBuyNowTxn(): Item key %v stashed\n", item)
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("StoreBuyNowTxn(): No item? %v\n", item)
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, ENORETRY
			}
		} else {
			log.Fatalf("err: %v\n", err)
		}
	}
	itemv := irec.Value().(*Item)
	maxqty := itemv.Qty
	newq := maxqty - qty
	if maxqty < qty {
		// Not enough stock; retrying can never help.
		dlog.Printf("StoreBuyNowTxn(): Req quantity > quantity %v %v\n", qty, maxqty)
		tx.Abort()
		return nil, ENORETRY
	}
	bnk := BuyNowKey(tx.UID('k'))
	tx.Write(bnk, bnrec, WRITE)
	if newq == 0 {
		// Sold out: close the auction.
		itemv.Enddate = now
		itemv.Qty = 0
	} else {
		itemv.Qty = newq
	}
	tx.Write(ik, itemv, WRITE)
	if tx.Commit() == 0 {
		return nil, EABORT
	}
	var r *Result = nil
	if *Allocate {
		r = &Result{qty}
	}
	return r, nil
}
// Finished marks this worker as done in the coordinator's Finished
// slice.
// NOTE(review): this writes coordinator state from the worker's
// goroutine without synchronization — presumably the slice is only read
// after all workers have stopped; confirm with the coordinator's
// shutdown path.
func (w *Worker) Finished() {
	dlog.Printf("%v FINISHED (e=%v)\n", w.ID, w.epoch)
	w.coordinator.Finished[w.ID] = true
}
// PutBidTxn gathers what a bid form needs for item t.U1: the seller's
// nickname, the current number of bids, and the current max bid.
// Read-only: commits or returns an error without writing.
func PutBidTxn(t Query, tx ETransaction) (*Result, error) {
	item := t.U1
	ik := ItemKey(item)
	irec, err := tx.Read(ik)
	if err != nil {
		if err == ESTASH {
			dlog.Printf("Item key %v stashed\n", item)
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("PutBidTxn: No item? %v\n", item)
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, ENORETRY
			}
		} else {
			log.Fatalf("err %v\n", err)
		}
	}
	// Follow the seller reference stored on the item.
	tok := UserKey(irec.Value().(*Item).Seller)
	torec, err := tx.Read(tok)
	if err != nil {
		if err == ESTASH {
			dlog.Printf("User key for user %v stashed\n", tok)
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("No user? %v\n", tok)
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, ENORETRY
			}
		} else {
			log.Fatalf("err %v\n", err)
		}
	}
	nickname := torec.Value().(*User).Nickname
	numbk := NumBidsKey(item)
	numbrec, err := tx.Read(numbk)
	if err != nil {
		if err == ESTASH {
			dlog.Printf("Num bids key for item %v stashed\n", item)
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("No num bids? %v\n", item)
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, ENORETRY
			}
		} else {
			log.Fatalf("err %v\n", err)
		}
	}
	nb := numbrec.int_value
	maxbk := MaxBidKey(item)
	maxbrec, err := tx.Read(maxbk)
	if err != nil {
		if err == ESTASH {
			dlog.Printf("Max bid key for item %v stashed\n", item)
			return nil, ESTASH
		} else if err == EABORT {
			return nil, EABORT
		} else if err == ENOKEY {
			dlog.Printf("No max bid? %v\n", item)
			if tx.Commit() == 0 {
				return nil, EABORT
			} else {
				return nil, ENORETRY
			}
		} else {
			log.Fatalf("err %v\n", err)
		}
	}
	maxb := maxbrec.int_value
	if tx.Commit() == 0 {
		return nil, EABORT
	}
	var r *Result = nil
	if *Allocate {
		r = &Result{
			&struct {
				nick string
				max  int32
				numb int32
			}{nickname, maxb, nb},
		}
	}
	return r, nil
}
// main for the single-key increment/read driver: loads *nbidders
// SUM-typed product keys, then runs *clientGoRoutines clients for
// *nsec seconds issuing D_INCR_ONE / D_ATOMIC_INCR_ONE / D_READ_ONE
// against keys chosen by Zipf, a fixed contended key, or a uniform
// (optionally partitioned) draw. Aborted queries are retried with
// exponential backoff via a per-client retry heap. Results are printed
// and appended to *dataFile.
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*nprocs)
	if *clientGoRoutines == 0 {
		*clientGoRoutines = *nprocs
	}
	if *nworkers == 0 {
		*nworkers = *nprocs
	}
	// Exactly one of -contention (prob) and -zipf may be active.
	if *prob == -1 && *ZipfDist < 0 {
		log.Fatalf("Zipf distribution must be positive")
	}
	if *ZipfDist >= 0 && *prob > -1 {
		log.Fatalf("Set contention to -1 to use Zipf distribution of keys")
	}
	s := ddtxn.NewStore()
	sp := uint32(*nbidders / *nworkers) // per-worker key partition size
	for i := 0; i < *nbidders; i++ {
		k := ddtxn.ProductKey(i)
		s.CreateKey(k, int32(0), ddtxn.SUM)
	}
	dlog.Printf("Done with Populate")
	coord := ddtxn.NewCoordinator(*nworkers, s)
	if *ddtxn.CountKeys {
		for i := 0; i < *nworkers; i++ {
			w := coord.Workers[i]
			w.NKeyAccesses = make([]int64, *nbidders)
		}
	}
	dlog.Printf("Done initializing single\n")
	p := prof.StartProfile()
	start := time.Now()
	var wg sync.WaitGroup
	pkey := int(sp - 1) // the designated contended key
	dlog.Printf("Partition size: %v; Contended key %v\n", sp/2, pkey)
	gave_up := make([]int64, *clientGoRoutines)
	goZipf := make([]*ddtxn.Zipf, *clientGoRoutines)
	if *prob == -1 && *ZipfDist >= 0 {
		// One deterministically seeded Zipf generator per client.
		for i := 0; i < *clientGoRoutines; i++ {
			rnd := rand.New(rand.NewSource(int64(i * 12467)))
			goZipf[i] = ddtxn.NewZipf(rnd, *ZipfDist, 1, uint64(*nbidders)-1)
			if goZipf[i] == nil {
				panic("nil zipf")
			}
		}
	}
	for i := 0; i < *clientGoRoutines; i++ {
		wg.Add(1)
		go func(n int) {
			exp := ddtxn.MakeExp(50)
			retries := make(ddtxn.RetryHeap, 0)
			heap.Init(&retries)
			var local_seed uint32 = uint32(rand.Intn(10000000))
			wi := n % (*nworkers)
			w := coord.Workers[wi]
			// This client's non-contended key range when partitioning.
			top := (wi + 1) * int(sp)
			bottom := wi * int(sp)
			dlog.Printf("%v: Noncontended section: %v to %v\n", n, bottom, top)
			end_time := time.Now().Add(time.Duration(*nsec) * time.Second)
			for {
				tm := time.Now()
				if !end_time.After(tm) {
					break
				}
				var t ddtxn.Query
				// Prefer a due retry over generating a fresh query.
				if len(retries) > 0 && retries[0].TS.Before(tm) {
					t = heap.Pop(&retries).(ddtxn.Query)
				} else {
					x := float64(ddtxn.RandN(&local_seed, 100))
					if *prob == -1 {
						// Zipf-selected key. NOTE(review): x here shadows the
						// outer x, and `x < 0` is always false for a uint64 —
						// the bounds check is effectively one-sided.
						x := goZipf[n].Uint64()
						if x >= uint64(*nbidders) || x < 0 {
							log.Fatalf("x not in bounds: %v\n", x)
						}
						t.K1 = ddtxn.ProductKey(int(x))
					} else if x < *prob {
						// contended txn
						t.K1 = ddtxn.ProductKey(pkey)
					} else {
						// uncontended
						k := pkey
						// Redraw until we land off the contended key.
						for k == pkey {
							if *partition {
								rnd := ddtxn.RandN(&local_seed, sp-1)
								lb := int(rnd)
								k = lb + wi*int(sp) + 1
								if k < bottom || k >= top+1 {
									log.Fatalf("%v: outside my range %v [%v-%v]\n", n, k, bottom, top)
								}
							} else {
								k = int(ddtxn.RandN(&local_seed, uint32(*nbidders)))
							}
						}
						t.K1 = ddtxn.ProductKey(k)
					}
					t.TXN = ddtxn.D_INCR_ONE
					if *atomicIncr {
						t.TXN = ddtxn.D_ATOMIC_INCR_ONE
					}
					// Convert *readrate percent of queries into reads.
					y := int(ddtxn.RandN(&local_seed, 100))
					if y < *readrate {
						t.TXN = ddtxn.D_READ_ONE
					}
				}
				committed := false
				_, err := w.One(t)
				if err == ddtxn.EABORT {
					committed = false
				} else {
					committed = true
				}
				t.I++ // attempt counter, drives the backoff exponent
				if !committed {
					e := exp.Exp(t.I)
					if e <= 0 {
						e = 1
					}
					rnd := ddtxn.RandN(&local_seed, e)
					if rnd <= 0 {
						rnd = 1
					}
					// Schedule the retry; drop it if it would land after the
					// end of the run.
					t.TS = tm.Add(time.Duration(rnd) * time.Microsecond)
					if t.TS.Before(end_time) {
						heap.Push(&retries, t)
					} else {
						gave_up[n]++
					}
				}
			}
			w.Finished()
			wg.Done()
			if len(retries) > 0 {
				dlog.Printf("[%v] Length of retry queue on exit: %v\n", n, len(retries))
			}
			// Queries still queued at exit count as given up.
			gave_up[n] = gave_up[n] + int64(len(retries))
		}(i)
	}
	wg.Wait()
	coord.Finish()
	end := time.Since(start)
	p.Stop()
	stats := make([]int64, ddtxn.LAST_STAT)
	nitr, nwait, _, _, _, _, _ := ddtxn.CollectCounts(coord, stats)
	for i := 1; i < *clientGoRoutines; i++ {
		gave_up[0] = gave_up[0] + gave_up[i]
	}
	// nitr + NABORTS + ENOKEY is how many requests were issued. A
	// stashed transaction eventually executes and contributes to
	// nitr.
	// NOTE(review): end.Nanoseconds()/nitr divides by nitr — panics if
	// zero transactions completed; confirm that cannot happen here.
	out := fmt.Sprintf(" nworkers: %v, nwmoved: %v, nrmoved: %v, sys: %v, total/sec: %v, abortrate: %.2f, stashrate: %.2f, rr: %v, nkeys: %v, contention: %v, zipf: %v, done: %v, actual time: %v, nreads: %v, nincrs: %v, epoch changes: %v, throughput ns/txn: %v, naborts: %v, coord time: %v, coord stats time: %v, total worker time transitioning: %v, nstashed: %v, rlock: %v, wrratio: %v, nsamples: %v, getkeys: %v, ddwrites: %v, nolock: %v, failv: %v, nlocked: %v, stashdone: %v, nfast: %v, gaveup: %v, potential: %v ", *nworkers, ddtxn.WMoved, ddtxn.RMoved, *ddtxn.SysType, float64(nitr)/end.Seconds(), 100*float64(stats[ddtxn.NABORTS])/float64(nitr+stats[ddtxn.NABORTS]), 100*float64(stats[ddtxn.NSTASHED])/float64(nitr+stats[ddtxn.NABORTS]), *readrate, *nbidders, *prob, *ZipfDist, nitr, end, stats[ddtxn.D_READ_ONE], stats[ddtxn.D_INCR_ONE], ddtxn.NextEpoch, end.Nanoseconds()/nitr, stats[ddtxn.NABORTS], ddtxn.Time_in_IE, ddtxn.Time_in_IE1, nwait, stats[ddtxn.NSTASHED], *ddtxn.UseRLocks, *ddtxn.WRRatio, stats[ddtxn.NSAMPLES], stats[ddtxn.NGETKEYCALLS], stats[ddtxn.NDDWRITES], stats[ddtxn.NO_LOCK], stats[ddtxn.NFAIL_VERIFY], stats[ddtxn.NLOCKED], stats[ddtxn.NDIDSTASHED], ddtxn.Nfast, gave_up[0], coord.PotentialPhaseChanges)
	// NOTE(review): fmt.Printf(out) passes a non-constant format string
	// (go vet printf warning); fmt.Print(out) would be safer.
	fmt.Printf(out)
	fmt.Printf("\n")
	f, err := os.OpenFile(*dataFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	ddtxn.PrintStats(out, stats, f, coord, s, *nbidders)
}
// main for the "big" app driver: populates the store via apps.Big,
// runs *clientGoRoutines clients for *nsec seconds generating queries
// with big_app.MakeOne, optionally measuring latency and validating
// results, then prints and appends run statistics to *dataFile.
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*nprocs)
	if *clientGoRoutines == 0 {
		*clientGoRoutines = *nprocs
	}
	if *nworkers == 0 {
		*nworkers = *nprocs
	}
	nproducts := *nbidders / *contention
	if *doValidate {
		// Validation needs real results, which only exist with -allocate.
		if !*ddtxn.Allocate {
			log.Fatalf("Cannot correctly validate without waiting for results; add -allocate\n")
		}
	}
	s := ddtxn.NewStore()
	coord := ddtxn.NewCoordinator(*nworkers, s)
	if *ddtxn.CountKeys {
		for i := 0; i < *nworkers; i++ {
			w := coord.Workers[i]
			w.NKeyAccesses = make([]int64, *nbidders)
		}
	}
	big_app := &apps.Big{}
	big_app.Init(*nbidders, nproducts, *nworkers, *readrate, *clientGoRoutines, *notcontended_readrate)
	big_app.Populate(s, coord.Workers[0].E)
	dlog.Printf("Done initializing buy\n")
	p := prof.StartProfile()
	start := time.Now()
	var wg sync.WaitGroup
	for i := 0; i < *clientGoRoutines; i++ {
		wg.Add(1)
		go func(n int) {
			duration := time.Now().Add(time.Duration(*nsec) * time.Second)
			var local_seed uint32 = uint32(rand.Intn(10000000))
			wi := n % (*nworkers)
			w := coord.Workers[wi]
			// It's ok to reuse t because it gets copied in
			// w.One(), and if we're actually reading from t later
			// we pause and don't re-write it until it's done.
			var t ddtxn.Query
			for duration.After(time.Now()) {
				big_app.MakeOne(w.ID, &local_seed, &t)
				if *apps.Latency || *doValidate {
					// We need the actual result: attach a reply channel and
					// wait out stashed executions.
					t.W = make(chan struct {
						R *ddtxn.Result
						E error
					})
					txn_start := time.Now()
					_, err := w.One(t)
					if err == ddtxn.ESTASH {
						x := <-t.W
						err = x.E
					}
					txn_end := time.Since(txn_start)
					if *apps.Latency {
						big_app.Time(&t, txn_end, n)
					}
					if *doValidate {
						if err == nil {
							big_app.Add(t)
						}
					}
				} else {
					// Fire-and-forget path.
					w.One(t)
				}
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	coord.Finish()
	end := time.Since(start)
	p.Stop()
	stats := make([]int64, ddtxn.LAST_STAT)
	nitr, nwait, nwait2 := ddtxn.CollectCounts(coord, stats)
	if *doValidate {
		big_app.Validate(s, int(nitr))
	}
	// NOTE(review): end.Nanoseconds()/nitr divides by nitr — panics if
	// zero transactions completed; confirm that cannot happen here.
	out := fmt.Sprintf(" sys: %v, contention: %v, nworkers: %v, rr: %v, ncrr: %v, nusers: %v, done: %v, actual time: %v, epoch changes: %v, total/sec: %v, throughput ns/txn: %v, naborts: %v, nwmoved: %v, nrmoved: %v, ietime: %v, ietime1: %v, etime: %v, etime2: %v, nstashed: %v, rlock: %v, wrratio: %v, nsamples: %v ", *ddtxn.SysType, *contention, *nworkers, *readrate, *notcontended_readrate*float64(*readrate), *nbidders, nitr, end, ddtxn.NextEpoch, float64(nitr)/end.Seconds(), end.Nanoseconds()/nitr, stats[ddtxn.NABORTS], ddtxn.WMoved, ddtxn.RMoved, ddtxn.Time_in_IE.Seconds(), ddtxn.Time_in_IE1.Seconds(), nwait.Seconds()/float64(*nworkers), nwait2.Seconds()/float64(*nworkers), stats[ddtxn.NSTASHED], *ddtxn.UseRLocks, *ddtxn.WRRatio, stats[ddtxn.NSAMPLES])
	// NOTE(review): fmt.Printf(out) passes a non-constant format string
	// (go vet printf warning); fmt.Print(out) would be safer.
	fmt.Printf(out)
	fmt.Printf("\n")
	f, err := os.OpenFile(*dataFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	ddtxn.PrintStats(out, stats, f, coord, s, *nbidders)
	x, y := big_app.LatencyString()
	f.WriteString(x)
	f.WriteString(y)
	f.WriteString("\n")
}
// main drives the RUBiS bidding benchmark: populate items to bid on,
// optionally preallocate per-worker key ranges, run bidding clients
// (each with an exponential-backoff retry heap) for *nsec seconds,
// then report throughput and per-phase statistics.
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*nprocs)
	if *clientGoRoutines == 0 {
		*clientGoRoutines = *nprocs
	}
	if *nworkers == 0 {
		*nworkers = *nprocs
	}
	if *doValidate {
		// Validation needs per-transaction results, which are only
		// produced when -allocate is set.
		if !*ddtxn.Allocate {
			log.Fatalf("Cannot correctly validate without waiting for results; add -allocate\n")
		}
	}
	// Product count is derived from -contention if given, otherwise
	// the system-wide item default.
	var nproducts int
	if *contention > 0 {
		nproducts = *nbidders / int(*contention)
	} else {
		nproducts = ddtxn.NUM_ITEMS
	}
	s := ddtxn.NewStore()
	coord := ddtxn.NewCoordinator(*nworkers, s)
	if *ddtxn.CountKeys {
		for i := 0; i < *nworkers; i++ {
			w := coord.Workers[i]
			w.NKeyAccesses = make([]int64, *nbidders)
		}
	}
	rubis := &apps.Rubis{}
	rubis.Init(nproducts, *nbidders, *nworkers, *clientGoRoutines, *ZipfDist, 0)
	rubis.PopulateBids(s, coord) // Just creates items to bid on
	fmt.Printf("Done populating bids\n")
	if !*ddtxn.Allocate {
		prealloc := time.Now()
		tmp := *ddtxn.UseRLocks
		*ddtxn.UseRLocks = true
		// Preallocate keys
		bids_per_worker := 200000.0
		if *nworkers == 20 {
			bids_per_worker *= 20
		}
		if *rounds {
			// Preallocate in bounded rounds of 10 workers at a time
			// rather than all at once.
			parallelism := 10
			rounds := *nworkers / parallelism
			if rounds == 0 {
				rounds = 1
			}
			for j := 0; j < rounds; j++ {
				fmt.Printf("Doing round %v\n", j)
				var wg sync.WaitGroup
				for i := j * parallelism; i < (j+1)*parallelism; i++ {
					if i >= *nworkers {
						break
					}
					wg.Add(1)
					go func(i int) {
						coord.Workers[i].PreallocateRubis(0, int(bids_per_worker), ddtxn.NUM_ITEMS)
						wg.Done()
					}(i)
				}
				wg.Wait()
			}
		} else {
			// Preallocate for all workers concurrently.
			var wg sync.WaitGroup
			for i := 0; i < *nworkers; i++ {
				wg.Add(1)
				go func(i int) {
					coord.Workers[i].PreallocateRubis(0, int(bids_per_worker), ddtxn.NUM_ITEMS)
					wg.Done()
				}(i)
			}
			wg.Wait()
		}
		*ddtxn.UseRLocks = tmp
		fmt.Printf("Allocation took %v\n", time.Since(prealloc))
	}
	fmt.Printf("Done initializing rubis\n")
	p := prof.StartProfile()
	start := time.Now()
	// Per-client count of transactions abandoned after their retry
	// deadline passed.
	gave_up := make([]int64, *clientGoRoutines)
	var wg sync.WaitGroup
	for i := 0; i < *clientGoRoutines; i++ {
		exp := ddtxn.MakeExp(30)
		wg.Add(1)
		go func(n int) {
			// Aborted transactions are re-queued here, ordered by the
			// timestamp at which they become due for retry.
			retries := make(ddtxn.RetryHeap, 0)
			heap.Init(&retries)
			end_time := time.Now().Add(time.Duration(*nsec) * time.Second)
			var local_seed uint32 = uint32(rand.Intn(1000000))
			// Pin this client to one worker, round-robin.
			wi := n % (*nworkers)
			w := coord.Workers[wi]
			for {
				tm := time.Now()
				if !end_time.After(tm) {
					break
				}
				var t ddtxn.Query
				// Prefer a due retry over generating a fresh bid.
				if len(retries) > 0 && retries[0].TS.Before(tm) {
					t = heap.Pop(&retries).(ddtxn.Query)
				} else {
					rubis.MakeBid(w.ID, &local_seed, &t)
					if *ddtxn.Latency {
						t.S = time.Now()
					}
				}
				if *doValidate {
					// Reply channel so a stashed transaction can
					// deliver its final result.
					t.W = make(chan struct {
						R *ddtxn.Result
						E error
					})
				}
				committed := false
				_, err := w.One(t)
				if err == ddtxn.ESTASH {
					// The worker will finish a stashed transaction on
					// its own; when validating, wait for its result.
					if *doValidate {
						x := <-t.W
						err = x.E
					}
					committed = true
				} else if err == ddtxn.EABORT {
					committed = false
				} else {
					committed = true
				}
				t.I++ // attempt counter; feeds the backoff exponent below
				if !committed {
					t.TS = tm.Add(time.Duration(ddtxn.RandN(&local_seed, exp.Exp(t.I))) * time.Microsecond)
					if t.TS.Before(end_time) {
						heap.Push(&retries, t)
					} else {
						// Retry would land after the deadline; give up.
						gave_up[n]++
					}
				}
				if committed && *doValidate {
					rubis.Add(t)
				}
			}
			wg.Done()
			if len(retries) > 0 {
				dlog.Printf("[%v] Length of retry queue on exit: %v\n", n, len(retries))
			}
			// Transactions still queued at exit count as given up.
			gave_up[n] = gave_up[n] + int64(len(retries))
		}(i)
	}
	wg.Wait()
	coord.Finish()
	end := time.Since(start)
	p.Stop()
	stats := make([]int64, ddtxn.LAST_STAT)
	nitr, nwait, nwait2 := ddtxn.CollectCounts(coord, stats)
	_ = nwait2
	if *doValidate {
		rubis.Validate(s, int(nitr))
	}
	// Collapse per-client gave-up counts into gave_up[0] for reporting.
	for i := 1; i < *clientGoRoutines; i++ {
		gave_up[0] = gave_up[0] + gave_up[i]
	}
	if !*ddtxn.Allocate {
		// Dump each worker's current/last preallocated-key counters,
		// one letter per key class.
		keys := []rune{'b', 'c', 'd', 'i', 'k', 'u'}
		for i := 0; i < *nworkers; i++ {
			dlog.Printf("w: %v ", i)
			for _, k := range keys {
				dlog.Printf("%v %v/%v \t", strconv.QuoteRuneToASCII(k), coord.Workers[i].CurrKey[k], coord.Workers[i].LastKey[k])
			}
			dlog.Printf("\n")
		}
	}
	out := fmt.Sprintf(" nworkers: %v, nwmoved: %v, nrmoved: %v, sys: %v, total/sec: %v, abortrate: %.2f, stashrate: %.2f, nbidders: %v, nitems: %v, contention: %v, done: %v, actual time: %v, throughput: ns/txn: %v, naborts: %v, coord time: %v, coord stats time: %v, total worker time transitioning: %v, nstashed: %v, rlock: %v, wrratio: %v, nsamples: %v, getkeys: %v, ddwrites: %v, nolock: %v, failv: %v, stashdone: %v, nfast: %v, gaveup: %v, epoch changes: %v, potential: %v, coordtotaltime %v, mergetime: %v, readtime: %v, gotime: %v ", *nworkers, ddtxn.WMoved, ddtxn.RMoved, *ddtxn.SysType, float64(nitr)/end.Seconds(), 100*float64(stats[ddtxn.NABORTS])/float64(nitr+stats[ddtxn.NABORTS]), 100*float64(stats[ddtxn.NSTASHED])/float64(nitr+stats[ddtxn.NABORTS]), *nbidders, nproducts, *contention, nitr, end, end.Nanoseconds()/nitr, stats[ddtxn.NABORTS], ddtxn.Time_in_IE, ddtxn.Time_in_IE1, nwait, stats[ddtxn.NSTASHED], *ddtxn.UseRLocks, *ddtxn.WRRatio, stats[ddtxn.NSAMPLES], stats[ddtxn.NGETKEYCALLS], stats[ddtxn.NDDWRITES], stats[ddtxn.NO_LOCK], stats[ddtxn.NFAIL_VERIFY], stats[ddtxn.NDIDSTASHED], ddtxn.Nfast, gave_up[0], ddtxn.NextEpoch, coord.PotentialPhaseChanges, coord.TotalCoordTime, coord.MergeTime, coord.ReadTime, coord.GoTime)
	fmt.Printf(out)
	fmt.Printf("\n")
	fmt.Printf("DD: %v\n", coord.Workers[0].Store().DD())
	// Append the stats line to the data file for later analysis.
	f, err := os.OpenFile(*dataFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	ddtxn.PrintStats(out, stats, f, coord, s, *nbidders)
	x, y := coord.Latency()
	f.WriteString(x)
	f.WriteString(y)
	f.WriteString("\n")
}
func (b *Rubis) Validate(s *ddtxn.Store, nitr int) bool { good := true zero_cnt := 0 for k, rat := range b.ratings { key := ddtxn.RatingKey(k) v, err := s.Get(key) if err != nil { fmt.Printf("Validating key %v failed; store: doesn't have rating for user %v: %v\n", key, k, err) good = false continue } r := v.Value().(int32) if r != rat { fmt.Printf("Validating key %v failed; store: has different rating for user %v (%v vs. %v): %v\n", key, k, rat, r, err) good = false continue } } for i := 0; i < b.nproducts; i++ { j := b.products[i] var x int32 k := ddtxn.MaxBidKey(j) v, err := s.Get(k) if err != nil { if b.maxes[i] != 0 { fmt.Printf("Validating key %v failed; store: none should have: %v\n", k, b.maxes[i]) good = false } continue } x = v.Value().(int32) if x != b.maxes[i] { fmt.Printf("Validating key %v failed; store: %v should have: %v\n", k, x, b.maxes[i]) good = false } if x == 0 { dlog.Printf("Saying x is zero %v %v\n", x, zero_cnt) zero_cnt++ } k = ddtxn.NumBidsKey(j) v, err = s.Get(k) if err != nil { if b.maxes[i] != 0 { fmt.Printf("Validating key %v failed for max bid; store: none should have: %v\n", k, b.num_bids[i]) good = false } continue } x = v.Value().(int32) if x != b.num_bids[i] { fmt.Printf("Validating key %v failed for number of bids; store: %v should have: %v\n", k, x, b.num_bids[i]) good = false } if x == 0 { dlog.Printf("Saying x is zero %v %v\n", x, zero_cnt) zero_cnt++ } } if zero_cnt == 2*b.nproducts && nitr > 10 { fmt.Printf("Bad: all zeroes!\n") dlog.Printf("Bad: all zeroes!\n") good = false } if good { dlog.Printf("Validate succeeded\n") } return good }
// main drives the Buy benchmark: populate products, run client
// goroutines that issue buy/read transactions with an exponential
// backoff retry heap for *nsec seconds, then collect and print
// throughput and per-phase worker statistics.
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*nprocs)
	if *clientGoRoutines == 0 {
		*clientGoRoutines = *nprocs
	}
	if *nworkers == 0 {
		*nworkers = *nprocs
	}
	if *doValidate {
		// Validation needs per-transaction results, which are only
		// produced when -allocate is set.
		if !*ddtxn.Allocate {
			log.Fatalf("Cannot correctly validate without waiting for results; add -allocate\n")
		}
	}
	// At least one key-distribution flag (-contention or zipf) must be
	// given; both at their -1.0 sentinel means neither was set.
	if *contention == -1.0 && *ZipfDist == -1.0 {
		log.Fatalf("Should use zipf or contention")
	}
	var nproducts int
	if *contention > 0 {
		nproducts = *nbidders / int(*contention)
	} else {
		nproducts = *nbidders
	}
	s := ddtxn.NewStore()
	buy_app := &apps.Buy{}
	buy_app.Init(nproducts, *nbidders, *nworkers, *readrate, *clientGoRoutines, *notcontended_readrate, *ZipfDist)
	dlog.Printf("Starting to initialize buy\n")
	buy_app.Populate(s, nil)
	coord := ddtxn.NewCoordinator(*nworkers, s)
	if *ddtxn.CountKeys {
		for i := 0; i < *nworkers; i++ {
			w := coord.Workers[i]
			w.NKeyAccesses = make([]int64, *nbidders)
		}
	}
	dlog.Printf("Done initializing buy\n")
	p := prof.StartProfile()
	start := time.Now()
	var wg sync.WaitGroup
	// Reads and writes that exhaust their retry deadline are tracked
	// separately, per client goroutine.
	gave_upr := make([]int64, *clientGoRoutines)
	gave_upw := make([]int64, *clientGoRoutines)
	var ending_retries int64
	for i := 0; i < *clientGoRoutines; i++ {
		wg.Add(1)
		go func(n int) {
			exp := ddtxn.MakeExp(50)
			// Aborted transactions are re-queued here, ordered by
			// their retry timestamp.
			retries := make(ddtxn.RetryHeap, 0)
			heap.Init(&retries)
			end_time := time.Now().Add(time.Duration(*nsec) * time.Second)
			var local_seed uint32 = uint32(rand.Intn(10000000))
			// sp partitions the bidder key space across client
			// goroutines -- presumably a per-client stride; confirm
			// against buy_app.MakeOne.
			var sp uint32 = uint32(*nbidders / *clientGoRoutines)
			w := coord.Workers[n%(*nworkers)]
			var tm time.Time
			for {
				tm = time.Now()
				if !end_time.After(tm) {
					break
				}
				var t ddtxn.Query
				// Prefer a due retry over generating fresh work.
				if len(retries) > 0 && retries[0].TS.Before(tm) {
					t = heap.Pop(&retries).(ddtxn.Query)
				} else {
					buy_app.MakeOne(w.ID, &local_seed, sp, &t)
					if *ddtxn.Latency {
						t.S = time.Now()
					}
				}
				if *doValidate {
					// Reply channel for stashed transactions; buffered
					// with capacity 1 (unlike the other drivers).
					t.W = make(chan struct {
						R *ddtxn.Result
						E error
					}, 1)
				}
				committed := false
				_, err := w.One(t)
				if err == ddtxn.ESTASH {
					if *doValidate {
						x := <-t.W
						err = x.E
						// A stashed transaction is expected to be run
						// to completion, never to abort.
						if err == ddtxn.EABORT {
							log.Fatalf("Should be run until commitment!\n")
						}
					}
					committed = true // The worker stash code will retry
				} else if err == ddtxn.EABORT {
					committed = false
				} else {
					committed = true
				}
				t.I++ // attempt counter; feeds the backoff exponent
				if !committed {
					// Exponential backoff, with floors keeping both
					// the delay bound and the seed nonzero.
					e := uint32(exp.Exp(t.I))
					if e < 1 {
						e = 1
					}
					if local_seed < 1 {
						local_seed = 1
					}
					rnd := ddtxn.RandN(&local_seed, e)
					if rnd <= 2 {
						rnd = 2
					}
					t.TS = tm.Add(time.Duration(rnd) * time.Microsecond)
					if t.TS.Before(end_time) {
						heap.Push(&retries, t)
					} else {
						// Retry would land past the deadline; give up,
						// counted by transaction class.
						if ddtxn.IsRead(t.TXN) {
							gave_upr[n]++
						} else {
							gave_upw[n]++
						}
					}
				}
				if committed && *doValidate {
					buy_app.Add(t)
				}
			}
			w.Finished()
			wg.Done()
			if len(retries) > 0 {
				dlog.Printf("[%v] Length of retry queue on exit: %v\n", n, len(retries))
			}
			// Transactions still queued at exit are counted too.
			atomic.AddInt64(&ending_retries, int64(len(retries)))
		}(i)
	}
	wg.Wait()
	coord.Finish()
	end := time.Since(start)
	p.Stop()
	stats := make([]int64, ddtxn.LAST_STAT)
	nitr, nwait, nnoticed, nmerge, nmergewait, njoin, njoinwait := ddtxn.CollectCounts(coord, stats)
	if *doValidate {
		buy_app.Validate(s, int(nitr))
	}
	// Collapse per-client gave-up counts into slot 0 for reporting.
	for i := 1; i < *clientGoRoutines; i++ {
		gave_upr[0] = gave_upr[0] + gave_upr[i]
		gave_upw[0] = gave_upw[0] + gave_upw[i]
	}
	if ddtxn.NextEpoch == 0 {
		ddtxn.NextEpoch = 1
	}
	// nitr + NABORTS + ENOKEY is how many requests were issued. A
	// stashed transaction eventually executes and contributes to
	// nitr.
	out := fmt.Sprintf(" nworkers: %v, nwmoved: %v, nrmoved: %v, sys: %v, total/sec: %v, abortrate: %.2f, stashrate: %.2f, rr: %v, nbids: %v, nproducts: %v, contention: %v, done: %v, actual time: %v, nreads: %v, nbuys: %v, epoch changes: %v, throughput ns/txn: %v, naborts: %v, coord time: %v, coord stats time: %v, nstashed: %v, rlock: %v, wrratio: %v, nsamples: %v, getkeys: %v, ddwrites: %v, nolock: %v, failv: %v, stashdone: %v, nfast: %v, gaveup_reads: %v, gaveup_writes: %v, lenretries: %v, potential: %v, coordtotaltime %v, mergetime: %v, readtime: %v, gotime: %v, workertransitiontime: %v, workernoticetime: %v, workermergetime: %v, workermergewaittime: %v, workerjointime: %v, workerjoinwaittime: %v, readaborts: %v ", *nworkers, ddtxn.WMoved, ddtxn.RMoved, *ddtxn.SysType, float64(nitr)/end.Seconds(), 100*float64(stats[ddtxn.NABORTS])/float64(nitr+stats[ddtxn.NABORTS]), 100*float64(stats[ddtxn.NSTASHED])/float64(nitr+stats[ddtxn.NABORTS]), *readrate, *nbidders, nproducts, *contention, nitr, end, stats[ddtxn.D_READ_TWO], stats[ddtxn.D_BUY], ddtxn.NextEpoch, end.Nanoseconds()/nitr, stats[ddtxn.NABORTS], ddtxn.Time_in_IE, ddtxn.Time_in_IE1, stats[ddtxn.NSTASHED], *ddtxn.UseRLocks, *ddtxn.WRRatio, stats[ddtxn.NSAMPLES], stats[ddtxn.NGETKEYCALLS], stats[ddtxn.NDDWRITES], stats[ddtxn.NO_LOCK], stats[ddtxn.NFAIL_VERIFY], stats[ddtxn.NDIDSTASHED], ddtxn.Nfast, gave_upr[0], gave_upw[0], ending_retries, coord.PotentialPhaseChanges, coord.TotalCoordTime, coord.MergeTime, coord.ReadTime, coord.GoTime, nwait, nnoticed, nmerge, nmergewait, njoin, njoinwait, stats[ddtxn.NREADABORTS])
	fmt.Printf(out)
	fmt.Printf("\n")
	// Append the stats line to the data file for later analysis.
	f, err := os.OpenFile(*dataFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Printf("DD: %v\n", coord.Workers[0].Store().DD())
	ddtxn.PrintStats(out, stats, f, coord, s, *nbidders)
	x, y := coord.Latency()
	f.WriteString(x)
	f.WriteString(y)
	f.WriteString("\n")
}