func (hv *historyVerifier) runTxn(
	txnIdx int, priority int32, isolation enginepb.IsolationType, cmds []*cmd, db *client.DB, t *testing.T,
) error {
	var retry int
	txnName := fmt.Sprintf("txn %d", txnIdx+1)
	cmdIdx := -1
	err := db.Txn(context.TODO(), func(txn *client.Txn) error {
		// If this is the 2nd attempt and a retry wasn't expected, return a
		// retry error, which results in further histories being enumerated.
		if retry++; retry > 1 {
			if !cmds[cmdIdx].expRetry {
				// Propagate the retry error to history execution to enumerate all
				// histories where this txn retries at this command.
				return &retryError{txnIdx: txnIdx, cmdIdx: cmdIdx}
			}
			// We're expecting a retry, so just send nil down the done channel.
			cmds[cmdIdx].done(nil)
		}
		txn.SetDebugName(txnName, 0)
		if isolation == enginepb.SNAPSHOT {
			if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil {
				return err
			}
		}
		txn.InternalSetPriority(priority)

		env := map[string]int64{}
		for cmdIdx+1 < len(cmds) {
			cmdIdx++
			cmds[cmdIdx].env = env
			_, err := hv.runCmd(txn, txnIdx, retry, cmds[cmdIdx], t)
			if err != nil {
				if log.V(1) {
					log.Infof(context.Background(), "%s: failed running %s: %s", txnName, cmds[cmdIdx], err)
				}
				return err
			}
		}
		return nil
	})
	if err != nil {
		for _, c := range cmds[cmdIdx:] {
			c.done(err)
		}
	}
	return err
}
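// The retry counting in runTxn relies on client.DB.Txn re-invoking its
// closure when the transaction hits a retryable error, so per-attempt state
// (the retry counter, cmdIdx) must live outside the closure. A minimal
// sketch of that contract, assuming the same client API used above; the
// function name, key, and value are purely illustrative:
func txnRetryContractSketch(db *client.DB) error {
	var attempts int
	return db.Txn(context.TODO(), func(txn *client.Txn) error {
		// attempts is declared outside the closure, so it survives closure
		// re-executions and counts how many times the body has run.
		attempts++
		if attempts > 1 {
			log.Infof(context.Background(), "transaction body retried, attempt %d", attempts)
		}
		return txn.Put("sketch-key", "sketch-value")
	})
}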
func (hv *historyVerifier) runCmds(
	cmds []*cmd, db *client.DB, t *testing.T,
) (string, map[string]int64, error) {
	var strs []string
	env := map[string]int64{}
	err := db.Txn(context.TODO(), func(txn *client.Txn) error {
		for _, c := range cmds {
			c.historyIdx = hv.idx
			c.env = env
			c.init(nil)
			fmtStr, err := c.execute(txn, t)
			if err != nil {
				return err
			}
			strs = append(strs, fmt.Sprintf(fmtStr, 0, 0))
		}
		return nil
	})
	return strings.Join(strs, " "), env, err
}
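// A hedged usage sketch for runCmds: running the commands once inside a
// single transaction yields a canonical result string and environment that
// concurrent histories can be compared against. The function and variable
// names (serialCmds, expectedOutcomeSketch) are illustrative:
func expectedOutcomeSketch(hv *historyVerifier, serialCmds []*cmd, db *client.DB, t *testing.T) {
	expected, env, err := hv.runCmds(serialCmds, db, t)
	if err != nil {
		t.Fatalf("serial execution failed: %s", err)
	}
	t.Logf("expected history %q with env %v", expected, env)
}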
// purgeOldLeases refreshes the leases on a table. Unused leases older than
// minVersion will be released.
// If deleted is set, minVersion is ignored; no lease is acquired and all
// existing unused leases are released. The table is further marked for
// deletion, which will cause existing in-use leases to be eagerly released
// once they're not in use any more.
// If t has no active leases, nothing is done.
func (t *tableState) purgeOldLeases(
	db *client.DB, deleted bool, minVersion sqlbase.DescriptorVersion, store LeaseStore,
) error {
	t.mu.Lock()
	empty := len(t.active.data) == 0
	t.mu.Unlock()
	if empty {
		// We don't currently have a lease on this table, so no need to refresh
		// anything.
		return nil
	}

	// Acquire and release a lease on the table at a version >= minVersion.
	var lease *LeaseState
	err := db.Txn(context.TODO(), func(txn *client.Txn) error {
		var err error
		if !deleted {
			lease, err = t.acquire(txn, minVersion, store)
			if err == errTableDeleted {
				deleted = true
			}
		}
		if err == nil || deleted {
			t.mu.Lock()
			defer t.mu.Unlock()
			var toRelease []*LeaseState
			if deleted {
				t.deleted = true
			}
			toRelease = append([]*LeaseState(nil), t.active.data...)
			t.releaseLeasesIfNotActive(toRelease, store)
			return nil
		}
		return err
	})
	if err != nil {
		return err
	}
	if lease == nil {
		return nil
	}
	return t.release(lease, store)
}
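// A sketch of the two ways purgeOldLeases can be invoked, per its doc
// comment above. The function name and the version constant 3 are
// illustrative assumptions, not code from this package:
func purgeSketch(ts *tableState, db *client.DB, s LeaseStore) error {
	// Refresh path: release unused leases older than version 3. A lease at
	// a version >= 3 is acquired and immediately released to force the refresh.
	if err := ts.purgeOldLeases(db, false /* deleted */, 3, s); err != nil {
		return err
	}
	// Deletion path: minVersion is ignored; all unused leases are released
	// and the table is marked deleted so in-use leases drain eagerly.
	return ts.purgeOldLeases(db, true /* deleted */, 0, s)
}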
// startTestWriter creates a writer which initiates a sequence of
// transactions, each of which writes up to 10 times to random keys with
// random values. If txnChannel is not nil, it is written to without
// blocking every time a new transaction starts.
func startTestWriter(
	db *client.DB, i int64, valBytes int32, wg *sync.WaitGroup,
	retries *int32, txnChannel chan struct{}, done <-chan struct{}, t *testing.T,
) {
	src := rand.New(rand.NewSource(i))
	defer func() {
		if wg != nil {
			wg.Done()
		}
	}()

	for j := 0; ; j++ {
		select {
		case <-done:
			return
		default:
			first := true
			err := db.Txn(func(txn *client.Txn) error {
				if first && txnChannel != nil {
					select {
					case txnChannel <- struct{}{}:
					default:
					}
				} else if !first && retries != nil {
					atomic.AddInt32(retries, 1)
				}
				first = false
				for j := 0; j <= int(src.Int31n(10)); j++ {
					key := randutil.RandBytes(src, 10)
					val := randutil.RandBytes(src, int(src.Int31n(valBytes)))
					if err := txn.Put(key, val); err != nil {
						log.Infof("experienced an error in routine %d: %s", i, err)
						return err
					}
				}
				return nil
			})
			if err != nil {
				t.Error(err)
			} else {
				time.Sleep(1 * time.Millisecond)
			}
		}
	}
}
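// A minimal sketch of driving startTestWriter from a test, assuming a
// *client.DB is available; the writer count, value size, and sleep
// duration are illustrative:
func runWritersSketch(db *client.DB, t *testing.T) {
	var wg sync.WaitGroup
	var retries int32
	done := make(chan struct{})
	txnChannel := make(chan struct{}, 100)
	for i := int64(0); i < 4; i++ {
		wg.Add(1)
		// Each writer gets its own RNG seed (i) and writes random values
		// until done is closed; retries counts transaction restarts.
		go startTestWriter(db, i, 64, &wg, &retries, txnChannel, done, t)
	}
	// Let the writers run briefly, then shut them down.
	time.Sleep(50 * time.Millisecond)
	close(done)
	wg.Wait()
	t.Logf("observed %d retries", atomic.LoadInt32(&retries))
}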
// concurrentIncrements starts two goroutines in parallel, both of which
// read the integer stored at the other's key and add it onto their own.
// It is checked that the outcome is serializable, i.e. exactly one of the
// two goroutines (the later write) sees the previous write by the other.
func concurrentIncrements(db *client.DB, t *testing.T) {
	// wgStart waits for all transactions to line up, wgEnd has the main
	// function wait for them to finish.
	var wgStart, wgEnd sync.WaitGroup
	wgStart.Add(2 + 1)
	wgEnd.Add(2)

	for i := 0; i < 2; i++ {
		go func(i int) {
			// Read the other key, write key i.
			readKey := []byte(fmt.Sprintf(testUser+"/value-%d", (i+1)%2))
			writeKey := []byte(fmt.Sprintf(testUser+"/value-%d", i))
			defer wgEnd.Done()
			wgStart.Done()
			// Wait until the other goroutines are running.
			wgStart.Wait()

			if err := db.Txn(func(txn *client.Txn) error {
				txn.SetDebugName(fmt.Sprintf("test-%d", i), 0)
				// Retrieve the other key.
				gr, err := txn.Get(readKey)
				if err != nil {
					return err
				}

				otherValue := int64(0)
				if gr.Value != nil {
					otherValue = gr.ValueInt()
				}

				_, err = txn.Inc(writeKey, 1+otherValue)
				return err
			}); err != nil {
				t.Error(err)
			}
		}(i)
	}

	// Kick the goroutines loose.
	wgStart.Done()
	// Wait for the goroutines to finish.
	wgEnd.Wait()

	// Verify that both keys contain something and, more importantly, that
	// one key actually contains the value of the first writer and not only
	// its own.
	total := int64(0)
	results := []int64(nil)
	for i := 0; i < 2; i++ {
		readKey := []byte(fmt.Sprintf(testUser+"/value-%d", i))
		gr, err := db.Get(readKey)
		if err != nil {
			t.Fatal(err)
		}
		if gr.Value == nil {
			t.Fatalf("unexpected empty key: %s=%v", readKey, gr.Value)
		}
		total += gr.ValueInt()
		results = append(results, gr.ValueInt())
	}

	// First writer should have 1, second one 2.
	if total != 3 {
		t.Fatalf("got unserializable values %v", results)
	}
}
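// Why total == 3 is the serializable signature: under either serial order,
// the later transaction must see the earlier one's write. A self-contained
// sketch (illustrative, not part of the test) that replays both serial
// schedules in memory, with plain int64 values standing in for the keys:
func serializableOutcomesSketch() {
	for _, order := range [][2]int{{0, 1}, {1, 0}} {
		var vals [2]int64
		for _, i := range order {
			other := vals[(i+1)%2] // read the other writer's key
			vals[i] = 1 + other    // Inc(writeKey, 1+otherValue) starting from 0
		}
		// One writer ends with 1, the other with 2; total is always 3.
		fmt.Printf("order %v -> values %v, total %d\n", order, vals, vals[0]+vals[1])
	}
}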