// incCmd adds one to the value of c.key in the env and writes
// it to the db. If c.key isn't in the db, writes 1.
func incCmd(c *cmd, db *client.KV, t *testing.T) error {
	r := &proto.IncrementResponse{}
	if err := db.Call(proto.Increment, &proto.IncrementRequest{
		RequestHeader: proto.RequestHeader{Key: c.getKey()},
		Increment:     int64(1),
	}, r); err != nil {
		return err
	}
	c.env[c.key] = r.NewValue
	c.debug = fmt.Sprintf("[%d ts=%d]", r.NewValue, r.Timestamp.Logical)
	return nil
}
// allocateStoreIDs increments the store id generator key for the
// specified node to allocate "inc" new, unique store ids. The
// first ID in a contiguous range is returned on success.
func allocateStoreIDs(nodeID proto.NodeID, inc int64, db *client.KV) (proto.StoreID, error) {
	iReply := &proto.IncrementResponse{}
	if err := db.Call(proto.Increment, &proto.IncrementRequest{
		RequestHeader: proto.RequestHeader{
			Key:  engine.MakeKey(engine.KeyStoreIDGeneratorPrefix, []byte(strconv.Itoa(int(nodeID)))),
			User: storage.UserRoot,
		},
		Increment: inc,
	}, iReply); err != nil {
		return 0, util.Errorf("unable to allocate %d store IDs for node %d: %v", inc, nodeID, err)
	}
	return proto.StoreID(iReply.NewValue - inc + 1), nil
}
// allocateNodeID increments the node id generator key to allocate
// a new, unique node id.
func allocateNodeID(db *client.KV) (proto.NodeID, error) {
	iReply := &proto.IncrementResponse{}
	if err := db.Call(proto.Increment, &proto.IncrementRequest{
		RequestHeader: proto.RequestHeader{
			Key:  engine.KeyNodeIDGenerator,
			User: storage.UserRoot,
		},
		Increment: 1,
	}, iReply); err != nil {
		return 0, util.Errorf("unable to allocate node ID: %v", err)
	}
	return proto.NodeID(iReply.NewValue), nil
}
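// The two allocators above are typically combined when a new node joins the
// cluster: first reserve a node ID, then reserve one store ID per local
// engine. The function below is a minimal sketch of that flow, not part of
// the original code; bootstrapNode and numStores are hypothetical names used
// only for illustration.
func bootstrapNode(db *client.KV, numStores int64) (proto.NodeID, proto.StoreID, error) {
	nodeID, err := allocateNodeID(db)
	if err != nil {
		return 0, 0, err
	}
	// allocateStoreIDs returns the first ID of a contiguous block of
	// numStores IDs; the caller hands out firstStoreID, firstStoreID+1, ...
	firstStoreID, err := allocateStoreIDs(nodeID, numStores, db)
	if err != nil {
		return 0, 0, err
	}
	return nodeID, firstStoreID, nil
}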
// sumCmd sums the values of all keys read during the transaction
// and writes the result to the db.
func sumCmd(c *cmd, db *client.KV, t *testing.T) error {
	sum := int64(0)
	for _, v := range c.env {
		sum += v
	}
	r := &proto.PutResponse{}
	err := db.Call(proto.Put, &proto.PutRequest{
		RequestHeader: proto.RequestHeader{Key: c.getKey()},
		Value:         proto.Value{Integer: gogoproto.Int64(sum)},
	}, r)
	c.debug = fmt.Sprintf("[%d ts=%d]", sum, r.Timestamp.Logical)
	return err
}
// readCmd reads a value from the db and stores it in the env.
func readCmd(c *cmd, db *client.KV, t *testing.T) error {
	r := &proto.GetResponse{}
	if err := db.Call(proto.Get, &proto.GetRequest{
		RequestHeader: proto.RequestHeader{Key: c.getKey()},
	}, r); err != nil {
		return err
	}
	if r.Value != nil {
		c.env[c.key] = r.Value.GetInteger()
		c.debug = fmt.Sprintf("[%d ts=%d]", r.Value.GetInteger(), r.Timestamp.Logical)
	}
	return nil
}
// scanCmd reads the values from the db from [key, endKey).
func scanCmd(c *cmd, db *client.KV, t *testing.T) error {
	r := &proto.ScanResponse{}
	if err := db.Call(proto.Scan, &proto.ScanRequest{
		RequestHeader: proto.RequestHeader{Key: c.getKey(), EndKey: c.getEndKey()},
	}, r); err != nil {
		return err
	}
	var vals []string
	keyPrefix := []byte(fmt.Sprintf("%d.", c.historyIdx))
	for _, kv := range r.Rows {
		key := bytes.TrimPrefix(kv.Key, keyPrefix)
		c.env[string(key)] = kv.Value.GetInteger()
		vals = append(vals, fmt.Sprintf("%d", kv.Value.GetInteger()))
	}
	c.debug = fmt.Sprintf("[%s ts=%d]", strings.Join(vals, " "), r.Timestamp.Logical)
	return nil
}
func (hv *historyVerifier) runTxn(txnIdx int, priority int32,
	isolation proto.IsolationType, cmds []*cmd, db *client.KV, t *testing.T) error {
	var retry int
	txnName := fmt.Sprintf("txn%d", txnIdx)
	txnOpts := &client.TransactionOptions{
		Name:      txnName,
		Isolation: isolation,
	}
	err := db.RunTransaction(txnOpts, func(txn *client.KV) error {
		txn.UserPriority = -priority
		env := map[string]int64{}

		// TODO(spencer): restarts must create additional histories. They
		// look like: given the current partial history and a restart on
		// txn txnIdx, re-enumerate a set of all histories containing the
		// remaining commands from extant txns and all commands from this
		// restarted txn.

		// If this is attempt > 1, reset cmds so no waits.
		if retry++; retry == 2 {
			for _, c := range cmds {
				c.done()
			}
		}

		log.V(1).Infof("%s, retry=%d", txnName, retry)
		for i := range cmds {
			cmds[i].env = env
			if err := hv.runCmd(txn, txnIdx, retry, i, cmds, t); err != nil {
				return err
			}
		}
		return nil
	})
	hv.wg.Done()
	return err
}
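// runTxn is intended to be launched once per transaction in the candidate
// history, with the verifier's WaitGroup used to join them. The method below
// is a hedged sketch of that driver loop, not the harness's actual code; the
// runHistorySketch name and the txnCmds/priorities/isolations parameters are
// assumptions for illustration only.
func (hv *historyVerifier) runHistorySketch(txnCmds [][]*cmd, priorities []int32,
	isolations []proto.IsolationType, db *client.KV, t *testing.T) {
	hv.wg.Add(len(txnCmds))
	for i := range txnCmds {
		go func(i int) {
			// runTxn calls hv.wg.Done() itself once the transaction finishes.
			if err := hv.runTxn(i, priorities[i], isolations[i], txnCmds[i], db, t); err != nil {
				t.Errorf("txn%d failed: %v", i, err)
			}
		}(i)
	}
	hv.wg.Wait()
}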
// concurrentIncrements starts two Goroutines in parallel, both of which
// read the integers stored at the other's key and add it onto their own.
// It is checked that the outcome is serializable, i.e. exactly one of the
// two Goroutines (the later write) sees the previous write by the other.
func concurrentIncrements(kvClient *client.KV, t *testing.T) {
	// wgStart waits for all transactions to line up, wgEnd has the main
	// function wait for them to finish.
	var wgStart, wgEnd sync.WaitGroup
	wgStart.Add(2 + 1)
	wgEnd.Add(2)

	for i := 0; i < 2; i++ {
		go func(i int) {
			// Read the other key, write key i.
			readKey := []byte(fmt.Sprintf("value-%d", (i+1)%2))
			writeKey := []byte(fmt.Sprintf("value-%d", i))
			defer wgEnd.Done()
			wgStart.Done()
			// Wait until the other goroutines are running.
			wgStart.Wait()

			txnOpts := &client.TransactionOptions{
				Name: fmt.Sprintf("test-%d", i),
			}
			if err := kvClient.RunTransaction(txnOpts, func(txn *client.KV) error {
				// Retrieve the other key.
				gr := &proto.GetResponse{}
				if err := txn.Call(proto.Get, proto.GetArgs(readKey), gr); err != nil {
					return err
				}

				otherValue := int64(0)
				if gr.Value != nil && gr.Value.Integer != nil {
					otherValue = *gr.Value.Integer
				}

				pr := &proto.IncrementResponse{}
				pa := proto.IncrementArgs(writeKey, 1+otherValue)
				if err := txn.Call(proto.Increment, pa, pr); err != nil {
					return err
				}

				return nil
			}); err != nil {
				t.Error(err)
			}
		}(i)
	}

	// Kick the goroutines loose.
	wgStart.Done()
	// Wait for the goroutines to finish.
	wgEnd.Wait()

	// Verify that both keys contain something and, more importantly, that
	// one key actually contains the value of the first writer and not only
	// its own.
	total := int64(0)
	results := []int64(nil)
	for i := 0; i < 2; i++ {
		readKey := []byte(fmt.Sprintf("value-%d", i))
		gr := &proto.GetResponse{}
		if err := kvClient.Call(proto.Get, proto.GetArgs(readKey), gr); err != nil {
			log.Fatal(err)
		}
		if gr.Value == nil || gr.Value.Integer == nil {
			t.Fatalf("unexpected empty key: %v=%v", readKey, gr.Value)
		}
		total += *gr.Value.Integer
		results = append(results, *gr.Value.Integer)
	}

	// First writer should have 1, second one 2.
	if total != 3 {
		t.Fatalf("got unserializable values %v", results)
	}
}
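// The function below is a sketch of a test driver for concurrentIncrements.
// Because the interesting interleaving only happens when both transactions
// truly race, the sketch repeats the experiment a few times and clears the
// two keys between rounds with a DeleteRange call. It is not part of the
// original code; createTestClient is a hypothetical helper assumed to return
// a *client.KV backed by a test server.
func TestConcurrentIncrementsSketch(t *testing.T) {
	kvClient := createTestClient(t) // hypothetical helper
	for i := 0; i < 5; i++ {
		// Delete ["value-0", "value-2") so each round starts from empty keys.
		if err := kvClient.Call(proto.DeleteRange, &proto.DeleteRangeRequest{
			RequestHeader: proto.RequestHeader{
				Key:    []byte("value-0"),
				EndKey: []byte("value-2"),
			},
		}, &proto.DeleteRangeResponse{}); err != nil {
			t.Fatal(err)
		}
		concurrentIncrements(kvClient, t)
	}
}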
// commitCmd commits the transaction.
func commitCmd(c *cmd, db *client.KV, t *testing.T) error {
	r := &proto.EndTransactionResponse{}
	err := db.Call(proto.EndTransaction, &proto.EndTransactionRequest{Commit: true}, r)
	c.debug = fmt.Sprintf("[ts=%d]", r.Timestamp.Logical)
	return err
}
// deleteRngCmd deletes the range of values from the db from [key, endKey).
func deleteRngCmd(c *cmd, db *client.KV, t *testing.T) error {
	return db.Call(proto.DeleteRange, &proto.DeleteRangeRequest{
		RequestHeader: proto.RequestHeader{Key: c.getKey(), EndKey: c.getEndKey()},
	}, &proto.DeleteRangeResponse{})
}
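// The command functions above all share the signature
// func(*cmd, *client.KV, *testing.T) error, so a textual history spec can be
// translated into commands by name. The dispatch table below is a sketch of
// that wiring; the cmdFuncs name and the short string keys are assumptions
// for illustration, not the harness's actual encoding.
var cmdFuncs = map[string]func(c *cmd, db *client.KV, t *testing.T) error{
	"I":   incCmd,       // increment c.key
	"R":   readCmd,      // read c.key into the env
	"SC":  scanCmd,      // scan [key, endKey) into the env
	"SUM": sumCmd,       // write the sum of all values read so far
	"DR":  deleteRngCmd, // delete [key, endKey)
	"C":   commitCmd,    // commit the transaction
}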