// sendBatch unrolls a batched command and sends each constituent
// command in turn, amalgamating the results into the batch reply.
// TODO(tschottdorf): modify sendBatch so that it sends truly parallel requests
// when outside of a Transaction. This can then be used to address the TODO in
// (*TxnCoordSender).resolve().
func (tc *TxnCoordSender) sendBatch(ctx context.Context, batchArgs *proto.BatchRequest, batchReply *proto.BatchResponse) {
	// Prepare the calls by unrolling the batch. If the batchReply is
	// pre-initialized with replies, use those; otherwise create replies
	// as needed.
	// TODO(spencer): send calls in parallel.
	batchReply.Txn = batchArgs.Txn
	for i := range batchArgs.Requests {
		args := batchArgs.Requests[i].GetValue().(proto.Request)
		if err := updateForBatch(args, batchArgs.RequestHeader); err != nil {
			batchReply.Header().SetGoError(err)
			return
		}
		call := proto.Call{Args: args}
		// Create a reply from the method type and add to batch response.
		if i >= len(batchReply.Responses) {
			call.Reply = args.CreateReply()
			batchReply.Add(call.Reply)
		} else {
			call.Reply = batchReply.Responses[i].GetValue().(proto.Response)
		}
		tc.sendOne(ctx, call)
		// Amalgamate transaction updates and propagate first error, if applicable.
		if batchReply.Txn != nil {
			batchReply.Txn.Update(call.Reply.Header().Txn)
		}
		if call.Reply.Header().Error != nil {
			batchReply.Error = call.Reply.Header().Error
			return
		}
	}
}
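// sendBatchParallel is a hypothetical sketch (not part of this revision) of
// the parallel dispatch asked for by the TODOs above. It assumes that sendOne
// is safe for concurrent use and that the batch is non-transactional (the
// TODO limits parallelism to batches outside a Transaction); neither
// assumption is verified here. For brevity it ignores pre-initialized replies
// in batchReply.Responses. All mutation of batchReply happens after the
// goroutines have finished, so the reply is never written concurrently.
// Requires the standard library "sync" package.
func (tc *TxnCoordSender) sendBatchParallel(ctx context.Context, batchArgs *proto.BatchRequest, batchReply *proto.BatchResponse) {
	batchReply.Txn = batchArgs.Txn
	// Validate and prepare all calls up front so that no goroutines are in
	// flight if a header error forces an early return.
	calls := make([]proto.Call, len(batchArgs.Requests))
	for i := range batchArgs.Requests {
		args := batchArgs.Requests[i].GetValue().(proto.Request)
		if err := updateForBatch(args, batchArgs.RequestHeader); err != nil {
			batchReply.Header().SetGoError(err)
			return
		}
		calls[i] = proto.Call{Args: args, Reply: args.CreateReply()}
	}
	// Dispatch all calls concurrently, then wait for them to finish.
	var wg sync.WaitGroup
	for i := range calls {
		wg.Add(1)
		go func(call proto.Call) {
			defer wg.Done()
			tc.sendOne(ctx, call)
		}(calls[i])
	}
	wg.Wait()
	// Merge sequentially: add replies, amalgamate txn updates, keep the first error.
	for _, call := range calls {
		batchReply.Add(call.Reply)
		if batchReply.Txn != nil {
			batchReply.Txn.Update(call.Reply.Header().Txn)
		}
		if batchReply.Error == nil && call.Reply.Header().Error != nil {
			batchReply.Error = call.Reply.Header().Error
		}
	}
}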
func newTestSender(pre, post func(proto.BatchRequest) (*proto.BatchResponse, *proto.Error)) SenderFunc {
	txnKey := proto.Key("test-txn")
	txnID := []byte(uuid.NewUUID4())

	return func(_ context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		ba.UserPriority = gogoproto.Int32(-1)
		if ba.Txn != nil && len(ba.Txn.ID) == 0 {
			ba.Txn.Key = txnKey
			ba.Txn.ID = txnID
		}
		var br *proto.BatchResponse
		var pErr *proto.Error
		if pre != nil {
			br, pErr = pre(ba)
		} else {
			br = &proto.BatchResponse{}
		}
		if pErr != nil {
			return nil, pErr
		}
		var writing bool
		status := proto.PENDING
		if _, ok := ba.GetArg(proto.Put); ok {
			br.Add(gogoproto.Clone(testPutResp).(proto.Response))
			writing = true
		}
		if args, ok := ba.GetArg(proto.EndTransaction); ok {
			et := args.(*proto.EndTransactionRequest)
			writing = true
			if et.Commit {
				status = proto.COMMITTED
			} else {
				status = proto.ABORTED
			}
		}
		br.Txn = gogoproto.Clone(ba.Txn).(*proto.Transaction)
		if br.Txn != nil && pErr == nil {
			br.Txn.Writing = writing
			br.Txn.Status = status
		}
		if post != nil {
			br, pErr = post(ba)
		}
		return br, pErr
	}
}
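// newFailingTestSender is a hypothetical helper (not part of this revision)
// showing one way to use the pre hook above: fail every batch before the
// default response is fabricated. The choice of TransactionAbortedError is
// illustrative; any error wrapped by proto.NewError works. With both hooks
// nil, newTestSender simply fabricates a successful response as in its
// default path.
func newFailingTestSender() SenderFunc {
	return newTestSender(func(proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		// Returning a non-nil *proto.Error short-circuits the fake sender.
		return nil, proto.NewError(&proto.TransactionAbortedError{})
	}, nil)
}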
// sendBatch unrolls a batched command and sends each constituent
// command in turn, amalgamating the results into the batch reply.
func (tc *TxnCoordSender) sendBatch(batchArgs *proto.BatchRequest, batchReply *proto.BatchResponse) {
	// Prepare the calls by unrolling the batch. If the batchReply is
	// pre-initialized with replies, use those; otherwise create replies
	// as needed.
	// TODO(spencer): send calls in parallel.
	batchReply.Txn = batchArgs.Txn
	for i := range batchArgs.Requests {
		// Initialize args header values where appropriate.
		args := batchArgs.Requests[i].GetValue().(proto.Request)
		method, err := proto.MethodForRequest(args)
		if err != nil {
			batchReply.SetGoError(err)
			return
		}
		call := &client.Call{Method: method, Args: args}
		if args.Header().User == "" {
			args.Header().User = batchArgs.User
		}
		if args.Header().UserPriority == nil {
			args.Header().UserPriority = batchArgs.UserPriority
		}
		args.Header().Txn = batchArgs.Txn
		// Create a reply from the method type and add to batch response.
		if i >= len(batchReply.Responses) {
			if call.Reply, err = proto.CreateReply(method); err != nil {
				batchReply.SetGoError(util.Errorf("unsupported method in batch: %s", method))
				return
			}
			batchReply.Add(call.Reply)
		} else {
			call.Reply = batchReply.Responses[i].GetValue().(proto.Response)
		}
		tc.sendOne(call)
		// Amalgamate transaction updates and propagate first error, if applicable.
		if batchReply.Txn != nil {
			batchReply.Txn.Update(call.Reply.Header().Txn)
		}
		if call.Reply.Header().Error != nil {
			batchReply.Error = call.Reply.Header().Error
			return
		}
	}
}
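// updateForBatch is called by the newer sendBatch above but is not shown in
// this section. The sketch below reconstructs its likely behavior from the
// inline header handling in the older version; the real implementation may
// differ (for example in how it treats an already-set Txn), so the name is
// suffixed to make clear this is illustrative only.
func updateForBatchSketch(args proto.Request, header proto.RequestHeader) error {
	// Propagate batch-level header fields to the sub-request where unset.
	if args.Header().User == "" {
		args.Header().User = header.User
	}
	if args.Header().UserPriority == nil {
		args.Header().UserPriority = header.UserPriority
	}
	args.Header().Txn = header.Txn
	return nil
}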
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	// Run a one-off transaction with that single command.
	if log.V(1) {
		log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
	}
	tmpDB := client.NewDBWithPriority(tc, ba.GetUserPriority())
	var br *proto.BatchResponse
	err := tmpDB.Txn(func(txn *client.Txn) error {
		txn.SetDebugName("auto-wrap", 0)
		b := &client.Batch{}
		for _, arg := range ba.Requests {
			req := arg.GetInner()
			b.InternalAddRequest(req)
		}
		var err error
		br, err = txn.CommitInBatchWithResponse(b)
		return err
	})
	if err != nil {
		return nil, proto.NewError(err)
	}
	br.Txn = nil // hide the evidence
	return br, nil
}
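// sendWithTxnFallback is a hypothetical wrapper (not part of this revision)
// illustrating where resendWithTxn fits: if a non-transactional batch comes
// back with OpRequiresTxnError (for example because it spans ranges), retry
// it wrapped in a transaction. The send parameter stands in for the
// coordinator's normal send path, which is an assumption here; note that the
// OpRequiresTxnError case in updateState below currently panics, so this
// flow is aspirational.
func (tc *TxnCoordSender) sendWithTxnFallback(
	ba proto.BatchRequest,
	send func(proto.BatchRequest) (*proto.BatchResponse, *proto.Error),
) (*proto.BatchResponse, *proto.Error) {
	br, pErr := send(ba)
	if _, ok := pErr.GoError().(*proto.OpRequiresTxnError); ok {
		return tc.resendWithTxn(ba)
	}
	return br, pErr
}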
// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(ctx context.Context, ba proto.BatchRequest, br *proto.BatchResponse, pErr *proto.Error) *proto.Error {
	trace := tracer.FromCtx(ctx)
	newTxn := &proto.Transaction{}
	newTxn.Update(ba.GetTxn())
	err := pErr.GoError()
	switch t := err.(type) {
	case nil:
		newTxn.Update(br.GetTxn())
		// Move txn timestamp forward to response timestamp if applicable.
		// TODO(tschottdorf): see (*Replica).executeBatch and comments within.
		// Looks like this isn't necessary any more, nor did it prevent a bug
		// referenced in a TODO there.
		newTxn.Timestamp.Forward(br.Timestamp)
	case *proto.TransactionStatusError:
		// Likely already committed or more obscure errors such as epoch or
		// timestamp regressions; consider txn dead.
		defer tc.cleanupTxn(trace, t.Txn)
	case *proto.OpRequiresTxnError:
		// TODO(tschottdorf): range-spanning autowrap currently broken.
		panic("TODO(tschottdorf): disabled")
	case *proto.ReadWithinUncertaintyIntervalError:
		// Mark the host as certain. See the protobuf comment for
		// Transaction.CertainNodes for details.
		if t.NodeID == 0 {
			panic("no replica set in header on uncertainty restart")
		}
		newTxn.CertainNodes.Add(t.NodeID)
		// If the reader encountered a newer write within the uncertainty
		// interval, move the timestamp forward, just past that write or
		// up to MaxTimestamp, whichever comes first.
		candidateTS := newTxn.MaxTimestamp
		candidateTS.Backward(t.ExistingTimestamp.Add(0, 1))
		newTxn.Timestamp.Forward(candidateTS)
		newTxn.Restart(ba.GetUserPriority(), newTxn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case *proto.TransactionAbortedError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Priority = t.Txn.Priority
		t.Txn = *newTxn
		// Clean up the freshly aborted transaction in defer(), avoiding a
		// race with the state update below.
		defer tc.cleanupTxn(trace, t.Txn)
	case *proto.TransactionPushError:
		// Increase timestamp if applicable, ensuring that we're
		// just ahead of the pushee.
		newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp.Add(0, 1))
		newTxn.Restart(ba.GetUserPriority(), t.PusheeTxn.Priority-1, newTxn.Timestamp)
		t.Txn = newTxn
	case *proto.TransactionRetryError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Restart(ba.GetUserPriority(), t.Txn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case proto.TransactionRestartError:
		// Assertion: The above cases should exhaust all ErrorDetails which
		// carry a Transaction.
		if pErr.Detail != nil {
			panic(fmt.Sprintf("unhandled TransactionRestartError %T", err))
		}
	}

	return func() *proto.Error {
		if len(newTxn.ID) <= 0 {
			return pErr
		}
		id := string(newTxn.ID)
		tc.Lock()
		defer tc.Unlock()
		txnMeta := tc.txns[id]
		// For successful transactional requests, keep the written intents and
		// the updated transaction record to be sent along with the reply.
		// The transaction metadata is created with the first writing operation.
		// TODO(tschottdorf): already computed the intents prior to sending,
		// consider re-using those.
		if intents := ba.GetIntents(); len(intents) > 0 && err == nil {
			if txnMeta == nil {
				newTxn.Writing = true
				txnMeta = &txnMetadata{
					txn:              *newTxn,
					keys:             cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
					firstUpdateNanos: tc.clock.PhysicalNow(),
					lastUpdateNanos:  tc.clock.PhysicalNow(),
					timeoutDuration:  tc.clientTimeout,
					txnEnd:           make(chan struct{}),
				}
				tc.txns[id] = txnMeta
				// If the transaction is already over, there's no point in
				// launching a one-off coordinator which will shut down right
				// away.
				if _, isEnding := ba.GetArg(proto.EndTransaction); !isEnding {
					trace.Event("coordinator spawns")
					if !tc.stopper.RunAsyncTask(func() {
						tc.heartbeatLoop(id)
					}) {
						// The system is already draining and we can't start the
						// heartbeat. We refuse new transactions for now because
						// they're likely not going to have all intents committed.
						// In principle, we can relax this as needed though.
						tc.unregisterTxnLocked(id)
						return proto.NewError(&proto.NodeUnavailableError{})
					}
				}
			}
			for _, intent := range intents {
				txnMeta.addKeyRange(intent.Key, intent.EndKey)
			}
		}
		// Update our record of this transaction, even on error.
		if txnMeta != nil {
			txnMeta.txn.Update(newTxn) // better to replace after #2300
			if !txnMeta.txn.Writing {
				panic("tracking a non-writing txn")
			}
			txnMeta.setLastUpdate(tc.clock.PhysicalNow())
		}
		if err == nil {
			// For successful transactional requests, always send the updated txn
			// record back.
			if br.Txn == nil {
				br.Txn = &proto.Transaction{}
			}
			*br.Txn = *newTxn
		}
		return pErr
	}()
}
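// The ReadWithinUncertaintyIntervalError case in updateState clamps the new
// timestamp to min(MaxTimestamp, ExistingTimestamp.Add(0, 1)) and then
// ratchets the txn timestamp up to that value. The standalone program below
// re-implements that arithmetic on a simplified (wall, logical) pair to make
// the clamping concrete; the ts type and its helpers are illustrative only
// and are not the proto.Timestamp API (Forward is assumed to take the
// maximum and Backward the minimum, matching their use above).
package main

import "fmt"

// ts is a simplified stand-in for a (wall time, logical) timestamp.
type ts struct {
	wall    int64
	logical int32
}

func (t ts) less(o ts) bool {
	return t.wall < o.wall || (t.wall == o.wall && t.logical < o.logical)
}

// backward lowers t to o if o is earlier (min), mirroring Timestamp.Backward.
func (t *ts) backward(o ts) {
	if o.less(*t) {
		*t = o
	}
}

// forward raises t to o if o is later (max), mirroring Timestamp.Forward.
func (t *ts) forward(o ts) {
	if t.less(o) {
		*t = o
	}
}

func main() {
	txnTS := ts{wall: 10}    // transaction's current timestamp
	maxTS := ts{wall: 20}    // transaction's MaxTimestamp (uncertainty limit)
	existing := ts{wall: 15} // newer write observed within the uncertainty interval

	candidate := maxTS
	candidate.backward(ts{existing.wall, existing.logical + 1}) // min(MaxTimestamp, existing.Add(0, 1))
	txnTS.forward(candidate)                                    // move just past the conflicting write

	fmt.Printf("restart timestamp: %+v\n", txnTS) // restart timestamp: {wall:15 logical:1}
}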