// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(
	ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, *roachpb.Error) {
	ctx := tc.AnnotateCtx(context.TODO())
	// Run a one-off transaction with that single command.
	if log.V(1) {
		log.Infof(ctx, "%s: auto-wrapping in txn and re-executing: ", ba)
	}
	// TODO(bdarnell): need to be able to pass other parts of DBContext
	// through here.
	dbCtx := client.DefaultDBContext()
	dbCtx.UserPriority = ba.UserPriority
	tmpDB := client.NewDBWithContext(tc, dbCtx)
	var br *roachpb.BatchResponse
	err := tmpDB.Txn(ctx, func(txn *client.Txn) error {
		txn.SetDebugName("auto-wrap", 0)
		b := txn.NewBatch()
		b.Header = ba.Header
		for _, arg := range ba.Requests {
			req := arg.GetInner()
			b.AddRawRequest(req)
		}
		err := txn.CommitInBatch(b)
		br = b.RawResponse()
		return err
	})
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	br.Txn = nil // hide the evidence
	return br, nil
}
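// Hypothetical sketch, not part of the original source: the panic on
// OpRequiresTxnError in updateState below implies that error is handled
// earlier in the coordinator's send path, presumably by re-running the
// batch through resendWithTxn roughly like this. The function name and
// routing shown here are assumptions for illustration only.
func maybeResendWithTxn(
	tc *TxnCoordSender, ba roachpb.BatchRequest, pErr *roachpb.Error,
) (*roachpb.BatchResponse, *roachpb.Error) {
	if _, ok := pErr.GetDetail().(*roachpb.OpRequiresTxnError); ok {
		// The server rejected the non-transactional batch; wrap it in a
		// one-off txn and re-execute it on the client's behalf.
		return tc.resendWithTxn(ba)
	}
	return nil, pErr
}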
func (n *Node) batchInternal(
	ctx context.Context, args *roachpb.BatchRequest,
) (*roachpb.BatchResponse, error) {
	// TODO(marc): grpc's authentication model (which gives credential access in
	// the request handler) doesn't really fit with the current design of the
	// security package (which assumes that TLS state is only given at connection
	// time) - that should be fixed.
	if peer, ok := peer.FromContext(ctx); ok {
		if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
			certUser, err := security.GetCertificateUser(&tlsInfo.State)
			if err != nil {
				return nil, err
			}
			if certUser != security.NodeUser {
				return nil, errors.Errorf("user %s is not allowed", certUser)
			}
		}
	}

	var br *roachpb.BatchResponse

	type snowballInfo struct {
		syncutil.Mutex
		collectedSpans [][]byte
		done           bool
	}
	var snowball *snowballInfo

	if err := n.stopper.RunTaskWithErr(func() error {
		const opName = "node.Batch"
		sp, err := tracing.JoinOrNew(n.storeCfg.AmbientCtx.Tracer, args.TraceContext, opName)
		if err != nil {
			return err
		}
		// If this is a snowball span, it gets special treatment: It skips the
		// regular tracing machinery, and we instead send the collected spans
		// back with the response. This is more expensive, but then again,
		// those are individual requests traced by users, so they can be.
		if sp.BaggageItem(tracing.Snowball) != "" {
			sp.LogEvent("delegating to snowball tracing")
			sp.Finish()

			snowball = new(snowballInfo)
			recorder := func(rawSpan basictracer.RawSpan) {
				snowball.Lock()
				defer snowball.Unlock()
				if snowball.done {
					// This is a late span that we must discard because the request was
					// already completed.
					return
				}
				encSp, err := tracing.EncodeRawSpan(&rawSpan, nil)
				if err != nil {
					log.Warning(ctx, err)
				}
				snowball.collectedSpans = append(snowball.collectedSpans, encSp)
			}

			if sp, err = tracing.JoinOrNewSnowball(opName, args.TraceContext, recorder); err != nil {
				return err
			}
		}
		defer sp.Finish()
		traceCtx := opentracing.ContextWithSpan(ctx, sp)
		log.Event(traceCtx, args.Summary())

		tStart := timeutil.Now()
		var pErr *roachpb.Error
		br, pErr = n.stores.Send(traceCtx, *args)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
			log.ErrEventf(traceCtx, "%T", pErr.GetDetail())
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
		}
		n.metrics.callComplete(timeutil.Since(tStart), pErr)
		br.Error = pErr
		return nil
	}); err != nil {
		return nil, err
	}

	if snowball != nil {
		snowball.Lock()
		br.CollectedSpans = snowball.collectedSpans
		snowball.done = true
		snowball.Unlock()
	}

	return br, nil
}
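// Hypothetical illustration, not part of the original source: the snowball
// branch above only triggers when the incoming trace context carries the
// tracing.Snowball baggage item. A client would opt a request in roughly
// like this; the span name and baggage value are assumptions (any non-empty
// value satisfies the check above).
func markSnowball(tracer opentracing.Tracer) opentracing.Span {
	sp := tracer.StartSpan("client.request")
	// Baggage propagates along with the trace context, so batchInternal sees
	// it server-side and returns the collected spans in the response.
	sp.SetBaggageItem(tracing.Snowball, "1")
	return sp
}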
// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when appropriate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(
	ctx context.Context,
	startNS int64,
	ba roachpb.BatchRequest,
	br *roachpb.BatchResponse,
	pErr *roachpb.Error,
) *roachpb.Error {
	tc.Lock()
	defer tc.Unlock()

	if ba.Txn == nil {
		// Not a transactional request.
		return pErr
	}

	var newTxn roachpb.Transaction
	newTxn.Update(ba.Txn)
	if pErr == nil {
		newTxn.Update(br.Txn)
	} else if errTxn := pErr.GetTxn(); errTxn != nil {
		newTxn.Update(errTxn)
	}

	switch t := pErr.GetDetail().(type) {
	case *roachpb.OpRequiresTxnError:
		panic("OpRequiresTxnError must not happen at this level")
	case *roachpb.ReadWithinUncertaintyIntervalError:
		// If the reader encountered a newer write within the uncertainty
		// interval, we advance the txn's timestamp just past the last observed
		// timestamp from the node.
		restartTS, ok := newTxn.GetObservedTimestamp(pErr.OriginNode)
		if !ok {
			pErr = roachpb.NewError(errors.Errorf("no observed timestamp for node %d found on uncertainty restart", pErr.OriginNode))
		} else {
			newTxn.Timestamp.Forward(restartTS)
			newTxn.Restart(ba.UserPriority, newTxn.Priority, newTxn.Timestamp)
		}
	case *roachpb.TransactionAbortedError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(pErr.GetTxn().Timestamp)
		newTxn.Priority = pErr.GetTxn().Priority
		// Clean up the freshly aborted transaction in defer(), avoiding a
		// race with the state update below.
		defer tc.cleanupTxnLocked(ctx, newTxn)
	case *roachpb.TransactionPushError:
		// Increase timestamp if applicable, ensuring that we're
		// just ahead of the pushee.
		newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp)
		newTxn.Restart(ba.UserPriority, t.PusheeTxn.Priority-1, newTxn.Timestamp)
	case *roachpb.TransactionRetryError:
		// Increase timestamp so on restart, we're ahead of any timestamp
		// cache entries or newer versions which caused the restart.
		newTxn.Restart(ba.UserPriority, pErr.GetTxn().Priority, newTxn.Timestamp)
	case *roachpb.WriteTooOldError:
		newTxn.Restart(ba.UserPriority, newTxn.Priority, t.ActualTimestamp)
	case nil:
		// Nothing to do here, avoid the default case.
	default:
		// Do not clean up the transaction since we're leaving cancellation of
		// the transaction up to the client. For example, on seeing an error,
		// like TransactionStatusError or ConditionFailedError, the client
		// will call Txn.CleanupOnError() which will clean up the transaction
		// and its intents. Therefore leave the transaction in the PENDING
		// state and do not call cleanupTxnLocked().
	}

	txnID := *newTxn.ID
	txnMeta := tc.txns[txnID]
	// For successful transactional requests, keep the written intents and
	// the updated transaction record to be sent along with the reply.
	// The transaction metadata is created with the first writing operation.
	// A tricky edge case is that of a transaction which "fails" on the
	// first writing request, but actually manages to write some intents
	// (for example, due to being multi-range). In this case, there will
	// be an error, but the transaction will be marked as Writing and the
	// coordinator must track the state, for the client's retry will be
	// performed with a Writing transaction which the coordinator rejects
	// unless it is tracking it (on top of it making sense to track it;
	// after all, it **has** laid down intents and only the coordinator
	// can augment a potential EndTransaction call). See #3303.
	if txnMeta != nil || pErr == nil || newTxn.Writing {
		// Adding the intents even on error reduces the likelihood of dangling
		// intents blocking concurrent writers for extended periods of time.
		// See #3346.
		var keys []roachpb.Span
		if txnMeta != nil {
			keys = txnMeta.keys
		}
		ba.IntentSpanIterate(br, func(key, endKey roachpb.Key) {
			keys = append(keys, roachpb.Span{
				Key:    key,
				EndKey: endKey,
			})
		})

		if txnMeta != nil {
			txnMeta.keys = keys
		} else if len(keys) > 0 {
			if !newTxn.Writing {
				panic("txn with intents marked as non-writing")
			}
			// If the transaction is already over, there's no point in
			// launching a one-off coordinator which will shut down right
			// away. If we ended up here with an error, we'll always start
			// the coordinator - the transaction has laid down intents, so
			// we expect it to be committed/aborted at some point in the
			// future.
			if _, isEnding := ba.GetArg(roachpb.EndTransaction); pErr != nil || !isEnding {
				log.Event(ctx, "coordinator spawns")
				txnMeta = &txnMetadata{
					txn:              newTxn,
					keys:             keys,
					firstUpdateNanos: startNS,
					lastUpdateNanos:  tc.clock.PhysicalNow(),
					timeoutDuration:  tc.clientTimeout,
					txnEnd:           make(chan struct{}),
				}
				tc.txns[txnID] = txnMeta

				if err := tc.stopper.RunAsyncTask(ctx, func(ctx context.Context) {
					tc.heartbeatLoop(ctx, txnID)
				}); err != nil {
					// The system is already draining and we can't start the
					// heartbeat. We refuse new transactions for now because
					// they're likely not going to have all intents committed.
					// In principle, we can relax this as needed though.
					tc.unregisterTxnLocked(txnID)
					return roachpb.NewError(err)
				}
			} else {
				// If this was a successful one phase commit, update stats
				// directly as they won't otherwise be updated on heartbeat
				// loop shutdown.
				etArgs, ok := br.Responses[len(br.Responses)-1].GetInner().(*roachpb.EndTransactionResponse)
				tc.updateStats(tc.clock.PhysicalNow()-startNS, 0, newTxn.Status, ok && etArgs.OnePhaseCommit)
			}
		}
	}

	// Update our record of this transaction, even on error.
	if txnMeta != nil {
		txnMeta.txn.Update(&newTxn)
		if !txnMeta.txn.Writing {
			panic("tracking a non-writing txn")
		}
		txnMeta.setLastUpdate(tc.clock.PhysicalNow())
	}

	if pErr == nil {
		// For successful transactional requests, always send the updated txn
		// record back. Note that we make sure not to share data with newTxn
		// (which may have made it into txnMeta).
		if br.Txn != nil {
			br.Txn.Update(&newTxn)
		} else {
			clonedTxn := newTxn.Clone()
			br.Txn = &clonedTxn
		}
	} else if pErr.GetTxn() != nil {
		// Avoid changing existing errors because sometimes they escape into
		// goroutines and data races can occur.
		pErrShallow := *pErr
		pErrShallow.SetTxn(&newTxn) // SetTxn clones newTxn
		pErr = &pErrShallow
	}

	return pErr
}
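// Hypothetical reconstruction, not the original definition: the fields a
// txnMetadata value must carry, inferred from how updateState populates it
// above (provisional txn record, intent spans, heartbeat bookkeeping, and
// the channel used to stop the heartbeat loop). Field types are assumptions
// based on the assigned values.
type txnMetadataSketch struct {
	txn              roachpb.Transaction // coordinator's provisional copy of the txn record
	keys             []roachpb.Span      // intent spans laid down so far
	firstUpdateNanos int64               // when the txn was first seen (startNS above)
	lastUpdateNanos  int64               // refreshed via setLastUpdate on every update
	timeoutDuration  time.Duration       // client timeout (tc.clientTimeout above)
	txnEnd           chan struct{}       // closed to terminate the heartbeat loop
}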
// newTestSender returns a SenderFunc that mocks out some of the txn
// coordinator sender's functionality. It responds to PutRequests using
// testPutResp.
func newTestSender(
	pre, post func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error),
) SenderFunc {
	txnID := uuid.MakeV4()

	return func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		if ba.UserPriority == 0 {
			ba.UserPriority = 1
		}
		if ba.Txn != nil && ba.Txn.ID == nil {
			ba.Txn.Key = txnKey
			ba.Txn.ID = &txnID
		}

		var br *roachpb.BatchResponse
		var pErr *roachpb.Error
		if pre != nil {
			br, pErr = pre(ba)
		} else {
			br = ba.CreateReply()
		}
		if pErr != nil {
			return nil, pErr
		}
		var writing bool
		status := roachpb.PENDING
		for i, req := range ba.Requests {
			args := req.GetInner()
			if _, ok := args.(*roachpb.PutRequest); ok {
				testPutRespCopy := testPutResp
				union := &br.Responses[i] // avoid operating on copy
				union.MustSetInner(&testPutRespCopy)
			}
			if roachpb.IsTransactionWrite(args) {
				writing = true
			}
		}
		if args, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := args.(*roachpb.EndTransactionRequest)
			writing = true
			if et.Commit {
				status = roachpb.COMMITTED
			} else {
				status = roachpb.ABORTED
			}
		}
		if ba.Txn != nil {
			txnClone := ba.Txn.Clone()
			br.Txn = &txnClone
			if pErr == nil {
				br.Txn.Writing = writing
				br.Txn.Status = status
			}
		}
		if post != nil {
			br, pErr = post(ba)
		}
		return br, pErr
	}
}
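// Hypothetical usage sketch, not from the original tests: a sender whose
// post hook discards the default reply and injects an error, e.g. to
// exercise the coordinator's error handling. The function name and error
// text are illustrative only.
func newFailingTestSender() SenderFunc {
	return newTestSender(
		nil, // pre: fall back to ba.CreateReply()
		func(_ roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
			return nil, roachpb.NewErrorf("injected failure")
		},
	)
}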