// rangeLookup dispatches a RangeLookup request for the given
// metadata key to the replicas of the given range. Note that we allow
// inconsistent reads when doing range lookups for efficiency. Getting
// stale data is not a correctness problem but instead may
// infrequently result in additional latency as additional range
// lookups may be required. Note also that rangeLookup bypasses the
// DistSender's Send() method, so there is no error inspection and
// retry logic here; this is not an issue since the lookup performs a
// single inconsistent read only.
func (ds *DistSender) rangeLookup(key proto.Key, options lookupOptions,
	desc *proto.RangeDescriptor) ([]proto.RangeDescriptor, error) {
	ba := proto.BatchRequest{}
	ba.ReadConsistency = proto.INCONSISTENT
	ba.Add(&proto.RangeLookupRequest{
		RequestHeader: proto.RequestHeader{
			Key:             key,
			ReadConsistency: proto.INCONSISTENT,
		},
		MaxRanges:       ds.rangeLookupMaxRanges,
		ConsiderIntents: options.considerIntents,
		Reverse:         options.useReverseScan,
	})
	replicas := newReplicaSlice(ds.gossip, desc)
	// TODO(tschottdorf) consider a Trace here, potentially that of the request
	// that had the cache miss and waits for the result.
	reply, err := ds.sendRPC(nil /* Trace */, desc.RangeID, replicas, rpc.OrderRandom, &ba)
	if err != nil {
		return nil, err
	}
	br := reply.(*proto.BatchResponse)
	if err := br.GoError(); err != nil {
		return nil, err
	}
	return br.Responses[0].GetInner().(*proto.RangeLookupResponse).Ranges, nil
}
// send runs the specified calls synchronously in a single batch and
// returns any errors.
func (db *DB) send(reqs ...proto.Request) (*proto.BatchResponse, *proto.Error) {
	if len(reqs) == 0 {
		return &proto.BatchResponse{}, nil
	}
	if len(reqs) == 1 {
		// We only send BatchRequest. Everything else needs to go into one.
		if ba, ok := reqs[0].(*proto.BatchRequest); ok {
			if ba.UserPriority == nil && db.userPriority != 0 {
				ba.UserPriority = gogoproto.Int32(db.userPriority)
			}
			resetClientCmdID(ba)
			br, pErr := db.sender.Send(context.TODO(), *ba)
			if pErr != nil {
				if log.V(1) {
					log.Infof("failed %s: %s", ba.Method(), pErr)
				}
				return nil, pErr
			}
			return br, nil
		}
	}
	ba := proto.BatchRequest{}
	ba.Add(reqs...)
	br, pErr := db.send(&ba)
	if pErr != nil {
		return nil, pErr
	}
	return br, nil
}
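// Illustrative usage sketch, not part of the source: how a caller might rely
// on (*DB).send's fallthrough path, which wraps any non-batch requests into a
// single BatchRequest via ba.Add(reqs...). The helper name and keys here are
// hypothetical.
func exampleSendPuts(db *DB) (*proto.BatchResponse, *proto.Error) {
	put1 := &proto.PutRequest{}
	put1.Key = proto.Key("a")
	put2 := &proto.PutRequest{}
	put2.Key = proto.Key("b")
	// Neither request is a *proto.BatchRequest, so both are combined into one
	// batch and re-sent through db.send.
	return db.send(put1, put2)
}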
func testPut() proto.BatchRequest {
	var ba proto.BatchRequest
	ba.Timestamp = testTS
	put := &proto.PutRequest{}
	put.Key = testKey
	ba.Add(put)
	return ba
}
// SendBatch implements batch.Sender.
func (ls *LocalSender) SendBatch(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
	trace := tracer.FromCtx(ctx)
	var store *storage.Store
	var err error

	// If we aren't given a Replica, then a little bending over backwards is
	// required here. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *proto.Replica
		var rangeID proto.RangeID
		rangeID, repl, err = ls.lookupReplica(ba.Key, ba.EndKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.Method, ba.Method(), // TODO(tschottdorf): Method() always `Batch`.
		log.Key, ba.Key,
		log.RangeID, ba.RangeID)

	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}
	var br *proto.BatchResponse
	if err == nil {
		// For calls that read data within a txn, we can avoid uncertainty
		// related retries in certain situations. If the node is in
		// "CertainNodes", we need not worry about uncertain reads any
		// more. Setting MaxTimestamp=Timestamp for the operation
		// accomplishes that. See proto.Transaction.CertainNodes for details.
		if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
			// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
			trace.Event("read has no clock uncertainty")
			ba.Txn.MaxTimestamp = ba.Txn.Timestamp
		}
		{
			var tmpR proto.Response
			// TODO(tschottdorf): &ba -> ba
			tmpR, err = store.ExecuteCmd(ctx, &ba)
			// TODO(tschottdorf): remove this dance once BatchResponse is returned.
			if tmpR != nil {
				br = tmpR.(*proto.BatchResponse)
				if br.Error != nil {
					panic(proto.ErrorUnexpectedlySet)
				}
			}
		}
	}
	// TODO(tschottdorf): Later error needs to be associated to an index
	// and ideally individual requests don't even have an error in their
	// header. See #1891.
	return br, err
}
// SendBatch implements the batch.Sender interface. It subdivides
// the Batch into batches admissible for sending (preventing certain
// illegal mixtures of requests), executes each individual part
// (which may span multiple ranges), and recombines the response.
func (ds *DistSender) SendBatch(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
	// In the event that timestamp isn't set and read consistency isn't
	// required, set the timestamp using the local clock.
	// TODO(tschottdorf): right place for this?
	if ba.ReadConsistency == proto.INCONSISTENT && ba.Timestamp.Equal(proto.ZeroTimestamp) {
		// Make sure that after the call, args hasn't changed.
		defer func(timestamp proto.Timestamp) {
			ba.Timestamp = timestamp
		}(ba.Timestamp)
		ba.Timestamp = ds.clock.Now()
	}

	// TODO(tschottdorf): provisional instantiation.
	return newChunkingSender(ds.sendChunk).SendBatch(ctx, ba)
}
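// Standalone sketch (assumption, not from the source) of the Go idiom used
// above: the deferred closure receives the original timestamp as an argument,
// snapshotting it at defer time, so the value is restored on return even
// though it was temporarily overwritten for the call.
package main

import "fmt"

func main() {
	ts := 0 // stand-in for ba.Timestamp (zero value)
	func() {
		defer func(orig int) {
			ts = orig // restore the snapshot taken when defer ran
		}(ts)
		ts = 42                         // stand-in for ba.Timestamp = ds.clock.Now()
		fmt.Println("during call:", ts) // during call: 42
	}()
	fmt.Println("after call:", ts) // after call: 0
}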
func (ts *txnSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(proto.ErrorUnexpectedlySet(ts.wrapped, br))
	}

	// TODO(tschottdorf): see about using only the top-level *proto.Error
	// information for this restart logic (includes adding the Txn).
	err := pErr.GoError()
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to it as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if err == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if abrtErr, ok := err.(*proto.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = proto.Transaction{
			Name:      ts.Proto.Name,
			Isolation: ts.Proto.Isolation,
			// Acts as a minimum priority on restart.
			Priority: abrtErr.Transaction().GetPriority(),
		}
	} else if txnErr, ok := err.(proto.TransactionRestartError); ok {
		ts.Proto.Update(txnErr.Transaction())
	}
	return nil, pErr
}
// SendBatch implements Sender.
// TODO(tschottdorf): We actually don't want to chop EndTransaction off for
// single-range requests (but that happens now since EndTransaction has the
// isAlone flag). Whether it is one or not is unknown right now (you can only
// find out after you've sent to the Range/looked up a descriptor that suggests
// that you're multi-range). In those cases, the wrapped sender should return
// an error so that we split and retry once with the chunk which contains
// EndTransaction (i.e. the last one).
func (cs *chunkingSender) SendBatch(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
	if len(ba.Requests) < 1 {
		panic("empty batch")
	}

	// Deterministically create ClientCmdIDs for all parts of the batch if
	// a CmdID is already set (otherwise, leave them empty).
	var nextID func() proto.ClientCmdID
	empty := proto.ClientCmdID{}
	if empty == ba.CmdID {
		nextID = func() proto.ClientCmdID {
			return empty
		}
	} else {
		rng := rand.New(rand.NewSource(ba.CmdID.Random))
		id := ba.CmdID
		nextID = func() proto.ClientCmdID {
			curID := id             // copy
			id.Random = rng.Int63() // adjust for next call
			return curID
		}
	}

	parts := ba.Split()
	var rplChunks []*proto.BatchResponse
	for _, part := range parts {
		ba.Requests = part
		ba.CmdID = nextID()
		rpl, err := cs.f(ctx, ba)
		if err != nil {
			return nil, err
		}
		// Propagate transaction from last reply to next request. The final
		// update is taken and put into the response's main header.
		ba.Txn.Update(rpl.Header().Txn)
		rplChunks = append(rplChunks, rpl)
	}

	reply := rplChunks[0]
	for _, rpl := range rplChunks[1:] {
		reply.Responses = append(reply.Responses, rpl.Responses...)
	}
	reply.ResponseHeader = rplChunks[len(rplChunks)-1].ResponseHeader
	reply.Txn = ba.Txn
	return reply, nil
}
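// Standalone sketch (assumption, not from the source) of why the nextID
// closure above is deterministic: math/rand seeded with the same value always
// yields the same stream, so a retry of the same batch (same ba.CmdID.Random)
// re-derives identical per-chunk ClientCmdIDs.
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	seed := int64(42) // stand-in for ba.CmdID.Random
	for attempt := 0; attempt < 2; attempt++ {
		rng := rand.New(rand.NewSource(seed))
		// Both attempts print the same three values.
		fmt.Println(rng.Int63(), rng.Int63(), rng.Int63())
	}
}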
// rangeLookup implements the rangeDescriptorDB interface. It looks up
// the descriptors for the given (meta) key.
func (ls *LocalSender) rangeLookup(key proto.Key, options lookupOptions,
	_ *proto.RangeDescriptor) ([]proto.RangeDescriptor, error) {
	ba := proto.BatchRequest{}
	ba.ReadConsistency = proto.INCONSISTENT
	ba.Add(&proto.RangeLookupRequest{
		RequestHeader: proto.RequestHeader{
			Key:             key,
			ReadConsistency: proto.INCONSISTENT,
		},
		MaxRanges:       1,
		ConsiderIntents: options.considerIntents,
		Reverse:         options.useReverseScan,
	})
	br, pErr := ls.Send(context.Background(), ba)
	if pErr != nil {
		return nil, pErr.GoError()
	}
	return br.Responses[0].GetInner().(*proto.RangeLookupResponse).Ranges, nil
}
// TestBatchPrevNext tests batch.{Prev,Next}.
func TestBatchPrevNext(t *testing.T) {
	defer leaktest.AfterTest(t)
	span := func(strs ...string) []keys.Span {
		var r []keys.Span
		for i, str := range strs {
			if i%2 == 0 {
				r = append(r, keys.Span{Start: proto.Key(str)})
			} else {
				r[len(r)-1].End = proto.Key(str)
			}
		}
		return r
	}
	max, min := string(proto.KeyMax), string(proto.KeyMin)
	abc := span("a", "", "b", "", "c", "")

	testCases := []struct {
		spans             []keys.Span
		key, expFW, expBW string
	}{
		{spans: span("a", "c", "b", ""), key: "b", expFW: "b", expBW: "b"},
		{spans: span("a", "c", "b", ""), key: "a", expFW: "a", expBW: "a"},
		{spans: span("a", "c", "d", ""), key: "c", expFW: "d", expBW: "c"},
		{spans: span("a", "c\x00", "d", ""), key: "c", expFW: "c", expBW: "c"},
		{spans: abc, key: "b", expFW: "b", expBW: "b"},
		{spans: abc, key: "b\x00", expFW: "c", expBW: "b\x00"},
		{spans: abc, key: "bb", expFW: "c", expBW: "b"},
		{spans: span(), key: "whatevs", expFW: max, expBW: min},
	}

	for i, test := range testCases {
		var ba proto.BatchRequest
		for _, span := range test.spans {
			args := &proto.ScanRequest{}
			args.Key, args.EndKey = span.Start, span.End
			ba.Add(args)
		}
		if next := next(ba, proto.Key(test.key)); !bytes.Equal(next, proto.Key(test.expFW)) {
			t.Errorf("%d: next: expected %q, got %q", i, test.expFW, next)
		}
		if prev := prev(ba, proto.Key(test.key)); !bytes.Equal(prev, proto.Key(test.expBW)) {
			t.Errorf("%d: prev: expected %q, got %q", i, test.expBW, prev)
		}
	}
}
// maybeBeginTxn begins a new transaction if a txn has been specified
// in the request but has a nil ID. The new transaction is initialized
// using the name and isolation in the otherwise uninitialized txn.
// The Priority, if non-zero, is used as a minimum.
func (tc *TxnCoordSender) maybeBeginTxn(ba *proto.BatchRequest) {
	if ba.Txn == nil {
		return
	}
	if len(ba.Requests) == 0 {
		panic("empty batch with txn")
	}
	if len(ba.Txn.ID) == 0 {
		// TODO(tschottdorf): should really choose the first txn write here.
		firstKey := ba.Requests[0].GetInner().Header().Key
		newTxn := proto.NewTransaction(ba.Txn.Name, keys.KeyAddress(firstKey), ba.GetUserPriority(),
			ba.Txn.Isolation, tc.clock.Now(), tc.clock.MaxOffset().Nanoseconds())
		// Use existing priority as a minimum. This is used on transaction
		// aborts to ratchet priority when creating successor transaction.
		if newTxn.Priority < ba.Txn.Priority {
			newTxn.Priority = ba.Txn.Priority
		}
		ba.Txn = newTxn
	}
}
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)

	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		return ba.CreateReply().(*proto.BatchResponse), nil
	}), clock, false, nil, stopper)

	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()

	var ba proto.BatchRequest
	put := &proto.PutRequest{}
	put.Key = proto.Key("test")
	ba.Add(put)
	ba.Add(&proto.EndTransactionRequest{})
	ba.Txn = &proto.Transaction{Name: "test"}
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba proto.BatchRequest) (*proto.BatchResponse, error) {
	// Run a one-off transaction with that single command.
	if log.V(1) {
		log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
	}
	tmpDB := client.NewDBWithPriority(tc, ba.GetUserPriority())
	br := &proto.BatchResponse{}
	if err := tmpDB.Txn(func(txn *client.Txn) error {
		txn.SetDebugName("auto-wrap", 0)
		b := &client.Batch{}
		for _, arg := range ba.Requests {
			req := arg.GetValue().(proto.Request)
			call := proto.Call{Args: req, Reply: req.CreateReply()}
			b.InternalAddCall(call)
			br.Add(call.Reply)
		}
		return txn.CommitInBatch(b)
	}); err != nil {
		return nil, err
	}
	return br, nil
}
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	// Run a one-off transaction with that single command.
	if log.V(1) {
		log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
	}
	tmpDB := client.NewDBWithPriority(tc, ba.GetUserPriority())
	var br *proto.BatchResponse
	err := tmpDB.Txn(func(txn *client.Txn) error {
		txn.SetDebugName("auto-wrap", 0)
		b := &client.Batch{}
		for _, arg := range ba.Requests {
			req := arg.GetInner()
			b.InternalAddRequest(req)
		}
		var err error
		br, err = txn.CommitInBatchWithResponse(b)
		return err
	})
	if err != nil {
		return nil, proto.NewError(err)
	}
	br.Txn = nil // hide the evidence
	return br, nil
}
func (tc *TxnCoordSender) heartbeat(id string, trace *tracer.Trace, ctx context.Context) bool {
	tc.Lock()
	proceed := true
	txnMeta := tc.txns[id]
	// Before we send a heartbeat, determine whether this transaction
	// should be considered abandoned. If so, exit heartbeat.
	if txnMeta.hasClientAbandonedCoord(tc.clock.PhysicalNow()) {
		// TODO(tschottdorf): should we be more proactive here?
		// The client might be continuing the transaction
		// through another coordinator, but in the most likely
		// case it's just gone and the open transaction record
		// could block concurrent operations.
		if log.V(1) {
			log.Infof("transaction %s abandoned; stopping heartbeat", txnMeta.txn)
		}
		proceed = false
	}
	// txnMeta.txn is possibly replaced concurrently,
	// so grab a copy before unlocking.
	txn := txnMeta.txn
	tc.Unlock()
	if !proceed {
		return false
	}

	hb := &proto.HeartbeatTxnRequest{}
	hb.Key = txn.Key
	ba := proto.BatchRequest{}
	ba.Timestamp = tc.clock.Now()
	ba.Key = txn.Key
	ba.Txn = &txn
	ba.Add(hb)

	epochEnds := trace.Epoch("heartbeat")
	_, err := tc.wrapped.Send(ctx, ba)
	epochEnds()
	// If the transaction is not in pending state, then we can stop
	// the heartbeat. It's either aborted or committed, and we resolve
	// write intents accordingly.
	if err != nil {
		log.Warningf("heartbeat to %s failed: %s", txn, err)
	}
	// TODO(bdarnell): once we have gotten a heartbeat response with
	// Status != PENDING, future heartbeats are useless. However, we
	// need to continue the heartbeatLoop until the client either
	// commits or abandons the transaction. We could save a little
	// pointless work by restructuring this loop to stop sending
	// heartbeats between the time that the transaction is aborted and
	// the client finds out. Furthermore, we could use this information
	// to send TransactionAbortedErrors to the client so it can restart
	// immediately instead of running until its EndTransaction.
	return true
}
// sendChunk is in charge of sending an "admissible" piece of batch, i.e. one
// which doesn't need to be subdivided further before going to a range (so no
// mixing of forward and reverse scans, etc).
func (ds *DistSender) sendChunk(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
	// TODO(tschottdorf): prepare for removing Key and EndKey from BatchRequest,
	// making sure that anything that relies on them goes bust.
	ba.Key, ba.EndKey = nil, nil

	isReverse := ba.IsReverse()

	trace := tracer.FromCtx(ctx)

	// The minimal key range encompassing all requests contained within.
	// Local addressing has already been resolved.
	// TODO(tschottdorf): consider rudimentary validation of the batch here
	// (for example, non-range requests with EndKey, or empty key ranges).
	from, to := keys.Range(ba)
	var br *proto.BatchResponse

	// Send the request to one range per iteration.
	for {
		options := lookupOptions{
			useReverseScan: isReverse,
		}

		var curReply *proto.BatchResponse
		var desc *proto.RangeDescriptor
		var needAnother bool
		var err error
		for r := retry.Start(ds.rpcRetryOptions); r.Next(); {
			// Get range descriptor (or, when spanning range, descriptors). Our
			// error handling below may clear them on certain errors, so we
			// refresh (likely from the cache) on every retry.
			descDone := trace.Epoch("meta descriptor lookup")
			var evictDesc func()
			desc, needAnother, evictDesc, err = ds.getDescriptors(from, to, options)
			descDone()

			// getDescriptors may fail retryably if the first range isn't
			// available via Gossip.
			if err != nil {
				if rErr, ok := err.(retry.Retryable); ok && rErr.CanRetry() {
					if log.V(1) {
						log.Warning(err)
					}
					continue
				}
				break
			}

			// If there's no transaction and op spans ranges, possibly
			// re-run as part of a transaction for consistency. The
			// case where we don't need to re-run is if the read
			// consistency is not required.
			if needAnother && ba.Txn == nil && ba.IsRange() && ba.ReadConsistency != proto.INCONSISTENT {
				return nil, &proto.OpRequiresTxnError{}
			}

			// It's possible that the returned descriptor misses parts of the
			// keys it's supposed to scan after it's truncated to match the
			// descriptor. Example revscan [a,g), first desc lookup for "g"
			// returns descriptor [c,d) -> [d,g) is never scanned.
			// We evict and retry in such a case.
			if (isReverse && !desc.ContainsKeyRange(desc.StartKey, to)) || (!isReverse && !desc.ContainsKeyRange(from, desc.EndKey)) {
				evictDesc()
				continue
			}

			curReply, err = func() (*proto.BatchResponse, error) {
				// Truncate the request to our current key range.
				untruncate, numActive, trErr := truncate(&ba, desc, from, to)
				if numActive == 0 {
					untruncate()
					// This shouldn't happen in the wild, but some tests
					// exercise it.
					return nil, util.Errorf("truncation resulted in empty batch on [%s,%s): %s",
						from, to, ba)
				}
				defer untruncate()
				if trErr != nil {
					return nil, trErr
				}
				// TODO(tschottdorf): make key range on batch redundant. The
				// requests within dictate it anyways.
				ba.Key, ba.EndKey = keys.Range(ba)
				reply, err := ds.sendAttempt(trace, ba, desc)
				ba.Key, ba.EndKey = nil, nil

				if err != nil {
					if log.V(0 /* TODO(tschottdorf): 1 */) {
						log.Warningf("failed to invoke %s: %s", ba, err)
					}
				}
				return reply, err
			}()
			// If sending succeeded, break this loop.
			if err == nil {
				break
			}

			// Error handling below.
			// If retryable, allow retry. For range not found or range
			// key mismatch errors, we don't backoff on the retry,
			// but reset the backoff loop so we can retry immediately.
			switch tErr := err.(type) {
			case *rpc.SendError:
				// For an RPC error to occur, we must've been unable to contact
				// any replicas. In this case, likely all nodes are down (or
				// not getting back to us within a reasonable amount of time).
				// We may simply not be trying to talk to the up-to-date
				// replicas, so clearing the descriptor here should be a good
				// idea.
				// TODO(tschottdorf): If a replica group goes dead, this
				// will cause clients to put high read pressure on the first
				// range, so there should be some rate limiting here.
				evictDesc()
				if tErr.CanRetry() {
					continue
				}
			case *proto.RangeNotFoundError, *proto.RangeKeyMismatchError:
				trace.Event(fmt.Sprintf("reply error: %T", err))
				// Range descriptor might be out of date - evict it.
				evictDesc()
				// On addressing errors, don't backoff; retry immediately.
				r.Reset()
				if log.V(1) {
					log.Warning(err)
				}
				// For the remainder of this call, we'll assume that intents
				// are fair game. This replaces more complex logic based on
				// the type of request.
				options.considerIntents = true
				continue
			case *proto.NotLeaderError:
				trace.Event(fmt.Sprintf("reply error: %T", err))
				newLeader := tErr.GetLeader()
				// Verify that leader is a known replica according to the
				// descriptor. If not, we've got a stale replica; evict cache.
				// Next, cache the new leader.
				if newLeader != nil {
					if i, _ := desc.FindReplica(newLeader.StoreID); i == -1 {
						if log.V(1) {
							log.Infof("error indicates unknown leader %s, expunging descriptor %s", newLeader, desc)
						}
						evictDesc()
					}
				} else {
					newLeader = &proto.Replica{}
				}
				ds.updateLeaderCache(proto.RangeID(desc.RangeID), *newLeader)
				if log.V(1) {
					log.Warning(err)
				}
				r.Reset()
				continue
			case retry.Retryable:
				if tErr.CanRetry() {
					if log.V(1) {
						log.Warning(err)
					}
					trace.Event(fmt.Sprintf("reply error: %T", err))
					continue
				}
			}
			break
		}

		// Immediately return if querying a range failed non-retryably.
		if err != nil {
			return nil, err
		}

		first := br == nil
		if first {
			// First response from a Range.
			br = curReply
		} else {
			// This was the second or later call in a cross-Range request.
			// Combine the new response with the existing one.
			if err := br.Combine(curReply); err != nil {
				panic(err)
				// TODO(tschottdorf): return nil, err
			}
		}

		// If this request has a bound (such as MaxResults in
		// ScanRequest) and we are going to query at least one more range,
		// check whether enough rows have been retrieved.
		// TODO(tschottdorf): need tests for executing a multi-range batch
		// with various bounded requests which saturate at different times.
		if needAnother {
			// Start with the assumption that all requests are saturated.
			// Below, we look at each and decide whether that's true.
			// Everything that is indeed saturated is "masked out" from the
			// batch request; only if that's all requests does needAnother
			// remain false.
			needAnother = false
			if first {
				// Clone ba.Requests. This is because we're multi-range, and
				// some requests may be bounded, which could lead to them being
				// masked out once they're saturated. We don't want to risk
				// removing requests that way in the "master copy" since that
				// could lead to omitting requests in certain retry scenarios.
				ba.Requests = append([]proto.RequestUnion(nil), ba.Requests...)
			}
			for i, union := range ba.Requests {
				args := union.GetValue()
				if _, ok := args.(*proto.NoopRequest); ok {
					// NoopRequests are skipped.
					continue
				}
				boundedArg, ok := args.(proto.Bounded)
				if !ok {
					// Non-bounded request. We will have to query all ranges.
					needAnother = true
					continue
				}
				prevBound := boundedArg.GetBound()
				cReply, ok := curReply.Responses[i].GetValue().(proto.Countable)
				if !ok || prevBound <= 0 {
					// Request bounded, but without max results. Again, will
					// need to query everything we can. The case in which the reply
					// isn't countable occurs when the request wasn't active for
					// that range (since it didn't apply to it), so the response
					// is a NoopResponse.
					needAnother = true
					continue
				}
				nextBound := prevBound - cReply.Count()
				if nextBound <= 0 {
					// We've hit max results for this piece of the batch. Mask
					// it out (we've copied the requests slice above, so this
					// is kosher).
					ba.Requests[i].Reset() // necessary (no one-of?)
					if !ba.Requests[i].SetValue(&proto.NoopRequest{}) {
						panic("RequestUnion excludes NoopRequest")
					}
					continue
				}
				// The request isn't saturated yet.
				needAnother = true
				boundedArg.SetBound(nextBound)
			}
		}

		// If this was the last range accessed by this call, exit loop.
		if !needAnother {
			return br, nil
		}

		if isReverse {
			// In next iteration, query previous range.
			// We use the StartKey of the current descriptor as opposed to the
			// EndKey of the previous one since that doesn't have bugs when
			// stale descriptors come into play.
			to = prev(ba, desc.StartKey)
		} else {
			// In next iteration, query next range.
			// It's important that we use the EndKey of the current descriptor
			// as opposed to the StartKey of the next one: if the former is stale,
			// it's possible that the next range has since merged the subsequent
			// one, and unless both descriptors are stale, the next descriptor's
			// StartKey would move us to the beginning of the current range,
			// resulting in a duplicate scan.
			from = next(ba, desc.EndKey)
		}
		trace.Event("querying next range")
	}
}
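// Standalone sketch (assumption, not from the source) of the bound accounting
// in sendChunk: a bounded request (say, a Scan with MaxResults=10) has its
// bound reduced by each range's row count and is masked out as a no-op once
// the bound is exhausted.
package main

import "fmt"

func main() {
	bound := int64(10)         // stand-in for boundedArg.GetBound()
	counts := []int64{4, 3, 5} // rows returned by successive ranges
	for i, c := range counts {
		bound -= c // nextBound := prevBound - cReply.Count()
		if bound <= 0 {
			fmt.Printf("saturated after range %d; mask request out as NoopRequest\n", i+1)
			break
		}
		fmt.Printf("after range %d: %d results still needed\n", i+1, bound)
	}
}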
// updateState updates the transaction state in both the success and
// error cases, applying those updates to the corresponding txnMeta
// object when adequate. It also updates certain errors with the
// updated transaction for use by client restarts.
func (tc *TxnCoordSender) updateState(ctx context.Context, ba proto.BatchRequest, br *proto.BatchResponse, pErr *proto.Error) *proto.Error {
	trace := tracer.FromCtx(ctx)
	newTxn := &proto.Transaction{}
	newTxn.Update(ba.GetTxn())
	err := pErr.GoError()
	switch t := err.(type) {
	case nil:
		newTxn.Update(br.GetTxn())
		// Move txn timestamp forward to response timestamp if applicable.
		// TODO(tschottdorf): see (*Replica).executeBatch and comments within.
		// Looks like this isn't necessary any more, nor did it prevent a bug
		// referenced in a TODO there.
		newTxn.Timestamp.Forward(br.Timestamp)
	case *proto.TransactionStatusError:
		// Likely already committed or more obscure errors such as epoch or
		// timestamp regressions; consider txn dead.
		defer tc.cleanupTxn(trace, t.Txn)
	case *proto.OpRequiresTxnError:
		// TODO(tschottdorf): range-spanning autowrap currently broken.
		panic("TODO(tschottdorf): disabled")
	case *proto.ReadWithinUncertaintyIntervalError:
		// Mark the host as certain. See the protobuf comment for
		// Transaction.CertainNodes for details.
		if t.NodeID == 0 {
			panic("no replica set in header on uncertainty restart")
		}
		newTxn.CertainNodes.Add(t.NodeID)
		// If the reader encountered a newer write within the uncertainty
		// interval, move the timestamp forward, just past that write or
		// up to MaxTimestamp, whichever comes first.
		candidateTS := newTxn.MaxTimestamp
		candidateTS.Backward(t.ExistingTimestamp.Add(0, 1))
		newTxn.Timestamp.Forward(candidateTS)
		newTxn.Restart(ba.GetUserPriority(), newTxn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case *proto.TransactionAbortedError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Priority = t.Txn.Priority
		t.Txn = *newTxn
		// Clean up the freshly aborted transaction in defer(), avoiding a
		// race with the state update below.
		defer tc.cleanupTxn(trace, t.Txn)
	case *proto.TransactionPushError:
		// Increase timestamp if applicable, ensuring that we're
		// just ahead of the pushee.
		newTxn.Timestamp.Forward(t.PusheeTxn.Timestamp.Add(0, 1))
		newTxn.Restart(ba.GetUserPriority(), t.PusheeTxn.Priority-1, newTxn.Timestamp)
		t.Txn = newTxn
	case *proto.TransactionRetryError:
		// Increase timestamp if applicable.
		newTxn.Timestamp.Forward(t.Txn.Timestamp)
		newTxn.Restart(ba.GetUserPriority(), t.Txn.Priority, newTxn.Timestamp)
		t.Txn = *newTxn
	case proto.TransactionRestartError:
		// Assertion: The above cases should exhaust all ErrorDetails which
		// carry a Transaction.
		if pErr.Detail != nil {
			panic(fmt.Sprintf("unhandled TransactionRestartError %T", err))
		}
	}

	return func() *proto.Error {
		if len(newTxn.ID) <= 0 {
			return pErr
		}
		id := string(newTxn.ID)
		tc.Lock()
		defer tc.Unlock()
		txnMeta := tc.txns[id]
		// For successful transactional requests, keep the written intents and
		// the updated transaction record to be sent along with the reply.
		// The transaction metadata is created with the first writing operation.
		// TODO(tschottdorf): already computed the intents prior to sending,
		// consider re-using those.
		if intents := ba.GetIntents(); len(intents) > 0 && err == nil {
			if txnMeta == nil {
				newTxn.Writing = true
				txnMeta = &txnMetadata{
					txn:              *newTxn,
					keys:             cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
					firstUpdateNanos: tc.clock.PhysicalNow(),
					lastUpdateNanos:  tc.clock.PhysicalNow(),
					timeoutDuration:  tc.clientTimeout,
					txnEnd:           make(chan struct{}),
				}
				tc.txns[id] = txnMeta
				// If the transaction is already over, there's no point in
				// launching a one-off coordinator which will shut down right
				// away.
				if _, isEnding := ba.GetArg(proto.EndTransaction); !isEnding {
					trace.Event("coordinator spawns")
					if !tc.stopper.RunAsyncTask(func() {
						tc.heartbeatLoop(id)
					}) {
						// The system is already draining and we can't start the
						// heartbeat. We refuse new transactions for now because
						// they're likely not going to have all intents committed.
						// In principle, we can relax this as needed though.
						tc.unregisterTxnLocked(id)
						return proto.NewError(&proto.NodeUnavailableError{})
					}
				}
			}
			for _, intent := range intents {
				txnMeta.addKeyRange(intent.Key, intent.EndKey)
			}
		}
		// Update our record of this transaction, even on error.
		if txnMeta != nil {
			txnMeta.txn.Update(newTxn) // better to replace after #2300
			if !txnMeta.txn.Writing {
				panic("tracking a non-writing txn")
			}
			txnMeta.setLastUpdate(tc.clock.PhysicalNow())
		}
		if err == nil {
			// For successful transactional requests, always send the updated txn
			// record back.
			if br.Txn == nil {
				br.Txn = &proto.Transaction{}
			}
			*br.Txn = *newTxn
		}
		return pErr
	}()
}
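// Standalone sketch (assumption, not from the source) of the uncertainty
// restart arithmetic in updateState, with concrete numbers: a txn at
// timestamp 10 with MaxTimestamp 15 reads a conflicting write at 12; the
// candidate is capped at min(MaxTimestamp, 12+1) = 13 and the txn timestamp
// ratchets forward to 13.
package main

import "fmt"

func main() {
	txnTS, maxTS := int64(10), int64(15) // stand-ins for Timestamp, MaxTimestamp
	existing := int64(12)                // conflicting write's timestamp

	candidate := existing + 1 // just past the conflicting write
	if candidate > maxTS {
		candidate = maxTS // candidateTS.Backward(...): never past MaxTimestamp
	}
	if candidate > txnTS {
		txnTS = candidate // newTxn.Timestamp.Forward(candidateTS): only moves up
	}
	fmt.Println("restart timestamp:", txnTS) // restart timestamp: 13
}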
// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
	tc.maybeBeginTxn(&ba)
	ba.CmdID = ba.GetOrCreateCmdID(tc.clock.PhysicalNow())
	var startNS int64

	// This is the earliest point at which the request has a ClientCmdID and/or
	// TxnID (if applicable). Begin a Trace which follows this request.
	trace := tc.tracer.NewTrace(&ba)
	defer trace.Finalize()
	// TODO(tschottdorf): always "Batch"
	defer trace.Epoch(fmt.Sprintf("sending %s", ba.Method()))()
	ctx = tracer.ToCtx(ctx, trace)

	// TODO(tschottdorf): No looping through the batch will be necessary once
	// we've eliminated all the redundancies.
	for _, arg := range ba.Requests {
		trace.Event(fmt.Sprintf("%T", arg.GetValue()))
		if err := updateForBatch(arg.GetInner(), ba.RequestHeader); err != nil {
			return nil, proto.NewError(err)
		}
	}

	var id string // optional transaction ID
	if ba.Txn != nil {
		// If this request is part of a transaction...
		id = string(ba.Txn.ID)
		// Verify that if this Transaction is not read-only, we have it on
		// file. If not, refuse writes - the client must have issued a write on
		// another coordinator previously.
		if ba.Txn.Writing && ba.IsTransactionWrite() {
			tc.Lock()
			_, ok := tc.txns[id]
			tc.Unlock()
			if !ok {
				return nil, proto.NewError(util.Errorf("transaction must not write on multiple coordinators"))
			}
		}

		// Set the timestamp to the original timestamp for read-only
		// commands and to the transaction timestamp for read/write
		// commands.
		if ba.IsReadOnly() {
			ba.Timestamp = ba.Txn.OrigTimestamp
		} else {
			ba.Timestamp = ba.Txn.Timestamp
		}

		if rArgs, ok := ba.GetArg(proto.EndTransaction); ok {
			et := rArgs.(*proto.EndTransactionRequest)
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
			if len(et.Intents) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, proto.NewError(util.Errorf("client must not pass intents to EndTransaction"))
			}
			if len(et.Key) != 0 {
				return nil, proto.NewError(util.Errorf("EndTransaction must not have a Key set"))
			}
			et.Key = ba.Txn.Key

			tc.Lock()
			txnMeta, metaOK := tc.txns[id]
			if id != "" && metaOK {
				et.Intents = txnMeta.intents()
			}
			tc.Unlock()

			if intents := ba.GetIntents(); len(intents) > 0 {
				// Writes in Batch, so EndTransaction is fine. Should add
				// outstanding intents to EndTransaction, though.
				// TODO(tschottdorf): possible issues when the batch fails,
				// but the intents have been added anyways.
				// TODO(tschottdorf): some of these intents may be covered
				// by others, for example ([a,b), a). This can lead to
				// some extra requests when those are non-local to the txn
				// record. But it doesn't seem worth optimizing now.
				et.Intents = append(et.Intents, intents...)
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could look up the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				return nil, proto.NewError(util.Errorf("transaction is already committed or aborted"))
			}
			if len(et.Intents) == 0 {
				// If there aren't any intents, then there's factually no
				// transaction to end. Read-only txns have all of their state in
				// the client.
				return nil, proto.NewError(util.Errorf("cannot commit a read-only transaction"))
			}
			// TODO(tschottdorf): V(1)
			for _, intent := range et.Intents {
				trace.Event(fmt.Sprintf("intent: [%s,%s)", intent.Key, intent.EndKey))
			}
		}
	}

	// Send the command through wrapped sender, taking appropriate measures
	// on error.
	var br *proto.BatchResponse
	{
		var pErr *proto.Error
		br, pErr = tc.wrapped.Send(ctx, ba)

		if _, ok := pErr.GoError().(*proto.OpRequiresTxnError); ok {
			br, pErr = tc.resendWithTxn(ba)
		}

		if pErr := tc.updateState(ctx, ba, br, pErr); pErr != nil {
			return nil, pErr
		}
	}

	if br.Txn == nil {
		return br, nil
	}

	if _, ok := ba.GetArg(proto.EndTransaction); !ok {
		return br, nil
	}
	// If the --linearizable flag is set, we want to make sure that
	// all the clocks in the system are past the commit timestamp
	// of the transaction. This is guaranteed if either
	// - the commit timestamp is MaxOffset behind startNS
	// - MaxOffset ns were spent in this function
	// when returning to the client. Below we choose the option
	// that involves less waiting, which is likely the first one
	// unless a transaction commits with an odd timestamp.
	if tsNS := br.Txn.Timestamp.WallTime; startNS > tsNS {
		startNS = tsNS
	}
	sleepNS := tc.clock.MaxOffset() -
		time.Duration(tc.clock.PhysicalNow()-startNS)
	if tc.linearizable && sleepNS > 0 {
		defer func() {
			if log.V(1) {
				log.Infof("%v: waiting %s on EndTransaction for linearizability",
					br.Txn.Short(), util.TruncateDuration(sleepNS, time.Millisecond))
			}
			time.Sleep(sleepNS)
		}()
	}
	if br.Txn.Status != proto.PENDING {
		tc.cleanupTxn(trace, *br.Txn)
	}
	return br, nil
}
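// Standalone sketch (assumption, not from the source) of the linearizability
// wait computed at the end of Send: with MaxOffset = 250ms and 100ms already
// elapsed since the (possibly clamped) start time, the coordinator sleeps the
// remaining 150ms; if 250ms or more have elapsed, it returns immediately.
package main

import (
	"fmt"
	"time"
)

func main() {
	maxOffset := 250 * time.Millisecond
	elapsed := 100 * time.Millisecond // stand-in for PhysicalNow() - startNS
	sleepNS := maxOffset - elapsed    // mirrors the sleepNS computation above
	if sleepNS > 0 {
		fmt.Println("sleeping", sleepNS, "before returning to the client")
	}
}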
func wrap(args proto.Request) proto.BatchRequest {
	var ba proto.BatchRequest
	ba.Add(args)
	return ba
}
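// Illustrative usage (assumption, not from the source): wrap lifts a single
// request into a one-element BatchRequest for call sites that only accept
// batches, e.g.:
//
//	get := &proto.GetRequest{}
//	get.Key = proto.Key("a")
//	ba := wrap(get)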