// SendWrapped is a convenience function which wraps the request in a batch,
// sends it via the provided Sender, and returns the unwrapped response
// or an error.
func SendWrapped(sender client.Sender, args proto.Request) (proto.Response, error) {
	ba, unwrap := maybeWrap(args)
	br, pErr := sender.Send(context.TODO(), *ba)
	if err := pErr.GoError(); err != nil {
		return nil, err
	}
	return unwrap(br), nil
}
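// Hypothetical usage sketch (not part of the original source): assuming a
// client.Sender and a key already encoded as a proto.Key, a caller can issue
// a single request without dealing with batch wrapping or unwrapping. The
// GetRequest construction follows the proto.RequestHeader pattern used in the
// functions below and should be read as an assumption, not the exact API.
func exampleSendWrapped(sender client.Sender, key proto.Key) (proto.Response, error) {
	get := &proto.GetRequest{
		RequestHeader: proto.RequestHeader{Key: key},
	}
	// SendWrapped wraps the request in a batch, sends it, and returns the
	// typed, unwrapped response.
	return SendWrapped(sender, get)
}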
// InitSenderForLocalTestCluster initializes a TxnCoordSender that can be used
// with LocalTestCluster.
func InitSenderForLocalTestCluster(
	nodeDesc *roachpb.NodeDescriptor,
	tracer opentracing.Tracer,
	clock *hlc.Clock,
	latency time.Duration,
	stores client.Sender,
	stopper *stop.Stopper,
	gossip *gossip.Gossip,
) client.Sender {
	var rpcSend rpcSendFn = func(_ SendOptions, _ ReplicaSlice,
		args roachpb.BatchRequest, _ *rpc.Context) (*roachpb.BatchResponse, error) {
		if latency > 0 {
			time.Sleep(latency)
		}
		sp := tracer.StartSpan("node")
		defer sp.Finish()
		ctx := opentracing.ContextWithSpan(context.Background(), sp)
		log.Trace(ctx, args.String())
		br, pErr := stores.Send(ctx, args)
		if br == nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(stores, br))
		}
		br.Error = pErr
		if pErr != nil {
			log.Trace(ctx, "error: "+pErr.String())
		}
		return br, nil
	}

	retryOpts := GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	distSender := NewDistSender(&DistSenderContext{
		Clock:                    clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,                    // defined above
		RangeDescriptorDB:        stores.(RangeDescriptorDB), // for descriptor lookup
	}, gossip)

	return NewTxnCoordSender(distSender, clock, false /* !linearizable */, tracer,
		stopper, NewTxnMetrics(metric.NewRegistry()))
}
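// Hypothetical usage sketch (not part of the original source): a local test
// cluster wires its in-memory stores through the helper above and hands the
// resulting sender to a client DB. The client.NewDB call and the zero
// latency value are assumptions for illustration only.
func exampleLocalTestSender(
	nodeDesc *roachpb.NodeDescriptor,
	tracer opentracing.Tracer,
	clock *hlc.Clock,
	stores client.Sender,
	stopper *stop.Stopper,
	g *gossip.Gossip,
) *client.DB {
	sender := InitSenderForLocalTestCluster(
		nodeDesc, tracer, clock, 0 /* no simulated latency */, stores, stopper, g)
	return client.NewDB(sender)
}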
// close sends resolve intent commands for all key ranges this
// transaction has covered, clears the keys cache and closes the
// metadata heartbeat. Any keys listed in the resolved slice have
// already been resolved and do not receive resolve intent commands.
func (tm *txnMetadata) close(txn *proto.Transaction, resolved []proto.Key, sender client.Sender, stopper *util.Stopper) {
	close(tm.txnEnd) // stop heartbeat
	if tm.keys.Len() > 0 {
		if log.V(2) {
			log.Infof("cleaning up %d intent(s) for transaction %s", tm.keys.Len(), txn)
		}
	}
	for _, o := range tm.keys.GetOverlaps(proto.KeyMin, proto.KeyMax) {
		// If the op was range based, end key != start key: resolve a range.
		var call proto.Call
		key := o.Key.Start().(proto.Key)
		endKey := o.Key.End().(proto.Key)
		if !key.Next().Equal(endKey) {
			call.Args = &proto.InternalResolveIntentRangeRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					EndKey:    endKey,
					User:      storage.UserRoot,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentRangeResponse{}
		} else {
			// Check if the key has already been resolved; skip if yes.
			found := false
			for _, k := range resolved {
				if key.Equal(k) {
					found = true
				}
			}
			if found {
				continue
			}
			call.Args = &proto.InternalResolveIntentRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					User:      storage.UserRoot,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentResponse{}
		}
		// We don't care about the reply channel; these are best
		// effort. We simply fire and forget, each in its own goroutine.
		if stopper.StartTask() {
			go func() {
				if log.V(2) {
					log.Infof("cleaning up intent %q for txn %s", call.Args.Header().Key, txn)
				}
				sender.Send(context.TODO(), call)
				if call.Reply.Header().Error != nil {
					log.Warningf("failed to cleanup %q intent: %s", call.Args.Header().Key, call.Reply.Header().GoError())
				}
				stopper.FinishTask()
			}()
		}
	}
	tm.keys.Clear()
}
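// Hypothetical caller sketch (not part of the original source): when a
// transaction finishes, the coordinator fires this best-effort cleanup and
// moves on. close returns immediately; the per-intent goroutines it spawns
// are registered with the stopper. The resolved slice would typically carry
// keys that were already resolved server-side. All names here are
// illustrative only.
func exampleBestEffortCleanup(txnMeta *txnMetadata, txn *proto.Transaction,
	resolved []proto.Key, sender client.Sender, stopper *util.Stopper) {
	txnMeta.close(txn, resolved, sender, stopper)
}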
// close sends resolve intent commands for all key ranges this
// transaction has covered, clears the keys cache and closes the
// metadata heartbeat. Any keys listed in the resolved slice have
// already been resolved and do not receive resolve intent commands.
func (tm *txnMetadata) close(trace *tracer.Trace, txn *proto.Transaction, resolved []proto.Key, sender client.Sender, stopper *stop.Stopper) {
	close(tm.txnEnd) // stop heartbeat
	trace.Event("coordinator stops")
	if tm.keys.Len() > 0 {
		if log.V(2) {
			log.Infof("cleaning up %d intent(s) for transaction %s", tm.keys.Len(), txn)
		}
	}
	// TODO(tschottdorf): Should create a Batch here.
	for _, o := range tm.keys.GetOverlaps(proto.KeyMin, proto.KeyMax) {
		// If the op was range based, end key != start key: resolve a range.
		var call proto.Call
		key := o.Key.Start().(proto.Key)
		endKey := o.Key.End().(proto.Key)
		if !key.Next().Equal(endKey) {
			call.Args = &proto.InternalResolveIntentRangeRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					EndKey:    endKey,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentRangeResponse{}
		} else {
			// Check if the key has already been resolved; skip if yes.
			found := false
			for _, k := range resolved {
				if key.Equal(k) {
					found = true
				}
			}
			if found {
				continue
			}
			call.Args = &proto.InternalResolveIntentRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentResponse{}
		}
		// We don't care about the reply channel; these are best
		// effort. We simply fire and forget, each in its own goroutine.
		ctx := tracer.ToCtx(context.Background(), trace.Fork())
		stopper.RunAsyncTask(func() {
			if log.V(2) {
				log.Infof("cleaning up intent %q for txn %s", call.Args.Header().Key, txn)
			}
			sender.Send(ctx, call)
			if call.Reply.Header().Error != nil {
				log.Warningf("failed to cleanup %q intent: %s", call.Args.Header().Key, call.Reply.Header().GoError())
			}
		})
	}
	tm.keys.Clear()
}
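// Illustration (not part of the original source) of the point-vs-range check
// used above: the keys cache records a point write on key k as the interval
// [k, k.Next()), so an end key equal to start.Next() means "single key" and
// anything wider is resolved with a ranged request. The helper name is
// hypothetical.
func exampleIsRangedIntent(start, end proto.Key) bool {
	// true:  resolve with InternalResolveIntentRangeRequest
	// false: resolve with InternalResolveIntentRequest
	return !start.Next().Equal(end)
}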
// resolve sends resolve intent commands for all key ranges this transaction
// has covered. Any keys listed in the resolved slice have already been
// resolved and are skipped.
func (tm *txnMetadata) resolve(trace *tracer.Trace, resolved []proto.Key, sender client.Sender) {
	txn := &tm.txn
	if tm.keys.Len() > 0 {
		if log.V(2) {
			log.Infof("cleaning up %d intent(s) for transaction %s", tm.keys.Len(), txn)
		}
	}
	// TODO(tschottdorf): Should create a Batch here. However, we're resolving
	// intents and if those are on meta records, there may be a certain order
	// in which they need to be resolved so that they can get routed to the
	// correct range. Since a batch runs its commands one by one and we don't
	// know the correct order, we prefer to fire them off in parallel.
	var wg sync.WaitGroup
	for _, o := range tm.keys.GetOverlaps(proto.KeyMin, proto.KeyMax) {
		// If the op was range based, end key != start key: resolve a range.
		var call proto.Call
		key := o.Key.Start().(proto.Key)
		endKey := o.Key.End().(proto.Key)
		if !key.Next().Equal(endKey) {
			call.Args = &proto.InternalResolveIntentRangeRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					EndKey:    endKey,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentRangeResponse{}
		} else {
			// Check if the key has already been resolved; skip if yes.
			found := false
			for _, k := range resolved {
				if key.Equal(k) {
					if log.V(2) {
						log.Warningf("skipping previously resolved intent at %q", k)
					}
					found = true
				}
			}
			if found {
				continue
			}
			call.Args = &proto.InternalResolveIntentRequest{
				RequestHeader: proto.RequestHeader{
					Timestamp: txn.Timestamp,
					Key:       key,
					User:      security.RootUser,
					Txn:       txn,
				},
			}
			call.Reply = &proto.InternalResolveIntentResponse{}
		}
		ctx := tracer.ToCtx(context.Background(), trace.Fork())
		if log.V(2) {
			log.Infof("cleaning up intent %q for txn %s", call.Args.Header().Key, txn)
		}
		// Each operation gets its own goroutine. We only want to return to
		// the caller after the operations have finished.
		wg.Add(1)
		go func() {
			sender.Send(ctx, call)
			wg.Done()
			if call.Reply.Header().Error != nil {
				log.Warningf("failed to cleanup %q intent: %s", call.Args.Header().Key, call.Reply.Header().GoError())
			}
		}()
	}
	defer trace.Epoch("waiting for intent resolution")()
	wg.Wait()
	tm.keys.Clear()
}
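// Hypothetical caller sketch (not part of the original source): unlike the
// fire-and-forget close above, resolve blocks until every resolve-intent RPC
// has returned, so a coordinator can safely drop the transaction's metadata
// entry right after it returns. Names below are illustrative only.
func exampleResolveThenForget(trace *tracer.Trace, txnMeta *txnMetadata,
	resolved []proto.Key, sender client.Sender) {
	txnMeta.resolve(trace, resolved, sender)
	// At this point all resolve-intent sends have completed and
	// txnMeta.keys has been cleared.
}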