// process iterates through all keys in a replica's range, calling the garbage
// collector for each key and associated set of values. GC'd keys are batched
// into GC calls. Extant intents are resolved if they are older than
// intentAgeThreshold. The transaction and abort cache records are also
// scanned and old entries evicted. During normal operation, both of these
// records are cleaned up when their respective transaction finishes, so the
// amount of work done here is expected to be small.
//
// Some care needs to be taken to avoid cyclic recreation of entries during GC:
// * a Push initiated due to an intent may recreate a transaction entry
// * resolving an intent may write a new abort cache entry
// * obtaining the transaction for an abort cache entry requires a Push
//
// The following order is taken below:
// 1) collect all intents with sufficiently old txn record
// 2) collect these intents' transactions
// 3) scan the transaction table, collecting abandoned or completed txns
// 4) push all of these transactions (possibly recreating entries)
// 5) resolve all intents (unless the txn is still PENDING), which will recreate
//    abort cache entries (but with the txn timestamp; i.e. likely gc'able)
// 6) scan the abort cache table for old entries
// 7) push these transactions (again, recreating txn entries)
// 8) send a GCRequest
func (gcq *gcQueue) process(
	ctx context.Context, now hlc.Timestamp, repl *Replica, sysCfg config.SystemConfig,
) error {
	snap := repl.store.Engine().NewSnapshot()
	desc := repl.Desc()
	defer snap.Close()

	// Lookup the GC policy for the zone containing this key range.
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return errors.Errorf("could not find zone config for range %s: %s", repl, err)
	}

	gcKeys, info, err := RunGC(ctx, desc, snap, now, zone.GC,
		func(now hlc.Timestamp, txn *roachpb.Transaction, typ roachpb.PushTxnType) {
			pushTxn(ctx, gcq.store.DB(), now, txn, typ)
		},
		func(intents []roachpb.Intent, poison bool, wait bool) error {
			return repl.store.intentResolver.resolveIntents(ctx, intents, poison, wait)
		})
	if err != nil {
		return err
	}

	log.VEventf(ctx, 1, "completed with stats %+v", info)

	info.updateMetrics(gcq.store.metrics)

	var ba roachpb.BatchRequest
	var gcArgs roachpb.GCRequest
	// TODO(tschottdorf): This is one of these instances in which we want
	// to be more careful that the request ends up on the correct Replica,
	// and we might have to worry about mixing range-local and global keys
	// in a batch which might end up spanning Ranges by the time it executes.
	gcArgs.Key = desc.StartKey.AsRawKey()
	gcArgs.EndKey = desc.EndKey.AsRawKey()
	gcArgs.Keys = gcKeys
	gcArgs.Threshold = info.Threshold
	gcArgs.TxnSpanGCThreshold = info.TxnSpanGCThreshold

	// Technically not needed since we're talking directly to the Range.
	ba.RangeID = desc.RangeID
	ba.Timestamp = now
	ba.Add(&gcArgs)
	if _, pErr := repl.Send(ctx, ba); pErr != nil {
		log.ErrEvent(ctx, pErr.String())
		return pErr.GoError()
	}
	return nil
}
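
// The GC threshold itself is computed inside RunGC, but the core rule can be
// sketched in isolation: with a zone GC TTL expressed in seconds, any MVCC
// version older than now minus the TTL is a candidate for collection. The
// sketch below is illustrative only; gcThresholdSketch is not a real helper
// in this package, gcTTLSeconds stands in for the zone's TTL field, and the
// standard "time" package import is assumed.
func gcThresholdSketch(now hlc.Timestamp, gcTTLSeconds int32) hlc.Timestamp {
	// Convert the TTL to nanoseconds and subtract it from the current wall
	// time; versions at or below the resulting timestamp are GC-able.
	ttlNanos := int64(gcTTLSeconds) * int64(time.Second)
	return hlc.Timestamp{WallTime: now.WallTime - ttlNanos}
}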
// sendToReplicas sends one or more RPCs to clients specified by the
// slice of replicas. On success, Send returns the first successful
// reply. If an error occurs which is not specific to a single
// replica, it's returned immediately. Otherwise, when all replicas
// have been tried and have failed, a send error is returned.
func (ds *DistSender) sendToReplicas(
	opts SendOptions,
	rangeID roachpb.RangeID,
	replicas ReplicaSlice,
	args roachpb.BatchRequest,
	rpcContext *rpc.Context,
) (*roachpb.BatchResponse, error) {
	if len(replicas) < 1 {
		return nil, roachpb.NewSendError(
			fmt.Sprintf("insufficient replicas (%d) to satisfy send request of %d",
				len(replicas), 1))
	}

	var ambiguousResult bool
	var haveCommit bool
	// We only check for committed txns, not aborts, because aborts may
	// be retried without any risk of inconsistencies.
	if etArg, ok := args.GetArg(roachpb.EndTransaction); ok &&
		etArg.(*roachpb.EndTransactionRequest).Commit {
		haveCommit = true
	}
	done := make(chan BatchCall, len(replicas))

	transportFactory := opts.transportFactory
	if transportFactory == nil {
		transportFactory = grpcTransportFactory
	}
	transport, err := transportFactory(opts, rpcContext, replicas, args)
	if err != nil {
		return nil, err
	}
	defer transport.Close()
	if transport.IsExhausted() {
		return nil, roachpb.NewSendError(
			fmt.Sprintf("sending to all %d replicas failed", len(replicas)))
	}

	// Send the first request.
	pending := 1
	log.VEventf(opts.ctx, 2, "sending RPC for batch: %s", args.Summary())
	transport.SendNext(done)

	// Wait for completions. This loop will retry operations that fail
	// with errors that reflect per-replica state and may succeed on
	// other replicas.
	var sendNextTimer timeutil.Timer
	defer sendNextTimer.Stop()
	for {
		sendNextTimer.Reset(opts.SendNextTimeout)
		select {
		case <-sendNextTimer.C:
			sendNextTimer.Read = true
			// On successive RPC timeouts, send to additional replicas if available.
			if !transport.IsExhausted() {
				log.VEventf(opts.ctx, 2, "timeout, trying next peer")
				pending++
				transport.SendNext(done)
			}

		case call := <-done:
			pending--
			err := call.Err
			if err == nil {
				if log.V(2) {
					log.Infof(opts.ctx, "RPC reply: %s", call.Reply)
				} else if log.V(1) && call.Reply.Error != nil {
					log.Infof(opts.ctx, "application error: %s", call.Reply.Error)
				}

				if call.Reply.Error == nil {
					return call.Reply, nil
				} else if !ds.handlePerReplicaError(opts.ctx, transport, rangeID, call.Reply.Error) {
					// The error received is not specific to this replica, so we
					// should return it instead of trying other replicas. However,
					// if we're trying to commit a transaction and there are
					// still other RPCs outstanding or an ambiguous RPC error
					// was already received, we must return an ambiguous commit
					// error instead of the returned error.
					if haveCommit && (pending > 0 || ambiguousResult) {
						return nil, roachpb.NewAmbiguousResultError()
					}
					return call.Reply, nil
				}

				// Extract the detail so it can be included in the error
				// message if this is our last replica.
				//
				// TODO(bdarnell): The last error is not necessarily the best
				// one to return; we may want to remember the "best" error
				// we've seen (for example, a NotLeaseHolderError conveys more
				// information than a RangeNotFound).
				err = call.Reply.Error.GoError()
			} else {
				if log.V(1) {
					log.Warningf(opts.ctx, "RPC error: %s", err)
				}

				// All connection errors, except for an unavailable node (this
				// is GRPC's fail-fast error), may mean that the request
				// succeeded on the remote server, but we were unable to
				// receive the reply. Set the ambiguous commit flag.
				//
				// We retry ambiguous commit batches to avoid returning the
				// unrecoverable AmbiguousResultError. This is safe because
				// repeating an already-successfully applied batch is
				// guaranteed to return either a TransactionReplayError (in
				// case the replay happens at the original leader), or a
				// TransactionRetryError (in case the replay happens at a new
				// leader). If the original attempt merely timed out or was
				// lost, then the batch will succeed and we can be assured the
				// commit was applied just once.
				//
				// The Unavailable code is used by GRPC to indicate that a
				// request fails fast and is not sent, so we can be sure there
				// is no ambiguity on these errors. Note that these are common
				// if a node is down.
				// See https://github.com/grpc/grpc-go/blob/52f6504dc290bd928a8139ba94e3ab32ed9a6273/call.go#L182
				// See https://github.com/grpc/grpc-go/blob/52f6504dc290bd928a8139ba94e3ab32ed9a6273/stream.go#L158
				if haveCommit && grpc.Code(err) != codes.Unavailable {
					ambiguousResult = true
				}
			}

			// Send to additional replicas if available.
			if !transport.IsExhausted() {
				log.VEventf(opts.ctx, 2, "error, trying next peer: %s", err)
				pending++
				transport.SendNext(done)
			}
			if pending == 0 {
				if ambiguousResult {
					err = roachpb.NewAmbiguousResultError()
				} else {
					err = roachpb.NewSendError(
						fmt.Sprintf("sending to all %d replicas failed; last error: %v",
							len(replicas), err),
					)
				}
				if log.V(2) {
					log.ErrEvent(opts.ctx, err.Error())
				}
				return nil, err
			}
		}
	}
}
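
// The ambiguity rule applied in the RPC-error branch above can be stated as a
// standalone predicate: an RPC-level error on a batch carrying a commit makes
// the outcome ambiguous unless gRPC reports codes.Unavailable, in which case
// the request was never sent. The helper below is an illustrative sketch, not
// part of DistSender; it reuses the grpc and codes packages already used by
// the code above.
func commitOutcomeAmbiguous(haveCommit bool, rpcErr error) bool {
	if !haveCommit || rpcErr == nil {
		// Without a commit in the batch (or without an RPC error) there is
		// nothing ambiguous to report.
		return false
	}
	// Unavailable is gRPC's fail-fast error: the request never left the
	// client, so the commit definitely did not apply.
	return grpc.Code(rpcErr) != codes.Unavailable
}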