Example 1
// DecodeRaftCommand splits a raftpb.Entry.Data into its commandID and
// command portions. The caller is responsible for checking that the data
// is not empty (which indicates a dummy entry generated by raft rather
// than a real command). Usage is mostly internal to the storage package,
// but the function is exported for use by debugging tools.
func DecodeRaftCommand(data []byte) (storagebase.CmdIDKey, []byte) {
	if data[0]&raftCommandNoSplitMask != raftCommandEncodingVersion {
		panic(fmt.Sprintf("unknown command encoding version %v", data[0]))
	}
	return storagebase.CmdIDKey(data[1 : 1+raftCommandIDLen]), data[1+raftCommandIDLen:]
}
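
The decoder above assumes a fixed framing: a version byte whose high bit may carry a "no split" flag, then a fixed-length command ID, then the command itself. Below is a minimal, self-contained round-trip sketch of that framing; the constant values and the encode helper are assumptions for illustration, not the storage package's actual definitions.

// Sketch of the framing DecodeRaftCommand expects. All constants here are
// assumed stand-ins for the storage package's raftCommandEncodingVersion,
// raftCommandIDLen, raftCommandNoSplitBit, and raftCommandNoSplitMask.
package main

import "fmt"

const (
	versionByte  byte = 0              // assumed encoding version
	commandIDLen      = 8              // assumed command ID length
	noSplitBit   byte = 1 << 7         // assumed flag bit in the first byte
	noSplitMask  byte = noSplitBit - 1 // the low bits hold the version
)

// encode prefixes the payload with a version byte and the command ID,
// mirroring what an encodeRaftCommand counterpart would produce.
func encode(commandID string, command []byte) []byte {
	if len(commandID) != commandIDLen {
		panic("invalid command ID length")
	}
	data := make([]byte, 0, 1+commandIDLen+len(command))
	data = append(data, versionByte)
	data = append(data, commandID...)
	return append(data, command...)
}

// decode mirrors DecodeRaftCommand: strip the flag bit, check the
// version, then split off the command ID from the command.
func decode(data []byte) (string, []byte) {
	if data[0]&noSplitMask != versionByte {
		panic(fmt.Sprintf("unknown command encoding version %v", data[0]))
	}
	return string(data[1 : 1+commandIDLen]), data[1+commandIDLen:]
}

func main() {
	data := encode("abcdefgh", []byte("payload"))
	id, cmd := decode(data)
	fmt.Printf("id=%s cmd=%s\n", id, cmd) // id=abcdefgh cmd=payload
}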
Example 2
func (r *Replica) handleLocalProposalData(
	ctx context.Context, originReplica roachpb.ReplicaDescriptor, lpd LocalProposalData,
) (shouldAssert bool) {
	// Fields for which no action is taken in this method are zeroed so that
	// they don't trigger an assertion at the end of the method (which checks
	// that all fields were handled).
	{
		lpd.idKey = storagebase.CmdIDKey("")
		lpd.Batch = nil
		lpd.done = nil
		lpd.ctx = nil
		lpd.Err = nil
		lpd.proposedAtTicks = 0
		lpd.Reply = nil
	}

	// ======================
	// Non-state updates and actions.
	// ======================

	if originReplica.StoreID == r.store.StoreID() {
		// On the replica on which this command originated, resolve skipped
		// intents asynchronously - even on failure.
		//
		// TODO(tschottdorf): EndTransaction will use this pathway to return
		// intents which should immediately be resolved. However, there's
		// a slight chance that an error occurring between the creation of that
		// intents slice and this point still results in the slice arriving here
		// without the EndTransaction having committed. We should clearly
		// separate the part of the ProposalData which also applies on errors.
		if lpd.intents != nil {
			r.store.intentResolver.processIntentsAsync(r, *lpd.intents)
		}
	}
	lpd.intents = nil

	// The fields handled above are set on most commands, so we assert only
	// if one of the "nontrivial" fields below requires action.
	shouldAssert = (lpd != LocalProposalData{})

	if lpd.raftLogSize != nil {
		r.mu.Lock()
		r.mu.raftLogSize = *lpd.raftLogSize
		r.mu.Unlock()
		lpd.raftLogSize = nil
	}

	if lpd.gossipFirstRange {
		// We need to run the gossip in an async task because gossiping requires
		// the range lease and we'll deadlock if we try to acquire it while
		// holding processRaftMu. Specifically, Replica.redirectOnOrAcquireLease
		// blocks waiting for the lease acquisition to finish but it can't finish
		// because we're not processing raft messages due to holding
		// processRaftMu (and running on the processRaft goroutine).
		if err := r.store.Stopper().RunAsyncTask(ctx, func(ctx context.Context) {
			hasLease, pErr := r.getLeaseForGossip(ctx)

			if pErr != nil {
				log.Infof(ctx, "unable to gossip first range; hasLease=%t, err=%s", hasLease, pErr)
			} else if !hasLease {
				return
			}
			r.gossipFirstRange(ctx)
		}); err != nil {
			log.Infof(ctx, "unable to gossip first range: %s", err)
		}
		lpd.gossipFirstRange = false
	}

	if lpd.maybeAddToSplitQueue {
		r.store.splitQueue.MaybeAdd(r, r.store.Clock().Now())
		lpd.maybeAddToSplitQueue = false
	}

	if lpd.maybeGossipSystemConfig {
		r.maybeGossipSystemConfig()
		lpd.maybeGossipSystemConfig = false
	}

	if originReplica.StoreID == r.store.StoreID() {
		if lpd.leaseMetricsResult != nil {
			r.store.metrics.leaseRequestComplete(*lpd.leaseMetricsResult)
		}
		if lpd.maybeGossipNodeLiveness != nil {
			r.maybeGossipNodeLiveness(*lpd.maybeGossipNodeLiveness)
		}
	}
	// Satisfy the assertions for all of the items processed only on the
	// proposer (the block just above).
	lpd.leaseMetricsResult = nil
	lpd.maybeGossipNodeLiveness = nil

	if (lpd != LocalProposalData{}) {
		log.Fatalf(ctx, "unhandled field in LocalProposalData: %s", pretty.Diff(lpd, LocalProposalData{}))
	}

	return shouldAssert
}
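
The method is built around a zero-out-and-compare exhaustiveness check: every field that is handled (or deliberately ignored) is reset to its zero value, and the final comparison against the zero struct turns any field that a future change adds but forgets to handle into a crash. A reduced sketch of the pattern, using a hypothetical Result type in place of LocalProposalData:

// Result is a hypothetical stand-in for LocalProposalData; the field names
// are illustrative only.
package main

import "fmt"

type Result struct {
	raftLogSize *int64 // optional values are pointers so Result stays comparable
	gossip      bool
}

// handle consumes each field, zeroing it once acted upon, then asserts
// that nothing was left over.
func handle(res Result) {
	if res.raftLogSize != nil {
		fmt.Println("new raft log size:", *res.raftLogSize)
		res.raftLogSize = nil // field handled
	}
	if res.gossip {
		fmt.Println("gossiping")
		res.gossip = false // field handled
	}
	// Any field still non-zero was added to Result but never handled above;
	// the comparison turns that omission into a loud failure.
	if res != (Result{}) {
		panic(fmt.Sprintf("unhandled field in Result: %+v", res))
	}
}

func main() {
	size := int64(4096)
	handle(Result{raftLogSize: &size, gossip: true})
}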
Example 3
func (r *Replica) handleLocalEvalResult(
	ctx context.Context, originReplica roachpb.ReplicaDescriptor, lResult LocalEvalResult,
) (shouldAssert bool) {
	// Fields for which no action is taken in this method are zeroed so that
	// they don't trigger an assertion at the end of the method (which checks
	// that all fields were handled).
	{
		lResult.idKey = storagebase.CmdIDKey("")
		lResult.Batch = nil
		lResult.endCmds = nil
		lResult.doneCh = nil
		lResult.ctx = nil
		lResult.Err = nil
		lResult.proposedAtTicks = 0
		lResult.Reply = nil
	}

	// ======================
	// Non-state updates and actions.
	// ======================

	// The caller is required to detach and handle intents.
	if lResult.intents != nil {
		log.Fatalf(ctx, "LocalEvalResult.intents should be nil: %+v", lResult.intents)
	}

	// The fields handled above are set on most commands, so we assert only
	// if one of the "nontrivial" fields below requires action.
	shouldAssert = (lResult != LocalEvalResult{})

	if lResult.gossipFirstRange {
		// We need to run the gossip in an async task because gossiping requires
		// the range lease and we'll deadlock if we try to acquire it while
		// holding processRaftMu. Specifically, Replica.redirectOnOrAcquireLease
		// blocks waiting for the lease acquisition to finish but it can't finish
		// because we're not processing raft messages due to holding
		// processRaftMu (and running on the processRaft goroutine).
		if err := r.store.Stopper().RunAsyncTask(ctx, func(ctx context.Context) {
			hasLease, pErr := r.getLeaseForGossip(ctx)

			if pErr != nil {
				log.Infof(ctx, "unable to gossip first range; hasLease=%t, err=%s", hasLease, pErr)
			} else if !hasLease {
				return
			}
			r.gossipFirstRange(ctx)
		}); err != nil {
			log.Infof(ctx, "unable to gossip first range: %s", err)
		}
		lResult.gossipFirstRange = false
	}

	if lResult.maybeAddToSplitQueue {
		r.store.splitQueue.MaybeAdd(r, r.store.Clock().Now())
		lResult.maybeAddToSplitQueue = false
	}

	if lResult.maybeGossipSystemConfig {
		r.maybeGossipSystemConfig()
		lResult.maybeGossipSystemConfig = false
	}

	if originReplica.StoreID == r.store.StoreID() {
		if lResult.leaseMetricsResult != nil {
			r.store.metrics.leaseRequestComplete(*lResult.leaseMetricsResult)
		}
		if lResult.maybeGossipNodeLiveness != nil {
			r.maybeGossipNodeLiveness(*lResult.maybeGossipNodeLiveness)
		}
	}
	// Satisfy the assertions for all of the items processed only on the
	// proposer (the block just above).
	lResult.leaseMetricsResult = nil
	lResult.maybeGossipNodeLiveness = nil

	if (lResult != LocalEvalResult{}) {
		log.Fatalf(ctx, "unhandled field in LocalEvalResult: %s", pretty.Diff(lResult, LocalEvalResult{}))
	}

	return shouldAssert
}
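
Both variants depend on Go struct comparability: lResult != LocalEvalResult{} only compiles if every field has a comparable type, which is one reason optional values such as intents, raftLogSize, and leaseMetricsResult are pointers (nil-able yet comparable) rather than slices or other non-comparable types. A toy sketch of the distinction, using made-up types:

// comparableResult is a made-up type; a pointer-to-slice field keeps the
// struct comparable, which the != assertion requires.
package main

import "fmt"

type comparableResult struct {
	intents *[]string
}

// A direct slice field would not compile with !=:
//
//	type badResult struct{ intents []string }
//	_ = badResult{} != badResult{} // compile error: a struct containing a
//	                               // []string field cannot be compared
func main() {
	intents := []string{"a", "b"}
	r := comparableResult{intents: &intents}
	fmt.Println(r != (comparableResult{})) // true: intents still unhandled
	r.intents = nil
	fmt.Println(r != (comparableResult{})) // false: all fields zeroed
}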