Example #1: decoding and pretty-printing a Raft log entry
// tryRaftLogEntry attempts to decode kv as a Raft log entry and returns a
// human-readable rendering of it.
func tryRaftLogEntry(kv engine.MVCCKeyValue) (string, error) {
	var ent raftpb.Entry
	if err := maybeUnmarshalInline(kv.Value, &ent); err != nil {
		return "", err
	}
	if ent.Type == raftpb.EntryNormal {
		if len(ent.Data) > 0 {
			_, cmdData := storage.DecodeRaftCommand(ent.Data)
			var cmd storagebase.RaftCommand
			if err := cmd.Unmarshal(cmdData); err != nil {
				return "", err
			}
			// The payload was already decoded into cmd; nil it out so that
			// printing the entry below doesn't dump the raw bytes again.
			ent.Data = nil
			return fmt.Sprintf("%s by %v\n%s\n%s\n", &ent, cmd.OriginReplica, cmd.Cmd, &cmd), nil
		}
		return fmt.Sprintf("%s: EMPTY\n", &ent), nil
	} else if ent.Type == raftpb.EntryConfChange {
		var cc raftpb.ConfChange
		if err := cc.Unmarshal(ent.Data); err != nil {
			return "", err
		}
		var ctx storage.ConfChangeContext
		if err := ctx.Unmarshal(cc.Context); err != nil {
			return "", err
		}
		var cmd storagebase.ReplicatedProposalData
		if err := cmd.Unmarshal(ctx.Payload); err != nil {
			return "", err
		}
		ent.Data = nil
		return fmt.Sprintf("%s\n%s\n", &ent, &cmd), nil
	}
	return "", fmt.Errorf("Unknown log entry type: %s\n", &ent)
}
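
For context, here is a minimal sketch of how a decoder like this might be driven, e.g. by a debug command that dumps a range's Raft log. The helper iterateRaftLog is hypothetical (assume it invokes the callback for every MVCCKeyValue in the range's Raft log keyspace); the other identifiers are taken from the example above.

func dumpRaftLog(eng engine.Engine, rangeID roachpb.RangeID) error {
	// iterateRaftLog (hypothetical) visits the range's Raft log KVs in order.
	return iterateRaftLog(eng, rangeID, func(kv engine.MVCCKeyValue) error {
		s, err := tryRaftLogEntry(kv)
		if err != nil {
			return err
		}
		fmt.Println(s)
		return nil
	})
}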
Example #2: applying the replicated side effects of a command
// handleReplicatedProposalData applies the side effects of a committed
// command to the Replica's in-memory and persistent state. Each field of
// rpd is zeroed once it has been handled; shouldAssert reports whether the
// command had "nontrivial" side effects for which the caller should verify
// the in-memory state against the persisted state.
func (r *Replica) handleReplicatedProposalData(
	ctx context.Context, rpd storagebase.ReplicatedProposalData,
) (shouldAssert bool) {
	// Fields for which no action is taken in this method are zeroed so that
	// they don't trigger an assertion at the end of the method (which checks
	// that all fields were handled).
	{
		rpd.IsLeaseRequest = false
		rpd.IsConsistencyRelated = false
		rpd.IsFreeze = false
		rpd.Timestamp = hlc.ZeroTimestamp
	}

	if rpd.BlockReads {
		r.readOnlyCmdMu.Lock()
		defer r.readOnlyCmdMu.Unlock()
		rpd.BlockReads = false
	}

	// Update MVCC stats and Raft portion of ReplicaState.
	r.mu.Lock()
	r.mu.state.Stats.Add(rpd.Delta)
	if rpd.State.RaftAppliedIndex != 0 {
		r.mu.state.RaftAppliedIndex = rpd.State.RaftAppliedIndex
	}
	if rpd.State.LeaseAppliedIndex != 0 {
		r.mu.state.LeaseAppliedIndex = rpd.State.LeaseAppliedIndex
	}
	needsSplitBySize := r.needsSplitBySizeLocked()
	r.mu.Unlock()

	r.store.metrics.addMVCCStats(rpd.Delta)
	rpd.Delta = enginepb.MVCCStats{}

	// Check the Raft log queue only periodically rather than after every
	// applied command: e.g. if RaftLogQueueStaleThreshold were 100, this
	// would fire once every 26 applied entries.
	const raftLogCheckFrequency = 1 + RaftLogQueueStaleThreshold/4
	if rpd.State.RaftAppliedIndex%raftLogCheckFrequency == 1 {
		r.store.raftLogQueue.MaybeAdd(r, r.store.Clock().Now())
	}
	if needsSplitBySize {
		r.store.splitQueue.MaybeAdd(r, r.store.Clock().Now())
	}

	rpd.State.Stats = enginepb.MVCCStats{}
	rpd.State.LeaseAppliedIndex = 0
	rpd.State.RaftAppliedIndex = 0

	// The fields handled above are set on every command, so we assert only
	// if one of the "nontrivial" fields below requires action.
	shouldAssert = (rpd != storagebase.ReplicatedProposalData{})

	// Process Split or Merge. This needs to happen after stats update because
	// of the ContainsEstimates hack.

	if rpd.Split != nil {
		// TODO(tschottdorf): We want to let the usual MVCCStats-delta
		// machinery update our stats for the left-hand side. But there is no
		// way to pass up an MVCCStats object that will clear out the
		// ContainsEstimates flag. We should introduce one, but the migration
		// makes this worth a separate effort (ContainsEstimates would need to
		// have three possible values, 'UNCHANGED', 'NO', and 'YES').
		// Until then, we're left with this rather crude hack.
		{
			r.mu.Lock()
			r.mu.state.Stats.ContainsEstimates = false
			stats := r.mu.state.Stats
			r.mu.Unlock()
			if err := setMVCCStats(ctx, r.store.Engine(), r.RangeID, stats); err != nil {
				log.Fatal(ctx, errors.Wrap(err, "unable to write MVCC stats"))
			}
		}

		splitPostApply(
			r.AnnotateCtx(ctx),
			rpd.Split.RHSDelta,
			&rpd.Split.SplitTrigger,
			r,
		)
		rpd.Split = nil
	}

	if rpd.Merge != nil {
		if err := r.store.MergeRange(ctx, r, rpd.Merge.LeftDesc.EndKey,
			rpd.Merge.RightDesc.RangeID,
		); err != nil {
			// Our in-memory state has diverged from the on-disk state.
			log.Fatalf(ctx, "failed to update store after merging range: %s", err)
		}
		rpd.Merge = nil
	}

	// Update the remaining ReplicaState.

	if rpd.State.Frozen != storagebase.ReplicaState_FROZEN_UNSPECIFIED {
		r.mu.Lock()
		r.mu.state.Frozen = rpd.State.Frozen
		r.mu.Unlock()
	}
	rpd.State.Frozen = storagebase.ReplicaState_FROZEN_UNSPECIFIED

	if newDesc := rpd.State.Desc; newDesc != nil {
		if err := r.setDesc(newDesc); err != nil {
			// There's not much we can do here: the commit may have already
			// occurred at this point, so fail fatally rather than continue
			// with a stale descriptor.
			log.Fatalf(
				ctx,
				"failed to update range descriptor to %+v: %s",
				newDesc, err,
			)
		}
		rpd.State.Desc = nil
	}

	if change := rpd.ChangeReplicas; change != nil {
		if change.ChangeType == roachpb.REMOVE_REPLICA &&
			r.store.StoreID() == change.Replica.StoreID {
			// This wants to run as late as possible, maximizing the chances
			// that the other nodes have finished this command as well
			// (processing the removal from the queue looks up the Range at
			// the lease holder, so being too early here turns this into a
			// no-op).
			if _, err := r.store.replicaGCQueue.Add(r, replicaGCPriorityRemoved); err != nil {
				// Log the error; the range should still be GC'd eventually.
				log.Errorf(ctx, "unable to add to replica GC queue: %s", err)
			}
		}
		rpd.ChangeReplicas = nil
	}

	if newLease := rpd.State.Lease; newLease != nil {
		rpd.State.Lease = nil // for assertion

		r.mu.Lock()
		replicaID := r.mu.replicaID
		prevLease := r.mu.state.Lease
		r.mu.state.Lease = newLease
		r.mu.Unlock()

		r.leasePostApply(ctx, newLease, replicaID, prevLease)
	}

	if newTruncState := rpd.State.TruncatedState; newTruncState != nil {
		rpd.State.TruncatedState = nil // for assertion
		r.mu.Lock()
		r.mu.state.TruncatedState = newTruncState
		r.mu.Unlock()
		// Clear any entries in the Raft log entry cache for this range up
		// to and including the most recently truncated index.
		r.store.raftEntryCache.clearTo(r.RangeID, newTruncState.Index+1)
	}

	if newThresh := rpd.State.GCThreshold; newThresh != hlc.ZeroTimestamp {
		r.mu.Lock()
		r.mu.state.GCThreshold = newThresh
		r.mu.Unlock()
		rpd.State.GCThreshold = hlc.ZeroTimestamp
	}

	if newThresh := rpd.State.TxnSpanGCThreshold; newThresh != hlc.ZeroTimestamp {
		r.mu.Lock()
		r.mu.state.TxnSpanGCThreshold = newThresh
		r.mu.Unlock()
		rpd.State.TxnSpanGCThreshold = hlc.ZeroTimestamp
	}

	if rpd.ComputeChecksum != nil {
		r.computeChecksumPostApply(ctx, *rpd.ComputeChecksum)
		rpd.ComputeChecksum = nil
	}

	if (rpd != storagebase.ReplicatedProposalData{}) {
		log.Fatalf(ctx, "unhandled field in ReplicatedProposalData: %s", pretty.Diff(rpd, storagebase.ReplicatedProposalData{}))
	}
	return shouldAssert
}
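
The method leans on one invariant: every field of ReplicatedProposalData is zeroed as soon as it has been handled, so comparing the struct against its zero value at the end catches any field that a future change adds but forgets to handle. Below is a self-contained sketch of the same pattern, with a hypothetical effects struct standing in for ReplicatedProposalData.

package main

import "fmt"

// effects is a hypothetical stand-in for ReplicatedProposalData. All of its
// fields must be comparable so the struct can be checked against its zero
// value with ==.
type effects struct {
	AppliedIndex uint64
	NewLease     *string
}

func apply(e effects) {
	if e.AppliedIndex != 0 {
		fmt.Println("applied index:", e.AppliedIndex)
		e.AppliedIndex = 0 // handled, so zero it out
	}
	if e.NewLease != nil {
		fmt.Println("new lease:", *e.NewLease)
		e.NewLease = nil // handled, so zero it out
	}
	// Any field added to effects but not zeroed above trips this check.
	if e != (effects{}) {
		panic(fmt.Sprintf("unhandled fields in effects: %+v", e))
	}
}

func main() {
	lease := "n1"
	apply(effects{AppliedIndex: 12, NewLease: &lease})
}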