// InitialState implements the raft.Storage interface.
func (r *Range) InitialState() (raftpb.HardState, raftpb.ConfState, error) {
	var hs raftpb.HardState
	found, err := engine.MVCCGetProto(r.rm.Engine(), keys.RaftHardStateKey(r.Desc().RaftID),
		proto.ZeroTimestamp, true, nil, &hs)
	if err != nil {
		return raftpb.HardState{}, raftpb.ConfState{}, err
	}
	if !found {
		// We don't have a saved HardState, so set up the defaults.
		if r.isInitialized() {
			// Set the initial log term.
			hs.Term = raftInitialLogTerm
			hs.Commit = raftInitialLogIndex
			atomic.StoreUint64(&r.lastIndex, raftInitialLogIndex)
		} else {
			// This is a new range we are receiving from another node. Start
			// from zero so we will receive a snapshot.
			atomic.StoreUint64(&r.lastIndex, 0)
		}
	}

	var cs raftpb.ConfState
	// For uninitialized ranges, membership is unknown at this point.
	if found || r.isInitialized() {
		for _, rep := range r.Desc().Replicas {
			cs.Nodes = append(cs.Nodes, uint64(proto.MakeRaftNodeID(rep.NodeID, rep.StoreID)))
		}
	}

	return hs, cs, nil
}
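For context, InitialState is one of the methods required by etcd's raft.Storage interface. The following is a paraphrase of that interface as it appeared in the coreos/etcd raft package of this era; the method set and names are from upstream, but consult the vendored version for exact doc comments and details.

package raft // paraphrased excerpt from github.com/coreos/etcd/raft

import pb "github.com/coreos/etcd/raft/raftpb"

// Storage is the interface a raft log/state backend must implement.
type Storage interface {
	// InitialState returns the saved HardState and ConfState information.
	InitialState() (pb.HardState, pb.ConfState, error)
	// Entries returns a slice of log entries in the range [lo, hi).
	Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
	// Term returns the term of entry i.
	Term(i uint64) (uint64, error)
	// LastIndex returns the index of the last entry in the log.
	LastIndex() (uint64, error)
	// FirstIndex returns the index of the first available log entry.
	FirstIndex() (uint64, error)
	// Snapshot returns the most recent snapshot.
	Snapshot() (pb.Snapshot, error)
}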
func updateHardState(eng engine.ReadWriter, s storagebase.ReplicaState) error {
	// Load a potentially existing HardState as we may need to preserve
	// information about cast votes. For example, during a Split for which
	// another node's new right-hand side has contacted us before our left-hand
	// side called in here to create the group.
	rangeID := s.Desc.RangeID
	oldHS, err := loadHardState(eng, rangeID)
	if err != nil {
		return err
	}

	newHS := raftpb.HardState{
		Term:   s.TruncatedState.Term,
		Commit: s.RaftAppliedIndex,
	}

	if !raft.IsEmptyHardState(oldHS) {
		if oldHS.Commit > newHS.Commit {
			newHS.Commit = oldHS.Commit
		}
		if oldHS.Term > newHS.Term {
			newHS.Term = oldHS.Term
		}
		newHS.Vote = oldHS.Vote
	}

	return setHardState(eng, rangeID, newHS)
}
// writeInitialState bootstraps a new Raft group (i.e. it is called when we
// bootstrap a Range, or when setting up the right hand side of a split).
// Its main task is to persist a consistent Raft (and associated Replica) state
// which does not start from zero but presupposes a few entries already having
// applied.
// The supplied MVCCStats are used for the Stats field after adjusting for
// persisting the state itself, and the updated stats are returned.
func writeInitialState(
	eng engine.ReadWriter, ms enginepb.MVCCStats, desc roachpb.RangeDescriptor,
) (enginepb.MVCCStats, error) {
	rangeID := desc.RangeID
	var s storagebase.ReplicaState

	s.TruncatedState = &roachpb.RaftTruncatedState{
		Term:  raftInitialLogTerm,
		Index: raftInitialLogIndex,
	}
	s.RaftAppliedIndex = s.TruncatedState.Index
	s.Desc = &roachpb.RangeDescriptor{
		RangeID: rangeID,
	}
	s.Stats = ms

	newMS, err := saveState(eng, s)
	if err != nil {
		return enginepb.MVCCStats{}, err
	}

	// Load a potentially existing HardState as we may need to preserve
	// information about cast votes. For example, during a Split for which
	// another node's new right-hand side has contacted us before our left-hand
	// side called in here to create the group.
	oldHS, err := loadHardState(eng, rangeID)
	if err != nil {
		return enginepb.MVCCStats{}, err
	}

	newHS := raftpb.HardState{
		Term:   s.TruncatedState.Term,
		Commit: s.TruncatedState.Index,
	}

	if !raft.IsEmptyHardState(oldHS) {
		if oldHS.Commit > newHS.Commit {
			newHS.Commit = oldHS.Commit
		}
		if oldHS.Term > newHS.Term {
			newHS.Term = oldHS.Term
		}
		newHS.Vote = oldHS.Vote
	}

	if err := setHardState(eng, rangeID, newHS); err != nil {
		return enginepb.MVCCStats{}, err
	}

	if err := setLastIndex(eng, rangeID, s.TruncatedState.Index); err != nil {
		return enginepb.MVCCStats{}, err
	}

	return newMS, nil
}
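Both updateHardState and writeInitialState apply the same merge rule against a pre-existing HardState. A minimal, self-contained sketch of just that rule follows; mergeHardState is a hypothetical helper for illustration and not part of either codebase, and the import paths assume the coreos/etcd packages of this era.

package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
)

// mergeHardState (hypothetical) ensures the freshly computed HardState never
// regresses Term or Commit below a previously persisted one, and carries any
// recorded Vote forward so the replica cannot vote twice in the same term.
func mergeHardState(oldHS, newHS raftpb.HardState) raftpb.HardState {
	if raft.IsEmptyHardState(oldHS) {
		return newHS
	}
	if oldHS.Commit > newHS.Commit {
		newHS.Commit = oldHS.Commit
	}
	if oldHS.Term > newHS.Term {
		newHS.Term = oldHS.Term
	}
	newHS.Vote = oldHS.Vote
	return newHS
}

func main() {
	old := raftpb.HardState{Term: 7, Vote: 3, Commit: 42}
	fresh := raftpb.HardState{Term: 5, Commit: 10}
	fmt.Printf("%+v\n", mergeHardState(old, fresh)) // Term:7 Vote:3 Commit:42
}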
// InitialState implements the raft.Storage interface.
func (r *Replica) InitialState() (raftpb.HardState, raftpb.ConfState, error) {
	var hs raftpb.HardState
	desc := r.Desc()
	found, err := engine.MVCCGetProto(r.store.Engine(), keys.RaftHardStateKey(desc.RangeID),
		roachpb.ZeroTimestamp, true, nil, &hs)
	if err != nil {
		return raftpb.HardState{}, raftpb.ConfState{}, err
	}
	initialized := r.isInitialized()
	if !found {
		// We don't have a saved HardState, so set up the defaults.
		if initialized {
			// Set the initial log term.
			hs.Term = raftInitialLogTerm
			hs.Commit = raftInitialLogIndex
			atomic.StoreUint64(&r.lastIndex, raftInitialLogIndex)
		} else {
			// This is a new range we are receiving from another node. Start
			// from zero so we will receive a snapshot.
			atomic.StoreUint64(&r.lastIndex, 0)
		}
	} else if initialized && hs.Commit == 0 {
		// Normally, when the commit index changes, raft gives us a new
		// commit index to persist, however, during initialization, which
		// occurs entirely in cockroach, raft has no knowledge of this.
		// By setting this to the initial log index, we avoid a panic in
		// raft caused by this inconsistency.
		hs.Commit = raftInitialLogIndex
	}

	var cs raftpb.ConfState
	// For uninitialized ranges, membership is unknown at this point.
	if found || initialized {
		for _, rep := range desc.Replicas {
			cs.Nodes = append(cs.Nodes, uint64(rep.ReplicaID))
		}
	}

	return hs, cs, nil
}
func (n *Node) readWAL(ctx context.Context, snapshot *raftpb.Snapshot, forceNewCluster bool) (err error) {
	var (
		walsnap  walpb.Snapshot
		metadata []byte
		st       raftpb.HardState
		ents     []raftpb.Entry
	)

	if snapshot != nil {
		walsnap.Index = snapshot.Metadata.Index
		walsnap.Term = snapshot.Metadata.Term
	}

	repaired := false
	for {
		if n.wal, err = wal.Open(n.walDir(), walsnap); err != nil {
			return fmt.Errorf("open WAL error: %v", err)
		}
		if metadata, st, ents, err = n.wal.ReadAll(); err != nil {
			if err := n.wal.Close(); err != nil {
				return err
			}
			// we can only repair ErrUnexpectedEOF and we never repair twice.
			if repaired || err != io.ErrUnexpectedEOF {
				return fmt.Errorf("read WAL error (%v) and cannot be repaired", err)
			}
			if !wal.Repair(n.walDir()) {
				return fmt.Errorf("WAL error (%v) cannot be repaired", err)
			}
			log.G(ctx).Infof("repaired WAL error (%v)", err)
			repaired = true
			continue
		}
		break
	}

	defer func() {
		if err != nil {
			if walErr := n.wal.Close(); walErr != nil {
				n.Config.Logger.Errorf("error closing raft WAL: %v", walErr)
			}
		}
	}()

	var raftNode api.RaftMember
	if err := raftNode.Unmarshal(metadata); err != nil {
		return fmt.Errorf("error unmarshalling WAL metadata: %v", err)
	}
	n.Config.ID = raftNode.RaftID

	// All members that are no longer part of the cluster must be added to
	// the removed list right away, so that we don't try to connect to them
	// before processing the configuration change entries, which could make
	// us get stuck.
	for _, ent := range ents {
		if ent.Index <= st.Commit && ent.Type == raftpb.EntryConfChange {
			var cc raftpb.ConfChange
			if err := cc.Unmarshal(ent.Data); err != nil {
				return fmt.Errorf("error unmarshalling config change: %v", err)
			}
			if cc.Type == raftpb.ConfChangeRemoveNode {
				n.cluster.RemoveMember(cc.NodeID)
			}
		}
	}

	if forceNewCluster {
		// discard the previously uncommitted entries
		for i, ent := range ents {
			if ent.Index > st.Commit {
				log.G(context.Background()).Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
				ents = ents[:i]
				break
			}
		}

		// force append the configuration change entries
		toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(n.Config.ID), st.Term, st.Commit)

		// All members that are being removed as part of the
		// force-new-cluster process must be added to the
		// removed list right away, so that we don't try to
		// connect to them before processing the configuration
		// change entries, which could make us get stuck.
		for _, ccEnt := range toAppEnts {
			if ccEnt.Type == raftpb.EntryConfChange {
				var cc raftpb.ConfChange
				if err := cc.Unmarshal(ccEnt.Data); err != nil {
					return fmt.Errorf("error unmarshalling force-new-cluster config change: %v", err)
				}
				if cc.Type == raftpb.ConfChangeRemoveNode {
					n.cluster.RemoveMember(cc.NodeID)
				}
			}
		}
		ents = append(ents, toAppEnts...)

		// force commit newly appended entries
		err := n.wal.Save(st, toAppEnts)
		if err != nil {
			log.G(context.Background()).Fatalf("%v", err)
		}
		if len(toAppEnts) != 0 {
			st.Commit = toAppEnts[len(toAppEnts)-1].Index
		}
	}

	if snapshot != nil {
		if err := n.raftStore.ApplySnapshot(*snapshot); err != nil {
			return err
		}
	}
	if err := n.raftStore.SetHardState(st); err != nil {
		return err
	}
	if err := n.raftStore.Append(ents); err != nil {
		return err
	}

	return nil
}
func (n *Node) readWAL(ctx context.Context, snapshot *raftpb.Snapshot, forceNewCluster bool) (err error) {
	var (
		walsnap  walpb.Snapshot
		metadata []byte
		st       raftpb.HardState
		ents     []raftpb.Entry
	)

	if snapshot != nil {
		walsnap.Index = snapshot.Metadata.Index
		walsnap.Term = snapshot.Metadata.Term
	}

	repaired := false
	for {
		if n.wal, err = wal.Open(n.walDir(), walsnap); err != nil {
			return fmt.Errorf("open wal error: %v", err)
		}
		if metadata, st, ents, err = n.wal.ReadAll(); err != nil {
			if err := n.wal.Close(); err != nil {
				return err
			}
			// we can only repair ErrUnexpectedEOF and we never repair twice.
			if repaired || err != io.ErrUnexpectedEOF {
				return fmt.Errorf("read wal error (%v) and cannot be repaired", err)
			}
			if !wal.Repair(n.walDir()) {
				return fmt.Errorf("WAL error (%v) cannot be repaired", err)
			}
			log.G(ctx).Infof("repaired WAL error (%v)", err)
			repaired = true
			continue
		}
		break
	}

	defer func() {
		if err != nil {
			if walErr := n.wal.Close(); walErr != nil {
				n.Config.Logger.Errorf("error closing raft WAL: %v", walErr)
			}
		}
	}()

	var raftNode api.RaftMember
	if err := raftNode.Unmarshal(metadata); err != nil {
		return fmt.Errorf("error unmarshalling wal metadata: %v", err)
	}
	n.Config.ID = raftNode.RaftID

	if forceNewCluster {
		// discard the previously uncommitted entries
		for i, ent := range ents {
			if ent.Index > st.Commit {
				log.G(context.Background()).Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
				ents = ents[:i]
				break
			}
		}

		// force append the configuration change entries
		toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(n.Config.ID), st.Term, st.Commit)
		ents = append(ents, toAppEnts...)

		// force commit newly appended entries
		err := n.wal.Save(st, toAppEnts)
		if err != nil {
			log.G(context.Background()).Fatalf("%v", err)
		}
		if len(toAppEnts) != 0 {
			st.Commit = toAppEnts[len(toAppEnts)-1].Index
		}
	}

	if snapshot != nil {
		if err := n.raftStore.ApplySnapshot(*snapshot); err != nil {
			return err
		}
	}
	if err := n.raftStore.SetHardState(st); err != nil {
		return err
	}
	if err := n.raftStore.Append(ents); err != nil {
		return err
	}

	return nil
}
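Both versions of readWAL follow the same underlying replay pattern from the etcd wal package: open the WAL at the snapshot position, read everything back, then feed the snapshot, HardState, and entries into the in-memory raft storage. The following is a minimal sketch of just that skeleton; replayWAL is a hypothetical helper, the import paths assume the coreos/etcd packages vendored by SwarmKit at the time, and repair, metadata, and force-new-cluster handling are omitted.

package walreplay

import (
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"
)

// replayWAL (hypothetical) shows only the core sequence:
// wal.Open -> ReadAll -> ApplySnapshot / SetHardState / Append.
func replayWAL(dir string, snapshot *raftpb.Snapshot) (*raft.MemoryStorage, error) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index = snapshot.Metadata.Index
		walsnap.Term = snapshot.Metadata.Term
	}

	w, err := wal.Open(dir, walsnap)
	if err != nil {
		return nil, err
	}
	defer w.Close()

	// metadata (ignored here) is whatever was passed to wal.Create;
	// SwarmKit stores a marshalled api.RaftMember there.
	_, st, ents, err := w.ReadAll()
	if err != nil {
		return nil, err
	}

	store := raft.NewMemoryStorage()
	if snapshot != nil {
		if err := store.ApplySnapshot(*snapshot); err != nil {
			return nil, err
		}
	}
	if err := store.SetHardState(st); err != nil {
		return nil, err
	}
	if err := store.Append(ents); err != nil {
		return nil, err
	}
	return store, nil
}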