// NewBallotAccumulator builds an accumulator for the given txn: one varBallot
// per action, with per-var voter counts derived from the txn's active
// allocations.
func NewBallotAccumulator(txnId *common.TxnId, txn *msgs.Txn) *BallotAccumulator {
	actions := txn.Actions()
	ba := &BallotAccumulator{
		Txn:            txn,
		txnId:          txnId,
		vUUIdToBallots: make(map[common.VarUUId]*varBallot),
		outcome:        nil,
		incompleteVars: actions.Len(),
		dirty:          false,
	}

	vBallots := make([]varBallot, ba.incompleteVars)
	for idx := 0; idx < ba.incompleteVars; idx++ {
		action := actions.At(idx)
		vUUId := common.MakeVarUUId(action.VarId())
		vBallot := &vBallots[idx]
		vBallot.vUUId = vUUId
		ba.vUUIdToBallots[*vUUId] = vBallot
	}

	allocs := txn.Allocations()
	for idx, l := 0, allocs.Len(); idx < l; idx++ {
		alloc := allocs.At(idx)
		if alloc.Active() == 0 {
			break
		}
		indices := alloc.ActionIndices()
		for idy, m := 0, indices.Len(); idy < m; idy++ {
			vBallots[int(indices.At(idy))].voters++
		}
	}

	return ba
}
// AllocForRMId returns the allocation for rmId within txn, or nil if the txn
// contains no allocation for that RM.
func AllocForRMId(txn *msgs.Txn, rmId common.RMId) *msgs.Allocation {
	allocs := txn.Allocations()
	for idx, l := 0, allocs.Len(); idx < l; idx++ {
		alloc := allocs.At(idx)
		if common.RMId(alloc.RmId()) == rmId {
			return &alloc
		}
	}
	return nil
}
// MakeAbortBallots creates an AbortDeadlock ballot for each of alloc's action
// indices, i.e. one abort vote per var this allocation is responsible for.
func MakeAbortBallots(txn *msgs.Txn, alloc *msgs.Allocation) []*eng.Ballot {
	actions := txn.Actions()
	actionIndices := alloc.ActionIndices()
	ballots := make([]*eng.Ballot, actionIndices.Len())
	for idx, l := 0, actionIndices.Len(); idx < l; idx++ {
		action := actions.At(int(actionIndices.At(idx)))
		vUUId := common.MakeVarUUId(action.VarId())
		ballots[idx] = eng.NewBallot(vUUId, eng.AbortDeadlock, nil)
	}
	return ballots
}
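// A minimal usage sketch (not part of the original source): when this node
// must vote abort on a txn, AllocForRMId and MakeAbortBallots compose as
// shown below; TxnReceived further down does exactly this on a topology
// mismatch. The helper name abortBallotsForRM is hypothetical.
func abortBallotsForRM(txn *msgs.Txn, rmId common.RMId) []*eng.Ballot {
	if alloc := AllocForRMId(txn, rmId); alloc != nil {
		return MakeAbortBallots(txn, alloc)
	}
	return nil // rmId holds no allocation in this txn, so it has nothing to vote on
}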
func GetAcceptorsFromTxn(txnCap *msgs.Txn) common.RMIds {
	fInc := int(txnCap.FInc())
	twoFInc := fInc + fInc - 1
	acceptors := make([]common.RMId, twoFInc)
	allocations := txnCap.Allocations()
	idx := 0
	for l := allocations.Len(); idx < l && idx < twoFInc; idx++ {
		alloc := allocations.At(idx)
		acceptors[idx] = common.RMId(alloc.RmId())
	}
	// Danger! For the initial topology txns, there are _not_ twoFInc acceptors.
	return acceptors[:idx]
}
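// An illustrative sketch of the arithmetic above, assuming FInc encodes F+1
// as its use throughout this file suggests: a full acceptor set has
// 2F+1 = 2*fInc - 1 members, of which any fInc form a majority. For example,
// with fInc = 2 (tolerating one failure), a txn normally carries three
// acceptors and needs two of them to agree. The helper is hypothetical.
func quorumSizes(fInc int) (acceptorCount, quorum int) {
	return fInc + fInc - 1, fInc
}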
// TxnFromCap creates the local Txn state for txnCap, populating only the
// actions allocated to ourRMId.
func TxnFromCap(exe *dispatcher.Executor, vd *VarDispatcher, stateChange TxnLocalStateChange, ourRMId common.RMId, txnCap *msgs.Txn) *Txn {
	txnId := common.MakeTxnId(txnCap.Id())
	actions := txnCap.Actions()
	txn := &Txn{
		Id:          txnId,
		Retry:       txnCap.Retry(),
		writes:      make([]*common.VarUUId, 0, actions.Len()),
		TxnCap:      txnCap,
		exe:         exe,
		vd:          vd,
		stateChange: stateChange,
	}

	allocations := txnCap.Allocations()
	for idx, l := 0, allocations.Len(); idx < l; idx++ {
		alloc := allocations.At(idx)
		rmId := common.RMId(alloc.RmId())
		if ourRMId == rmId {
			txn.populate(alloc.ActionIndices(), actions)
			break
		}
	}

	return txn
}
func NewProposer(pm *ProposerManager, txnId *common.TxnId, txnCap *msgs.Txn, mode ProposerMode, topology *configuration.Topology) *Proposer {
	p := &Proposer{
		proposerManager: pm,
		mode:            mode,
		txnId:           txnId,
		acceptors:       GetAcceptorsFromTxn(txnCap),
		topology:        topology,
		fInc:            int(txnCap.FInc()),
	}
	if mode == ProposerActiveVoter {
		p.txn = eng.TxnFromCap(pm.Exe, pm.VarDispatcher, p, pm.RMId, txnCap)
	}
	p.init()
	return p
}
// ImmigrationTxnFromCap reconstructs a txn arriving from an emigrating RM: it
// pairs each migrated var with its write action, then replays the already
// decided outcome directly into the var dispatcher.
func ImmigrationTxnFromCap(exe *dispatcher.Executor, vd *VarDispatcher, stateChange TxnLocalStateChange, ourRMId common.RMId, txnCap *msgs.Txn, varCaps *msgs.Var_List) {
	txn := TxnFromCap(exe, vd, stateChange, ourRMId, txnCap)
	txnActions := txnCap.Actions()
	txn.localActions = make([]localAction, varCaps.Len())
	actionsMap := make(map[common.VarUUId]*localAction)
	for idx, l := 0, varCaps.Len(); idx < l; idx++ {
		varCap := varCaps.At(idx)
		action := &txn.localActions[idx]
		action.Txn = txn
		action.vUUId = common.MakeVarUUId(varCap.Id())
		action.writeTxnActions = &txnActions
		positions := varCap.Positions()
		action.createPositions = (*common.Positions)(&positions)
		action.outcomeClock = VectorClockFromCap(varCap.WriteTxnClock())
		action.writesClock = VectorClockFromCap(varCap.WritesClock())
		actionsMap[*action.vUUId] = action
	}

	for idx, l := 0, txnActions.Len(); idx < l; idx++ {
		actionCap := txnActions.At(idx)
		vUUId := common.MakeVarUUId(actionCap.VarId())
		if action, found := actionsMap[*vUUId]; found {
			action.writeAction = &actionCap
		}
	}

	txn.Start(false)
	txn.nextState()
	for idx := range txn.localActions {
		action := &txn.localActions[idx]
		f := func(v *Var) {
			if v == nil {
				panic(fmt.Sprintf("%v immigration error: %v unable to create var!", txn.Id, action.vUUId))
			} else {
				v.ReceiveTxnOutcome(action)
			}
		}
		vd.ApplyToVar(f, true, action.vUUId)
	}
}
func NewProposal(pm *ProposerManager, txnId *common.TxnId, txn *msgs.Txn, fInc int, ballots []*eng.Ballot, instanceRMId common.RMId, acceptors []common.RMId, skipPhase1 bool) *proposal {
	allocs := txn.Allocations()
	activeRMIds := make(map[common.RMId]uint32, allocs.Len())
	for idx, l := 0, allocs.Len(); idx < l; idx++ {
		alloc := allocs.At(idx)
		bootCount := alloc.Active()
		if bootCount == 0 {
			break
		}
		rmId := common.RMId(alloc.RmId())
		activeRMIds[rmId] = bootCount
	}
	p := &proposal{
		proposerManager:    pm,
		instanceRMId:       instanceRMId,
		acceptors:          acceptors,
		activeRMIds:        activeRMIds,
		fInc:               fInc,
		txn:                txn,
		txnId:              txnId,
		submitter:          common.RMId(txn.Submitter()),
		submitterBootCount: txn.SubmitterBootCount(),
		skipPhase1:         skipPhase1,
		instances:          make(map[common.VarUUId]*proposalInstance, len(ballots)),
		pending:            make([]*proposalInstance, 0, len(ballots)),
		finished:           false,
	}
	for _, ballot := range ballots {
		pi := newProposalInstance(p, ballot)
		p.instances[*ballot.VarUUId] = pi
		pi.init()
		pi.start()
	}
	return p
}
// SubmitTransaction broadcasts txnCap to activeRMs (optionally after delay)
// and registers a consumer that invokes continuation once the acceptors have
// reached an outcome.
func (sts *SimpleTxnSubmitter) SubmitTransaction(txnCap *msgs.Txn, activeRMs []common.RMId, continuation TxnCompletionConsumer, delay time.Duration) {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	msg.SetTxnSubmission(*txnCap)

	txnId := common.MakeTxnId(txnCap.Id())
	server.Log(txnId, "Submitting txn")
	txnSender := paxos.NewRepeatingSender(server.SegToBytes(seg), activeRMs...)
	var removeSenderCh chan server.EmptyStruct
	if delay == 0 {
		sts.connPub.AddServerConnectionSubscriber(txnSender)
	} else {
		removeSenderCh = make(chan server.EmptyStruct)
		go func() {
			time.Sleep(delay)
			sts.connPub.AddServerConnectionSubscriber(txnSender)
			<-removeSenderCh
			sts.connPub.RemoveServerConnectionSubscriber(txnSender)
		}()
	}
	acceptors := paxos.GetAcceptorsFromTxn(txnCap)
	shutdownFun := func(shutdown bool) {
		delete(sts.outcomeConsumers, *txnId)
		if delay == 0 {
			sts.connPub.RemoveServerConnectionSubscriber(txnSender)
		} else {
			close(removeSenderCh)
		}
		// OSS is safe here - see above.
		paxos.NewOneShotSender(paxos.MakeTxnSubmissionCompleteMsg(txnId), sts.connPub, acceptors...)
		if shutdown {
			if txnCap.Retry() {
				// If this msg doesn't make it then proposers should
				// observe our death and tidy up anyway. If it's just this
				// connection shutting down then there should be no
				// problem with these msgs getting to the proposers.
				paxos.NewOneShotSender(paxos.MakeTxnSubmissionAbortMsg(txnId), sts.connPub, activeRMs...)
			}
			continuation(txnId, nil, nil)
		}
	}
	shutdownFunPtr := &shutdownFun
	sts.onShutdown[shutdownFunPtr] = server.EmptyStructVal
	outcomeAccumulator := paxos.NewOutcomeAccumulator(int(txnCap.FInc()), acceptors)
	consumer := func(sender common.RMId, txnId *common.TxnId, outcome *msgs.Outcome) {
		if outcome, _ = outcomeAccumulator.BallotOutcomeReceived(sender, outcome); outcome != nil {
			delete(sts.onShutdown, shutdownFunPtr)
			shutdownFun(false)
			continuation(txnId, outcome, nil)
		}
	}
	sts.outcomeConsumers[*txnId] = consumer
}
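// The delayed branch above follows a pattern worth isolating: subscribe after
// a pause, then park the goroutine until a close from shutdownFun triggers
// the matching unsubscribe. A generic, hypothetical sketch of that shape
// (subscribeAfter is not part of the original source):
func subscribeAfter(delay time.Duration, subscribe, unsubscribe func()) (cancel func()) {
	done := make(chan struct{})
	go func() {
		time.Sleep(delay)
		subscribe()
		<-done // held open until cancel is invoked
		unsubscribe()
	}()
	return func() { close(done) }
}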
func (pm *ProposerManager) TxnReceived(sender common.RMId, txnId *common.TxnId, txnCap *msgs.Txn) {
	// Due to failures, we can actually receive outcomes (2Bs) before we
	// receive the txn to vote on: other proposers will have created abort
	// proposals, and consensus may already have been reached. If that is
	// the case, it is correct to ignore this message.
	if _, found := pm.proposers[*txnId]; !found {
		server.Log(txnId, "Received")
		accept := true
		if pm.topology != nil {
			accept = (pm.topology.Next() == nil && pm.topology.Version == txnCap.TopologyVersion()) ||
				// Could also do pm.topology.BarrierReached1(sender), but
				// we would need to specialise that to rolls rather than
				// topology txns, and it's enforced on the sending side
				// anyway. Once the sender has received the next topology,
				// it'll do the right thing and locally block until it's
				// in barrier1.
				(pm.topology.Next() != nil && pm.topology.Next().Version == txnCap.TopologyVersion())
			if accept {
				_, found := pm.topology.RMsRemoved()[sender]
				accept = !found
			}
		}
		if accept {
			proposer := NewProposer(pm, txnId, txnCap, ProposerActiveVoter, pm.topology)
			pm.proposers[*txnId] = proposer
			proposer.Start()
		} else {
			server.Log(txnId, "Aborting received txn due to non-matching topology.", txnCap.TopologyVersion())
			acceptors := GetAcceptorsFromTxn(txnCap)
			fInc := int(txnCap.FInc())
			alloc := AllocForRMId(txnCap, pm.RMId)
			ballots := MakeAbortBallots(txnCap, alloc)
			pm.NewPaxosProposals(txnId, txnCap, fInc, ballots, acceptors, pm.RMId, true)
			// ActiveLearner is right - we don't want the proposer to
			// vote, but it should exist to collect the 2Bs that should
			// come back.
			proposer := NewProposer(pm, txnId, txnCap, ProposerActiveLearner, pm.topology)
			pm.proposers[*txnId] = proposer
			proposer.Start()
		}
	}
}
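// A condensed, hypothetical restatement of the acceptance predicate above,
// purely for illustration: a txn is accepted when its topology version
// matches either the current topology (with no transition pending) or the
// pending next topology.
func topologyAccepts(current uint32, next uint32, hasNext bool, txnVersion uint32) bool {
	return (!hasNext && current == txnVersion) || (hasNext && next == txnVersion)
}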
func (pd *ProposerDispatcher) TxnReceived(sender common.RMId, txn *msgs.Txn) {
	txnId := common.MakeTxnId(txn.Id())
	pd.withProposerManager(txnId, func(pm *ProposerManager) {
		pm.TxnReceived(sender, txnId, txn)
	})
}
func isDeflated(txn *msgs.Txn) bool {
	actions := txn.Actions()
	return actions.Len() != 0 && actions.At(0).Which() == msgs.ACTION_MISSING
}
func deflateTxn(txn *msgs.Txn, seg *capn.Segment) *msgs.Txn {
	if isDeflated(txn) {
		return txn
	}
	deflatedTxn := msgs.NewTxn(seg)
	deflatedTxn.SetId(txn.Id())
	deflatedTxn.SetRetry(txn.Retry())
	deflatedTxn.SetSubmitter(txn.Submitter())
	deflatedTxn.SetSubmitterBootCount(txn.SubmitterBootCount())
	deflatedTxn.SetFInc(txn.FInc())
	deflatedTxn.SetTopologyVersion(txn.TopologyVersion())
	deflatedTxn.SetAllocations(txn.Allocations())

	actionsList := txn.Actions()
	deflatedActionsList := msgs.NewActionList(seg, actionsList.Len())
	deflatedTxn.SetActions(deflatedActionsList)
	for idx, l := 0, actionsList.Len(); idx < l; idx++ {
		deflatedAction := deflatedActionsList.At(idx)
		deflatedAction.SetVarId(actionsList.At(idx).VarId())
		deflatedAction.SetMissing()
	}
	return &deflatedTxn
}
func TxnToRootBytes(txn *msgs.Txn) []byte {
	seg := capn.NewBuffer(nil)
	txnCap := msgs.NewRootTxn(seg)
	txnCap.SetId(txn.Id())
	txnCap.SetRetry(txn.Retry())
	txnCap.SetSubmitter(txn.Submitter())
	txnCap.SetSubmitterBootCount(txn.SubmitterBootCount())
	txnCap.SetActions(txn.Actions())
	txnCap.SetAllocations(txn.Allocations())
	txnCap.SetFInc(txn.FInc())
	txnCap.SetTopologyVersion(txn.TopologyVersion())
	return server.SegToBytes(seg)
}
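// A small usage sketch (assumed, not from the original source) tying the two
// functions above together: deflate a txn into a scratch segment, then
// serialise it via a fresh root segment for storage or resending. The helper
// name deflatedTxnBytes is hypothetical.
func deflatedTxnBytes(txn *msgs.Txn) []byte {
	seg := capn.NewBuffer(nil)
	return TxnToRootBytes(deflateTxn(txn, seg))
}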