Example #1
func deflateTxn(txn *msgs.Txn, seg *capn.Segment) *msgs.Txn {
	if isDeflated(txn) {
		return txn
	}
	deflatedTxn := msgs.NewTxn(seg)
	deflatedTxn.SetId(txn.Id())
	deflatedTxn.SetRetry(txn.Retry())
	deflatedTxn.SetSubmitter(txn.Submitter())
	deflatedTxn.SetSubmitterBootCount(txn.SubmitterBootCount())
	deflatedTxn.SetFInc(txn.FInc())
	deflatedTxn.SetTopologyVersion(txn.TopologyVersion())

	deflatedTxn.SetAllocations(txn.Allocations())

	actionsList := txn.Actions()
	deflatedActionsList := msgs.NewActionList(seg, actionsList.Len())
	deflatedTxn.SetActions(deflatedActionsList)
	for idx, l := 0, actionsList.Len(); idx < l; idx++ {
		deflatedAction := deflatedActionsList.At(idx)
		deflatedAction.SetVarId(actionsList.At(idx).VarId())
		deflatedAction.SetMissing()
	}

	return &deflatedTxn
}
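The isDeflated helper referenced above is not shown on this page. A minimal sketch of the check it plausibly performs, assuming a deflated txn is recognisable by its actions having been built with SetMissing() (the union-tag constant name msgs.ACTION_MISSING is an assumption about the generated Cap'n Proto code, not confirmed here):

// Sketch only, not necessarily the project's actual implementation:
// a deflated txn's actions carry just VarIds and were set via
// SetMissing(), so inspect the first action's union discriminant.
func isDeflated(txn *msgs.Txn) bool {
	actions := txn.Actions()
	// ACTION_MISSING is an assumed name for the generated union tag.
	return actions.Len() > 0 && actions.At(0).Which() == msgs.ACTION_MISSING
}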
Example #2
func (sts *SimpleTxnSubmitter) SubmitTransaction(txnCap *msgs.Txn, activeRMs []common.RMId, continuation TxnCompletionConsumer, delay time.Duration) {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	msg.SetTxnSubmission(*txnCap)

	txnId := common.MakeTxnId(txnCap.Id())
	server.Log(txnId, "Submitting txn")
	txnSender := paxos.NewRepeatingSender(server.SegToBytes(seg), activeRMs...)
	var removeSenderCh chan server.EmptyStruct
	if delay == 0 {
		sts.connPub.AddServerConnectionSubscriber(txnSender)
	} else {
		removeSenderCh = make(chan server.EmptyStruct)
		go func() {
			// fmt.Printf("%v ", delay)
			time.Sleep(delay)
			sts.connPub.AddServerConnectionSubscriber(txnSender)
			<-removeSenderCh
			sts.connPub.RemoveServerConnectionSubscriber(txnSender)
		}()
	}
	acceptors := paxos.GetAcceptorsFromTxn(txnCap)

	shutdownFun := func(shutdown bool) {
		delete(sts.outcomeConsumers, *txnId)
		// fmt.Printf("sts%v ", len(sts.outcomeConsumers))
		if delay == 0 {
			sts.connPub.RemoveServerConnectionSubscriber(txnSender)
		} else {
			close(removeSenderCh)
		}
		// OSS (OneShotSender) is safe here - see above.
		paxos.NewOneShotSender(paxos.MakeTxnSubmissionCompleteMsg(txnId), sts.connPub, acceptors...)
		if shutdown {
			if txnCap.Retry() {
				// If this msg doesn't make it then proposers should
				// observe our death and tidy up anyway. If it's just this
				// connection shutting down then there should be no
				// problem with these msgs getting to the proposers.
				paxos.NewOneShotSender(paxos.MakeTxnSubmissionAbortMsg(txnId), sts.connPub, activeRMs...)
			}
			continuation(txnId, nil, nil)
		}
	}
	shutdownFunPtr := &shutdownFun
	sts.onShutdown[shutdownFunPtr] = server.EmptyStructVal

	outcomeAccumulator := paxos.NewOutcomeAccumulator(int(txnCap.FInc()), acceptors)
	consumer := func(sender common.RMId, txnId *common.TxnId, outcome *msgs.Outcome) {
		if outcome, _ = outcomeAccumulator.BallotOutcomeReceived(sender, outcome); outcome != nil {
			delete(sts.onShutdown, shutdownFunPtr)
			shutdownFun(false)
			continuation(txnId, outcome, nil)
		}
	}
	sts.outcomeConsumers[*txnId] = consumer
	// fmt.Printf("sts%v ", len(sts.outcomeConsumers))
}
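A hedged usage sketch: the caller passes a TxnCompletionConsumer that fires once an outcome has been decided, or with a nil outcome on shutdown. The continuation's third parameter is assumed to be an error value, matching the nils passed to it in the example above:

// Usage sketch, assuming sts, txnCap and activeRMs are set up as in
// the example above.
func submitExample(sts *SimpleTxnSubmitter, txnCap *msgs.Txn, activeRMs []common.RMId) {
	sts.SubmitTransaction(txnCap, activeRMs, func(txnId *common.TxnId, outcome *msgs.Outcome, err error) {
		if outcome == nil {
			// A nil outcome signals shutdown before an outcome was decided.
			return
		}
		// Handle the decided outcome here.
	}, 0) // zero delay registers the repeating sender immediately
}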
Example #3
func GetAcceptorsFromTxn(txnCap *msgs.Txn) common.RMIds {
	fInc := int(txnCap.FInc())
	twoFInc := fInc + fInc - 1
	acceptors := make([]common.RMId, twoFInc)
	allocations := txnCap.Allocations()
	idx := 0
	for l := allocations.Len(); idx < l && idx < twoFInc; idx++ {
		alloc := allocations.At(idx)
		acceptors[idx] = common.RMId(alloc.RmId())
	}
	// Danger! For the initial topology txns, there are _not_ twoFInc acceptors
	return acceptors[:idx]
}
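The sizing here is standard Paxos arithmetic: with fInc = F+1 (the quorum size), twoFInc = 2*fInc - 1 = 2F+1, the number of acceptors needed to tolerate F failures. A self-contained check of that arithmetic:

package main

import "fmt"

func main() {
	// Reproduces the twoFInc arithmetic from GetAcceptorsFromTxn.
	for fInc := 1; fInc <= 4; fInc++ {
		twoFInc := fInc + fInc - 1 // 2F+1 where F = fInc-1
		fmt.Printf("fInc=%d -> acceptors=%d (tolerates %d failures)\n",
			fInc, twoFInc, fInc-1)
	}
}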
Example #4
func TxnToRootBytes(txn *msgs.Txn) []byte {
	seg := capn.NewBuffer(nil)
	txnCap := msgs.NewRootTxn(seg)
	txnCap.SetId(txn.Id())
	txnCap.SetRetry(txn.Retry())
	txnCap.SetSubmitter(txn.Submitter())
	txnCap.SetSubmitterBootCount(txn.SubmitterBootCount())
	txnCap.SetActions(txn.Actions())
	txnCap.SetAllocations(txn.Allocations())
	txnCap.SetFInc(txn.FInc())
	txnCap.SetTopologyVersion(txn.TopologyVersion())

	return server.SegToBytes(seg)
}
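A sketch of the reverse direction, assuming the usual go-capnproto generated API (capn.ReadFromMemoryZeroCopy and msgs.ReadRootTxn are assumptions based on that library's conventions, not confirmed by this page):

// Sketch: decode bytes produced by TxnToRootBytes back into a Txn.
func TxnFromRootBytes(data []byte) (*msgs.Txn, error) {
	seg, _, err := capn.ReadFromMemoryZeroCopy(data)
	if err != nil {
		return nil, err
	}
	txn := msgs.ReadRootTxn(seg) // assumed generated root reader
	return &txn, nil
}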
Example #5
func NewProposer(pm *ProposerManager, txnId *common.TxnId, txnCap *msgs.Txn, mode ProposerMode, topology *configuration.Topology) *Proposer {
	p := &Proposer{
		proposerManager: pm,
		mode:            mode,
		txnId:           txnId,
		acceptors:       GetAcceptorsFromTxn(txnCap),
		topology:        topology,
		fInc:            int(txnCap.FInc()),
	}
	if mode == ProposerActiveVoter {
		p.txn = eng.TxnFromCap(pm.Exe, pm.VarDispatcher, p, pm.RMId, txnCap)
	}
	p.init()
	return p
}
Example #6
func (pm *ProposerManager) TxnReceived(sender common.RMId, txnId *common.TxnId, txnCap *msgs.Txn) {
	// Due to failures, we can receive outcomes (2Bs) before we receive
	// the txn to vote on: other proposers will have created abort
	// proposals, and consensus may already have been reached. If so,
	// it is correct to ignore this message.
	if _, found := pm.proposers[*txnId]; !found {
		server.Log(txnId, "Received")
		accept := true
		if pm.topology != nil {
			accept = (pm.topology.Next() == nil && pm.topology.Version == txnCap.TopologyVersion()) ||
				// Could also do pm.topology.BarrierReached1(sender), but
				// would need to specialise that to rolls rather than
				// topology txns, and it's enforced on the sending side
				// anyway. Once the sender has received the next topology,
				// it'll do the right thing and locally block until it's
				// in barrier1.
				(pm.topology.Next() != nil && pm.topology.Next().Version == txnCap.TopologyVersion())
			if accept {
				_, found := pm.topology.RMsRemoved()[sender]
				accept = !found
			}
		}
		if accept {
			proposer := NewProposer(pm, txnId, txnCap, ProposerActiveVoter, pm.topology)
			pm.proposers[*txnId] = proposer
			proposer.Start()
		} else {
			server.Log(txnId, "Aborting received txn due to non-matching topology.", txnCap.TopologyVersion())
			acceptors := GetAcceptorsFromTxn(txnCap)
			fInc := int(txnCap.FInc())
			alloc := AllocForRMId(txnCap, pm.RMId)
			ballots := MakeAbortBallots(txnCap, alloc)
			pm.NewPaxosProposals(txnId, txnCap, fInc, ballots, acceptors, pm.RMId, true)
			// ActiveLearner is right - we don't want the proposer to
			// vote, but it should exist to collect the 2Bs that should
			// come back.
			proposer := NewProposer(pm, txnId, txnCap, ProposerActiveLearner, pm.topology)
			pm.proposers[*txnId] = proposer
			proposer.Start()
		}
	}
}
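AllocForRMId and MakeAbortBallots are referenced above but not shown on this page. A plausible sketch of AllocForRMId, reusing the allocation accessors seen in Example #3 (msgs.Allocation as the element type is an assumption):

// Plausible sketch, not confirmed by this page: scan the txn's
// allocations for the entry belonging to rmId; nil if absent.
func AllocForRMId(txn *msgs.Txn, rmId common.RMId) *msgs.Allocation {
	allocs := txn.Allocations()
	for idx, l := 0, allocs.Len(); idx < l; idx++ {
		alloc := allocs.At(idx)
		if common.RMId(alloc.RmId()) == rmId {
			return &alloc
		}
	}
	return nil
}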