func (p *proposal) maybeSendOneA() { pendingPromises := p.pending[:0] for _, pi := range p.instances { if pi.currentState == &pi.proposalOneA { pendingPromises = append(pendingPromises, pi) } } if len(pendingPromises) == 0 { return } seg := capn.NewBuffer(nil) msg := msgs.NewRootMessage(seg) sender := newProposalSender(p, pendingPromises) oneACap := msgs.NewOneATxnVotes(seg) msg.SetOneATxnVotes(oneACap) oneACap.SetTxnId(p.txnId[:]) oneACap.SetRmId(uint32(p.instanceRMId)) proposals := msgs.NewTxnVoteProposalList(seg, len(pendingPromises)) oneACap.SetProposals(proposals) for idx, pi := range pendingPromises { proposal := proposals.At(idx) pi.addOneAToProposal(&proposal, sender) } sender.msg = server.SegToBytes(seg) server.Log(p.txnId, "Adding sender for 1A") p.proposerManager.AddServerConnectionSubscriber(sender) }
func (cr *connectionRun) serverError(err error) error { seg := capn.NewBuffer(nil) msg := msgs.NewRootMessage(seg) msg.SetConnectionError(err.Error()) cr.sendMessage(server.SegToBytes(seg)) return err }
func (p *proposal) maybeSendTwoA() { pendingAccepts := p.pending[:0] for _, pi := range p.instances { if pi.currentState == &pi.proposalTwoA { pendingAccepts = append(pendingAccepts, pi) } } if len(pendingAccepts) == 0 { return } seg := capn.NewBuffer(nil) msg := msgs.NewRootMessage(seg) sender := newProposalSender(p, pendingAccepts) twoACap := msgs.NewTwoATxnVotes(seg) msg.SetTwoATxnVotes(twoACap) twoACap.SetRmId(uint32(p.instanceRMId)) acceptRequests := msgs.NewTxnVoteAcceptRequestList(seg, len(pendingAccepts)) twoACap.SetAcceptRequests(acceptRequests) deflate := false for idx, pi := range pendingAccepts { acceptRequest := acceptRequests.At(idx) deflate = pi.addTwoAToAcceptRequest(seg, &acceptRequest, sender) || deflate } if deflate { deflated := deflateTxn(p.txn, seg) twoACap.SetTxn(*deflated) } else { twoACap.SetTxn(*p.txn) } sender.msg = server.SegToBytes(seg) server.Log(p.txnId, "Adding sender for 2A") p.proposerManager.AddServerConnectionSubscriber(sender) }
// OneATxnVotesReceived handles an incoming 1A (propose) message: it
// routes each per-variable proposal to its paxos instance and replies
// to the sender with a single 1B message carrying one promise per
// proposal.
func (am *AcceptorManager) OneATxnVotesReceived(sender common.RMId, txnId *common.TxnId, oneATxnVotes *msgs.OneATxnVotes) {
	instanceRMId := common.RMId(oneATxnVotes.RmId())
	server.Log(txnId, "1A received from", sender, "; instance:", instanceRMId)
	// Build the instance-id key in a reusable buffer. Layout appears to
	// be: txnId bytes, then the big-endian instance RMId, then the
	// per-variable vUUId (written inside the loop below).
	instId := instanceId([instanceIdLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])
	binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], uint32(instanceRMId))

	// Prepare the 1B reply, echoing back the instance RMId and txnId.
	replySeg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(replySeg)
	oneBTxnVotes := msgs.NewOneBTxnVotes(replySeg)
	msg.SetOneBTxnVotes(oneBTxnVotes)
	oneBTxnVotes.SetRmId(oneATxnVotes.RmId())
	oneBTxnVotes.SetTxnId(oneATxnVotes.TxnId())

	proposals := oneATxnVotes.Proposals()
	promises := msgs.NewTxnVotePromiseList(replySeg, proposals.Len())
	oneBTxnVotes.SetPromises(promises)
	for idx, l := 0, proposals.Len(); idx < l; idx++ {
		proposal := proposals.At(idx)
		vUUId := common.MakeVarUUId(proposal.VarId())
		// Overwrite only the vUUId portion; the txnId/RMId prefix is
		// unchanged between iterations.
		copy(instIdSlice[common.KeyLen+4:], vUUId[:])
		promise := promises.At(idx)
		promise.SetVarId(vUUId[:])
		am.ensureInstance(txnId, &instId, vUUId).OneATxnVotesReceived(&proposal, &promise)
	}
	// The proposal senders are repeating, so this use of OSS is fine.
	NewOneShotSender(server.SegToBytes(replySeg), am, sender)
}
func MakeTxnSubmissionAbortMsg(txnId *common.TxnId) []byte { seg := capn.NewBuffer(nil) msg := msgs.NewRootMessage(seg) tsa := msgs.NewTxnSubmissionAbort(seg) msg.SetSubmissionAbort(tsa) tsa.SetTxnId(txnId[:]) return server.SegToBytes(seg) }
func MakeTxnSubmissionCompleteMsg(txnId *common.TxnId) []byte { seg := capn.NewBuffer(nil) msg := msgs.NewRootMessage(seg) tsc := msgs.NewTxnSubmissionComplete(seg) msg.SetSubmissionComplete(tsc) tsc.SetTxnId(txnId[:]) return server.SegToBytes(seg) }
func MakeTxnLocallyCompleteMsg(txnId *common.TxnId) []byte { seg := capn.NewBuffer(nil) msg := msgs.NewRootMessage(seg) tlc := msgs.NewTxnLocallyComplete(seg) msg.SetTxnLocallyComplete(tlc) tlc.SetTxnId(txnId[:]) return server.SegToBytes(seg) }
// SubmitTransaction serialises txnCap and repeatedly sends it to the
// active RMs (optionally after delay), then registers an outcome
// consumer that fires continuation once a ballot outcome has been
// accumulated from the acceptors.
func (sts *SimpleTxnSubmitter) SubmitTransaction(txnCap *msgs.Txn, activeRMs []common.RMId, continuation TxnCompletionConsumer, delay time.Duration) {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	msg.SetTxnSubmission(*txnCap)
	txnId := common.MakeTxnId(txnCap.Id())
	server.Log(txnId, "Submitting txn")
	txnSender := paxos.NewRepeatingSender(server.SegToBytes(seg), activeRMs...)
	var removeSenderCh chan server.EmptyStruct
	if delay == 0 {
		sts.connPub.AddServerConnectionSubscriber(txnSender)
	} else {
		// Delayed submission: a goroutine sleeps, subscribes the sender,
		// then waits for removeSenderCh to close (in shutdownFun) before
		// unsubscribing. Closing the channel works even if shutdown
		// happens before the sleep finishes.
		removeSenderCh = make(chan server.EmptyStruct)
		go func() {
			// fmt.Printf("%v ", delay)
			time.Sleep(delay)
			sts.connPub.AddServerConnectionSubscriber(txnSender)
			<-removeSenderCh
			sts.connPub.RemoveServerConnectionSubscriber(txnSender)
		}()
	}
	acceptors := paxos.GetAcceptorsFromTxn(txnCap)
	// shutdownFun tears down this submission: it deregisters the
	// consumer, stops the repeating sender, and tells the acceptors the
	// submission is complete. With shutdown=true it additionally aborts
	// retry txns and invokes continuation with a nil outcome.
	shutdownFun := func(shutdown bool) {
		delete(sts.outcomeConsumers, *txnId)
		// fmt.Printf("sts%v ", len(sts.outcomeConsumers))
		if delay == 0 {
			sts.connPub.RemoveServerConnectionSubscriber(txnSender)
		} else {
			close(removeSenderCh)
		}
		// OSS is safe here - see above.
		paxos.NewOneShotSender(paxos.MakeTxnSubmissionCompleteMsg(txnId), sts.connPub, acceptors...)
		if shutdown {
			if txnCap.Retry() {
				// If this msg doesn't make it then proposers should
				// observe our death and tidy up anyway. If it's just this
				// connection shutting down then there should be no
				// problem with these msgs getting to the proposers.
				paxos.NewOneShotSender(paxos.MakeTxnSubmissionAbortMsg(txnId), sts.connPub, activeRMs...)
			}
			continuation(txnId, nil, nil)
		}
	}
	// Keyed by pointer so this particular closure can be removed again
	// once an outcome arrives.
	shutdownFunPtr := &shutdownFun
	sts.onShutdown[shutdownFunPtr] = server.EmptyStructVal
	outcomeAccumulator := paxos.NewOutcomeAccumulator(int(txnCap.FInc()), acceptors)
	// NB: txnId and outcome here shadow the outer variables; the
	// accumulator decides when enough ballots have been received.
	consumer := func(sender common.RMId, txnId *common.TxnId, outcome *msgs.Outcome) {
		if outcome, _ = outcomeAccumulator.BallotOutcomeReceived(sender, outcome); outcome != nil {
			delete(sts.onShutdown, shutdownFunPtr)
			shutdownFun(false)
			continuation(txnId, outcome, nil)
		}
	}
	sts.outcomeConsumers[*txnId] = consumer
	// fmt.Printf("sts%v ", len(sts.outcomeConsumers))
}
// TwoATxnVotesReceived handles an incoming 2A (accept) message: each
// per-variable accept request is routed to its paxos instance; accepted
// ballots are recorded against the acceptor, and any rejections are
// batched into a single 2B failure reply to the sender.
func (am *AcceptorManager) TwoATxnVotesReceived(sender common.RMId, txnId *common.TxnId, twoATxnVotes *msgs.TwoATxnVotes) {
	instanceRMId := common.RMId(twoATxnVotes.RmId())
	server.Log(txnId, "2A received from", sender, "; instance:", instanceRMId)
	// Build the instance-id key in a reusable buffer. Layout appears to
	// be: txnId bytes, then the big-endian instance RMId, then the
	// per-variable vUUId (written inside the loop below).
	instId := instanceId([instanceIdLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])
	binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], uint32(instanceRMId))
	txnCap := twoATxnVotes.Txn()
	a := am.ensureAcceptor(txnId, &txnCap)
	requests := twoATxnVotes.AcceptRequests()
	failureInstances := make([]*instance, 0, requests.Len())
	failureRequests := make([]*msgs.TxnVoteAcceptRequest, 0, requests.Len())
	for idx, l := 0, requests.Len(); idx < l; idx++ {
		// request is declared per-iteration, so taking &request below is
		// safe: each element of failureRequests points at a distinct value.
		request := requests.At(idx)
		vUUId := common.MakeVarUUId(request.Ballot().VarId())
		copy(instIdSlice[common.KeyLen+4:], vUUId[:])
		inst := am.ensureInstance(txnId, &instId, vUUId)
		accepted, rejected := inst.TwoATxnVotesReceived(&request)
		if accepted {
			a.BallotAccepted(instanceRMId, inst, vUUId, &txnCap)
		} else if rejected {
			failureInstances = append(failureInstances, inst)
			failureRequests = append(failureRequests, &request)
		}
	}
	if len(failureInstances) != 0 {
		// At least one rejection: reply with a 2B failures message
		// carrying one nack per rejected instance.
		replySeg := capn.NewBuffer(nil)
		msg := msgs.NewRootMessage(replySeg)
		twoBTxnVotes := msgs.NewTwoBTxnVotes(replySeg)
		msg.SetTwoBTxnVotes(twoBTxnVotes)
		twoBTxnVotes.SetFailures()
		failuresCap := twoBTxnVotes.Failures()
		failuresCap.SetTxnId(txnId[:])
		failuresCap.SetRmId(uint32(instanceRMId))
		nacks := msgs.NewTxnVoteTwoBFailureList(replySeg, len(failureInstances))
		failuresCap.SetNacks(nacks)
		for idx, inst := range failureInstances {
			failure := nacks.At(idx)
			failure.SetVarId(inst.vUUId[:])
			failure.SetRoundNumber(failureRequests[idx].RoundNumber())
			// Top 32 bits of the promised ballot number tell the proposer
			// how high it must go on its next round.
			failure.SetRoundNumberTooLow(uint32(inst.promiseNum >> 32))
		}
		server.Log(txnId, "Sending 2B failures to", sender, "; instance:", instanceRMId)
		// The proposal senders are repeating, so this use of OSS is fine.
		NewOneShotSender(server.SegToBytes(replySeg), am, sender)
	}
}
func (cr *connectionRun) start() (bool, error) { log.Printf("Connection established to %v (%v)\n", cr.remoteHost, cr.remoteRMId) cr.restart = true seg := capn.NewBuffer(nil) if cr.isClient { message := cmsgs.NewRootClientMessage(seg) message.SetHeartbeat() } else { message := msgs.NewRootMessage(seg) message.SetHeartbeat() } cr.beatBytes = server.SegToBytes(seg) if cr.isServer { cr.connectionManager.ServerEstablished(cr.Connection, cr.remoteHost, cr.remoteRMId, cr.remoteBootCount, cr.combinedTieBreak, cr.remoteRootId) } if cr.isClient { servers := cr.connectionManager.ClientEstablished(cr.ConnectionNumber, cr.Connection) cr.submitter = client.NewClientTxnSubmitter(cr.connectionManager.RMId, cr.connectionManager.BootCount, cr.connectionManager) cr.submitter.TopologyChanged(cr.topology) cr.submitter.ServerConnectionsChanged(servers) } cr.mustSendBeat = true cr.missingBeats = 0 cr.beater = newConnectionBeater(cr.Connection) go cr.beater.beat() cr.reader = newConnectionReader(cr.Connection) if cr.isClient { go cr.reader.readClient() } else { go cr.reader.readServer() } return false, nil }
func (am *AcceptorManager) TxnLocallyCompleteReceived(sender common.RMId, txnId *common.TxnId, tlc *msgs.TxnLocallyComplete) { if aInst, found := am.acceptors[*txnId]; found && aInst.acceptor != nil { server.Log(txnId, "TLC received from", sender, "(acceptor found)") aInst.acceptor.TxnLocallyCompleteReceived(sender) } else { // We must have deleted the acceptor state from disk, // immediately prior to sending TGC, and then died. Now we're // back up, the proposers have sent us more TLCs, and we should // just reply with TGCs. server.Log(txnId, "TLC received from", sender, "(acceptor not found)") seg := capn.NewBuffer(nil) msg := msgs.NewRootMessage(seg) tgc := msgs.NewTxnGloballyComplete(seg) msg.SetTxnGloballyComplete(tgc) tgc.SetTxnId(txnId[:]) server.Log(txnId, "Sending single TGC to", sender) // Use of OSS here is ok because this is the default action on // not finding state. NewOneShotSender(server.SegToBytes(seg), am, sender) } }