func newTwoBTxnVotesSender(outcome *msgs.Outcome, txnId *common.TxnId, submitter common.RMId, recipients ...common.RMId) *twoBTxnVotesSender {
	submitterSeg := capn.NewBuffer(nil)
	submitterMsg := msgs.NewRootMessage(submitterSeg)
	submitterMsg.SetSubmissionOutcome(*outcome)

	if outcome.Which() == msgs.OUTCOME_ABORT {
		abort := outcome.Abort()
		abort.SetResubmit() // nuke out the updates as proposers don't need them.
	}

	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	twoB := msgs.NewTwoBTxnVotes(seg)
	msg.SetTwoBTxnVotes(twoB)
	twoB.SetOutcome(*outcome)

	server.Log(txnId, "Sending 2B to", recipients)

	return &twoBTxnVotesSender{
		msg:          server.SegToBytes(seg),
		recipients:   recipients,
		submitterMsg: server.SegToBytes(submitterSeg),
		submitter:    submitter,
	}
}
func (cr *connectionRun) start() (bool, error) {
	log.Printf("Connection established to %v (%v)\n", cr.remoteHost, cr.remoteRMId)

	seg := capn.NewBuffer(nil)
	message := msgs.NewRootMessage(seg)
	message.SetHeartbeat()
	cr.beatBytes = server.SegToBytes(seg)

	if cr.isServer {
		cr.connectionManager.ServerEstablished(cr.Connection)
	}
	if cr.isClient {
		topology, servers := cr.connectionManager.ClientEstablished(cr.ConnectionNumber, cr.Connection)
		cr.connectionManager.AddSender(cr.Connection)
		cr.submitter = client.NewClientTxnSubmitter(cr.connectionManager.RMId, cr.connectionManager.BootCount, topology, cr.connectionManager)
		cr.submitter.TopologyChange(nil, servers)
	}
	cr.mustSendBeat = true
	cr.missingBeats = 0

	cr.beater = newConnectionBeater(cr.Connection)
	go cr.beater.beat()

	cr.reader = newConnectionReader(cr.Connection)
	go cr.reader.read()

	return false, nil
}
func (cach *connectionAwaitClientHandshake) start() (bool, error) {
	if seg, err := cach.readAndDecryptOne(); err == nil {
		hello := msgs.ReadRootHelloFromClient(seg)
		topology := cach.connectionManager.Topology()
		un := hello.Username()
		if pw, found := topology.Accounts[un]; !found {
			return false, fmt.Errorf("Unknown user '%s'", un)
		} else if err = bcrypt.CompareHashAndPassword([]byte(pw), hello.Password()); err != nil {
			return false, fmt.Errorf("Incorrect password for '%s': %v", un, err)
		} else {
			log.Printf("User '%s' authenticated", un)
		}

		helloFromServer := cach.makeHelloFromServer(topology)
		if err := cach.send(server.SegToBytes(helloFromServer)); err != nil {
			return cach.connectionAwaitHandshake.maybeRestartConnection(err)
		}

		cach.Lock()
		cach.established = true
		cach.remoteHost = cach.socket.RemoteAddr().String()
		cach.Unlock()
		cach.nextState(nil)
		return false, nil

	} else {
		return cach.connectionAwaitHandshake.maybeRestartConnection(err)
	}
}
func (cash *connectionAwaitServerHandshake) start() (bool, error) {
	topology := cash.connectionManager.Topology()
	helloFromServer := cash.makeHelloFromServer(topology)
	if err := cash.send(server.SegToBytes(helloFromServer)); err != nil {
		return cash.connectionAwaitHandshake.maybeRestartConnection(err)
	}

	if seg, err := cash.readAndDecryptOne(); err == nil {
		hello := msgs.ReadRootHelloFromServer(seg)
		if verified, remoteTopology := cash.verifyTopology(topology, &hello); verified {
			cash.Lock()
			cash.established = true
			cash.remoteHost = hello.LocalHost()
			ns := hello.Namespace()
			cash.remoteBootCount = binary.BigEndian.Uint32(ns[4:8])
			cash.remoteRMId = common.RMId(binary.BigEndian.Uint32(ns[8:12]))
			cash.combinedTieBreak = cash.combinedTieBreak ^ hello.TieBreak()
			cash.remoteTopology = remoteTopology
			cash.Unlock()
			cash.nextState(nil)
			return false, nil

		} else {
			return cash.connectionAwaitHandshake.maybeRestartConnection(fmt.Errorf("Unequal remote topology"))
		}
	} else {
		return cash.connectionAwaitHandshake.maybeRestartConnection(err)
	}
}
func (cr *connectionRun) handleMsgFromPeer(msg *msgs.Message) error {
	if cr.currentState != cr {
		// probably just draining the queue from the reader after a restart
		return nil
	}
	cr.missingBeats = 0
	switch which := msg.Which(); which {
	case msgs.MESSAGE_HEARTBEAT:
		// do nothing
	case msgs.MESSAGE_CLIENTTXNSUBMISSION:
		ctxn := msg.ClientTxnSubmission()
		origTxnId := common.MakeTxnId(ctxn.Id())
		cr.submitter.SubmitClientTransaction(&ctxn, func(clientOutcome *msgs.ClientTxnOutcome, err error) {
			switch {
			case err != nil:
				cr.clientTxnError(&ctxn, err, origTxnId)
			case clientOutcome == nil: // shutdown
				return
			default:
				seg := capn.NewBuffer(nil)
				msg := msgs.NewRootMessage(seg)
				msg.SetClientTxnOutcome(*clientOutcome)
				cr.sendMessage(server.SegToBytes(msg.Segment))
			}
		})
	default:
		cr.connectionManager.Dispatchers.DispatchMessage(cr.remoteRMId, which, msg)
	}
	return nil
}
func (cr *connectionRun) serverError(err error) error {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	msg.SetConnectionError(err.Error())
	cr.sendMessage(server.SegToBytes(seg))
	return err
}
func (palc *proposerAwaitLocallyComplete) maybeWriteToDisk() {
	if !(palc.currentState == palc && palc.callbackInvoked && palc.allAcceptorsAgreed) {
		return
	}

	stateSeg := capn.NewBuffer(nil)
	state := msgs.NewRootProposerState(stateSeg)
	acceptorsCap := stateSeg.NewUInt32List(len(palc.acceptors))
	state.SetAcceptors(acceptorsCap)
	for idx, rmId := range palc.acceptors {
		acceptorsCap.Set(idx, uint32(rmId))
	}

	data := server.SegToBytes(stateSeg)

	future := palc.proposerManager.Disk.ReadWriteTransaction(false, func(rwtxn *mdbs.RWTxn) (interface{}, error) {
		return nil, rwtxn.Put(db.DB.Proposers, palc.txnId[:], data, 0)
	})
	go func() {
		if _, err := future.ResultError(); err != nil {
			log.Printf("Error: %v when writing proposer to disk: %v\n", palc.txnId, err)
			return
		}
		palc.proposerManager.Exe.Enqueue(palc.writeDone)
	}()
}
func (cah *connectionAwaitHandshake) start() (bool, error) {
	helloSeg := cah.makeHello()
	if err := cah.send(server.SegToBytes(helloSeg)); err != nil {
		return cah.maybeRestartConnection(err)
	}

	if seg, err := cah.readOne(); err == nil {
		hello := cmsgs.ReadRootHello(seg)
		if cah.verifyHello(&hello) {
			if hello.IsClient() {
				cah.isClient = true
				cah.nextState(&cah.connectionAwaitClientHandshake)

			} else {
				cah.isServer = true
				cah.nextState(&cah.connectionAwaitServerHandshake)
			}
			return false, nil

		} else {
			return cah.maybeRestartConnection(fmt.Errorf("Received erroneous hello from peer"))
		}
	} else {
		return cah.maybeRestartConnection(err)
	}
}
func (awtd *acceptorWriteToDisk) start() {
	outcome := awtd.outcome
	outcomeCap := (*msgs.Outcome)(outcome)
	awtd.sendToAll = awtd.sendToAll || outcomeCap.Which() == msgs.OUTCOME_COMMIT
	sendToAll := awtd.sendToAll

	stateSeg := capn.NewBuffer(nil)
	state := msgs.NewRootAcceptorState(stateSeg)
	state.SetTxn(*awtd.ballotAccumulator.Txn)
	state.SetOutcome(*outcomeCap)
	state.SetSendToAll(awtd.sendToAll)
	state.SetInstances(awtd.ballotAccumulator.AddInstancesToSeg(stateSeg))

	data := server.SegToBytes(stateSeg)

	// to ensure correct order of writes, schedule the write from
	// the current go-routine...
	server.Log(awtd.txnId, "Writing 2B to disk...")
	future := awtd.acceptorManager.Disk.ReadWriteTransaction(false, func(rwtxn *mdbs.RWTxn) (interface{}, error) {
		return nil, rwtxn.Put(db.DB.BallotOutcomes, awtd.txnId[:], data, 0)
	})
	go func() {
		// ... but process the result in a new go-routine to avoid blocking the executor.
		if _, err := future.ResultError(); err != nil {
			log.Printf("Error: %v Acceptor Write error: %v", awtd.txnId, err)
			return
		}
		server.Log(awtd.txnId, "Writing 2B to disk...done.")
		awtd.acceptorManager.Exe.Enqueue(func() { awtd.writeDone(outcome, sendToAll) })
	}()
}
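// The pattern used above (issue the disk write from the current go-routine so
// writes stay ordered, then consume the result in a fresh go-routine so the
// executor is never blocked) can be seen in isolation below. This is a minimal
// sketch under assumed names: writeFuture, diskWrite and enqueue are hypothetical
// stand-ins, not the real mdbs/dispatcher API.
type writeFuture struct{ err chan error }

func diskWrite(data []byte) *writeFuture {
	f := &writeFuture{err: make(chan error, 1)}
	go func() { f.err <- nil }() // pretend the write eventually succeeds
	return f
}

func writeThenHandOff(data []byte, enqueue func(func()), onDone func()) {
	f := diskWrite(data) // issued from the caller's go-routine: ordering preserved
	go func() {          // result handled elsewhere: the caller never blocks
		if err := <-f.err; err != nil {
			log.Println("write failed:", err)
			return
		}
		enqueue(onDone) // switch back onto the executor's go-routine
	}()
}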
func (cr *connectionRun) handleMsgFromClient(msg *cmsgs.ClientMessage) error {
	if cr.currentState != cr {
		// probably just draining the queue from the reader after a restart
		return nil
	}
	cr.missingBeats = 0
	switch which := msg.Which(); which {
	case cmsgs.CLIENTMESSAGE_HEARTBEAT:
		// do nothing
	case cmsgs.CLIENTMESSAGE_CLIENTTXNSUBMISSION:
		ctxn := msg.ClientTxnSubmission()
		origTxnId := common.MakeTxnId(ctxn.Id())
		cr.submitter.SubmitClientTransaction(&ctxn, func(clientOutcome *cmsgs.ClientTxnOutcome, err error) {
			switch {
			case err != nil:
				cr.clientTxnError(&ctxn, err, origTxnId)
			case clientOutcome == nil: // shutdown
				return
			default:
				seg := capn.NewBuffer(nil)
				msg := cmsgs.NewRootClientMessage(seg)
				msg.SetClientTxnOutcome(*clientOutcome)
				cr.sendMessage(server.SegToBytes(msg.Segment))
			}
		})
	default:
		return cr.maybeRestartConnection(fmt.Errorf("Unexpected message type received from client: %v", which))
	}
	return nil
}
func (am *AcceptorManager) OneATxnVotesReceived(sender common.RMId, txnId *common.TxnId, oneATxnVotes *msgs.OneATxnVotes) {
	instanceRMId := common.RMId(oneATxnVotes.RmId())
	server.Log(txnId, "1A received from", sender, "; instance:", instanceRMId)
	instId := instanceId([instanceIdLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])
	binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], uint32(instanceRMId))

	replySeg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(replySeg)
	oneBTxnVotes := msgs.NewOneBTxnVotes(replySeg)
	msg.SetOneBTxnVotes(oneBTxnVotes)
	oneBTxnVotes.SetRmId(oneATxnVotes.RmId())
	oneBTxnVotes.SetTxnId(oneATxnVotes.TxnId())

	proposals := oneATxnVotes.Proposals()
	promises := msgs.NewTxnVotePromiseList(replySeg, proposals.Len())
	oneBTxnVotes.SetPromises(promises)
	for idx, l := 0, proposals.Len(); idx < l; idx++ {
		proposal := proposals.At(idx)
		vUUId := common.MakeVarUUId(proposal.VarId())
		copy(instIdSlice[common.KeyLen+4:], vUUId[:])
		promise := promises.At(idx)
		promise.SetVarId(vUUId[:])
		am.ensureInstance(txnId, &instId, vUUId).OneATxnVotesReceived(&proposal, &promise)
	}

	NewOneShotSender(server.SegToBytes(replySeg), am.ConnectionManager, sender)
}
func (cach *connectionAwaitClientHandshake) start() (bool, error) {
	config := cach.commonTLSConfig()
	config.ClientAuth = tls.RequireAnyClientCert
	socket := tls.Server(cach.socket, config)
	cach.socket = socket
	if err := socket.Handshake(); err != nil {
		return false, err
	}

	if cach.topology.Root.VarUUId == nil {
		return false, errors.New("Root not yet known")
	}

	peerCerts := socket.ConnectionState().PeerCertificates
	if authenticated, hashsum := cach.verifyPeerCerts(cach.topology, peerCerts); authenticated {
		cach.peerCerts = peerCerts
		log.Printf("User '%s' authenticated", hex.EncodeToString(hashsum[:]))
	} else {
		return false, errors.New("Client connection rejected: No client certificate known")
	}

	helloFromServer := cach.makeHelloClientFromServer(cach.topology)
	if err := cach.send(server.SegToBytes(helloFromServer)); err != nil {
		return false, err
	}

	cach.remoteHost = cach.socket.RemoteAddr().String()
	cach.nextState(nil)
	return false, nil
}
func (p *proposal) maybeSendOneA() {
	pendingPromises := p.pending[:0]
	for _, pi := range p.instances {
		if pi.currentState == &pi.proposalOneA {
			pendingPromises = append(pendingPromises, pi)
		}
	}
	if len(pendingPromises) == 0 {
		return
	}
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	sender := newProposalSender(p, pendingPromises)
	oneACap := msgs.NewOneATxnVotes(seg)
	msg.SetOneATxnVotes(oneACap)
	oneACap.SetTxnId(p.txnId[:])
	oneACap.SetRmId(uint32(p.instanceRMId))
	proposals := msgs.NewTxnVoteProposalList(seg, len(pendingPromises))
	oneACap.SetProposals(proposals)
	for idx, pi := range pendingPromises {
		proposal := proposals.At(idx)
		pi.addOneAToProposal(&proposal, sender)
	}
	sender.msg = server.SegToBytes(seg)
	server.Log(p.txnId, "Adding sender for 1A")
	p.proposerManager.AddServerConnectionSubscriber(sender)
}
func (p *proposal) maybeSendTwoA() {
	pendingAccepts := p.pending[:0]
	for _, pi := range p.instances {
		if pi.currentState == &pi.proposalTwoA {
			pendingAccepts = append(pendingAccepts, pi)
		}
	}
	if len(pendingAccepts) == 0 {
		return
	}
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	sender := newProposalSender(p, pendingAccepts)
	twoACap := msgs.NewTwoATxnVotes(seg)
	msg.SetTwoATxnVotes(twoACap)
	twoACap.SetRmId(uint32(p.instanceRMId))
	acceptRequests := msgs.NewTxnVoteAcceptRequestList(seg, len(pendingAccepts))
	twoACap.SetAcceptRequests(acceptRequests)
	deflate := false
	for idx, pi := range pendingAccepts {
		acceptRequest := acceptRequests.At(idx)
		deflate = pi.addTwoAToAcceptRequest(seg, &acceptRequest, sender) || deflate
	}
	if deflate {
		deflated := deflateTxn(p.txn, seg)
		twoACap.SetTxn(*deflated)
	} else {
		twoACap.SetTxn(*p.txn)
	}
	sender.msg = server.SegToBytes(seg)
	server.Log(p.txnId, "Adding sender for 2A")
	p.proposerManager.AddServerConnectionSubscriber(sender)
}
func MakeTxnSubmissionAbortMsg(txnId *common.TxnId) []byte {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	tsa := msgs.NewTxnSubmissionAbort(seg)
	msg.SetSubmissionAbort(tsa)
	tsa.SetTxnId(txnId[:])
	return server.SegToBytes(seg)
}

func MakeTxnSubmissionCompleteMsg(txnId *common.TxnId) []byte {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	tsc := msgs.NewTxnSubmissionComplete(seg)
	msg.SetSubmissionComplete(tsc)
	tsc.SetTxnId(txnId[:])
	return server.SegToBytes(seg)
}

func MakeTxnLocallyCompleteMsg(txnId *common.TxnId) []byte {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	tlc := msgs.NewTxnLocallyComplete(seg)
	msg.SetTxnLocallyComplete(tlc)
	tlc.SetTxnId(txnId[:])
	return server.SegToBytes(seg)
}
func (sts *SimpleTxnSubmitter) SubmitTransaction(txnCap *msgs.Txn, activeRMs []common.RMId, continuation TxnCompletionConsumer, delay time.Duration) { seg := capn.NewBuffer(nil) msg := msgs.NewRootMessage(seg) msg.SetTxnSubmission(*txnCap) txnId := common.MakeTxnId(txnCap.Id()) server.Log(txnId, "Submitting txn") txnSender := paxos.NewRepeatingSender(server.SegToBytes(seg), activeRMs...) var removeSenderCh chan server.EmptyStruct if delay == 0 { sts.connPub.AddServerConnectionSubscriber(txnSender) } else { removeSenderCh = make(chan server.EmptyStruct) go func() { // fmt.Printf("%v ", delay) time.Sleep(delay) sts.connPub.AddServerConnectionSubscriber(txnSender) <-removeSenderCh sts.connPub.RemoveServerConnectionSubscriber(txnSender) }() } acceptors := paxos.GetAcceptorsFromTxn(txnCap) shutdownFun := func(shutdown bool) { delete(sts.outcomeConsumers, *txnId) // fmt.Printf("sts%v ", len(sts.outcomeConsumers)) if delay == 0 { sts.connPub.RemoveServerConnectionSubscriber(txnSender) } else { close(removeSenderCh) } // OSS is safe here - see above. paxos.NewOneShotSender(paxos.MakeTxnSubmissionCompleteMsg(txnId), sts.connPub, acceptors...) if shutdown { if txnCap.Retry() { // If this msg doesn't make it then proposers should // observe our death and tidy up anyway. If it's just this // connection shutting down then there should be no // problem with these msgs getting to the propposers. paxos.NewOneShotSender(paxos.MakeTxnSubmissionAbortMsg(txnId), sts.connPub, activeRMs...) } continuation(txnId, nil, nil) } } shutdownFunPtr := &shutdownFun sts.onShutdown[shutdownFunPtr] = server.EmptyStructVal outcomeAccumulator := paxos.NewOutcomeAccumulator(int(txnCap.FInc()), acceptors) consumer := func(sender common.RMId, txnId *common.TxnId, outcome *msgs.Outcome) { if outcome, _ = outcomeAccumulator.BallotOutcomeReceived(sender, outcome); outcome != nil { delete(sts.onShutdown, shutdownFunPtr) shutdownFun(false) continuation(txnId, outcome, nil) } } sts.outcomeConsumers[*txnId] = consumer // fmt.Printf("sts%v ", len(sts.outcomeConsumers)) }
func (cr *connectionRun) clientTxnError(ctxn *cmsgs.ClientTxn, err error, origTxnId *common.TxnId) error {
	seg := capn.NewBuffer(nil)
	msg := cmsgs.NewRootClientMessage(seg)
	outcome := cmsgs.NewClientTxnOutcome(seg)
	msg.SetClientTxnOutcome(outcome)
	if origTxnId == nil {
		outcome.SetId(ctxn.Id())
	} else {
		outcome.SetId(origTxnId[:])
	}
	outcome.SetFinalId(ctxn.Id())
	outcome.SetError(err.Error())
	return cr.sendMessage(server.SegToBytes(seg))
}
func (am *AcceptorManager) TwoATxnVotesReceived(sender common.RMId, txnId *common.TxnId, twoATxnVotes *msgs.TwoATxnVotes) {
	instanceRMId := common.RMId(twoATxnVotes.RmId())
	server.Log(txnId, "2A received from", sender, "; instance:", instanceRMId)
	instId := instanceId([instanceIdLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])
	binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], uint32(instanceRMId))

	txnCap := twoATxnVotes.Txn()
	a := am.ensureAcceptor(txnId, &txnCap)
	requests := twoATxnVotes.AcceptRequests()
	failureInstances := make([]*instance, 0, requests.Len())
	failureRequests := make([]*msgs.TxnVoteAcceptRequest, 0, requests.Len())
	for idx, l := 0, requests.Len(); idx < l; idx++ {
		request := requests.At(idx)
		vUUId := common.MakeVarUUId(request.Ballot().VarId())
		copy(instIdSlice[common.KeyLen+4:], vUUId[:])
		inst := am.ensureInstance(txnId, &instId, vUUId)
		accepted, rejected := inst.TwoATxnVotesReceived(&request)
		if accepted {
			a.BallotAccepted(instanceRMId, inst, vUUId, &txnCap)
		} else if rejected {
			failureInstances = append(failureInstances, inst)
			failureRequests = append(failureRequests, &request)
		}
	}

	if len(failureInstances) != 0 {
		replySeg := capn.NewBuffer(nil)
		msg := msgs.NewRootMessage(replySeg)
		twoBTxnVotes := msgs.NewTwoBTxnVotes(replySeg)
		msg.SetTwoBTxnVotes(twoBTxnVotes)
		twoBTxnVotes.SetFailures()
		failuresCap := twoBTxnVotes.Failures()
		failuresCap.SetTxnId(txnId[:])
		failuresCap.SetRmId(uint32(instanceRMId))
		nacks := msgs.NewTxnVoteTwoBFailureList(replySeg, len(failureInstances))
		failuresCap.SetNacks(nacks)
		for idx, inst := range failureInstances {
			failure := nacks.At(idx)
			failure.SetVarId(inst.vUUId[:])
			failure.SetRoundNumber(failureRequests[idx].RoundNumber())
			failure.SetRoundNumberTooLow(uint32(inst.promiseNum >> 32))
		}
		server.Log(txnId, "Sending 2B failures to", sender, "; instance:", instanceRMId)
		// The proposal senders are repeating, so this use of OSS is fine.
		NewOneShotSender(server.SegToBytes(replySeg), am, sender)
	}
}
func (cah *connectionAwaitHandshake) start() (bool, error) {
	helloSeg, err := cah.makeHello()
	if err != nil {
		return cah.maybeRestartConnection(err)
	}
	if err := cah.send(server.SegToBytes(helloSeg)); err != nil {
		return cah.maybeRestartConnection(err)
	}
	cah.nonce = 0

	if seg, err := capn.ReadFromStream(cah.socket, nil); err == nil {
		hello := msgs.ReadRootHello(seg)
		if cah.verifyHello(&hello) {
			sessionKey := [32]byte{}
			remotePublicKey := [32]byte{}
			copy(remotePublicKey[:], hello.PublicKey())
			box.Precompute(&sessionKey, &remotePublicKey, cah.privateKey)

			if hello.IsClient() {
				cah.Lock()
				cah.isClient = true
				cah.sessionKey = &sessionKey
				cah.Unlock()
				cah.nonceAryIn[0] = 128
				cah.nextState(&cah.connectionAwaitClientHandshake)

			} else {
				extendedKey := make([]byte, 64)
				copy(extendedKey[:32], sessionKey[:])
				copy(extendedKey[32:], cah.connectionManager.passwordHash[:])
				sessionKey = sha256.Sum256(extendedKey)
				cah.Lock()
				cah.isServer = true
				cah.sessionKey = &sessionKey
				cah.Unlock()
				if cah.remoteHost == "" {
					cah.nonceAryIn[0] = 128
				} else {
					cah.nonceAryOut[0] = 128
				}
				cah.nextState(&cah.connectionAwaitServerHandshake)
			}
			return false, nil

		} else {
			return cah.maybeRestartConnection(fmt.Errorf("Received erroneous hello from peer"))
		}
	} else {
		return cah.maybeRestartConnection(err)
	}
}
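// The key agreement above: box.Precompute derives a Curve25519 shared secret, and
// for server-server links that secret is extended with the cluster password hash
// and re-hashed, so only peers that know the password end up with the same session
// key. A minimal sketch of just that derivation (clusterHash stands in for
// connectionManager.passwordHash; the function name is hypothetical):
func deriveServerSessionKey(remotePublic, ourPrivate *[32]byte, clusterHash [32]byte) [32]byte {
	sessionKey := [32]byte{}
	box.Precompute(&sessionKey, remotePublic, ourPrivate) // golang.org/x/crypto/nacl/box
	extended := make([]byte, 64)
	copy(extended[:32], sessionKey[:])
	copy(extended[32:], clusterHash[:])
	return sha256.Sum256(extended)
}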
func TxnToRootBytes(txn *msgs.Txn) []byte {
	seg := capn.NewBuffer(nil)
	txnCap := msgs.NewRootTxn(seg)
	txnCap.SetId(txn.Id())
	txnCap.SetRetry(txn.Retry())
	txnCap.SetSubmitter(txn.Submitter())
	txnCap.SetSubmitterBootCount(txn.SubmitterBootCount())
	txnCap.SetActions(txn.Actions())
	txnCap.SetAllocations(txn.Allocations())
	txnCap.SetFInc(txn.FInc())
	txnCap.SetTopologyVersion(txn.TopologyVersion())
	return server.SegToBytes(seg)
}
func (adfd *acceptorDeleteFromDisk) deletionDone() {
	if adfd.currentState == adfd {
		adfd.nextState(nil)
		adfd.acceptorManager.AcceptorFinished(adfd.txnId)

		seg := capn.NewBuffer(nil)
		msg := msgs.NewRootMessage(seg)
		tgc := msgs.NewTxnGloballyComplete(seg)
		msg.SetTxnGloballyComplete(tgc)
		tgc.SetTxnId(adfd.txnId[:])
		server.Log(adfd.txnId, "Sending TGC to", adfd.tgcRecipients)
		NewOneShotSender(server.SegToBytes(seg), adfd.acceptorManager.ConnectionManager, adfd.tgcRecipients...)
	}
}
func (sts *SimpleTxnSubmitter) SubmitTransaction(txnCap *msgs.Txn, activeRMs []common.RMId, continuation TxnCompletionConsumer, delay time.Duration) { seg := capn.NewBuffer(nil) msg := msgs.NewRootMessage(seg) msg.SetTxnSubmission(*txnCap) txnId := common.MakeTxnId(txnCap.Id()) server.Log(txnId, "Submitting txn") txnSender := paxos.NewRepeatingSender(server.SegToBytes(seg), activeRMs...) if delay == 0 { sts.connectionManager.AddSender(txnSender) } else { go func() { // fmt.Printf("%v ", delay) time.Sleep(delay) sts.connectionManager.AddSender(txnSender) }() } acceptors := paxos.GetAcceptorsFromTxn(txnCap) shutdownFun := func(shutdown bool) { delete(sts.outcomeConsumers, *txnId) // fmt.Printf("sts%v ", len(sts.outcomeConsumers)) sts.connectionManager.RemoveSenderAsync(txnSender) paxos.NewOneShotSender(paxos.MakeTxnSubmissionCompleteMsg(txnId), sts.connectionManager, acceptors...) if shutdown { if txnCap.Retry() { paxos.NewOneShotSender(paxos.MakeTxnSubmissionAbortMsg(txnId), sts.connectionManager, activeRMs...) } continuation(txnId, nil) } } shutdownFunPtr := &shutdownFun sts.onShutdown[shutdownFunPtr] = server.EmptyStructVal outcomeAccumulator := paxos.NewOutcomeAccumulator(int(txnCap.FInc()), acceptors) consumer := func(sender common.RMId, txnId *common.TxnId, outcome *msgs.Outcome) { if outcome, _ = outcomeAccumulator.BallotOutcomeReceived(sender, outcome); outcome != nil { delete(sts.onShutdown, shutdownFunPtr) shutdownFun(false) continuation(txnId, outcome) } } sts.outcomeConsumers[*txnId] = consumer // fmt.Printf("sts%v ", len(sts.outcomeConsumers)) }
func (am *AcceptorManager) TxnLocallyCompleteReceived(sender common.RMId, txnId *common.TxnId, tlc *msgs.TxnLocallyComplete) {
	if aInst, found := am.acceptors[*txnId]; found && aInst.acceptor != nil {
		server.Log(txnId, "TLC received from", sender, "(acceptor found)")
		aInst.acceptor.TxnLocallyCompleteReceived(sender)

	} else {
		// We must have deleted the acceptor state from disk,
		// immediately prior to sending TGC, and then died. Now we're
		// back up, the proposers have sent us more TLCs, and we should
		// just reply with TGCs.
		server.Log(txnId, "TLC received from", sender, "(acceptor not found)")
		seg := capn.NewBuffer(nil)
		msg := msgs.NewRootMessage(seg)
		tgc := msgs.NewTxnGloballyComplete(seg)
		msg.SetTxnGloballyComplete(tgc)
		tgc.SetTxnId(txnId[:])
		server.Log(txnId, "Sending single TGC to", sender)
		NewOneShotSender(server.SegToBytes(seg), am.ConnectionManager, sender)
	}
}
func (cr *connectionRun) start() (bool, error) { log.Printf("Connection established to %v (%v)\n", cr.remoteHost, cr.remoteRMId) cr.restart = true seg := capn.NewBuffer(nil) if cr.isClient { message := cmsgs.NewRootClientMessage(seg) message.SetHeartbeat() } else { message := msgs.NewRootMessage(seg) message.SetHeartbeat() } cr.beatBytes = server.SegToBytes(seg) if cr.isServer { cr.connectionManager.ServerEstablished(cr.Connection, cr.remoteHost, cr.remoteRMId, cr.remoteBootCount, cr.combinedTieBreak, cr.remoteRootId) } if cr.isClient { servers := cr.connectionManager.ClientEstablished(cr.ConnectionNumber, cr.Connection) cr.submitter = client.NewClientTxnSubmitter(cr.connectionManager.RMId, cr.connectionManager.BootCount, cr.connectionManager) cr.submitter.TopologyChanged(cr.topology) cr.submitter.ServerConnectionsChanged(servers) } cr.mustSendBeat = true cr.missingBeats = 0 cr.beater = newConnectionBeater(cr.Connection) go cr.beater.beat() cr.reader = newConnectionReader(cr.Connection) if cr.isClient { go cr.reader.readClient() } else { go cr.reader.readServer() } return false, nil }
func (cash *connectionAwaitServerHandshake) start() (bool, error) {
	// TLS seems to require us to pick one end as the client and one
	// end as the server even though in a server-server connection we
	// really don't care which is which.
	config := cash.commonTLSConfig()
	if cash.remoteHost == "" {
		// We came from the listener, so we're going to act as the server.
		config.ClientAuth = tls.RequireAndVerifyClientCert
		cash.socket = tls.Server(cash.socket, config)

	} else {
		config.InsecureSkipVerify = true
		socket := tls.Client(cash.socket, config)
		cash.socket = socket

		// This is nuts: as a server, we can demand the client cert and
		// verify that without any concept of a client name. But as the
		// client, if we don't have a server name, then we have to do
		// the verification ourselves. Why is TLS asymmetric?!
		if err := socket.Handshake(); err != nil {
			return cash.connectionAwaitHandshake.maybeRestartConnection(err)
		}
		opts := x509.VerifyOptions{
			Roots:         config.RootCAs,
			DNSName:       "", // disable server name checking
			Intermediates: x509.NewCertPool(),
		}
		certs := socket.ConnectionState().PeerCertificates
		for i, cert := range certs {
			if i == 0 {
				continue
			}
			opts.Intermediates.AddCert(cert)
		}
		if _, err := certs[0].Verify(opts); err != nil {
			return cash.connectionAwaitHandshake.maybeRestartConnection(err)
		}
	}

	helloFromServer := cash.makeHelloServerFromServer(cash.topology)
	if err := cash.send(server.SegToBytes(helloFromServer)); err != nil {
		return cash.connectionAwaitHandshake.maybeRestartConnection(err)
	}

	if seg, err := cash.readOne(); err == nil {
		hello := msgs.ReadRootHelloServerFromServer(seg)
		if cash.verifyTopology(cash.topology, &hello) {
			cash.remoteHost = hello.LocalHost()
			cash.remoteRMId = common.RMId(hello.RmId())

			if _, found := cash.topology.RMsRemoved()[cash.remoteRMId]; found {
				return false, cash.serverError(
					fmt.Errorf("%v has been removed from topology and may not rejoin.", cash.remoteRMId))
			}

			rootId := hello.RootId()
			if len(rootId) == common.KeyLen {
				cash.remoteRootId = common.MakeVarUUId(rootId)
			}
			cash.remoteBootCount = hello.BootCount()
			cash.combinedTieBreak = cash.combinedTieBreak ^ hello.TieBreak()
			cash.nextState(nil)
			return false, nil

		} else {
			return cash.connectionAwaitHandshake.maybeRestartConnection(fmt.Errorf("Unequal remote topology"))
		}
	} else {
		return cash.connectionAwaitHandshake.maybeRestartConnection(err)
	}
}
func (config *Configuration) Serialize() []byte {
	seg := capn.NewBuffer(nil)
	config.AddToSegAutoRoot(seg)
	return server.SegToBytes(seg)
}
func (v *Var) maybeWriteFrame(f *frame, action *localAction, positions *common.Positions) {
	if v.writeInProgress != nil {
		v.writeInProgress = func() {
			v.writeInProgress = nil
			v.maybeWriteFrame(f, action, positions)
		}
		return
	}
	v.writeInProgress = func() {
		v.writeInProgress = nil
		v.maybeMakeInactive()
	}

	oldVarCap := *v.varCap

	varSeg := capn.NewBuffer(nil)
	varCap := msgs.NewRootVar(varSeg)
	v.varCap = &varCap
	varCap.SetId(oldVarCap.Id())

	if positions != nil {
		varCap.SetPositions(capn.UInt8List(*positions))
	} else {
		varCap.SetPositions(oldVarCap.Positions())
	}

	varCap.SetWriteTxnId(f.frameTxnId[:])
	varCap.SetWriteTxnClock(f.frameTxnClock.AddToSeg(varSeg))
	varCap.SetWritesClock(f.frameWritesClock.AddToSeg(varSeg))
	varData := server.SegToBytes(varSeg)

	txnBytes := action.TxnRootBytes()

	// to ensure correct order of writes, schedule the write from
	// the current go-routine...
	future := v.disk.ReadWriteTransaction(false, func(rwtxn *mdbs.RWTxn) (interface{}, error) {
		if err := db.WriteTxnToDisk(rwtxn, f.frameTxnId, txnBytes); err != nil {
			return nil, err
		}
		if err := rwtxn.Put(db.DB.Vars, v.UUId[:], varData, 0); err != nil {
			return nil, err
		}
		if v.curFrameOnDisk != nil {
			return nil, db.DeleteTxnFromDisk(rwtxn, v.curFrameOnDisk.frameTxnId)
		}
		return nil, nil
	})
	go func() {
		// ... but process the result in a new go-routine to avoid blocking the executor.
		if _, err := future.ResultError(); err != nil {
			log.Println("Var error when writing to disk:", err)
			return
		}
		// Switch back to the right go-routine
		v.applyToVar(func() {
			server.Log(v.UUId, "Wrote", f.frameTxnId)
			v.curFrameOnDisk = f
			for ancestor := f.parent; ancestor != nil && ancestor.DescendentOnDisk(); ancestor = ancestor.parent {
			}
			v.writeInProgress()
		})
	}()
}
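// The writeInProgress closures above coalesce overlapping writes: while a write is
// in flight, a new request only replaces the continuation, which re-runs the write
// once the current one lands; when idle, a default continuation is installed first
// so that the completing write always has something to call. A minimal,
// self-contained sketch of that idiom (writer and requestWrite are hypothetical
// names, not the Var API):
type writer struct {
	inProgress func() // nil when idle; otherwise invoked when the active write completes
}

func (w *writer) requestWrite(do func(onDone func())) {
	if w.inProgress != nil {
		w.inProgress = func() { // park this request; retry after the active write
			w.inProgress = nil
			w.requestWrite(do)
		}
		return
	}
	w.inProgress = func() { w.inProgress = nil } // default continuation: go idle
	do(func() { w.inProgress() })                // caller fires onDone when the write lands
}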