Example #1
func (fo *frameOpen) maybeStartRoll() {
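	// Only start a roll when no roll is already active, the frame is in
	// its open state with no child, no writes are queued and the var's
	// positions are known, and either some read has committed against
	// this frame, or the frame txn's clock covers more vars than its
	// actions while no parent, reads or learnt future reads remain.
	// Otherwise just (re)schedule a later check.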
	if !fo.rollActive && fo.currentState == fo && fo.child == nil && fo.writes.Len() == 0 && fo.v.positions != nil &&
		(fo.reads.Len() > fo.uncommittedReads || (len(fo.frameTxnClock.Clock) > fo.frameTxnActions.Len() && fo.parent == nil && fo.reads.Len() == 0 && len(fo.learntFutureReads) == 0)) {
		fo.rollActive = true
		ctxn, varPosMap := fo.createRollClientTxn()
		go func() {
			server.Log(fo.frame, "Starting roll")
			outcome, err := fo.v.vm.RunClientTransaction(ctxn, varPosMap, true)
			ow := ""
			if outcome != nil {
				ow = fmt.Sprint(outcome.Which())
				if outcome.Which() == msgs.OUTCOME_ABORT {
					ow += fmt.Sprintf("-%v", outcome.Abort().Which())
				}
			}
			// fmt.Printf("r%v ", ow)
			server.Log(fo.frame, "Roll finished: outcome", ow, "; err:", err)
			if outcome == nil || outcome.Which() != msgs.OUTCOME_COMMIT {
				fo.v.applyToVar(func() {
					fo.rollActive = false
					if outcome != nil {
						fo.maybeScheduleRoll()
					}
				})
			}
		}()
	} else {
		fo.maybeScheduleRoll()
	}
}
Example #2
func (sts *SimpleTxnSubmitter) TopologyChange(topology *server.Topology, servers map[common.RMId]paxos.Connection) {
	if topology != nil {
		server.Log("TM setting topology to", topology)
		sts.topology = topology
		sts.resolver = ch.NewResolver(sts.rng, topology.AllRMs)
		sts.hashCache.SetResolverDesiredLen(sts.resolver, topology.AllRMs.NonEmptyLen())
		if topology.RootVarUUId != nil {
			sts.hashCache.AddPosition(topology.RootVarUUId, topology.RootPositions)
		}

		if !topology.Equal(server.BlankTopology) && sts.bufferedSubmissions != nil {
			funcs := sts.bufferedSubmissions
			sts.bufferedSubmissions = nil
			for _, fun := range funcs {
				fun()
			}
		}
	}
	if servers != nil {
		sts.disabledHashCodes = make(map[common.RMId]server.EmptyStruct, len(sts.topology.AllRMs))
		for _, rmId := range sts.topology.AllRMs {
			if _, found := servers[rmId]; !found {
				sts.disabledHashCodes[rmId] = server.EmptyStructVal
			}
		}
		sts.connections = servers
		server.Log("TM disabled hash codes", sts.disabledHashCodes)
	}
}
Example #3
func (awtd *acceptorWriteToDisk) start() {
	outcome := awtd.outcome
	outcomeCap := (*msgs.Outcome)(outcome)
	awtd.sendToAll = awtd.sendToAll || outcomeCap.Which() == msgs.OUTCOME_COMMIT
	sendToAll := awtd.sendToAll
	stateSeg := capn.NewBuffer(nil)
	state := msgs.NewRootAcceptorState(stateSeg)
	state.SetTxn(*awtd.ballotAccumulator.Txn)
	state.SetOutcome(*outcomeCap)
	state.SetSendToAll(awtd.sendToAll)
	state.SetInstances(awtd.ballotAccumulator.AddInstancesToSeg(stateSeg))

	data := server.SegToBytes(stateSeg)

	// to ensure correct order of writes, schedule the write from
	// the current go-routine...
	server.Log(awtd.txnId, "Writing 2B to disk...")
	future := awtd.acceptorManager.Disk.ReadWriteTransaction(false, func(rwtxn *mdbs.RWTxn) (interface{}, error) {
		return nil, rwtxn.Put(db.DB.BallotOutcomes, awtd.txnId[:], data, 0)
	})
	go func() {
		// ... but process the result in a new go-routine to avoid blocking the executor.
		if _, err := future.ResultError(); err != nil {
			log.Printf("Error: %v Acceptor Write error: %v", awtd.txnId, err)
			return
		}
		server.Log(awtd.txnId, "Writing 2B to disk...done.")
		awtd.acceptorManager.Exe.Enqueue(func() { awtd.writeDone(outcome, sendToAll) })
	}()
}
Example #4
// topologySubscribers
func (subs topologySubscribers) TopologyChanged(topology *configuration.Topology, callbacks map[eng.TopologyChangeSubscriberType]func()) {
	for subType, subsMap := range subs.subscribers {
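		// subType is copied so the goroutine launched below does not
		// observe the loop variable changing on later iterations.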
		subTypeCopy := subType
		subCount := len(subsMap)
		resultChan := make(chan bool, subCount)
		done := func(success bool) { resultChan <- success }
		for sub := range subsMap {
			sub.TopologyChanged(topology, done)
		}
		if cb, found := callbacks[eng.TopologyChangeSubscriberType(subType)]; found {
			cbCopy := cb
			go func() {
				server.Log("CM TopologyChanged", subTypeCopy, "expects", subCount, "Dones")
				for subCount > 0 {
					if result := <-resultChan; result {
						subCount--
					} else {
						server.Log("CM TopologyChanged", subTypeCopy, "failed")
						return
					}
				}
				server.Log("CM TopologyChanged", subTypeCopy, "all done")
				cbCopy()
			}()
		}
	}
}
Example #5
// from network
func (pm *ProposerManager) TxnSubmissionAbortReceived(sender common.RMId, txnId *common.TxnId) {
	if proposer, found := pm.proposers[*txnId]; found {
		server.Log(txnId, "TSA received from", sender, "(proposer found)")
		proposer.Abort()
	} else {
		server.Log(txnId, "TSA received from", sender, "(ignored)")
	}
}
Example #6
// from network
func (pm *ProposerManager) TxnGloballyCompleteReceived(sender common.RMId, txnId *common.TxnId) {
	if proposer, found := pm.proposers[*txnId]; found {
		server.Log(txnId, "TGC received from", sender, "(proposer found)")
		proposer.TxnGloballyCompleteReceived(sender)
	} else {
		server.Log(txnId, "TGC received from", sender, "(ignored)")
	}
}
Example #7
func (fo *frameOpen) ReadLearnt(action *localAction) bool {
	txn := action.Txn
	if fo.currentState != fo {
		panic(fmt.Sprintf("%v ReadLearnt called for %v with frame in state %v", fo.v, txn, fo.currentState))
	}
	actClockElem := action.outcomeClock.Clock[*fo.v.UUId] - 1
	if action.outcomeClock.Clock[*fo.v.UUId] == 0 {
		panic("Just did 0 - 1 in int64")
	}
	reqClockElem := fo.frameTxnClock.Clock[*fo.v.UUId]
	if action.readVsn.Compare(fo.frameTxnId) != common.EQ {
		// The write would be one less than the read. We want to know if
		// this read is of a write before or after our current frame
		// write. If the clock elems are equal then the read _must_ be
		// of a write that is after this frame write and we created this
		// frame "early", so we should store the read. That means we
		// should only ignore this read if its write clock elem is < our
		// frame write clock elem.
		if actClockElem < reqClockElem {
			server.Log(fo.frame, "ReadLearnt", txn, "ignored, too old")
			return false
		} else {
			server.Log(fo.frame, "ReadLearnt", txn, "of future frame")
			fo.learntFutureReads = append(fo.learntFutureReads, action)
			action.frame = fo.frame
			return true
		}
	}
	if actClockElem != reqClockElem {
		panic(fmt.Sprintf("%v oddness in read learnt: read is of right version, but clocks differ (action=%v != frame=%v) (%v)", fo.frame, actClockElem, reqClockElem, action))
	}
	if fo.reads.Get(action) == nil {
		fo.reads.Insert(action, committed)
		action.frame = fo.frame
		// If we had voted on this txn (rather than learning it), then
		// every element within our readVoteClock we would find within
		// the action.outcomeClock (albeit possibly at a later
		// version). Thus if anything within our readVoteClock is _not_
		// in the action.outcomeClock then we know that we must be
		// missing some TGCs - essentially we can infer TGCs by
		// observing the outcome clocks on future txns we learn.
		for k, v := range fo.readVoteClock.Clock {
			if _, found := action.outcomeClock.Clock[k]; !found {
				fo.mask.SetVarIdMax(k, v)
			}
		}
		server.Log(fo.frame, "ReadLearnt", txn, "uncommittedReads:", fo.uncommittedReads, "uncommittedWrites:", fo.uncommittedWrites)
		fo.maybeScheduleRoll()
		return true
	} else {
		panic(fmt.Sprintf("%v ReadLearnt called for known txn %v", fo.frame, txn))
	}
}
Example #8
func (pro *proposerReceiveOutcomes) BallotOutcomeReceived(sender common.RMId, outcome *msgs.Outcome) {
	server.Log(pro.txnId, "Ballot outcome received from", sender)
	if pro.mode == proposerTLCSender {
		// Consensus already reached and we've been to disk. So this
		// *must* be a duplicate: safe to ignore.

		// Even in the case where it's a retry, we actually don't care
		// that we could be receiving this *after* sending a TLC because
		// all we need to know is that it aborted, not the details.
		return
	}

	outcome, allAgreed := pro.outcomeAccumulator.BallotOutcomeReceived(sender, outcome)
	if allAgreed {
		pro.allAcceptorsAgree()
	}
	if outcome == nil && pro.mode == ProposerPassiveLearner {
		if knownAcceptors := pro.outcomeAccumulator.IsAllAborts(); knownAcceptors != nil {
			// As a passiveLearner, we started this proposer through
			// receiving a commit outcome. However, that has changed due
			// to failures, and every outcome we have is for the same
			// abort. Therefore we're abandoning this learner, and
			// sending TLCs immediately to everyone we've received the
			// abort outcome from.
			server.Log(pro.txnId, "abandoning learner with all aborts", knownAcceptors)
			pro.proposerManager.FinishProposers(pro.txnId)
			pro.proposerManager.TxnFinished(pro.txnId)
			tlcMsg := MakeTxnLocallyCompleteMsg(pro.txnId)
			// We are destroying our state here. Thus even if this msg
			// goes missing, if the acceptor sends us further 2Bs then
			// we'll send back further TLCs from proposer manager. So the
			// use of OSS here is correct.
			NewOneShotSender(tlcMsg, pro.proposerManager, knownAcceptors...)
			return
		}
	}
	if pro.outcome == nil && outcome != nil {
		pro.outcome = outcome
		// It's possible that we're an activeVoter, and whilst our vars
		// are figuring out their votes, we receive enough ballot
		// outcomes from acceptors to determine the overall outcome. We
		// should only advance to the next state if we're currently
		// waiting for ballot outcomes.
		if pro.currentState == pro {
			pro.nextState()
		} else if pro.currentState == &pro.proposerAwaitBallots && pro.txn.Retry {
			// Advance currentState to proposerReceiveOutcomes, the
			// start() of which will immediately call nextState() again.
			pro.nextState()
		}
	}
}
Example #9
func (am *AcceptorManager) TwoATxnVotesReceived(sender common.RMId, txnId *common.TxnId, twoATxnVotes *msgs.TwoATxnVotes) {
	instanceRMId := common.RMId(twoATxnVotes.RmId())
	server.Log(txnId, "2A received from", sender, "; instance:", instanceRMId)
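	// The paxos instance id is the txn id followed by the big-endian
	// instance RM id and, per accept request below, the var id.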
	instId := instanceId([instanceIdLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])
	binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], uint32(instanceRMId))

	txnCap := twoATxnVotes.Txn()
	a := am.ensureAcceptor(txnId, &txnCap)
	requests := twoATxnVotes.AcceptRequests()
	failureInstances := make([]*instance, 0, requests.Len())
	failureRequests := make([]*msgs.TxnVoteAcceptRequest, 0, requests.Len())

	for idx, l := 0, requests.Len(); idx < l; idx++ {
		request := requests.At(idx)
		vUUId := common.MakeVarUUId(request.Ballot().VarId())
		copy(instIdSlice[common.KeyLen+4:], vUUId[:])
		inst := am.ensureInstance(txnId, &instId, vUUId)
		accepted, rejected := inst.TwoATxnVotesReceived(&request)
		if accepted {
			a.BallotAccepted(instanceRMId, inst, vUUId, &txnCap)
		} else if rejected {
			failureInstances = append(failureInstances, inst)
			failureRequests = append(failureRequests, &request)
		}
	}

	if len(failureInstances) != 0 {
		replySeg := capn.NewBuffer(nil)
		msg := msgs.NewRootMessage(replySeg)
		twoBTxnVotes := msgs.NewTwoBTxnVotes(replySeg)
		msg.SetTwoBTxnVotes(twoBTxnVotes)
		twoBTxnVotes.SetFailures()
		failuresCap := twoBTxnVotes.Failures()
		failuresCap.SetTxnId(txnId[:])
		failuresCap.SetRmId(uint32(instanceRMId))
		nacks := msgs.NewTxnVoteTwoBFailureList(replySeg, len(failureInstances))
		failuresCap.SetNacks(nacks)
		for idx, inst := range failureInstances {
			failure := nacks.At(idx)
			failure.SetVarId(inst.vUUId[:])
			failure.SetRoundNumber(failureRequests[idx].RoundNumber())
			failure.SetRoundNumberTooLow(uint32(inst.promiseNum >> 32))
		}
		server.Log(txnId, "Sending 2B failures to", sender, "; instance:", instanceRMId)
		// The proposal senders are repeating, so this use of OSS is fine.
		NewOneShotSender(server.SegToBytes(replySeg), am, sender)
	}
}
Example #10
func (p *proposal) maybeSendOneA() {
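	// Collect every instance still in its 1A phase and, if there are
	// any, batch their proposals into a single 1A message behind one
	// sender.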
	pendingPromises := p.pending[:0]
	for _, pi := range p.instances {
		if pi.currentState == &pi.proposalOneA {
			pendingPromises = append(pendingPromises, pi)
		}
	}
	if len(pendingPromises) == 0 {
		return
	}
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	sender := newProposalSender(p, pendingPromises)
	oneACap := msgs.NewOneATxnVotes(seg)
	msg.SetOneATxnVotes(oneACap)
	oneACap.SetTxnId(p.txnId[:])
	oneACap.SetRmId(uint32(p.instanceRMId))
	proposals := msgs.NewTxnVoteProposalList(seg, len(pendingPromises))
	oneACap.SetProposals(proposals)
	for idx, pi := range pendingPromises {
		proposal := proposals.At(idx)
		pi.addOneAToProposal(&proposal, sender)
	}
	sender.msg = server.SegToBytes(seg)
	server.Log(p.txnId, "Adding sender for 1A")
	p.proposerManager.AddServerConnectionSubscriber(sender)
}
Example #11
func (s *proposalSender) finished() {
	if !s.done {
		s.done = true
		server.Log("Removing proposal sender")
		s.proposerManager.RemoveServerConnectionSubscriber(s)
	}
}
Example #12
func (p *proposal) maybeSendTwoA() {
	pendingAccepts := p.pending[:0]
	for _, pi := range p.instances {
		if pi.currentState == &pi.proposalTwoA {
			pendingAccepts = append(pendingAccepts, pi)
		}
	}
	if len(pendingAccepts) == 0 {
		return
	}
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	sender := newProposalSender(p, pendingAccepts)
	twoACap := msgs.NewTwoATxnVotes(seg)
	msg.SetTwoATxnVotes(twoACap)
	twoACap.SetRmId(uint32(p.instanceRMId))
	acceptRequests := msgs.NewTxnVoteAcceptRequestList(seg, len(pendingAccepts))
	twoACap.SetAcceptRequests(acceptRequests)
	deflate := false
	for idx, pi := range pendingAccepts {
		acceptRequest := acceptRequests.At(idx)
		deflate = pi.addTwoAToAcceptRequest(seg, &acceptRequest, sender) || deflate
	}
	if deflate {
		deflated := deflateTxn(p.txn, seg)
		twoACap.SetTxn(*deflated)
	} else {
		twoACap.SetTxn(*p.txn)
	}
	sender.msg = server.SegToBytes(seg)
	server.Log(p.txnId, "Adding sender for 2A")
	p.proposerManager.AddServerConnectionSubscriber(sender)
}
Example #13
func (subs topologySubscribers) AddSubscriber(subType eng.TopologyChangeSubscriberType, ob eng.TopologySubscriber) {
	if _, found := subs.subscribers[subType][ob]; found {
		server.Log(ob, "CM found duplicate add topology subscriber")
	} else {
		subs.subscribers[subType][ob] = server.EmptyStructVal
	}
}
Example #14
func (v *Var) ReceiveTxn(action *localAction) {
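	// Dispatch the action to the var's current frame. A retry read that
	// cannot vote yet subscribes to future writes of this var and
	// re-attempts the read on each write until a vote is cast, at which
	// point the subscription is removed.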
	server.Log(v.UUId, "ReceiveTxn", action)
	isRead, isWrite := action.IsRead(), action.IsWrite()

	if isRead && action.Retry {
		if voted := v.curFrame.ReadRetry(action); !voted {
			v.AddWriteSubscriber(action.Id,
				func(v *Var, value []byte, refs *msgs.VarIdPos_List, newtxn *Txn) {
					if voted := v.curFrame.ReadRetry(action); voted {
						v.RemoveWriteSubscriber(action.Id)
					}
				})
		}
		return
	}

	switch {
	case isRead && isWrite:
		v.curFrame.AddReadWrite(action)
	case isRead:
		v.curFrame.AddRead(action)
	default:
		v.curFrame.AddWrite(action)
	}
}
Example #15
func (p *Proposer) TopologyChange(topology *configuration.Topology) {
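	// Ignore repeated notifications of the same topology. Otherwise
	// record it and, unless this RM was itself removed, drop removed RMs
	// from the acceptor set and react according to the current state.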
	if topology == p.topology {
		return
	}
	p.topology = topology
	rmsRemoved := topology.RMsRemoved()
	server.Log("proposer", p.txnId, "in", p.currentState, "sees loss of", rmsRemoved)
	if _, found := rmsRemoved[p.proposerManager.RMId]; found {
		return
	}
	// create new acceptors slice because the initial slice can be
	// shared with proposals.
	acceptors := make([]common.RMId, 0, len(p.acceptors))
	for _, rmId := range p.acceptors {
		if _, found := rmsRemoved[rmId]; !found {
			acceptors = append(acceptors, rmId)
		}
	}
	p.acceptors = acceptors

	switch p.currentState {
	case &p.proposerAwaitBallots, &p.proposerReceiveOutcomes, &p.proposerAwaitLocallyComplete:
		if p.outcomeAccumulator.TopologyChange(topology) {
			p.allAcceptorsAgree()
		}
	case &p.proposerReceiveGloballyComplete:
		for rmId := range rmsRemoved {
			p.TxnGloballyCompleteReceived(rmId)
		}
	case &p.proposerAwaitFinished:
		// do nothing
	}
}
Example #16
func VarFromData(data []byte, exe *dispatcher.Executor, disk *mdbs.MDBServer, vm *VarManager) (*Var, error) {
	seg, _, err := capn.ReadFromMemoryZeroCopy(data)
	if err != nil {
		return nil, err
	}
	varCap := msgs.ReadRootVar(seg)

	v := newVar(common.MakeVarUUId(varCap.Id()), exe, disk, vm)
	positions := varCap.Positions()
	if positions.Len() != 0 {
		v.positions = (*common.Positions)(&positions)
	}

	writeTxnId := common.MakeTxnId(varCap.WriteTxnId())
	writeTxnClock := VectorClockFromCap(varCap.WriteTxnClock())
	writesClock := VectorClockFromCap(varCap.WritesClock())
	server.Log(v.UUId, "Restored", writeTxnId)

	if result, err := disk.ReadonlyTransaction(func(rtxn *mdbs.RTxn) (interface{}, error) {
		return db.ReadTxnFromDisk(rtxn, writeTxnId)
	}).ResultError(); err == nil {
		if result == nil || result.(*msgs.Txn) == nil {
			panic(fmt.Sprintf("%v Unable to find txn %v on disk (%v)", v.UUId, writeTxnId, result))
		}
		actions := result.(*msgs.Txn).Actions()
		v.curFrame = NewFrame(nil, v, writeTxnId, &actions, writeTxnClock, writesClock)
		v.curFrameOnDisk = v.curFrame
	} else {
		return nil, err
	}

	v.varCap = &varCap

	return v, nil
}
Example #17
func (palc *proposerAwaitLocallyComplete) TxnLocallyComplete() {
	if palc.currentState == palc && !palc.callbackInvoked {
		server.Log(palc.txnId, "Txn locally completed")
		palc.callbackInvoked = true
		palc.maybeWriteToDisk()
	}
}
Example #18
func (vm *VarManager) checkAllDisk() {
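	// If an onDisk callback is pending, invoke it (and ban further
	// rolls) once every active var other than the topology var has
	// reached disk.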
	if od := vm.onDisk; od != nil {
		for _, v := range vm.active {
			if v.UUId.Compare(configuration.TopologyVarUUId) != common.EQ && !v.isOnDisk(true) {
				if !vm.RollAllowed {
					server.Log("VarManager", fmt.Sprintf("%p", vm), "WTF?! rolls are banned, but have var", v.UUId, "not on disk!")
				}
				return
			}
		}
		vm.onDisk = nil
		vm.RollAllowed = false
		server.Log("VarManager", fmt.Sprintf("%p", vm), "Rolls banned; calling done", fmt.Sprintf("%p", od))
		od(true)
	}
}
Example #19
func (fo *frameOpen) AddReadWrite(action *localAction) {
	txn := action.Txn
	server.Log(fo.frame, "AddReadWrite", txn, action.readVsn)
	switch {
	case fo.currentState != fo:
		panic(fmt.Sprintf("%v AddReadWrite called for %v with frame in state %v", fo.v, txn, fo.currentState))
	case fo.writeVoteClock != nil || fo.writes.Len() != 0 || (fo.maxUncommittedRead != nil && action.LessThan(fo.maxUncommittedRead)) || fo.frameTxnActions == nil || len(fo.learntFutureReads) != 0 || (!action.IsRoll() && fo.isLocked()):
		action.VoteDeadlock(fo.frameTxnClock)
	case !fo.frameTxnId.Equal(action.readVsn):
		action.VoteBadRead(fo.frameTxnClock, fo.frameTxnId, fo.frameTxnActions)
		fo.v.maybeMakeInactive()
	case fo.writes.Get(action) == nil:
		fo.rwPresent = true
		fo.uncommittedWrites++
		action.frame = fo.frame
		if fo.uncommittedReads == 0 {
			fo.writes.Insert(action, uncommitted)
			fo.calculateWriteVoteClock()
			if !action.VoteCommit(fo.writeVoteClock) {
				fo.ReadWriteAborted(action, true)
			}
		} else {
			fo.writes.Insert(action, postponed)
		}
	default:
		panic(fmt.Sprintf("%v AddReadWrite called for known txn %v", fo.frame, txn))
	}
}
Example #20
func (fo *frameOpen) AddRead(action *localAction) {
	txn := action.Txn
	server.Log(fo.frame, "AddRead", txn, action.readVsn)
	switch {
	case fo.currentState != fo:
		panic(fmt.Sprintf("%v AddRead called for %v with frame in state %v", fo.v, txn, fo.currentState))
	case fo.writeVoteClock != nil || (fo.writes.Len() != 0 && fo.writes.First().Key.LessThan(action)) || fo.frameTxnActions == nil || fo.isLocked():
		// We could have learnt a write at this point but we're still fine to accept smaller reads.
		action.VoteDeadlock(fo.frameTxnClock)
	case !fo.frameTxnId.Equal(action.readVsn):
		action.VoteBadRead(fo.frameTxnClock, fo.frameTxnId, fo.frameTxnActions)
		fo.v.maybeMakeInactive()
	case fo.reads.Get(action) == nil:
		fo.uncommittedReads++
		fo.reads.Insert(action, uncommitted)
		if fo.maxUncommittedRead == nil || fo.maxUncommittedRead.LessThan(action) {
			fo.maxUncommittedRead = action
		}
		action.frame = fo.frame
		if !action.VoteCommit(fo.readVoteClock) {
			fo.ReadAborted(action)
		}
	default:
		panic(fmt.Sprintf("%v AddRead called for known txn %v", fo.frame, txn))
	}
}
Example #21
func (fo *frameOpen) AddWrite(action *localAction) {
	txn := action.Txn
	server.Log(fo.frame, "AddWrite", txn)
	cid := txn.Id.ClientId()
	_, found := fo.clientWrites[cid]
	switch {
	case fo.currentState != fo:
		panic(fmt.Sprintf("%v AddWrite called for %v with frame in state %v", fo.v, txn, fo.currentState))
	case fo.rwPresent || (fo.maxUncommittedRead != nil && action.LessThan(fo.maxUncommittedRead)) || found || len(fo.learntFutureReads) != 0 || fo.isLocked():
		action.VoteDeadlock(fo.frameTxnClock)
	case fo.writes.Get(action) == nil:
		fo.uncommittedWrites++
		fo.clientWrites[cid] = server.EmptyStructVal
		action.frame = fo.frame
		if fo.uncommittedReads == 0 {
			fo.writes.Insert(action, uncommitted)
			fo.calculateWriteVoteClock()
			if !action.VoteCommit(fo.writeVoteClock) {
				fo.WriteAborted(action, true)
			}
		} else {
			fo.writes.Insert(action, postponed)
		}
	default:
		panic(fmt.Sprintf("%v AddWrite called for known txn %v", fo.frame, txn))
	}
}
Example #22
// Callback (from network/paxos)
func (trc *txnReceiveCompletion) CompletionReceived() {
	server.Log(trc.Id, "CompletionReceived; already completed?", trc.completed, "state:", trc.currentState, "aborted?", trc.aborted)
	if trc.completed {
		// Be silent in this case.
		return
	}
	if trc.currentState != trc {
		// We've been completed early! Be noisy!
		panic(fmt.Sprintf("%v error: Txn completion received with txn in wrong state: %v\n", trc.Id, trc.currentState))
	}
	trc.completed = true
	trc.maybeFinish()
	if trc.aborted {
		return
	}
	for idx := 0; idx < len(trc.localActions); idx++ {
		action := &trc.localActions[idx]
		if action.frame == nil {
			// Could be the case if !aborted and we're a learner, but
			// when we learnt, we never assigned a frame.
			continue
		}
		f := func(v *Var) {
			if v == nil {
				panic(fmt.Sprintf("%v error (%v, aborted? %v, frame == nil? %v): %v Not found!", trc.Id, trc, trc.aborted, action.frame == nil, action.vUUId))
			} else {
				v.TxnGloballyComplete(action)
			}
		}
		trc.vd.ApplyToVar(f, false, action.vUUId)
	}
}
Example #23
func (am *AcceptorManager) OneATxnVotesReceived(sender common.RMId, txnId *common.TxnId, oneATxnVotes *msgs.OneATxnVotes) {
	instanceRMId := common.RMId(oneATxnVotes.RmId())
	server.Log(txnId, "1A received from", sender, "; instance:", instanceRMId)
	instId := instanceId([instanceIdLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])
	binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], uint32(instanceRMId))

	replySeg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(replySeg)
	oneBTxnVotes := msgs.NewOneBTxnVotes(replySeg)
	msg.SetOneBTxnVotes(oneBTxnVotes)
	oneBTxnVotes.SetRmId(oneATxnVotes.RmId())
	oneBTxnVotes.SetTxnId(oneATxnVotes.TxnId())

	proposals := oneATxnVotes.Proposals()
	promises := msgs.NewTxnVotePromiseList(replySeg, proposals.Len())
	oneBTxnVotes.SetPromises(promises)
	for idx, l := 0, proposals.Len(); idx < l; idx++ {
		proposal := proposals.At(idx)
		vUUId := common.MakeVarUUId(proposal.VarId())
		copy(instIdSlice[common.KeyLen+4:], vUUId[:])
		promise := promises.At(idx)
		promise.SetVarId(vUUId[:])
		am.ensureInstance(txnId, &instId, vUUId).OneATxnVotesReceived(&proposal, &promise)
	}

	NewOneShotSender(server.SegToBytes(replySeg), am.ConnectionManager, sender)
}
Example #24
func newTwoBTxnVotesSender(outcome *msgs.Outcome, txnId *common.TxnId, submitter common.RMId, recipients ...common.RMId) *twoBTxnVotesSender {
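	// Two messages are built: the full submission outcome for the
	// original submitter, and a 2B votes message for the recipients in
	// which abort updates have been stripped.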
	submitterSeg := capn.NewBuffer(nil)
	submitterMsg := msgs.NewRootMessage(submitterSeg)
	submitterMsg.SetSubmissionOutcome(*outcome)

	if outcome.Which() == msgs.OUTCOME_ABORT {
		abort := outcome.Abort()
		abort.SetResubmit() // nuke out the updates as proposers don't need them.
	}

	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	twoB := msgs.NewTwoBTxnVotes(seg)
	msg.SetTwoBTxnVotes(twoB)
	twoB.SetOutcome(*outcome)

	server.Log(txnId, "Sending 2B to", recipients)

	return &twoBTxnVotesSender{
		msg:          server.SegToBytes(seg),
		recipients:   recipients,
		submitterMsg: server.SegToBytes(submitterSeg),
		submitter:    submitter,
	}
}
Example #25
func (subs serverConnSubscribers) AddSubscriber(ob paxos.ServerConnectionSubscriber) {
	if _, found := subs.subscribers[ob]; found {
		server.Log(ob, "CM found duplicate add serverConn subscriber")
	} else {
		subs.subscribers[ob] = server.EmptyStructVal
		ob.ConnectedRMs(subs.cloneRMToServer())
	}
}
Example #26
func (fo *frameOpen) WriteLearnt(action *localAction) bool {
	txn := action.Txn
	if fo.currentState != fo {
		panic(fmt.Sprintf("%v WriteLearnt called for %v with frame in state %v", fo.v, txn, fo.currentState))
	}
	actClockElem := action.outcomeClock.Clock[*fo.v.UUId]
	reqClockElem := fo.frameTxnClock.Clock[*fo.v.UUId]
	if actClockElem < reqClockElem || (actClockElem == reqClockElem && action.Id.Compare(fo.frameTxnId) == common.LT) {
		server.Log(fo.frame, "WriteLearnt", txn, "ignored, too old")
		return false
	}
	if action.Id.Compare(fo.frameTxnId) == common.EQ {
		server.Log(fo.frame, "WriteLearnt", txn, "is duplicate of current frame")
		return false
	}
	if actClockElem == reqClockElem {
		// ok, so our frame txn and this txn were actually siblings, but we
		// created this frame before we knew that. By definition, there
		// cannot be any committed reads of us.
		if fo.reads.Len() > fo.uncommittedReads {
			panic(fmt.Sprintf("%v (%v) Found committed reads where there should have been none for action %v (%v)", fo.frame, fo.frameTxnClock, action, action.outcomeClock))
		}
	}
	if fo.writes.Get(action) == nil {
		fo.writes.Insert(action, committed)
		action.frame = fo.frame
		fo.positionsFound = fo.positionsFound || (fo.frameTxnActions == nil && action.createPositions != nil)
		// See corresponding comment in ReadLearnt
		clock := fo.writeVoteClock
		if clock == nil {
			clock = fo.readVoteClock
		}
		for k, v := range clock.Clock {
			if _, found := action.outcomeClock.Clock[k]; !found {
				fo.mask.SetVarIdMax(k, v)
			}
		}
		server.Log(fo.frame, "WriteLearnt", txn, "uncommittedReads:", fo.uncommittedReads, "uncommittedWrites:", fo.uncommittedWrites)
		if fo.uncommittedReads == 0 {
			fo.maybeCreateChild()
		}
		return true
	} else {
		panic(fmt.Sprintf("%v WriteLearnt called for known txn %v", fo.frame, txn))
	}
}
Example #27
func (lc *LocalConnection) SubmissionOutcomeReceived(sender common.RMId, txnId *common.TxnId, outcome *msgs.Outcome) {
	server.Log("LC Received submission outcome for", txnId)
	lc.enqueueQuery(localConnectionMsgOutcomeReceived{
		sender:  sender,
		txnId:   txnId,
		outcome: outcome,
	})
}
Example #28
func (sts *SimpleTxnSubmitter) SubmitTransaction(txnCap *msgs.Txn, activeRMs []common.RMId, continuation TxnCompletionConsumer, delay time.Duration) {
	seg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(seg)
	msg.SetTxnSubmission(*txnCap)

	txnId := common.MakeTxnId(txnCap.Id())
	server.Log(txnId, "Submitting txn")
	txnSender := paxos.NewRepeatingSender(server.SegToBytes(seg), activeRMs...)
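	// With no delay the repeating sender is subscribed immediately;
	// otherwise a goroutine sleeps for the delay, subscribes, and then
	// waits for removeSenderCh to be closed before unsubscribing.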
	var removeSenderCh chan server.EmptyStruct
	if delay == 0 {
		sts.connPub.AddServerConnectionSubscriber(txnSender)
	} else {
		removeSenderCh = make(chan server.EmptyStruct)
		go func() {
			// fmt.Printf("%v ", delay)
			time.Sleep(delay)
			sts.connPub.AddServerConnectionSubscriber(txnSender)
			<-removeSenderCh
			sts.connPub.RemoveServerConnectionSubscriber(txnSender)
		}()
	}
	acceptors := paxos.GetAcceptorsFromTxn(txnCap)

	shutdownFun := func(shutdown bool) {
		delete(sts.outcomeConsumers, *txnId)
		// fmt.Printf("sts%v ", len(sts.outcomeConsumers))
		if delay == 0 {
			sts.connPub.RemoveServerConnectionSubscriber(txnSender)
		} else {
			close(removeSenderCh)
		}
		// OSS is safe here - see above.
		paxos.NewOneShotSender(paxos.MakeTxnSubmissionCompleteMsg(txnId), sts.connPub, acceptors...)
		if shutdown {
			if txnCap.Retry() {
				// If this msg doesn't make it then proposers should
				// observe our death and tidy up anyway. If it's just this
				// connection shutting down then there should be no
				// problem with these msgs getting to the proposers.
				paxos.NewOneShotSender(paxos.MakeTxnSubmissionAbortMsg(txnId), sts.connPub, activeRMs...)
			}
			continuation(txnId, nil, nil)
		}
	}
	shutdownFunPtr := &shutdownFun
	sts.onShutdown[shutdownFunPtr] = server.EmptyStructVal

	outcomeAccumulator := paxos.NewOutcomeAccumulator(int(txnCap.FInc()), acceptors)
	consumer := func(sender common.RMId, txnId *common.TxnId, outcome *msgs.Outcome) {
		if outcome, _ = outcomeAccumulator.BallotOutcomeReceived(sender, outcome); outcome != nil {
			delete(sts.onShutdown, shutdownFunPtr)
			shutdownFun(false)
			continuation(txnId, outcome, nil)
		}
	}
	sts.outcomeConsumers[*txnId] = consumer
	// fmt.Printf("sts%v ", len(sts.outcomeConsumers))
}
Example #29
func (pab *proposerAwaitBallots) Abort() {
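	// Aborting only makes sense while we are still awaiting ballots and
	// no consensus has been observed: submit abort ballots for every var
	// allocated to this RM.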
	if pab.currentState == pab && !pab.allAcceptorsAgreed {
		server.Log(pab.txnId, "Proposer Aborting")
		txnCap := pab.txn.TxnCap
		alloc := AllocForRMId(txnCap, pab.proposerManager.RMId)
		ballots := MakeAbortBallots(txnCap, alloc)
		pab.TxnBallotsComplete(ballots...)
	}
}
Example #30
func (fc *frameClosed) DescendentOnDisk() bool {
	if !fc.onDisk {
		server.Log(fc.frame, "DescendentOnDisk")
		fc.onDisk = true
		fc.MaybeCompleteTxns()
		return true
	}
	return false
}