Example #1
func NewProposal(pm *ProposerManager, txnId *common.TxnId, txn *msgs.Txn, fInc int, ballots []*eng.Ballot, instanceRMId common.RMId, acceptors []common.RMId, skipPhase1 bool) *proposal {
	allocs := txn.Allocations()
	activeRMIds := make(map[common.RMId]uint32, allocs.Len())
	for idx, l := 0, allocs.Len(); idx < l; idx++ {
		alloc := allocs.At(idx)
		bootCount := alloc.Active()
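		// Allocations list the active RMs first, so the first zero
		// boot count marks the start of the passive RMs.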
		if bootCount == 0 {
			break
		}
		rmId := common.RMId(alloc.RmId())
		activeRMIds[rmId] = bootCount
	}
	p := &proposal{
		proposerManager:    pm,
		instanceRMId:       instanceRMId,
		acceptors:          acceptors,
		activeRMIds:        activeRMIds,
		fInc:               fInc,
		txn:                txn,
		txnId:              txnId,
		submitter:          common.RMId(txn.Submitter()),
		submitterBootCount: txn.SubmitterBootCount(),
		skipPhase1:         skipPhase1,
		instances:          make(map[common.VarUUId]*proposalInstance, len(ballots)),
		pending:            make([]*proposalInstance, 0, len(ballots)),
		finished:           false,
	}
	for _, ballot := range ballots {
		pi := newProposalInstance(p, ballot)
		p.instances[*ballot.VarUUId] = pi
		pi.init()
		pi.start()
	}
	return p
}
Example #2
func (aalc *acceptorAwaitLocallyComplete) start() {
	if aalc.twoBSender != nil {
		aalc.acceptorManager.ConnectionManager.RemoveSenderSync(aalc.twoBSender)
		aalc.twoBSender = nil
	}

	// If our outcome changes, it may look here like we're throwing
	// away TLCs received from proposers/learners. However,
	// proposers/learners wait until all acceptors have given the same
	// answer before issuing any TLCs, so if we are here, we cannot
	// have received any TLCs from anyone... unless we're a retry!  If
	// the txn is a retry then proposers start as soon as they have any
	// ballot, and the ballot accumulator will return a result
	// immediately. However, other ballots can continue to arrive even
	// after a proposer has received F+1 equal outcomes from
	// acceptors. In that case, the acceptor can be here, waiting for
	// TLCs, and can even have received some TLCs when it now receives
	// another ballot. It cannot ignore that ballot because to do so
	// opens the possibility that the acceptors do not arrive at the
	// same outcome and the txn will block.

	allocs := aalc.ballotAccumulator.Txn.Allocations()
	aalc.pendingTLC = make(map[common.RMId]server.EmptyStruct, allocs.Len())
	aalc.tgcRecipients = make([]common.RMId, 0, allocs.Len())
	twoBRecipients := make([]common.RMId, 0, allocs.Len())
	aborted := (*msgs.Outcome)(aalc.outcomeOnDisk).Which() == msgs.OUTCOME_ABORT
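	// 2Bs go to the active RMs (or to everyone if sendToAllOnDisk is
	// set); TGCs go to all RMs on a commit, but only to the active
	// RMs on an abort.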
	for idx, l := 0, allocs.Len(); idx < l; idx++ {
		alloc := allocs.At(idx)
		active := alloc.Active() != 0
		rmId := common.RMId(alloc.RmId())
		if aalc.sendToAllOnDisk || active {
			twoBRecipients = append(twoBRecipients, rmId)
			if _, found := aalc.tlcsReceived[rmId]; !found {
				aalc.pendingTLC[rmId] = server.EmptyStructVal
			}
		}
		if !aborted || active {
			aalc.tgcRecipients = append(aalc.tgcRecipients, rmId)
		}
	}

	if len(aalc.pendingTLC) == 0 && aalc.tscReceived {
		aalc.maybeDelete()

	} else {
		server.Log(aalc.txnId, "Adding sender for 2B")
		submitter := common.RMId(aalc.ballotAccumulator.Txn.Submitter())
		aalc.twoBSender = newTwoBTxnVotesSender((*msgs.Outcome)(aalc.outcomeOnDisk), aalc.txnId, submitter, twoBRecipients...)
		aalc.acceptorManager.ConnectionManager.AddSender(aalc.twoBSender)
	}
}
Example #3
func (s *server) ensureRMId() error {
	path := s.dataDir + "/rmid"
	if b, err := ioutil.ReadFile(path); err == nil {
		s.rmId = common.RMId(binary.BigEndian.Uint32(b))
		return nil

	} else {
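		// No persisted RMId yet: generate a fresh non-empty one and
		// persist it read-only.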
		rng := rand.New(rand.NewSource(time.Now().UnixNano()))
		for s.rmId == common.RMIdEmpty {
			s.rmId = common.RMId(rng.Uint32())
		}
		b := make([]byte, 4)
		binary.BigEndian.PutUint32(b, uint32(s.rmId))
		return ioutil.WriteFile(path, b, 0400)
	}
}
Example #4
func (cash *connectionAwaitServerHandshake) start() (bool, error) {
	topology := cash.connectionManager.Topology()
	helloFromServer := cash.makeHelloFromServer(topology)
	if err := cash.send(server.SegToBytes(helloFromServer)); err != nil {
		return cash.connectionAwaitHandshake.maybeRestartConnection(err)
	}

	if seg, err := cash.readAndDecryptOne(); err == nil {
		hello := msgs.ReadRootHelloFromServer(seg)
		if verified, remoteTopology := cash.verifyTopology(topology, &hello); verified {
			cash.Lock()
			cash.established = true
			cash.remoteHost = hello.LocalHost()
			ns := hello.Namespace()
			cash.remoteBootCount = binary.BigEndian.Uint32(ns[4:8])
			cash.remoteRMId = common.RMId(binary.BigEndian.Uint32(ns[8:12]))
			cash.combinedTieBreak = cash.combinedTieBreak ^ hello.TieBreak()
			cash.remoteTopology = remoteTopology
			cash.Unlock()
			cash.nextState(nil)
			return false, nil
		} else {
			return cash.connectionAwaitHandshake.maybeRestartConnection(fmt.Errorf("Unequal remote topology"))
		}
	} else {
		return cash.connectionAwaitHandshake.maybeRestartConnection(err)
	}
}
Example #5
func (am *AcceptorManager) OneATxnVotesReceived(sender common.RMId, txnId *common.TxnId, oneATxnVotes *msgs.OneATxnVotes) {
	instanceRMId := common.RMId(oneATxnVotes.RmId())
	server.Log(txnId, "1A received from", sender, "; instance:", instanceRMId)
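	// Build the paxos instance key: txnId ++ instanceRMId, with the
	// per-var vUUId appended in the loop below.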
	instId := instanceId([instanceIdLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])
	binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], uint32(instanceRMId))

	replySeg := capn.NewBuffer(nil)
	msg := msgs.NewRootMessage(replySeg)
	oneBTxnVotes := msgs.NewOneBTxnVotes(replySeg)
	msg.SetOneBTxnVotes(oneBTxnVotes)
	oneBTxnVotes.SetRmId(oneATxnVotes.RmId())
	oneBTxnVotes.SetTxnId(oneATxnVotes.TxnId())

	proposals := oneATxnVotes.Proposals()
	promises := msgs.NewTxnVotePromiseList(replySeg, proposals.Len())
	oneBTxnVotes.SetPromises(promises)
	for idx, l := 0, proposals.Len(); idx < l; idx++ {
		proposal := proposals.At(idx)
		vUUId := common.MakeVarUUId(proposal.VarId())
		copy(instIdSlice[common.KeyLen+4:], vUUId[:])
		promise := promises.At(idx)
		promise.SetVarId(vUUId[:])
		am.ensureInstance(txnId, &instId, vUUId).OneATxnVotesReceived(&proposal, &promise)
	}

	NewOneShotSender(server.SegToBytes(replySeg), am.ConnectionManager, sender)
}
Example #6
func TopologyFromCap(txnId *common.TxnId, root *msgs.VarIdPos, topology *msgs.Topology) *Topology {
	t := &Topology{Configuration: &configuration.Configuration{}}
	t.ClusterId = topology.ClusterId()
	t.Version = topology.Version()
	t.Hosts = topology.Hosts().ToArray()
	t.F = topology.F()
	t.FInc = t.F + 1
	t.TwoFInc = (2 * uint16(t.F)) + 1
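	// FInc = F+1 is a quorum; TwoFInc = 2F+1 acceptors tolerate F failures.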
	t.MaxRMCount = topology.MaxRMCount()
	t.AsyncFlush = topology.AsyncFlush()
	rms := topology.Rms()
	t.AllRMs = make([]common.RMId, rms.Len())
	for idx := range t.AllRMs {
		t.AllRMs[idx] = common.RMId(rms.At(idx))
	}
	t.DBVersion = txnId
	if root != nil && len(root.Id()) == common.KeyLen {
		t.RootVarUUId = common.MakeVarUUId(root.Id())
		pos := common.Positions(root.Positions())
		t.RootPositions = &pos
	}
	accounts := topology.Accounts()
	t.Accounts = make(map[string]string, accounts.Len())
	for idx, l := 0, accounts.Len(); idx < l; idx++ {
		account := accounts.At(idx)
		t.Accounts[account.Username()] = account.Password()
	}
	return t
}
Example #7
func ProposerFromData(pm *ProposerManager, txnId *common.TxnId, data []byte) *Proposer {
	seg, _, err := capn.ReadFromMemoryZeroCopy(data)
	if err != nil {
		// Without a decoded segment we cannot proceed; bail out rather
		// than dereference a nil seg below.
		log.Println("Unable to decode proposer state", data)
		return nil
	}
	// If we were on disk, then that means we must be locally complete
	// and just need to send out TLCs.
	state := msgs.ReadRootProposerState(seg)
	acceptorsCap := state.Acceptors()
	acceptors := make([]common.RMId, acceptorsCap.Len())
	for idx := range acceptors {
		acceptors[idx] = common.RMId(acceptorsCap.At(idx))
	}
	// We were on disk. Thus we received outcomes from all
	// acceptors. So we don't need to worry about the outcome
	// accumulator's fInc, hence just use -1 here.
	p := &Proposer{
		proposerManager: pm,
		mode:            proposerTLCSender,
		txnId:           txnId,
		acceptors:       acceptors,
		fInc:            -1,
	}
	p.init()
	p.allAcceptorsAgreed = true
	return p
}
Example #8
func BallotAccumulatorFromData(txnId *common.TxnId, txn *msgs.Txn, outcome *outcomeEqualId, instances *msgs.InstancesForVar_List) *BallotAccumulator {
	ba := NewBallotAccumulator(txnId, txn)
	ba.outcome = outcome

	for idx, l := 0, instances.Len(); idx < l; idx++ {
		// All instances that went to disk must be complete. But in the
		// case of a retry, not all instances must be complete before
		// going to disk.
		ba.incompleteVars--
		instancesForVar := instances.At(idx)
		acceptedInstances := instancesForVar.Instances()
		vUUId := common.MakeVarUUId(instancesForVar.VarId())
		vBallot := ba.vUUIdToBallots[*vUUId]
		rmBals := rmBallots(make([]*rmBallot, acceptedInstances.Len()))
		vBallot.rmToBallot = rmBals
		for idy, m := 0, acceptedInstances.Len(); idy < m; idy++ {
			acceptedInstance := acceptedInstances.At(idy)
			ballot := acceptedInstance.Ballot()
			rmBal := &rmBallot{
				instanceRMId: common.RMId(acceptedInstance.RmId()),
				ballot:       eng.BallotFromCap(&ballot),
				roundNumber:  paxosNumber(acceptedInstance.RoundNumber()),
			}
			rmBals[idy] = rmBal
		}
		result := instancesForVar.Result()
		vBallot.result = eng.BallotFromCap(&result)
	}

	return ba
}
Example #9
func TxnFromCap(exe *dispatcher.Executor, vd *VarDispatcher, stateChange TxnLocalStateChange, ourRMId common.RMId, txnCap *msgs.Txn) *Txn {
	txnId := common.MakeTxnId(txnCap.Id())
	actions := txnCap.Actions()
	txn := &Txn{
		Id:          txnId,
		Retry:       txnCap.Retry(),
		writes:      make([]*common.VarUUId, 0, actions.Len()),
		TxnCap:      txnCap,
		exe:         exe,
		vd:          vd,
		stateChange: stateChange,
	}

	allocations := txnCap.Allocations()
	for idx, l := 0, allocations.Len(); idx < l; idx++ {
		alloc := allocations.At(idx)
		rmId := common.RMId(alloc.RmId())
		if ourRMId == rmId {
			txn.populate(alloc.ActionIndices(), actions)
			break
		}
	}

	return txn
}
Example #10
func conditionFromCap(condCap *msgs.Condition) Cond {
	switch condCap.Which() {
	case msgs.CONDITION_AND:
		condAnd := condCap.And()
		left := condAnd.Left()
		right := condAnd.Right()
		return &Conjunction{
			Left:  conditionFromCap(&left),
			Right: conditionFromCap(&right),
		}
	case msgs.CONDITION_OR:
		condOr := condCap.Or()
		left := condOr.Left()
		right := condOr.Right()
		return &Disjunction{
			Left:  conditionFromCap(&left),
			Right: conditionFromCap(&right),
		}
	case msgs.CONDITION_GENERATOR:
		condGen := condCap.Generator()
		return &Generator{
			RMId:     common.RMId(condGen.RmId()),
			UseNext:  condGen.UseNext(),
			Includes: condGen.Includes(),
		}
	default:
		panic(fmt.Sprintf("Unexpected Condition type (%v)", condCap.Which()))
	}
}
Example #11
func ConditionsFromCap(condsCap *msgs.ConditionPair_List) Conds {
	condSups := make(map[common.RMId]*CondSuppliers, condsCap.Len())
	for idx, l := 0, condsCap.Len(); idx < l; idx++ {
		pairCap := condsCap.At(idx)
		condCap := pairCap.Condition()
		suppliersCap := pairCap.Suppliers()
		suppliers := make([]common.RMId, suppliersCap.Len())
		for idy := range suppliers {
			suppliers[idy] = common.RMId(suppliersCap.At(idy))
		}
		condSups[common.RMId(pairCap.RmId())] = &CondSuppliers{
			Cond:      conditionFromCap(&condCap),
			Suppliers: suppliers,
		}
	}
	return condSups
}
Example #12
func (s *store) LoadRMId() error {
	rmIdBytes, err := ioutil.ReadFile(s.dir + "/rmid")
	if err != nil {
		return err
	}
	s.rmId = common.RMId(binary.BigEndian.Uint32(rmIdBytes))
	return nil
}
Example #13
func (pab *proposerAwaitBallots) start() {
	pab.txn.Start(true)
	pab.submitter = common.RMId(pab.txn.TxnCap.Submitter())
	pab.submitterBootCount = pab.txn.TxnCap.SubmitterBootCount()
	if pab.txn.Retry {
		pab.proposerManager.ConnectionManager.AddSender(pab)
	}
}
Example #14
func AllocForRMId(txn *msgs.Txn, rmId common.RMId) *msgs.Allocation {
	allocs := txn.Allocations()
	for idx, l := 0, allocs.Len(); idx < l; idx++ {
		alloc := allocs.At(idx)
		if common.RMId(alloc.RmId()) == rmId {
			return &alloc
		}
	}
	return nil
}
Example #15
func GetAcceptorsFromTxn(txnCap *msgs.Txn) common.RMIds {
	fInc := int(txnCap.FInc())
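	// fInc is F+1, so fInc+fInc-1 yields the 2F+1 acceptors a txn uses.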
	twoFInc := fInc + fInc - 1
	acceptors := make([]common.RMId, twoFInc)
	allocations := txnCap.Allocations()
	idx := 0
	for l := allocations.Len(); idx < l && idx < twoFInc; idx++ {
		alloc := allocations.At(idx)
		acceptors[idx] = common.RMId(alloc.RmId())
	}
	// Danger! For the initial topology txns, there are _not_ twoFInc acceptors
	return acceptors[:idx]
}
Example #16
// from network
func (pm *ProposerManager) OneBTxnVotesReceived(sender common.RMId, txnId *common.TxnId, oneBTxnVotes *msgs.OneBTxnVotes) {
	server.Log(txnId, "1B received from", sender, "; instance:", common.RMId(oneBTxnVotes.RmId()))
	instId := instanceIdPrefix([instanceIdPrefixLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])
	binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], oneBTxnVotes.RmId())
	if prop, found := pm.proposals[instId]; found {
		prop.OneBTxnVotesReceived(sender, oneBTxnVotes)
	}
	// If not found, it should be safe to ignore - it's just a delayed
	// 1B that we clearly don't need in order to complete the paxos
	// instances anyway.
}
Example #17
func (am *AcceptorManager) TwoATxnVotesReceived(sender common.RMId, txnId *common.TxnId, twoATxnVotes *msgs.TwoATxnVotes) {
	instanceRMId := common.RMId(twoATxnVotes.RmId())
	server.Log(txnId, "2A received from", sender, "; instance:", instanceRMId)
	instId := instanceId([instanceIdLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])
	binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], uint32(instanceRMId))

	txnCap := twoATxnVotes.Txn()
	a := am.ensureAcceptor(txnId, &txnCap)
	requests := twoATxnVotes.AcceptRequests()
	failureInstances := make([]*instance, 0, requests.Len())
	failureRequests := make([]*msgs.TxnVoteAcceptRequest, 0, requests.Len())

	for idx, l := 0, requests.Len(); idx < l; idx++ {
		request := requests.At(idx)
		vUUId := common.MakeVarUUId(request.Ballot().VarId())
		copy(instIdSlice[common.KeyLen+4:], vUUId[:])
		inst := am.ensureInstance(txnId, &instId, vUUId)
		accepted, rejected := inst.TwoATxnVotesReceived(&request)
		if accepted {
			a.BallotAccepted(instanceRMId, inst, vUUId, &txnCap)
		} else if rejected {
			failureInstances = append(failureInstances, inst)
			failureRequests = append(failureRequests, &request)
		}
	}

	if len(failureInstances) != 0 {
		replySeg := capn.NewBuffer(nil)
		msg := msgs.NewRootMessage(replySeg)
		twoBTxnVotes := msgs.NewTwoBTxnVotes(replySeg)
		msg.SetTwoBTxnVotes(twoBTxnVotes)
		twoBTxnVotes.SetFailures()
		failuresCap := twoBTxnVotes.Failures()
		failuresCap.SetTxnId(txnId[:])
		failuresCap.SetRmId(uint32(instanceRMId))
		nacks := msgs.NewTxnVoteTwoBFailureList(replySeg, len(failureInstances))
		failuresCap.SetNacks(nacks)
		for idx, inst := range failureInstances {
			failure := nacks.At(idx)
			failure.SetVarId(inst.vUUId[:])
			failure.SetRoundNumber(failureRequests[idx].RoundNumber())
			failure.SetRoundNumberTooLow(uint32(inst.promiseNum >> 32))
		}
		server.Log(txnId, "Sending 2B failures to", sender, "; instance:", instanceRMId)
		// The proposal senders resend, so this use of a OneShotSender
		// (OSS) is fine: a lost reply will be provoked again by a
		// resent 2A.
		NewOneShotSender(server.SegToBytes(replySeg), am, sender)
	}
}
Example #18
func (id *outcomeEqualId) String() string {
	idList := (*msgs.Outcome)(id).Id()
	buf := "OutcomeId["
	for idx, l := 0, idList.Len(); idx < l; idx++ {
		outId := idList.At(idx)
		buf += fmt.Sprintf("%v{", common.MakeVarUUId(outId.VarId()))
		instList := outId.AcceptedInstances()
		for idy, m := 0, instList.Len(); idy < m; idy++ {
			inst := instList.At(idy)
			buf += fmt.Sprintf("(instance %v: vote %v)", common.RMId(inst.RmId()), inst.Vote())
		}
		buf += "} "
	}
	buf += "]"
	return buf
}
Example #19
func (cash *connectionAwaitServerHandshake) start() (bool, error) {
	seg := capn.NewBuffer(nil)
	hello := msgs.NewRootHelloFromClient(seg)
	hello.SetUsername(cash.username)
	hello.SetPassword(cash.password)
	cash.username = ""
	cash.password = nil

	buf := new(bytes.Buffer)
	if _, err := seg.WriteTo(buf); err != nil {
		return false, err
	}
	if err := cash.send(buf.Bytes()); err != nil {
		return false, err
	}

	if seg, err := cash.readAndDecryptOne(); err == nil {
		server := msgs.ReadRootHelloFromServer(seg)
		root := server.Root()
		if len(root.Id()) != common.KeyLen {
			return false, fmt.Errorf("Root object VarUUId is of wrong length!")
		}
		cash.lock.Lock()
		cash.rootVUUId = common.MakeVarUUId(root.Id())
		cash.namespace = make([]byte, common.KeyLen)
		copy(cash.namespace[8:], server.Namespace())
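		// The server's namespace carries its boot count at [4:8] and
		// RMId at [8:12] (cf. the server handshake examples above);
		// copied in at offset 8, the RMId lands at namespace[16:20].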
		cash.serverHost = server.LocalHost()
		cash.rmId = common.RMId(binary.BigEndian.Uint32(cash.namespace[16:20]))
		cash.lock.Unlock()
		cash.nextState()
		return false, nil

	} else {
		return false, err
	}
}
Example #20
func (rn paxosNumber) String() string {
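	// The round number lives in the high 32 bits, the proposing RMId
	// in the low 32.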
	top := uint32(rn >> 32)
	rmId := common.RMId(uint32(rn))
	return fmt.Sprintf("%v|%v", top, rmId)
}
Example #21
func (cash *connectionAwaitServerHandshake) start() (bool, error) {
	// TLS seems to require us to pick one end as the client and one
	// end as the server even though in a server-server connection we
	// really don't care which is which.
	config := cash.commonTLSConfig()
	if cash.remoteHost == "" {
		// We came from the listener, so we're going to act as the server.
		config.ClientAuth = tls.RequireAndVerifyClientCert
		cash.socket = tls.Server(cash.socket, config)

	} else {
		config.InsecureSkipVerify = true
		socket := tls.Client(cash.socket, config)
		cash.socket = socket

		// This is nuts: as a server, we can demand the client cert and
		// verify that without any concept of a client name. But as the
		// client, if we don't have a server name, then we have to do
		// the verification ourselves. Why is TLS asymmetric?!

		if err := socket.Handshake(); err != nil {
			return cash.connectionAwaitHandshake.maybeRestartConnection(err)
		}

		opts := x509.VerifyOptions{
			Roots:         config.RootCAs,
			DNSName:       "", // disable server name checking
			Intermediates: x509.NewCertPool(),
		}
		certs := socket.ConnectionState().PeerCertificates
		for i, cert := range certs {
			if i == 0 {
				continue
			}
			opts.Intermediates.AddCert(cert)
		}
		if _, err := certs[0].Verify(opts); err != nil {
			return cash.connectionAwaitHandshake.maybeRestartConnection(err)
		}
	}

	helloFromServer := cash.makeHelloServerFromServer(cash.topology)
	if err := cash.send(server.SegToBytes(helloFromServer)); err != nil {
		return cash.connectionAwaitHandshake.maybeRestartConnection(err)
	}

	if seg, err := cash.readOne(); err == nil {
		hello := msgs.ReadRootHelloServerFromServer(seg)
		if cash.verifyTopology(cash.topology, &hello) {
			cash.remoteHost = hello.LocalHost()
			cash.remoteRMId = common.RMId(hello.RmId())

			if _, found := cash.topology.RMsRemoved()[cash.remoteRMId]; found {
				return false, cash.serverError(
					fmt.Errorf("%v has been removed from topology and may not rejoin.", cash.remoteRMId))
			}

			rootId := hello.RootId()
			if len(rootId) == common.KeyLen {
				cash.remoteRootId = common.MakeVarUUId(rootId)
			}
			cash.remoteBootCount = hello.BootCount()
			cash.combinedTieBreak = cash.combinedTieBreak ^ hello.TieBreak()
			cash.nextState(nil)
			return false, nil
		} else {
			return cash.connectionAwaitHandshake.maybeRestartConnection(fmt.Errorf("Unequal remote topology"))
		}
	} else {
		return cash.connectionAwaitHandshake.maybeRestartConnection(err)
	}
}
Example #22
func (instId instanceId) String() string {
	txnId := common.MakeTxnId(instId[0:common.KeyLen])
	rmId := common.RMId(binary.BigEndian.Uint32(instId[common.KeyLen:]))
	vUUId := common.MakeVarUUId(instId[common.KeyLen+4:])
	return fmt.Sprintf("PaxosInstanceId:%v:%v:%v", txnId, rmId, vUUId)
}
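The same key layout can be exercised in the encoding direction. Below is a minimal sketch (not part of the original sources) of the corresponding encoder; it assumes instanceIdLen == 2*common.KeyLen + 4, matching the offsets used by the AcceptorManager examples above:

func makeInstanceId(txnId *common.TxnId, rmId common.RMId, vUUId *common.VarUUId) instanceId {
	// Hypothetical helper: txnId ++ rmId ++ vUUId, i.e. the inverse of
	// the String() decoder above.
	instId := instanceId([instanceIdLen]byte{})
	copy(instId[:], txnId[:])
	binary.BigEndian.PutUint32(instId[common.KeyLen:], uint32(rmId))
	copy(instId[common.KeyLen+4:], vUUId[:])
	return instId
}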
Example #23
func (s *proposalSender) ConnectionLost(lost common.RMId, conns map[common.RMId]Connection) {
	if !s.proposeAborts {
		return
	}

	if lost == s.proposal.submitter {
		// There's a chance that only we received this txn, so we need
		// to abort for all other active RMs.
		s.proposal.proposerManager.Exe.Enqueue(func() {
			// Only start a new proposal if we're not finished - there's
			// a race otherwise: the final 2b could be on its way to us
			// at the same time as we notice the failure.
			if s.proposal.finished {
				return
			}
			allocs := s.proposal.txn.Allocations()
			for idx, l := 0, allocs.Len(); idx < l; idx++ {
				alloc := allocs.At(idx)
				rmId := common.RMId(alloc.RmId())
				if alloc.Active() == 0 {
					break
				} else if rmId == s.proposal.proposerManager.RMId {
					continue
				} else {
					found := false
					// slightly horrible N^2, but not on critical path. Ok for now.
					for _, alreadyAborted := range s.proposal.abortInstances {
						if found = alreadyAborted == rmId; found {
							break
						}
					}
					if found {
						break
					}
					ballots := MakeAbortBallots(s.proposal.txn, &alloc)
					server.Log(s.proposal.txnId, "Trying to abort", rmId, "due to lost submitter", lost, "Found actions:", len(ballots))
					s.proposal.abortInstances = append(s.proposal.abortInstances, rmId)
					s.proposal.proposerManager.NewPaxosProposals(
						s.txnId, s.txn, s.fInc, ballots, s.proposal.acceptors, rmId, false)
				}
			}
		})
		return
	}

	alloc := AllocForRMId(s.proposal.txn, lost)
	if alloc == nil || alloc.Active() == 0 {
		return
	}
	s.proposal.proposerManager.Exe.Enqueue(func() {
		if s.proposal.finished { // see above equiv
			return
		}
		for _, alreadyAborted := range s.proposal.abortInstances {
			if alreadyAborted == lost {
				return // already done!
			}
		}
		ballots := MakeAbortBallots(s.proposal.txn, alloc)
		server.Log(s.proposal.txnId, "Trying to abort for", lost, "Found actions:", len(ballots))
		s.proposal.abortInstances = append(s.proposal.abortInstances, lost)
		s.proposal.proposerManager.NewPaxosProposals(
			s.txnId, s.txn, s.fInc, ballots, s.proposal.acceptors, lost, false)
	})
}
Example #24
func TestMain(m *testing.M) {
	hashcodes = []common.RMId{
		common.RMId(1), common.RMId(2), common.RMId(3), common.RMId(4), common.RMId(5), common.RMId(6), common.RMId(7), common.RMId(8),
		common.RMId(9), common.RMId(10), common.RMId(11), common.RMId(12), common.RMId(13), common.RMId(14), common.RMId(15), common.RMId(16),
		common.RMId(17), common.RMId(18), common.RMId(19), common.RMId(20), common.RMId(21), common.RMId(22), common.RMId(23), common.RMId(24),
		common.RMId(25), common.RMId(26), common.RMId(27), common.RMId(28), common.RMId(29), common.RMId(30), common.RMId(31), common.RMId(32),
	}

	randomPositions = make([][]uint8, positionsCount)
	for idx := range randomPositions {
		positions := make([]uint8, len(hashcodes))
		randomPositions[idx] = positions
		for idy := range positions {
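			// rand.Intn panics when its argument is 0, hence the
			// special case for the first position.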
			if idy == 0 {
				positions[idy] = uint8(idy)
			} else {
				positions[idy] = uint8(rand.Intn(idy))
			}
		}
	}
	os.Exit(m.Run())
}
Example #25
func ConfigurationFromCap(config *msgs.Configuration) *Configuration {
	c := &Configuration{
		ClusterId:  config.ClusterId(),
		Version:    config.Version(),
		Hosts:      config.Hosts().ToArray(),
		F:          config.F(),
		MaxRMCount: config.MaxRMCount(),
		NoSync:     config.NoSync(),
	}

	rms := config.Rms()
	c.rms = make([]common.RMId, rms.Len())
	for idx := range c.rms {
		c.rms[idx] = common.RMId(rms.At(idx))
	}

	rmsRemoved := config.RmsRemoved()
	c.rmsRemoved = make(map[common.RMId]server.EmptyStruct, rmsRemoved.Len())
	for idx, l := 0, rmsRemoved.Len(); idx < l; idx++ {
		c.rmsRemoved[common.RMId(rmsRemoved.At(idx))] = server.EmptyStructVal
	}

	fingerprints := config.Fingerprints()
	fingerprintsMap := make(map[[sha256.Size]byte]server.EmptyStruct, fingerprints.Len())
	for idx, l := 0, fingerprints.Len(); idx < l; idx++ {
		ary := [sha256.Size]byte{}
		copy(ary[:], fingerprints.At(idx))
		fingerprintsMap[ary] = server.EmptyStructVal
	}
	c.fingerprints = fingerprintsMap

	if config.Which() == msgs.CONFIGURATION_TRANSITIONINGTO {
		next := config.TransitioningTo()
		nextConfig := next.Configuration()

		newRMIdsCap := next.NewRMIds()
		newRMIds := make([]common.RMId, newRMIdsCap.Len())
		for idx := range newRMIds {
			newRMIds[idx] = common.RMId(newRMIdsCap.At(idx))
		}

		survivingRMIdsCap := next.SurvivingRMIds()
		survivingRMIds := make([]common.RMId, survivingRMIdsCap.Len())
		for idx := range survivingRMIds {
			survivingRMIds[idx] = common.RMId(survivingRMIdsCap.At(idx))
		}

		lostRMIdsCap := next.LostRMIds()
		lostRMIds := make([]common.RMId, lostRMIdsCap.Len())
		for idx := range lostRMIds {
			lostRMIds[idx] = common.RMId(lostRMIdsCap.At(idx))
		}

		barrierReached1Cap := next.BarrierReached1()
		barrierReached1 := make([]common.RMId, barrierReached1Cap.Len())
		for idx := range barrierReached1 {
			barrierReached1[idx] = common.RMId(barrierReached1Cap.At(idx))
		}

		barrierReached2Cap := next.BarrierReached2()
		barrierReached2 := make([]common.RMId, barrierReached2Cap.Len())
		for idx := range barrierReached2 {
			barrierReached2[idx] = common.RMId(barrierReached2Cap.At(idx))
		}

		pending := next.Pending()

		c.nextConfiguration = &NextConfiguration{
			Configuration:   ConfigurationFromCap(&nextConfig),
			AllHosts:        next.AllHosts().ToArray(),
			NewRMIds:        newRMIds,
			SurvivingRMIds:  survivingRMIds,
			LostRMIds:       lostRMIds,
			InstalledOnNew:  next.InstalledOnNew(),
			BarrierReached1: barrierReached1,
			BarrierReached2: barrierReached2,
			Pending:         ConditionsFromCap(&pending),
		}
	}

	return c
}
Example #26
// from network
func (pm *ProposerManager) TwoBTxnVotesReceived(sender common.RMId, txnId *common.TxnId, twoBTxnVotes *msgs.TwoBTxnVotes) {
	instId := instanceIdPrefix([instanceIdPrefixLen]byte{})
	instIdSlice := instId[:]
	copy(instIdSlice, txnId[:])

	switch twoBTxnVotes.Which() {
	case msgs.TWOBTXNVOTES_FAILURES:
		failures := twoBTxnVotes.Failures()
		server.Log(txnId, "2B received from", sender, "; instance:", common.RMId(failures.RmId()))
		binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], failures.RmId())
		if prop, found := pm.proposals[instId]; found {
			prop.TwoBFailuresReceived(sender, &failures)
		}

	case msgs.TWOBTXNVOTES_OUTCOME:
		binary.BigEndian.PutUint32(instIdSlice[common.KeyLen:], uint32(pm.RMId))
		outcome := twoBTxnVotes.Outcome()

		if proposer, found := pm.proposers[*txnId]; found {
			server.Log(txnId, "2B outcome received from", sender, "(known active)")
			proposer.BallotOutcomeReceived(sender, &outcome)
			return
		}

		txnCap := outcome.Txn()

		alloc := AllocForRMId(&txnCap, pm.RMId)
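		// A 2B only reaches RMs named in the txn's allocations, so
		// alloc is assumed non-nil here even though AllocForRMId can
		// return nil in general.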

		if alloc.Active() != 0 {
			// We have no record of this, but we were active - we must
			// have died and recovered (or we may never have received
			// the txn at all: if we were down, other proposers may
			// have started abort proposers). Thus the outcome could be
			// an abort (abort proposers are out there) or a commit (we
			// previously voted, that vote got recorded, but we have
			// since died and restarted).
			server.Log(txnId, "2B outcome received from", sender, "(unknown active)")

			// There's a possibility the acceptor that sent us this 2B is
			// one of only a few acceptors that got enough 2As to
			// determine the outcome. We must set up new paxos instances
			// to ensure the result is propagated to all. All we need to
			// do is to start a proposal for our own vars. The proposal
			// itself will detect any further absences and take care of
			// them.
			acceptors := GetAcceptorsFromTxn(&txnCap)
			server.Log(txnId, "Starting abort proposals with acceptors", acceptors)
			fInc := int(txnCap.FInc())
			ballots := MakeAbortBallots(&txnCap, alloc)
			pm.NewPaxosProposals(txnId, &txnCap, fInc, ballots, acceptors, pm.RMId, false)

			proposer := NewProposer(pm, txnId, &txnCap, ProposerActiveLearner)
			pm.proposers[*txnId] = proposer
			proposer.Start()
			proposer.BallotOutcomeReceived(sender, &outcome)
		} else {
			// Not active, so we are a learner
			if outcome.Which() == msgs.OUTCOME_COMMIT {
				server.Log(txnId, "2B outcome received from", sender, "(unknown learner)")
				// we must be a learner.
				proposer := NewProposer(pm, txnId, &txnCap, ProposerPassiveLearner)
				pm.proposers[*txnId] = proposer
				proposer.Start()
				proposer.BallotOutcomeReceived(sender, &outcome)

			} else {
				// Whilst it's an abort now, at some point in the past it
				// was a commit and as such we received that
				// outcome. However, we must have since died and so lost
				// that state/proposer. We should now immediately reply
				// with a TLC.
				server.Log(txnId, "Sending immediate TLC for unknown abort learner")
				NewOneShotSender(MakeTxnLocallyCompleteMsg(txnId), pm.ConnectionManager, sender)
			}
		}

	default:
		panic(fmt.Sprintf("Unexpected 2BVotes type: %v", twoBTxnVotes.Which()))
	}
}