Example 1
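// setupConsensusModuleR3_SOLO builds and starts a ConsensusModule for a
// single-node ("_SOLO_") cluster, backed by an in-memory log initialized with
// the given terms and a dummy state machine. Any setup error fails the test.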
func setupConsensusModuleR3_SOLO(
	t *testing.T,
	electionTimeoutLow time.Duration,
	logTerms []TermNo,
	imrsc *inMemoryRpcServiceConnector,
) (IConsensusModule, *log.InMemoryLog, *testhelpers.DummyStateMachine) {
	ps := rps.NewIMPSWithCurrentTerm(0)
	iml := log.TestUtil_NewInMemoryLog_WithTerms(logTerms)
	dsm := testhelpers.NewDummyStateMachine()
	ts := config.TimeSettings{testdata.TickerDuration, electionTimeoutLow}
	ci, err := config.NewClusterInfo([]ServerId{"_SOLO_"}, "_SOLO_")
	if err != nil {
		t.Fatal(err)
	}
	cm, err := NewConsensusModule(ps, iml, imrsc, ci, testdata.MaxEntriesPerAppendEntry, ts)
	if err != nil {
		t.Fatal(err)
	}
	if cm == nil {
		t.Fatal("NewConsensusModule returned nil")
	}
	err = cm.Start(dsm)
	if err != nil {
		t.Fatal(err)
	}
	return cm, iml, dsm
}
Example 2
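// setupConsensusModuleR2 builds and starts a ConsensusModule for the standard
// multi-node test cluster, returning it together with the MockRpcSender it
// was constructed with.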
func setupConsensusModuleR2(
	t *testing.T,
	logTerms []TermNo,
) (*ConsensusModule, *testhelpers.MockRpcSender) {
	ps := rps.NewIMPSWithCurrentTerm(testdata.CurrentTerm)
	iml := log.TestUtil_NewInMemoryLog_WithTerms(logTerms)
	dsm := testhelpers.NewDummyStateMachine()
	mrs := testhelpers.NewMockRpcSender()
	ts := config.TimeSettings{testdata.TickerDuration, testdata.ElectionTimeoutLow}
	ci, err := config.NewClusterInfo(testdata.AllServerIds, testdata.ThisServerId)
	if err != nil {
		t.Fatal(err)
	}
	cm, err := NewConsensusModule(ps, iml, mrs, ci, testdata.MaxEntriesPerAppendEntry, ts)
	if err != nil {
		t.Fatal(err)
	}
	if cm == nil {
		t.Fatal("NewConsensusModule returned nil")
	}
	err = cm.Start(dsm)
	if err != nil {
		t.Fatal(err)
	}
	return cm, mrs
}
Example 3
// NewLockApiImpl constructs a raftlock.RaftLock, an implementation of
// locking.LockApi, backed by a single-node raft ConsensusModule.
func NewLockApiImpl() (*raftlock.RaftLock, error) {

	// -- Prepare raft ConsensusModule parameters

	raftPersistentState := raft_rps.NewIMPSWithCurrentTerm(0)

	raftLog := raft_log.NewInMemoryLog()

	timeSettings := raft_config.TimeSettings{TickerDuration, ElectionTimeoutLow}

	clusterInfo, err := raft_config.NewClusterInfo([]raft.ServerId{"_SOLO_"}, "_SOLO_")
	if err != nil {
		return nil, err
	}

	// -- Create the raft ConsensusModule
	raftCm, err := raft_impl.NewConsensusModule(
		raftPersistentState,
		raftLog,
		nil, // should not actually need RpcService for single-node
		clusterInfo,
		MaxEntriesPerAppendEntry,
		timeSettings,
	)
	if err != nil {
		return nil, err
	}

	// -- Make the LockApi

	raftLock := raftlock.NewRaftLock(
		raftCm,
		raftLog,
		[]string{}, // no initial locks
		0,          // initialCommitIndex
	)

	err = raftCm.Start(raftLock)
	if err != nil {
		return nil, err
	}

	return raftLock, nil
}
Example 4
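// setupManagedConsensusModuleR2 builds a PassiveConsensusModule, wrapped in a
// managedConsensusModule driven by a simulated clock, for either a single-node
// or the standard multi-node test cluster depending on the solo flag.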
func setupManagedConsensusModuleR2(
	t *testing.T,
	logTerms []TermNo,
	solo bool,
) (*managedConsensusModule, *testhelpers.MockRpcSender) {
	ps := rps.NewIMPSWithCurrentTerm(testdata.CurrentTerm)
	iml := log.TestUtil_NewInMemoryLog_WithTerms(logTerms)
	dsm := testhelpers.NewDummyStateMachine()
	mrs := testhelpers.NewMockRpcSender()
	var allServerIds []ServerId
	if solo {
		allServerIds = []ServerId{testdata.ThisServerId}
	} else {
		allServerIds = testdata.AllServerIds
	}
	ci, err := config.NewClusterInfo(allServerIds, testdata.ThisServerId)
	if err != nil {
		t.Fatal(err)
	}
	now := time.Now()
	cm, err := NewPassiveConsensusModule(
		ps,
		iml,
		dsm,
		mrs,
		ci,
		testdata.MaxEntriesPerAppendEntry,
		testdata.ElectionTimeoutLow,
		now,
	)
	if err != nil {
		t.Fatal(err)
	}
	if cm == nil {
		t.Fatal("NewPassiveConsensusModule returned nil")
	}
	// Bias simulated clock to avoid exact time matches
	now = now.Add(testdata.SleepToLetGoroutineRun)
	mcm := &managedConsensusModule{cm, now, iml, dsm}
	return mcm, mrs
}
Example 5
// Run the blackbox test on InMemoryRaftPersistentState
func TestInMemoryRaftPersistentState(t *testing.T) {
	imps := rps.NewIMPSWithCurrentTerm(0)
	testhelpers.BlackboxTest_RaftPersistentState(t, imps)
}