Example 1
// createRunningPbftWithManager wires a pbftCore to its own event manager
// and starts the manager's dispatch loop.
func createRunningPbftWithManager(id uint64, config *viper.Viper, stack innerStack) (*pbftCore, events.Manager) {
	manager := events.NewManagerImpl()
	core := newPbftCore(id, config, stack, events.NewTimerFactoryImpl(manager))
	manager.SetReceiver(core)
	manager.Start()
	return core, manager
}
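Every constructor in this section follows the same wiring sequence: create an event manager, build the PBFT core with a timer factory bound to that manager, register the core as the manager's receiver, then start the dispatch loop. Below is a minimal, self-contained sketch of that receiver/queue pattern; simpleManager and echoReceiver are illustrative stand-ins, not part of the package.

package main

import (
	"fmt"
	"time"
)

// Receiver plays the role pbftCore plays above: the manager hands it
// every event pulled off the queue.
type Receiver interface {
	ProcessEvent(e interface{})
}

// simpleManager is a toy stand-in for events.NewManagerImpl(): a queue
// plus one dispatch goroutine feeding a single receiver.
type simpleManager struct {
	queue    chan interface{}
	receiver Receiver
}

func newSimpleManager() *simpleManager {
	return &simpleManager{queue: make(chan interface{}, 16)}
}

func (m *simpleManager) SetReceiver(r Receiver) { m.receiver = r }

// Start launches the dispatch loop; SetReceiver must be called first,
// matching the ordering used in every constructor in these examples.
func (m *simpleManager) Start() {
	go func() {
		for e := range m.queue {
			m.receiver.ProcessEvent(e)
		}
	}()
}

func (m *simpleManager) Queue() chan<- interface{} { return m.queue }

type echoReceiver struct{}

func (echoReceiver) ProcessEvent(e interface{}) { fmt.Println("event:", e) }

func main() {
	m := newSimpleManager()
	m.SetReceiver(echoReceiver{})
	m.Start()
	m.Queue() <- "request"
	time.Sleep(10 * time.Millisecond) // crude: let the dispatch loop drain
}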
Example 2
// makePBFTNetwork builds an N-replica PBFT test network; a nil config
// falls back to loadConfig() defaults.
func makePBFTNetwork(N int, config *viper.Viper) *pbftNetwork {
	if config == nil {
		config = loadConfig()
	}

	config.Set("general.N", N)
	config.Set("general.f", (N-1)/3)
	endpointFunc := func(id uint64, net *testnet) endpoint {
		tep := makeTestEndpoint(id, net)
		pe := &pbftEndpoint{
			testEndpoint: tep,
			manager:      events.NewManagerImpl(),
		}

		pe.sc = &simpleConsumer{
			pe: pe,
		}

		pe.pbft = newPbftCore(id, config, pe.sc, events.NewTimerFactoryImpl(pe.manager))
		pe.manager.SetReceiver(pe.pbft)

		pe.manager.Start()

		return pe
	}

	pn := &pbftNetwork{testnet: makeTestnet(N, endpointFunc)}
	pn.pbftEndpoints = make([]*pbftEndpoint, len(pn.endpoints))
	for i, ep := range pn.endpoints {
		pn.pbftEndpoints[i] = ep.(*pbftEndpoint)
		pn.pbftEndpoints[i].sc.pbftNet = pn
	}
	return pn
}
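For reference, the tests later in this section (Examples 5 and 6) drive this helper roughly as follows; someRequest is a placeholder for a real *Request:

	net := makePBFTNetwork(4, nil) // nil config falls back to loadConfig()
	defer net.stop()

	net.pbftEndpoints[0].manager.Queue() <- someRequest // inject at vp0
	net.process()                                       // deliver queued messages across the network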
Example 3
// init wires the legacy shim to a fresh pbftCore and starts its event manager.
func (shim *legacyGenericShim) init(id uint64, config *viper.Viper, consumer legacyInnerStack) {
	shim.pbft = &legacyPbftShim{
		closed:   make(chan struct{}),
		manager:  events.NewManagerImpl(),
		consumer: consumer,
	}
	shim.pbft.pbftCore = newPbftCore(id, config, consumer, events.NewTimerFactoryImpl(shim.pbft.manager))
	shim.pbft.manager.SetReceiver(shim.pbft)
	logger.Debugf("Replica %d Consumer is %p", shim.pbft.id, shim.pbft.consumer)
	shim.pbft.manager.Start()
	logger.Debugf("Replica %d legacyGenericShim now initialized: %v", id, shim)
}
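Usage is presumably a zero-value shim followed by init; a hedged sketch, where myConsumer stands for any legacyInnerStack implementation (both names here are illustrative):

	shim := &legacyGenericShim{}
	shim.init(0, loadConfig(), myConsumer)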
Example 4
// newObcBatch constructs a batch-mode PBFT instance on top of the
// provided consensus stack.
func newObcBatch(id uint64, config *viper.Viper, stack consensus.Stack) *obcBatch {
	var err error

	op := &obcBatch{
		obcGeneric: obcGeneric{stack: stack},
	}

	op.persistForward.persistor = stack

	logger.Debugf("Replica %d obtaining startup information", id)

	op.manager = events.NewManagerImpl() // TODO, this is hacky, eventually rip it out
	op.manager.SetReceiver(op)
	etf := events.NewTimerFactoryImpl(op.manager)
	op.pbft = newPbftCore(id, config, op, etf)
	op.manager.Start()
	op.externalEventReceiver.manager = op.manager
	op.broadcaster = newBroadcaster(id, op.pbft.N, op.pbft.f, stack)

	op.batchSize = config.GetInt("general.batchsize")
	op.batchStore = nil
	op.batchTimeout, err = time.ParseDuration(config.GetString("general.timeout.batch"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse batch timeout: %s", err))
	}
	logger.Infof("PBFT Batch size = %d", op.batchSize)
	logger.Infof("PBFT Batch timeout = %v", op.batchTimeout)

	if op.batchTimeout >= op.pbft.requestTimeout {
		op.pbft.requestTimeout = 3 * op.batchTimeout / 2
		logger.Warningf("Configured request timeout must be greater than batch timeout, setting to %v", op.pbft.requestTimeout)
	}

	if op.pbft.requestTimeout >= op.pbft.nullRequestTimeout && op.pbft.nullRequestTimeout != 0 {
		op.pbft.nullRequestTimeout = 3 * op.pbft.requestTimeout / 2
		logger.Warningf("Configured null request timeout must be greater than request timeout, setting to %v", op.pbft.nullRequestTimeout)
	}

	op.incomingChan = make(chan *batchMessage)

	op.batchTimer = etf.CreateTimer()

	op.reqStore = newRequestStore()

	op.deduplicator = newDeduplicator()

	op.idleChan = make(chan struct{})
	close(op.idleChan) // TODO remove eventually

	return op
}
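The two timeout checks above enforce an ordering invariant, batchTimeout < requestTimeout < nullRequestTimeout (the last only when non-zero), bumping each violated bound to 3/2 of the value below it. A standalone sketch of just that arithmetic; the function name is illustrative, not from the package:

package main

import (
	"fmt"
	"time"
)

// enforceTimeoutOrder applies the same 3/2 bumps newObcBatch performs
// when the configured timeouts are out of order. A zero nullReq means
// the null-request timer is disabled and is left untouched.
func enforceTimeoutOrder(batch, request, nullReq time.Duration) (time.Duration, time.Duration) {
	if batch >= request {
		request = 3 * batch / 2
	}
	if nullReq != 0 && request >= nullReq {
		nullReq = 3 * request / 2
	}
	return request, nullReq
}

func main() {
	req, null := enforceTimeoutOrder(2*time.Second, time.Second, time.Second)
	fmt.Println(req, null) // 3s 4.5s: both bounds were violated and bumped
}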
Example 5
// TestReplicaCrash1 simulates the restart of replicas 0 and 1 after
// some state has been built (one request executed). At the time of
// the restart, replica 0 is also the primary. All three submitted
// requests should be executed on all replicas.
func TestReplicaCrash1(t *testing.T) {
	validatorCount := 4
	config := loadConfig()
	config.Set("general.K", 2)
	config.Set("general.logmultiplier", 2)
	net := makePBFTNetwork(validatorCount, config)
	defer net.stop()

	mkreq := func(n int64) *Request {
		txTime := &gp.Timestamp{Seconds: n, Nanos: 0}
		tx := &pb.Transaction{Type: pb.Transaction_CHAINCODE_DEPLOY, Timestamp: txTime}
		txPacked, _ := proto.Marshal(tx)

		return &Request{
			Timestamp: &gp.Timestamp{Seconds: n, Nanos: 0},
			Payload:   txPacked,
			ReplicaId: uint64(generateBroadcaster(validatorCount)),
		}
	}

	net.pbftEndpoints[0].manager.Queue() <- mkreq(1)
	net.process()

	// Simulate the crash of replicas 0 and 1 by building fresh pbftCore instances
	for id := 0; id < 2; id++ {
		pe := net.pbftEndpoints[id]
		pe.pbft = newPbftCore(uint64(id), loadConfig(), pe.sc, events.NewTimerFactoryImpl(pe.manager))
		pe.manager.SetReceiver(pe.pbft)
		pe.pbft.N = 4
		pe.pbft.f = (4 - 1) / 3
		pe.pbft.K = 2
		pe.pbft.L = 2 * pe.pbft.K
	}

	net.pbftEndpoints[0].manager.Queue() <- mkreq(2)
	net.pbftEndpoints[0].manager.Queue() <- mkreq(3)
	net.process()

	for _, pep := range net.pbftEndpoints {
		if pep.sc.executions != 3 {
			t.Errorf("Expected 3 executions on replica %d, got %d", pep.id, pep.sc.executions)
			continue
		}

		if pep.pbft.view != 0 {
			t.Errorf("Replica %d should still be in view 0, is %v %d", pep.id, pep.pbft.activeView, pep.pbft.view)
		}
	}
}
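The restart idiom above — build a fresh pbftCore against the surviving event manager, re-register it as the receiver, then re-apply N, f, K, and L — recurs with an explicit close() in the next example. A condensed sketch of that idiom under the same assumptions (restartEndpoint is an illustrative name, not in the package):

// restartEndpoint simulates a crash/restart for one endpoint: the old
// core is discarded and a fresh one is wired to the existing manager.
func restartEndpoint(pe *pbftEndpoint, id uint64, config *viper.Viper) {
	pe.pbft = newPbftCore(id, config, pe.sc, events.NewTimerFactoryImpl(pe.manager))
	pe.manager.SetReceiver(pe.pbft)
	pe.pbft.N = 4
	pe.pbft.f = (4 - 1) / 3
}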
Example 6
// TestReplicaCrash3 simulates a restart that requires a view change,
// recovering from a checkpoint restored from persisted state.
// Replicas 0, 1, and 2 participate up to a checkpoint, then all crash.
// Replicas 0, 1, and 3 then start back up, and a view change must be
// triggered to bring vp3 up to speed.
func TestReplicaCrash3(t *testing.T) {
	validatorCount := 4
	config := loadConfig()
	config.Set("general.K", 2)
	config.Set("general.logmultiplier", 2)
	net := makePBFTNetwork(validatorCount, config)
	defer net.stop()

	twoOffline := false
	threeOffline := true
	net.filterFn = func(src int, dst int, msg []byte) []byte {
		if twoOffline && dst == 2 { // 2 is 'offline'
			return nil
		}
		if threeOffline && dst == 3 { // 3 is 'offline'
			return nil
		}
		return msg
	}

	mkreq := func(n int64) *Request {
		txTime := &gp.Timestamp{Seconds: n, Nanos: 0}
		tx := &pb.Transaction{Type: pb.Transaction_CHAINCODE_DEPLOY, Timestamp: txTime}
		txPacked, _ := proto.Marshal(tx)

		return &Request{
			Timestamp: &gp.Timestamp{Seconds: n, Nanos: 0},
			Payload:   txPacked,
			ReplicaId: uint64(generateBroadcaster(validatorCount)),
		}
	}

	for i := int64(1); i <= 8; i++ {
		net.pbftEndpoints[0].manager.Queue() <- mkreq(i)
	}
	net.process() // vp0,1,2 should have a stable checkpoint for seqNo 8

	// Create new pbft instances to restore from persistence
	for id := 0; id < 2; id++ {
		pe := net.pbftEndpoints[id]
		config := loadConfig()
		config.Set("general.K", "2")
		pe.pbft.close()
		pe.pbft = newPbftCore(uint64(id), config, pe.sc, events.NewTimerFactoryImpl(pe.manager))
		pe.manager.SetReceiver(pe.pbft)
		pe.pbft.N = 4
		pe.pbft.f = (4 - 1) / 3
		pe.pbft.requestTimeout = 200 * time.Millisecond
	}

	threeOffline = false
	twoOffline = true

	// Because vp2 is 'offline', and vp3 is still at the genesis block, the network needs to make a view change

	net.pbftEndpoints[0].manager.Queue() <- mkreq(9)
	net.process()

	// Now vp0,1,3 should be in sync with 9 executions in view 1, and vp2 should be at 8 executions in view 0
	for i, pep := range net.pbftEndpoints {
		if i == 2 {
			// 2 is 'offline'
			if pep.pbft.view != 0 {
				t.Errorf("Expected replica %d to be in view 0, got %d", pep.id, pep.pbft.view)
			}
			expectedExecutions := uint64(8)
			if pep.sc.executions != expectedExecutions {
				t.Errorf("Expected %d executions on replica %d, got %d", expectedExecutions, pep.id, pep.sc.executions)
			}
			continue
		}

		if pep.pbft.view != 1 {
			t.Errorf("Expected replica %d to be in view 1, got %d", pep.id, pep.pbft.view)
		}

		expectedExecutions := uint64(9)
		if pep.sc.executions != expectedExecutions {
			t.Errorf("Expected %d executions on replica %d, got %d", expectedExecutions, pep.id, pep.sc.executions)
		}
	}
}
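The filterFn hook is what takes replicas "offline" in this test: returning nil drops the message to that destination. The same shape generalizes to any crash set; a brief illustrative variant (the offline map is not in the original):

	offline := map[int]bool{3: true} // start with vp3 down
	net.filterFn = func(src int, dst int, msg []byte) []byte {
		if offline[dst] {
			return nil // dropped: dst behaves as crashed
		}
		return msg
	}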