Example #1
func newObcBatch(id uint64, config *viper.Viper, stack consensus.Stack) *obcBatch {
	var err error

	op := &obcBatch{
		obcGeneric: obcGeneric{stack},
		stack:      stack,
	}

	op.persistForward.persistor = stack

	logger.Debug("Replica %d obtaining startup information", id)

	op.pbft = newPbftCore(id, config, op)

	op.batchSize = config.GetInt("general.batchSize")
	op.batchStore = nil
	op.batchTimeout, err = time.ParseDuration(config.GetString("general.timeout.batch"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse batch timeout: %s", err))
	}

	op.incomingChan = make(chan *batchMessage)

	// create non-running timer
	op.batchTimer = time.NewTimer(100 * time.Hour) // XXX ugly
	op.batchTimer.Stop()

	op.idleChan = make(chan struct{})

	go op.main()
	return op
}
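A minimal, self-contained sketch of the configuration this constructor expects, using only viper calls that appear above; the in-memory Set values are illustrative assumptions, not the project's real config file:

package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// Keys mirror the reads in newObcBatch; the values are made up.
	v.Set("general.batchSize", 500)
	v.Set("general.timeout.batch", "1s")

	batchSize := v.GetInt("general.batchSize")
	batchTimeout, err := time.ParseDuration(v.GetString("general.timeout.batch"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse batch timeout: %s", err))
	}
	fmt.Printf("batch size %d, batch timeout %v\n", batchSize, batchTimeout)
}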
Example #2
func newObcBatch(id uint64, config *viper.Viper, stack consensus.Stack) *obcBatch {
	var err error

	op := &obcBatch{
		obcGeneric: obcGeneric{stack: stack},
	}

	op.persistForward.persistor = stack

	logger.Debugf("Replica %d obtaining startup information", id)

	op.manager = events.NewManagerImpl() // TODO, this is hacky, eventually rip it out
	op.manager.SetReceiver(op)
	etf := events.NewTimerFactoryImpl(op.manager)
	op.pbft = newPbftCore(id, config, op, etf)
	op.manager.Start()
	blockchainInfoBlob := stack.GetBlockchainInfoBlob()
	op.externalEventReceiver.manager = op.manager
	op.broadcaster = newBroadcaster(id, op.pbft.N, op.pbft.f, op.pbft.broadcastTimeout, stack)
	op.manager.Queue() <- workEvent(func() {
		op.pbft.stateTransfer(&stateUpdateTarget{
			checkpointMessage: checkpointMessage{
				seqNo: op.pbft.lastExec,
				id:    blockchainInfoBlob,
			},
		})
	})

	op.batchSize = config.GetInt("general.batchsize")
	op.batchStore = nil
	op.batchTimeout, err = time.ParseDuration(config.GetString("general.timeout.batch"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse batch timeout: %s", err))
	}
	logger.Infof("PBFT Batch size = %d", op.batchSize)
	logger.Infof("PBFT Batch timeout = %v", op.batchTimeout)

	if op.batchTimeout >= op.pbft.requestTimeout {
		op.pbft.requestTimeout = 3 * op.batchTimeout / 2
		logger.Warningf("Configured request timeout must be greater than batch timeout, setting to %v", op.pbft.requestTimeout)
	}

	if op.pbft.requestTimeout >= op.pbft.nullRequestTimeout && op.pbft.nullRequestTimeout != 0 {
		op.pbft.nullRequestTimeout = 3 * op.pbft.requestTimeout / 2
		logger.Warningf("Configured null request timeout must be greater than request timeout, setting to %v", op.pbft.nullRequestTimeout)
	}

	op.incomingChan = make(chan *batchMessage)

	op.batchTimer = etf.CreateTimer()

	op.reqStore = newRequestStore()

	op.deduplicator = newDeduplicator()

	op.idleChan = make(chan struct{})
	close(op.idleChan) // TODO remove eventually

	return op
}
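This variant derives dependent timeouts from one another: the request timeout must exceed the batch timeout, and the null-request timeout (when enabled) must exceed the request timeout. A standalone sketch of that ordering rule, with the pbft fields replaced by plain parameters for illustration:

package main

import (
	"fmt"
	"time"
)

// ensureOrdering mirrors the two checks above: a timeout that does not exceed
// the one it depends on is bumped to 1.5x the smaller value.
func ensureOrdering(batch, request, nullRequest time.Duration) (time.Duration, time.Duration) {
	if batch >= request {
		request = 3 * batch / 2
	}
	if nullRequest != 0 && request >= nullRequest {
		nullRequest = 3 * request / 2
	}
	return request, nullRequest
}

func main() {
	req, null := ensureOrdering(time.Second, time.Second, 2*time.Second)
	fmt.Println(req, null) // 1.5s 2s
}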
Example #3
func newObcBatch(id uint64, config *viper.Viper, stack consensus.Stack) *obcBatch {
	var err error

	op := &obcBatch{stack: stack}
	op.startup = make(chan []byte)

	op.executor = NewOBCExecutor(config, op, stack)

	logger.Debug("Replica %d obtaining startup information", id)
	startupInfo := <-op.startup
	close(op.startup)

	op.pbft = newPbftCore(id, config, op, startupInfo)

	queueSize := config.GetInt("executor.queuesize")
	if queueSize <= int(op.pbft.L) {
		logger.Error("Replica %d has executor queue size %d less than PBFT log size %d, this indicates a misconfiguration", id, queueSize, op.pbft.L)
	}

	op.batchSize = config.GetInt("general.batchSize")
	op.batchStore = nil
	op.batchTimeout, err = time.ParseDuration(config.GetString("general.timeout.batch"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse batch timeout: %s", err))
	}

	// create non-running timer
	op.batchTimer = time.NewTimer(100 * time.Hour) // XXX ugly
	op.batchTimer.Stop()
	go op.batchTimerHandler()
	return op
}
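The startup channel here is a one-shot handshake: NewOBCExecutor (constructed first) delivers the startup blob asynchronously, the constructor blocks until it arrives, then closes the channel. A reduced sketch of the pattern with placeholder names:

package main

import "fmt"

func main() {
	startup := make(chan []byte)

	// Stand-in for the executor, which reports startup info from another goroutine.
	go func() { startup <- []byte("startup-info") }()

	startupInfo := <-startup // block until the producer reports in
	close(startup)           // one-shot: nothing more will be sent

	fmt.Printf("got %q\n", startupInfo)
}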
Example #4
func NewOBCExecutor(config *viper.Viper, orderer Orderer, stack statetransfer.PartialStack) (obcex *obcExecutor) {
	var err error
	obcex = &obcExecutor{}
	queueSize := config.GetInt("executor.queuesize")
	if queueSize <= 0 {
		panic("Queue size must be positive")
	}

	obcex.executionQueue = make(chan *transaction, queueSize)
	obcex.syncTargets = make(chan *syncTarget)
	obcex.completeSync = make(chan *syncTarget)
	obcex.threadIdle = make(chan struct{})
	obcex.threadExit = make(chan struct{})
	obcex.orderer = orderer
	obcex.executorStack = stack
	obcex.id, _, err = stack.GetNetworkHandles()
	if nil != err {
		logger.Error("Could not resolve our own PeerID, assigning dummy")
		obcex.id = &pb.PeerID{"Dummy"}
	}

	logger.Info("Executor for %v using queue size of %d", obcex.id, queueSize)

	obcex.sts = statetransfer.NewStateTransferState(config, stack)

	listener := struct{ statetransfer.ProtoListener }{}
	listener.CompletedImpl = obcex.stateTransferCompleted
	obcex.sts.RegisterListener(&listener)

	go obcex.queueThread()
	return
}
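executionQueue is a buffered channel used as a bounded work queue drained by the single queueThread goroutine; producers block once queueSize items are pending. A minimal sketch of that shape (the transaction type and consumer loop are illustrative assumptions):

package main

import "fmt"

type transaction struct{ id int }

func main() {
	queueSize := 4 // would come from config.GetInt("executor.queuesize")
	executionQueue := make(chan *transaction, queueSize)
	done := make(chan struct{})

	// Single consumer, as in queueThread.
	go func() {
		for tx := range executionQueue {
			fmt.Println("executing", tx.id)
		}
		close(done)
	}()

	for i := 0; i < 6; i++ {
		executionQueue <- &transaction{id: i} // blocks while the buffer is full
	}
	close(executionQueue)
	<-done
}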
Example #5
func loadOAuth2Config(v *viper.Viper) {
	Config.OAuth2Enabled = v.GetBool("stormpath.web.oauth2.enabled")
	Config.OAuth2URI = v.GetString("stormpath.web.oauth2.uri")
	Config.OAuth2ClientCredentialsGrantTypeEnabled = v.GetBool("stormpath.web.oauth2.client_credentials.enabled")
	Config.OAuth2ClientCredentialsGrantTypeAccessTokenTTL = time.Duration(v.GetInt("stormpath.web.oauth2.client_credentials.accessToken.ttl")) * time.Second
	Config.OAuth2PasswordGrantTypeEnabled = v.GetBool("stormpath.web.oauth2.password.enabled")
	Config.OAuth2PasswordGrantTypeValidationStrategy = v.GetString("stormpath.web.oauth2.password.validationStrategy")
}
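Note the explicit time.Duration(GetInt(...)) * time.Second conversion for the TTL: viper's GetDuration treats a bare integer as nanoseconds, so multiplying an integer-seconds key is the safer reading. A sketch of the difference (key reused from above, value invented):

package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.Set("stormpath.web.oauth2.client_credentials.accessToken.ttl", 3600) // seconds

	// Explicit conversion, as in loadOAuth2Config: integer seconds -> Duration.
	ttl := time.Duration(v.GetInt("stormpath.web.oauth2.client_credentials.accessToken.ttl")) * time.Second
	fmt.Println(ttl) // 1h0m0s

	// GetDuration on the same bare integer yields 3.6µs (3600ns), not one hour.
	fmt.Println(v.GetDuration("stormpath.web.oauth2.client_credentials.accessToken.ttl"))
}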
Example #6
func newInstance(v *viper.Viper) *instance {
	var i = instance{
		viper: v,
		slots: v.GetInt("start.parallel"),
	}
	i.slotAvailable = sync.NewCond(&i.m)
	i.taskFinished = sync.NewCond(&i.m)
	return &i
}
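newInstance pairs two sync.Cond variables over one mutex to ration a fixed number of parallel slots. The constructor alone doesn't show the wait/signal side; a self-contained sketch of one plausible acquire/release half (the method names are assumptions, not part of the original):

package main

import (
	"fmt"
	"sync"
)

type instance struct {
	m             sync.Mutex
	slots         int
	slotAvailable *sync.Cond
}

func newInstance(slots int) *instance {
	i := &instance{slots: slots}
	i.slotAvailable = sync.NewCond(&i.m)
	return i
}

func (i *instance) acquire() {
	i.m.Lock()
	for i.slots == 0 {
		i.slotAvailable.Wait() // releases i.m while blocked
	}
	i.slots--
	i.m.Unlock()
}

func (i *instance) release() {
	i.m.Lock()
	i.slots++
	i.m.Unlock()
	i.slotAvailable.Signal()
}

func main() {
	inst := newInstance(2)
	var wg sync.WaitGroup
	for n := 0; n < 5; n++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			inst.acquire()
			fmt.Println("task", n, "holds a slot")
			inst.release()
		}(n)
	}
	wg.Wait()
}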
Example #7
func ThreadlessNewStateTransferState(config *viper.Viper, stack PartialStack) *StateTransferState {
	var err error
	sts := &StateTransferState{}

	sts.stateTransferListenersLock = &sync.Mutex{}

	sts.stack = stack
	sts.id, _, err = stack.GetNetworkHandles()

	if nil != err {
		logger.Debug("Error resolving our own PeerID, this shouldn't happen")
		sts.id = &protos.PeerID{"ERROR_RESOLVING_ID"}
	}

	sts.asynchronousTransferInProgress = false

	sts.RecoverDamage = config.GetBool("statetransfer.recoverdamage")

	sts.stateValid = true // Assume our starting state is correct unless told otherwise

	sts.validBlockRanges = make([]*blockRange, 0)
	sts.blockVerifyChunkSize = uint64(config.GetInt("statetransfer.blocksperrequest"))
	if sts.blockVerifyChunkSize == 0 {
		panic(fmt.Errorf("Must set statetransfer.blocksperrequest to be nonzero"))
	}

	sts.initiateStateSync = make(chan *syncMark)
	sts.blockHashReceiver = make(chan *blockHashReply, 1)
	sts.blockSyncReq = make(chan *blockSyncReq)

	sts.threadExit = make(chan struct{})

	sts.blockThreadIdle = true
	sts.stateThreadIdle = true

	sts.blockThreadIdleChan = make(chan struct{})
	sts.stateThreadIdleChan = make(chan struct{})

	sts.DiscoveryThrottleTime = 1 * time.Second // TODO make this configurable

	sts.BlockRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.singleblock"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.singleblock timeout: %s", err))
	}
	sts.StateDeltaRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.singlestatedelta"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.singlestatedelta timeout: %s", err))
	}
	sts.StateSnapshotRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.fullstate"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.fullstate timeout: %s", err))
	}

	return sts
}
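Three identical parse-or-panic blocks suggest a small helper. A sketch of one way to fold them up; mustParseDuration is an invented name, not part of the original code:

package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

// mustParseDuration reads key from v and panics with the key name on failure,
// matching the error style used throughout these constructors.
func mustParseDuration(v *viper.Viper, key string) time.Duration {
	d, err := time.ParseDuration(v.GetString(key))
	if err != nil {
		panic(fmt.Errorf("Cannot parse %s timeout: %s", key, err))
	}
	return d
}

func main() {
	v := viper.New()
	v.Set("statetransfer.timeout.singleblock", "20s")
	fmt.Println(mustParseDuration(v, "statetransfer.timeout.singleblock"))
}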
Example #8
func newPbftCore(id uint64, config *viper.Viper, consumer innerCPI) *pbftCore {
	instance := &pbftCore{}
	instance.id = id
	instance.consumer = consumer

	// in dev/debugging mode you are expected to override the config values
	// with the environment variable OPENCHAIN_OBCPBFT_X_Y

	// read from the config file
	// you can override the config values with the environment variable prefix
	// OPENCHAIN_OBCPBFT, e.g. OPENCHAIN_OBCPBFT_BYZANTINE
	var err error
	instance.byzantine = config.GetBool("replica.byzantine")
	instance.f = config.GetInt("general.f")
	instance.K = uint64(config.GetInt("general.K"))
	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
	}

	instance.activeView = true
	instance.L = 2 * instance.K // log size
	instance.replicaCount = 3*instance.f + 1

	// init the logs
	instance.certStore = make(map[msgID]*msgCert)
	instance.reqStore = make(map[string]*Request)
	instance.checkpointStore = make(map[Checkpoint]bool)
	instance.chkpts = make(map[uint64]string)
	instance.viewChangeStore = make(map[vcidx]*ViewChange)
	instance.pset = make(map[uint64]*ViewChange_PQ)
	instance.qset = make(map[qidx]*ViewChange_PQ)
	instance.newViewStore = make(map[uint64]*NewView)

	// load genesis checkpoint
	stateHash, err := instance.consumer.getStateHash(0)
	if err != nil {
		panic(fmt.Errorf("Cannot load genesis block: %s", err))
	}
	instance.chkpts[0] = base64.StdEncoding.EncodeToString(stateHash)

	// create non-running timer XXX ugly
	instance.newViewTimer = time.NewTimer(100 * time.Hour)
	instance.newViewTimer.Stop()
	instance.lastNewViewTimeout = instance.newViewTimeout
	instance.outstandingReqs = make(map[string]*Request)

	go instance.timerHandler()

	return instance
}
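The time.NewTimer(100 * time.Hour) / Stop() pair (flagged "XXX ugly" by the authors) is an idiom for obtaining a timer object that is not yet running; it is armed later with Reset. A sketch of the full lifecycle:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Create a timer that will never fire: huge duration, stopped immediately.
	t := time.NewTimer(100 * time.Hour)
	t.Stop()

	// Later, arm it for real.
	t.Reset(50 * time.Millisecond)
	<-t.C
	fmt.Println("timer fired")
}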
Example #9
func newObcBatch(id uint64, config *viper.Viper, stack consensus.Stack) *obcBatch {
	var err error
	op := &obcBatch{stack: stack}
	op.pbft = newPbftCore(id, config, op, stack)
	op.batchSize = config.GetInt("general.batchSize")
	op.batchStore = nil
	op.batchTimeout, err = time.ParseDuration(config.GetString("general.timeout.batch"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse batch timeout: %s", err))
	}
	// create non-running timer
	op.batchTimer = time.NewTimer(100 * time.Hour) // XXX ugly
	op.batchTimer.Stop()
	go op.batchTimerHandler()
	return op
}
Example #10
func newObcBatch(id uint64, config *viper.Viper, cpi consensus.CPI) *obcBatch {
	var err error
	op := &obcBatch{cpi: cpi}
	op.pbft = newPbftCore(id, config, op, cpi)
	op.batchSize = config.GetInt("general.batchSize")
	op.batchStore = make(map[string]*Request)
	op.batchTimeout, err = time.ParseDuration(config.GetString("general.timeout.batch"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse batch timeout: %s", err))
	}
	// create non-running timer XXX ugly
	op.batchTimer = time.NewTimer(100 * time.Hour)
	op.batchTimer.Stop()
	go op.batchTimerHandler()
	return op
}
Example #11
func ThreadlessNewStateTransferState(id *protos.PeerID, config *viper.Viper, ledger consensus.LedgerStack, defaultPeerIDs []*protos.PeerID) *StateTransferState {
	sts := &StateTransferState{}

	sts.stateTransferListenersLock = &sync.Mutex{}

	sts.ledger = ledger
	sts.id = id

	sts.asynchronousTransferInProgress = false

	logger.Debug("%v assigning %v to defaultPeerIDs", id, defaultPeerIDs)
	sts.defaultPeerIDs = defaultPeerIDs

	sts.RecoverDamage = config.GetBool("statetransfer.recoverdamage")

	sts.stateValid = true // Assume our starting state is correct unless told otherwise

	sts.validBlockRanges = make([]*blockRange, 0)
	sts.blockVerifyChunkSize = uint64(config.GetInt("statetransfer.blocksperrequest"))
	if sts.blockVerifyChunkSize == 0 {
		panic(fmt.Errorf("Must set statetransfer.blocksperrequest to be nonzero"))
	}

	sts.initiateStateSync = make(chan *syncMark, 1)
	sts.blockHashReceiver = make(chan *blockHashReply, 1)
	sts.blockSyncReq = make(chan *blockSyncReq)

	sts.blockThreadExit = make(chan struct{}, 1)
	sts.stateThreadExit = make(chan struct{}, 1)

	var err error

	sts.BlockRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.singleblock"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.singleblock timeout: %s", err))
	}
	sts.StateDeltaRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.singlestatedelta"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.singlestatedelta timeout: %s", err))
	}
	sts.StateSnapshotRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.fullstate"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.fullstate timeout: %s", err))
	}

	return sts
}
Example #12
func newObcClassic(id uint64, config *viper.Viper, stack consensus.Stack) *obcClassic {
	op := &obcClassic{stack: stack}

	op.startup = make(chan []byte)

	op.executor = NewOBCExecutor(config, op, stack)

	logger.Debug("Replica %d obtaining startup information", id)
	startupInfo := <-op.startup
	close(op.startup)

	op.pbft = newPbftCore(id, config, op, startupInfo)

	queueSize := config.GetInt("executor.queuesize")
	if queueSize <= int(op.pbft.L) {
		logger.Error("Replica %d has executor queue size %d less than PBFT log size %d, this indicates a misconfiguration", id, queueSize, op.pbft.L)
	}

	return op
}
Example #13
func newObcBatch(id uint64, config *viper.Viper, stack consensus.Stack) *obcBatch {
	var err error

	op := &obcBatch{
		obcGeneric: obcGeneric{stack: stack},
	}

	op.persistForward.persistor = stack

	logger.Debugf("Replica %d obtaining startup information", id)

	op.manager = events.NewManagerImpl() // TODO, this is hacky, eventually rip it out
	op.manager.SetReceiver(op)
	etf := events.NewTimerFactoryImpl(op.manager)
	op.pbft = newPbftCore(id, config, op, etf)
	op.manager.Start()
	op.externalEventReceiver.manager = op.manager
	op.broadcaster = newBroadcaster(id, op.pbft.N, op.pbft.f, stack)

	op.batchSize = config.GetInt("general.batchsize")
	op.batchStore = nil
	op.batchTimeout, err = time.ParseDuration(config.GetString("general.timeout.batch"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse batch timeout: %s", err))
	}
	logger.Infof("PBFT Batch size = %d", op.batchSize)
	logger.Infof("PBFT Batch timeout = %v", op.batchTimeout)

	op.incomingChan = make(chan *batchMessage)

	op.batchTimer = etf.CreateTimer()

	op.outstandingReqs = make(map[*Request]struct{})

	op.idleChan = make(chan struct{})
	close(op.idleChan) // TODO remove eventually

	return op
}
Example #14
func getGlobalProject(v *viper.Viper) (*libcentrifugo.Project, bool) {
	p := &libcentrifugo.Project{}

	// TODO: the same as for structureFromConfig function
	if v == nil {
		if !viper.IsSet("project_name") || viper.GetString("project_name") == "" {
			return nil, false
		}
		p.Name = libcentrifugo.ProjectKey(viper.GetString("project_name"))
		p.Secret = viper.GetString("project_secret")
		p.ConnLifetime = int64(viper.GetInt("project_connection_lifetime"))
		p.Anonymous = viper.GetBool("project_anonymous")
		p.Watch = viper.GetBool("project_watch")
		p.Publish = viper.GetBool("project_publish")
		p.JoinLeave = viper.GetBool("project_join_leave")
		p.Presence = viper.GetBool("project_presence")
		p.HistorySize = int64(viper.GetInt("project_history_size"))
		p.HistoryLifetime = int64(viper.GetInt("project_history_lifetime"))
	} else {
		if !v.IsSet("project_name") || v.GetString("project_name") == "" {
			return nil, false
		}
		p.Name = libcentrifugo.ProjectKey(v.GetString("project_name"))
		p.Secret = v.GetString("project_secret")
		p.ConnLifetime = int64(v.GetInt("project_connection_lifetime"))
		p.Anonymous = v.GetBool("project_anonymous")
		p.Watch = v.GetBool("project_watch")
		p.Publish = v.GetBool("project_publish")
		p.JoinLeave = v.GetBool("project_join_leave")
		p.Presence = v.GetBool("project_presence")
		p.HistorySize = int64(v.GetInt("project_history_size"))
		p.HistoryLifetime = int64(v.GetInt("project_history_lifetime"))
	}

	var nl []libcentrifugo.Namespace
	if v == nil {
		viper.MarshalKey("project_namespaces", &nl)
	} else {
		v.MarshalKey("project_namespaces", &nl)
	}
	p.Namespaces = nl

	return p, true
}
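The two branches differ only in whether they read the passed-in Viper or the package-level one. Since the global viper functions delegate to a global *viper.Viper, the duplication can be folded by selecting the instance once; a sketch on one representative field:

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func projectName(v *viper.Viper) (string, bool) {
	if v == nil {
		v = viper.GetViper() // the global instance behind viper.GetString et al.
	}
	if !v.IsSet("project_name") || v.GetString("project_name") == "" {
		return "", false
	}
	return v.GetString("project_name"), true
}

func main() {
	viper.Set("project_name", "demo")
	name, ok := projectName(nil)
	fmt.Println(name, ok)
}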
Example #15
func newPbftCore(id uint64, config *viper.Viper, consumer innerStack) *pbftCore {
	var err error
	instance := &pbftCore{}
	instance.id = id
	instance.consumer = consumer
	instance.closed = make(chan struct{})
	instance.incomingChan = make(chan *pbftMessage)
	instance.stateUpdatedChan = make(chan *checkpointMessage)
	instance.stateUpdatingChan = make(chan *checkpointMessage)
	instance.execCompleteChan = make(chan struct{})
	instance.idleChan = make(chan struct{})
	instance.injectChan = make(chan func())

	// TODO Ultimately, the timer factory will be passed in, and the existence of the manager
	// will be hidden from pbftCore, but in the interest of a small PR, leaving it here for now
	instance.manager = newEventManagerImpl(instance)
	etf := newEventTimerFactoryImpl(instance.manager)
	instance.newViewTimer = etf.createTimer()

	instance.N = config.GetInt("general.N")
	instance.f = config.GetInt("general.f")
	if instance.f*3+1 > instance.N {
		panic(fmt.Sprintf("need at least %d enough replicas to tolerate %d byzantine faults, but only %d replicas configured", instance.f*3+1, instance.f, instance.N))
	}

	instance.K = uint64(config.GetInt("general.K"))

	instance.logMultiplier = uint64(config.GetInt("general.logmultiplier"))
	if instance.logMultiplier < 2 {
		panic("Log multiplier must be greater than or equal to 2")
	}
	instance.L = instance.logMultiplier * instance.K // log size

	instance.byzantine = config.GetBool("general.byzantine")

	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
	}

	instance.activeView = true
	instance.replicaCount = instance.N

	logger.Info("PBFT type = %T", instance.consumer)
	logger.Info("PBFT Max number of validating peers (N) = %v", instance.N)
	logger.Info("PBFT Max number of failing peers (f) = %v", instance.f)
	logger.Info("PBFT byzantine flag = %v", instance.byzantine)
	logger.Info("PBFT request timeout = %v", instance.requestTimeout)
	logger.Info("PBFT view change timeout = %v", instance.newViewTimeout)
	logger.Info("PBFT Checkpoint period (K) = %v", instance.K)
	logger.Info("PBFT Log multiplier = %v", instance.logMultiplier)
	logger.Info("PBFT log size (L) = %v", instance.L)

	// init the logs
	instance.certStore = make(map[msgID]*msgCert)
	instance.reqStore = make(map[string]*Request)
	instance.checkpointStore = make(map[Checkpoint]bool)
	instance.chkpts = make(map[uint64]string)
	instance.viewChangeStore = make(map[vcidx]*ViewChange)
	instance.pset = make(map[uint64]*ViewChange_PQ)
	instance.qset = make(map[qidx]*ViewChange_PQ)
	instance.newViewStore = make(map[uint64]*NewView)

	// initialize state transfer
	instance.hChkpts = make(map[uint64]uint64)

	instance.chkpts[0] = "XXX GENESIS"

	instance.lastNewViewTimeout = instance.newViewTimeout
	instance.outstandingReqs = make(map[string]*Request)
	instance.missingReqs = make(map[string]bool)

	instance.restoreState()

	return instance
}
Example #16
func newPbftCore(id uint64, config *viper.Viper, consumer innerStack, ledger consensus.LedgerStack) *pbftCore {
	var err error
	instance := &pbftCore{}
	instance.id = id
	instance.consumer = consumer
	instance.ledger = ledger
	instance.closed = make(chan bool)
	instance.notifyCommit = make(chan bool, 1)
	instance.notifyExec = sync.NewCond(&instance.internalLock)

	instance.N = config.GetInt("general.N")
	instance.f = config.GetInt("general.f")
	if instance.f*3+1 > instance.N {
		panic(fmt.Sprintf("need at least %d enough replicas to tolerate %d byzantine faults, but only %d replicas configured", instance.f*3+1, instance.f, instance.N))
	}

	instance.K = uint64(config.GetInt("general.K"))

	instance.byzantine = config.GetBool("general.byzantine")

	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
	}

	instance.activeView = true
	instance.L = 2 * instance.K // log size
	instance.replicaCount = instance.N

	// init the logs
	instance.certStore = make(map[msgID]*msgCert)
	instance.reqStore = make(map[string]*Request)
	instance.checkpointStore = make(map[Checkpoint]bool)
	instance.chkpts = make(map[uint64]*blockState)
	instance.viewChangeStore = make(map[vcidx]*ViewChange)
	instance.pset = make(map[uint64]*ViewChange_PQ)
	instance.qset = make(map[qidx]*ViewChange_PQ)
	instance.newViewStore = make(map[uint64]*NewView)

	// initialize state transfer
	instance.hChkpts = make(map[uint64]uint64)

	defaultPeerIDs := make([]*protos.PeerID, instance.replicaCount-1)
	if instance.replicaCount > 1 {
		// For some tests, only 1 replica will be present, and defaultPeerIDs makes no sense
		for i := uint64(0); i < uint64(instance.replicaCount); i++ {
			handle, err := getValidatorHandle(i)
			if err != nil {
				panic(fmt.Errorf("Cannot retrieve handle for peer which must exist : %s", err))
			}
			if i < instance.id {
				logger.Debug("Replica %d assigning %v to index %d for replicaCount %d and id %d", instance.id, handle, i, instance.replicaCount, instance.id)
				defaultPeerIDs[i] = handle
			} else if i > instance.id {
				logger.Debug("Replica %d assigning %v to index %d for replicaCount %d and id %d", instance.id, handle, i-1, instance.replicaCount, instance.id)
				defaultPeerIDs[i-1] = handle
			} else {
				// This is our ID, do not add it to the list of default peers
			}
		}
	} else {
		logger.Debug("Replica %d not initializing defaultPeerIDs, as replicaCount is %d", instance.id, instance.replicaCount)
	}

	if myHandle, err := getValidatorHandle(instance.id); err != nil {
		panic("Could not retrieve own handle")
	} else {
		instance.sts = statetransfer.NewStateTransferState(myHandle, config, ledger, defaultPeerIDs)
	}

	listener := struct{ statetransfer.ProtoListener }{}
	listener.CompletedImpl = instance.stateTransferCompleted
	instance.sts.RegisterListener(&listener)

	// load genesis checkpoint
	genesisBlock, err := instance.ledger.GetBlock(0)
	if err != nil {
		panic(fmt.Errorf("Cannot load genesis block: %s", err))
	}
	genesisHash, err := ledger.HashBlock(genesisBlock)
	if err != nil {
		panic(fmt.Errorf("Cannot hash genesis block: %s", err))
	}
	instance.chkpts[0] = &blockState{
		blockNumber: 0,
		blockHash:   base64.StdEncoding.EncodeToString(genesisHash),
	}

	// create non-running timer XXX ugly
	instance.newViewTimer = time.NewTimer(100 * time.Hour)
	instance.newViewTimer.Stop()
	instance.timerResetCount = 1
	instance.lastNewViewTimeout = instance.newViewTimeout
	instance.outstandingReqs = make(map[string]*Request)
	instance.missingReqs = make(map[string]bool)

	go instance.timerHandler()
	go instance.executeRoutine()

	return instance
}
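The defaultPeerIDs loop packs every validator handle except the replica's own into a slice one element shorter, shifting the indices above id down by one. The same index arithmetic in isolation, with handles reduced to plain integers for illustration:

package main

import "fmt"

func peersExcludingSelf(replicaCount int, self uint64) []uint64 {
	peers := make([]uint64, replicaCount-1)
	for i := uint64(0); i < uint64(replicaCount); i++ {
		if i < self {
			peers[i] = i
		} else if i > self {
			peers[i-1] = i
		} // i == self: our own ID is omitted
	}
	return peers
}

func main() {
	fmt.Println(peersExcludingSelf(4, 2)) // [0 1 3]
}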
Example #17
func newPbftCore(id uint64, config *viper.Viper, consumer innerStack, etf events.TimerFactory) *pbftCore {
	var err error
	instance := &pbftCore{}
	instance.id = id
	instance.consumer = consumer

	instance.newViewTimer = etf.CreateTimer()
	instance.vcResendTimer = etf.CreateTimer()
	instance.nullRequestTimer = etf.CreateTimer()

	instance.N = config.GetInt("general.N")
	instance.f = config.GetInt("general.f")
	if instance.f*3+1 > instance.N {
		panic(fmt.Sprintf("need at least %d enough replicas to tolerate %d byzantine faults, but only %d replicas configured", instance.f*3+1, instance.f, instance.N))
	}

	instance.K = uint64(config.GetInt("general.K"))

	instance.logMultiplier = uint64(config.GetInt("general.logmultiplier"))
	if instance.logMultiplier < 2 {
		panic("Log multiplier must be greater than or equal to 2")
	}
	instance.L = instance.logMultiplier * instance.K // log size
	instance.viewChangePeriod = uint64(config.GetInt("general.viewchangeperiod"))

	instance.byzantine = config.GetBool("general.byzantine")

	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.vcResendTimeout, err = time.ParseDuration(config.GetString("general.timeout.resendviewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
	}
	instance.nullRequestTimeout, err = time.ParseDuration(config.GetString("general.timeout.nullrequest"))
	if err != nil {
		instance.nullRequestTimeout = 0
	}
	instance.broadcastTimeout, err = time.ParseDuration(config.GetString("general.timeout.broadcast"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new broadcast timeout: %s", err))
	}

	instance.activeView = true
	instance.replicaCount = instance.N

	logger.Infof("PBFT type = %T", instance.consumer)
	logger.Infof("PBFT Max number of validating peers (N) = %v", instance.N)
	logger.Infof("PBFT Max number of failing peers (f) = %v", instance.f)
	logger.Infof("PBFT byzantine flag = %v", instance.byzantine)
	logger.Infof("PBFT request timeout = %v", instance.requestTimeout)
	logger.Infof("PBFT view change timeout = %v", instance.newViewTimeout)
	logger.Infof("PBFT Checkpoint period (K) = %v", instance.K)
	logger.Infof("PBFT broadcast timeout = %v", instance.broadcastTimeout)
	logger.Infof("PBFT Log multiplier = %v", instance.logMultiplier)
	logger.Infof("PBFT log size (L) = %v", instance.L)
	if instance.nullRequestTimeout > 0 {
		logger.Infof("PBFT null requests timeout = %v", instance.nullRequestTimeout)
	} else {
		logger.Infof("PBFT null requests disabled")
	}
	if instance.viewChangePeriod > 0 {
		logger.Infof("PBFT view change period = %v", instance.viewChangePeriod)
	} else {
		logger.Infof("PBFT automatic view change disabled")
	}

	// init the logs
	instance.certStore = make(map[msgID]*msgCert)
	instance.reqBatchStore = make(map[string]*RequestBatch)
	instance.checkpointStore = make(map[Checkpoint]bool)
	instance.chkpts = make(map[uint64]string)
	instance.viewChangeStore = make(map[vcidx]*ViewChange)
	instance.pset = make(map[uint64]*ViewChange_PQ)
	instance.qset = make(map[qidx]*ViewChange_PQ)
	instance.newViewStore = make(map[uint64]*NewView)

	// initialize state transfer
	instance.hChkpts = make(map[uint64]uint64)

	instance.chkpts[0] = "XXX GENESIS"

	instance.lastNewViewTimeout = instance.newViewTimeout
	instance.outstandingReqBatches = make(map[string]*RequestBatch)
	instance.missingReqBatches = make(map[string]bool)

	instance.restoreState()

	instance.viewChangeSeqNo = ^uint64(0) // infinity
	instance.updateViewChangeSeqNo()

	return instance
}
Example #18
func newPbftCore(id uint64, config *viper.Viper, consumer innerStack, startupID []byte) *pbftCore {
	var err error
	instance := &pbftCore{}
	instance.id = id
	instance.consumer = consumer
	instance.closed = make(chan bool)
	instance.notifyCommit = make(chan bool, 1)

	instance.N = config.GetInt("general.N")
	instance.f = config.GetInt("general.f")
	if instance.f*3+1 > instance.N {
		panic(fmt.Sprintf("need at least %d enough replicas to tolerate %d byzantine faults, but only %d replicas configured", instance.f*3+1, instance.f, instance.N))
	}

	instance.K = uint64(config.GetInt("general.K"))
	instance.logMultiplier = uint64(config.GetInt("general.logmultiplier"))

	instance.byzantine = config.GetBool("general.byzantine")

	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
	}

	instance.activeView = true
	instance.L = instance.logMultiplier * instance.K // log size
	instance.replicaCount = instance.N

	logger.Info("PBFT type = %T", instance.consumer)
	logger.Info("PBFT Max number of validating peers (N) = %v", instance.N)
	logger.Info("PBFT Max number of failing peers (f) = %v", instance.f)
	logger.Info("PBFT byzantine flag = %v", instance.byzantine)
	logger.Info("PBFT request timeout = %v", instance.requestTimeout)
	logger.Info("PBFT view change timeout = %v", instance.newViewTimeout)
	logger.Info("PBFT Checkpoint period (K) = %v", instance.K)
	logger.Info("PBFT Log multiplier = %v", instance.logMultiplier)
	logger.Info("PBFT log size (L) = %v", instance.L)

	// init the logs
	instance.certStore = make(map[msgID]*msgCert)
	instance.reqStore = make(map[string]*Request)
	instance.checkpointStore = make(map[Checkpoint]bool)
	instance.chkpts = make(map[uint64]string)
	instance.viewChangeStore = make(map[vcidx]*ViewChange)
	instance.pset = make(map[uint64]*ViewChange_PQ)
	instance.qset = make(map[qidx]*ViewChange_PQ)
	instance.newViewStore = make(map[uint64]*NewView)

	// initialize state transfer
	instance.hChkpts = make(map[uint64]uint64)

	instance.chkpts[0] = base64.StdEncoding.EncodeToString(startupID)

	// create non-running timer XXX ugly
	instance.newViewTimer = time.NewTimer(100 * time.Hour)
	instance.newViewTimer.Stop()
	instance.timerResetCount = 1
	instance.lastNewViewTimeout = instance.newViewTimeout
	instance.outstandingReqs = make(map[string]*Request)
	instance.missingReqs = make(map[string]bool)

	go instance.timerHandler()

	return instance
}
Example #19
func loadConfig(v *viper.Viper) *runnerConf {
	v.SetDefault("check_timeout", 10)
	v.SetDefault("host_window", 60)
	v.SetDefault("host_threshold", 5)
	v.SetDefault("flood_window", 120)
	v.SetDefault("flood_threshold", 100)
	v.SetDefault("flap_window", 1200)
	v.SetDefault("flap_threshold", 5)
	v.SetDefault("alert_threshold", 3)
	v.SetDefault("worker_id", "worker1")
	v.SetDefault("check_key", "canhazstatus")
	conf := &runnerConf{}
	conf.checkTimeout = v.GetDuration("check_timeout")
	conf.hostWindow = int64(v.GetInt("host_window"))
	conf.hostThreshold = v.GetInt("host_threshold")
	conf.floodWindow = int64(v.GetInt("flood_window"))
	conf.floodThreshold = v.GetInt("flood_threshold")
	conf.flapWindow = v.GetInt("flap_window")
	conf.flapThreshold = v.GetInt("flap_threshold")
	conf.alertThreshold = v.GetInt("alert_threshold")
	conf.workerQueue = v.GetString("worker_id")
	conf.checkKey = v.GetString("check_key")

	//twilio config
	v.SetDefault("twilio_enable", false)
	conf.twilioEnabled = v.GetBool("twilio_enable")
	conf.twsid = v.GetString("twiliosid")
	conf.twtoken = v.GetString("twiliotoken")
	conf.twfrom = v.GetString("twiliofrom")
	conf.twdest = v.GetStringSlice("twiliodest")

	//pagerduty config
	v.SetDefault("pagerduty_enable", false)
	conf.pagerDutyEnabled = v.GetBool("pagerduty_enable")
	conf.pagerDutyPriOneKey = v.GetString("pagerduty_priority_one_key")
	conf.pagerDutyPriTwoKey = v.GetString("pagerduty_priority_two_key")
	conf.pagerDutyIncidentKeyPrefix = v.GetString("pagerduty_incident_key_prefix")

	return conf
}
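One subtlety worth noting: check_timeout defaults to the bare integer 10, and GetDuration converts bare integers as nanoseconds, so unless the config file supplies a duration string such as "10s", checkTimeout comes out as 10ns rather than the presumably intended 10 seconds. A sketch demonstrating both readings:

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetDefault("check_timeout", 10)           // as in loadConfig above
	fmt.Println(v.GetDuration("check_timeout")) // 10ns

	v.Set("check_timeout", "10s")               // a duration string parses as expected
	fmt.Println(v.GetDuration("check_timeout")) // 10s
}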