func getTrustPinning(config *viper.Viper) (trustpinning.TrustPinConfig, error) {
	var ok bool
	// Need to parse out Certs section from config
	certMap := config.GetStringMap("trust_pinning.certs")
	resultCertMap := make(map[string][]string)
	for gun, certSlice := range certMap {
		var castedCertSlice []interface{}
		if castedCertSlice, ok = certSlice.([]interface{}); !ok {
			return trustpinning.TrustPinConfig{}, fmt.Errorf("invalid format for trust_pinning.certs")
		}
		certsForGun := make([]string, len(castedCertSlice))
		for idx, certIDInterface := range castedCertSlice {
			if certID, ok := certIDInterface.(string); ok {
				certsForGun[idx] = certID
			} else {
				return trustpinning.TrustPinConfig{}, fmt.Errorf("invalid format for trust_pinning.certs")
			}
		}
		resultCertMap[gun] = certsForGun
	}

	return trustpinning.TrustPinConfig{
		DisableTOFU: config.GetBool("trust_pinning.disable_tofu"),
		CA:          config.GetStringMapString("trust_pinning.ca"),
		Certs:       resultCertMap,
	}, nil
}
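// A minimal usage sketch for getTrustPinning above (this helper is
// illustrative, not part of the notary CLI). The YAML shape is inferred from
// the keys the function reads; the GUN and certificate ID are placeholders.
// Assumes bytes, fmt, and github.com/spf13/viper are imported.
func exampleTrustPinning() {
	rawYAML := []byte(`
trust_pinning:
  disable_tofu: true
  certs:
    docker.io/library/alpine:
      - "0123deadbeef"
`)
	v := viper.New()
	v.SetConfigType("yaml")
	if err := v.ReadConfig(bytes.NewReader(rawYAML)); err != nil {
		panic(err)
	}
	pin, err := getTrustPinning(v)
	if err != nil {
		panic(err)
	}
	fmt.Printf("disable TOFU: %t, pinned GUNs: %d\n", pin.DisableTOFU, len(pin.Certs))
}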
func loadBoolPtr(key string, v *viper.Viper) *bool {
	val := v.Get(key)
	if val == nil {
		return nil
	}
	b := v.GetBool(key)
	return &b
}
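// Why loadBoolPtr returns *bool rather than bool (a usage sketch; the key
// name is only an example, and fmt is assumed imported): a nil pointer means
// "not configured", which lets callers fall back to a default, whereas
// v.GetBool alone reports false for both "unset" and "explicitly false".
func exampleLoadBoolPtr(v *viper.Viper) {
	secure := loadBoolPtr("stormpath.web.accessTokenCookie.secure", v)
	if secure == nil {
		fmt.Println("secure cookie flag not configured; using the default")
	} else {
		fmt.Printf("secure cookie flag explicitly set to %t\n", *secure)
	}
}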
func newPbftCore(id uint64, config *viper.Viper, consumer innerCPI) *pbftCore {
	instance := &pbftCore{}
	instance.id = id
	instance.consumer = consumer

	// Values are read from the config file; in dev/debugging mode any of
	// them can be overridden with an environment variable using the
	// OPENCHAIN_OBCPBFT prefix, e.g. OPENCHAIN_OBCPBFT_BYZANTINE
	// (OPENCHAIN_OBCPBFT_X_Y for a nested key x.y).
	var err error
	instance.byzantine = config.GetBool("replica.byzantine")
	instance.f = config.GetInt("general.f")
	instance.K = uint64(config.GetInt("general.K"))
	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
	}

	instance.activeView = true
	instance.L = 2 * instance.K // log size
	instance.replicaCount = 3*instance.f + 1

	// init the logs
	instance.certStore = make(map[msgID]*msgCert)
	instance.reqStore = make(map[string]*Request)
	instance.checkpointStore = make(map[Checkpoint]bool)
	instance.chkpts = make(map[uint64]string)
	instance.viewChangeStore = make(map[vcidx]*ViewChange)
	instance.pset = make(map[uint64]*ViewChange_PQ)
	instance.qset = make(map[qidx]*ViewChange_PQ)
	instance.newViewStore = make(map[uint64]*NewView)

	// load genesis checkpoint
	stateHash, err := instance.consumer.getStateHash(0)
	if err != nil {
		panic(fmt.Errorf("Cannot load genesis block: %s", err))
	}
	instance.chkpts[0] = base64.StdEncoding.EncodeToString(stateHash)

	// create non-running timer XXX ugly
	instance.newViewTimer = time.NewTimer(100 * time.Hour)
	instance.newViewTimer.Stop()
	instance.lastNewViewTimeout = instance.newViewTimeout
	instance.outstandingReqs = make(map[string]*Request)

	go instance.timerHander()

	return instance
}
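// Illustrative arithmetic for the sizing above (the standard PBFT bounds;
// the example values are assumptions, not project defaults):
//
//	f = 1  =>  replicaCount = 3*f + 1 = 4
//	K = 10 =>  L = 2*K = 20  (the log spans two checkpoint periods)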
func ThreadlessNewStateTransferState(config *viper.Viper, stack PartialStack) *StateTransferState {
	var err error
	sts := &StateTransferState{}

	sts.stateTransferListenersLock = &sync.Mutex{}

	sts.stack = stack
	sts.id, _, err = stack.GetNetworkHandles()
	if nil != err {
		logger.Debug("Error resolving our own PeerID, this shouldn't happen")
		sts.id = &protos.PeerID{Name: "ERROR_RESOLVING_ID"}
	}

	sts.asynchronousTransferInProgress = false

	sts.RecoverDamage = config.GetBool("statetransfer.recoverdamage")

	sts.stateValid = true // Assume our starting state is correct unless told otherwise

	sts.validBlockRanges = make([]*blockRange, 0)
	sts.blockVerifyChunkSize = uint64(config.GetInt("statetransfer.blocksperrequest"))
	if sts.blockVerifyChunkSize == 0 {
		panic(fmt.Errorf("Must set statetransfer.blocksperrequest to be nonzero"))
	}

	sts.initiateStateSync = make(chan *syncMark)
	sts.blockHashReceiver = make(chan *blockHashReply, 1)
	sts.blockSyncReq = make(chan *blockSyncReq)

	sts.threadExit = make(chan struct{})

	sts.blockThreadIdle = true
	sts.stateThreadIdle = true

	sts.blockThreadIdleChan = make(chan struct{})
	sts.stateThreadIdleChan = make(chan struct{})

	sts.DiscoveryThrottleTime = 1 * time.Second // TODO make this configurable

	sts.BlockRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.singleblock"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.singleblock timeout: %s", err))
	}
	sts.StateDeltaRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.singlestatedelta"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.singlestatedelta timeout: %s", err))
	}
	sts.StateSnapshotRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.fullstate"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.fullstate timeout: %s", err))
	}

	return sts
}
func loadCookiesConfig(v *viper.Viper) {
	// AccessToken
	Config.AccessTokenCookieHTTPOnly = v.GetBool("stormpath.web.accessTokenCookie.httpOnly")
	Config.AccessTokenCookieName = v.GetString("stormpath.web.accessTokenCookie.name")
	Config.AccessTokenCookieSecure = loadBoolPtr("stormpath.web.accessTokenCookie.secure", v)
	Config.AccessTokenCookiePath = v.GetString("stormpath.web.accessTokenCookie.path")
	Config.AccessTokenCookieDomain = v.GetString("stormpath.web.accessTokenCookie.domain")
	// RefreshToken
	Config.RefreshTokenCookieHTTPOnly = v.GetBool("stormpath.web.refreshTokenCookie.httpOnly")
	Config.RefreshTokenCookieName = v.GetString("stormpath.web.refreshTokenCookie.name")
	Config.RefreshTokenCookieSecure = loadBoolPtr("stormpath.web.refreshTokenCookie.secure", v)
	Config.RefreshTokenCookiePath = v.GetString("stormpath.web.refreshTokenCookie.path")
	Config.RefreshTokenCookieDomain = v.GetString("stormpath.web.refreshTokenCookie.domain")
}
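// For reference, a config sketch matching the keys loadCookiesConfig reads;
// the YAML layout and values are inferred from the key paths above, not
// taken from stormpath documentation:
//
//	stormpath:
//	  web:
//	    accessTokenCookie:
//	      httpOnly: true
//	      name: access_token
//	      secure: false      # omit entirely to leave the *bool nil
//	      path: /
//	      domain: example.com
//	    refreshTokenCookie:
//	      httpOnly: true
//	      name: refresh_token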
func ThreadlessNewStateTransferState(id *protos.PeerID, config *viper.Viper, ledger consensus.LedgerStack, defaultPeerIDs []*protos.PeerID) *StateTransferState {
	sts := &StateTransferState{}

	sts.stateTransferListenersLock = &sync.Mutex{}

	sts.ledger = ledger
	sts.id = id
	sts.asynchronousTransferInProgress = false

	logger.Debug("%v assigning %v to defaultPeerIDs", id, defaultPeerIDs)
	sts.defaultPeerIDs = defaultPeerIDs

	sts.RecoverDamage = config.GetBool("statetransfer.recoverdamage")

	sts.stateValid = true // Assume our starting state is correct unless told otherwise

	sts.validBlockRanges = make([]*blockRange, 0)
	sts.blockVerifyChunkSize = uint64(config.GetInt("statetransfer.blocksperrequest"))
	if sts.blockVerifyChunkSize == 0 {
		panic(fmt.Errorf("Must set statetransfer.blocksperrequest to be nonzero"))
	}

	sts.initiateStateSync = make(chan *syncMark, 1)
	sts.blockHashReceiver = make(chan *blockHashReply, 1)
	sts.blockSyncReq = make(chan *blockSyncReq)

	sts.blockThreadExit = make(chan struct{}, 1)
	sts.stateThreadExit = make(chan struct{}, 1)

	var err error
	sts.BlockRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.singleblock"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.singleblock timeout: %s", err))
	}
	sts.StateDeltaRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.singlestatedelta"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.singlestatedelta timeout: %s", err))
	}
	sts.StateSnapshotRequestTimeout, err = time.ParseDuration(config.GetString("statetransfer.timeout.fullstate"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse statetransfer.timeout.fullstate timeout: %s", err))
	}

	return sts
}
func getGlobalProject(v *viper.Viper) (*libcentrifugo.Project, bool) {
	p := &libcentrifugo.Project{}

	// TODO: the same as for structureFromConfig function
	if v == nil {
		if !viper.IsSet("project_name") || viper.GetString("project_name") == "" {
			return nil, false
		}
		p.Name = libcentrifugo.ProjectKey(viper.GetString("project_name"))
		p.Secret = viper.GetString("project_secret")
		p.ConnLifetime = int64(viper.GetInt("project_connection_lifetime"))
		p.Anonymous = viper.GetBool("project_anonymous")
		p.Watch = viper.GetBool("project_watch")
		p.Publish = viper.GetBool("project_publish")
		p.JoinLeave = viper.GetBool("project_join_leave")
		p.Presence = viper.GetBool("project_presence")
		p.HistorySize = int64(viper.GetInt("project_history_size"))
		p.HistoryLifetime = int64(viper.GetInt("project_history_lifetime"))
	} else {
		if !v.IsSet("project_name") || v.GetString("project_name") == "" {
			return nil, false
		}
		p.Name = libcentrifugo.ProjectKey(v.GetString("project_name"))
		p.Secret = v.GetString("project_secret")
		p.ConnLifetime = int64(v.GetInt("project_connection_lifetime"))
		p.Anonymous = v.GetBool("project_anonymous")
		p.Watch = v.GetBool("project_watch")
		p.Publish = v.GetBool("project_publish")
		p.JoinLeave = v.GetBool("project_join_leave")
		p.Presence = v.GetBool("project_presence")
		p.HistorySize = int64(v.GetInt("project_history_size"))
		p.HistoryLifetime = int64(v.GetInt("project_history_lifetime"))
	}

	var nl []libcentrifugo.Namespace
	if v == nil {
		viper.MarshalKey("project_namespaces", &nl)
	} else {
		v.MarshalKey("project_namespaces", &nl)
	}
	p.Namespaces = nl

	return p, true
}
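// Note on MarshalKey: it is the decode-into-struct call from the viper API of
// this era (later renamed UnmarshalKey). A sketch of the namespace list it
// decodes, with invented values and field names assumed from the Project
// fields above:
//
//	project_namespaces:
//	  - name: public
//	    publish: true
//	    presence: true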
func buildForm(formName string, v *viper.Viper) form {
	f := form{} // named f to avoid shadowing the form type

	for _, fieldName := range getConfiguredFormFieldNames(formName, v) {
		field := field{
			Name:        fieldName,
			Label:       v.GetString("stormpath.web." + formName + ".form.fields." + fieldName + ".label"),
			PlaceHolder: v.GetString("stormpath.web." + formName + ".form.fields." + fieldName + ".placeHolder"),
			Visible:     v.GetBool("stormpath.web." + formName + ".form.fields." + fieldName + ".visible"),
			Enabled:     v.GetBool("stormpath.web." + formName + ".form.fields." + fieldName + ".enabled"),
			Required:    v.GetBool("stormpath.web." + formName + ".form.fields." + fieldName + ".required"),
			Type:        v.GetString("stormpath.web." + formName + ".form.fields." + fieldName + ".type"),
		}
		if field.Enabled {
			f.Fields = append(f.Fields, field)
		}
	}

	return f
}
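// A hedged sketch of the form-field configuration buildForm consumes; the
// field name and values are invented for illustration, and only fields with
// enabled: true end up in the resulting form:
//
//	stormpath:
//	  web:
//	    login:
//	      form:
//	        fields:
//	          email:
//	            label: Email
//	            placeHolder: you@example.com
//	            visible: true
//	            enabled: true
//	            required: true
//	            type: email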
// getTransport returns an http.RoundTripper to be used for all http requests.
// It correctly handles the auth challenge/credentials required to interact
// with a notary server over both HTTP Basic Auth and the JWT auth implemented
// in the notary-server.
// The readOnly flag indicates if the operation should be performed as an
// anonymous read-only operation. If the command entered requires write
// permissions on the server, readOnly must be false.
func getTransport(config *viper.Viper, gun string, readOnly bool) (http.RoundTripper, error) {
	// Attempt to get a root CA from the config file. Nil is the host defaults.
	rootCAFile := utils.GetPathRelativeToConfig(config, "remote_server.root_ca")
	clientCert := utils.GetPathRelativeToConfig(config, "remote_server.tls_client_cert")
	clientKey := utils.GetPathRelativeToConfig(config, "remote_server.tls_client_key")

	insecureSkipVerify := false
	if config.IsSet("remote_server.skipTLSVerify") {
		insecureSkipVerify = config.GetBool("remote_server.skipTLSVerify")
	}

	if clientCert == "" && clientKey != "" || clientCert != "" && clientKey == "" {
		return nil, fmt.Errorf("either pass both client key and cert, or neither")
	}

	tlsConfig, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:             rootCAFile,
		InsecureSkipVerify: insecureSkipVerify,
		CertFile:           clientCert,
		KeyFile:            clientKey,
	})
	if err != nil {
		return nil, fmt.Errorf("unable to configure TLS: %s", err.Error())
	}

	base := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     tlsConfig,
		DisableKeepAlives:   true,
	}
	trustServerURL := getRemoteTrustServer(config)
	return tokenAuth(trustServerURL, base, gun, readOnly)
}
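// Usage sketch (a hypothetical caller, not notary code; assumes net/http and
// time are imported): wire the returned RoundTripper into an http.Client.
// The GUN is a placeholder.
func exampleNotaryClient(config *viper.Viper) (*http.Client, error) {
	rt, err := getTransport(config, "docker.io/library/alpine", true) // read-only
	if err != nil {
		return nil, err
	}
	return &http.Client{Transport: rt, Timeout: 30 * time.Second}, nil
}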
func loadOAuth2Config(v *viper.Viper) {
	Config.OAuth2Enabled = v.GetBool("stormpath.web.oauth2.enabled")
	Config.OAuth2URI = v.GetString("stormpath.web.oauth2.uri")
	Config.OAuth2ClientCredentialsGrantTypeEnabled = v.GetBool("stormpath.web.oauth2.client_credentials.enabled")
	Config.OAuth2ClientCredentialsGrantTypeAccessTokenTTL = time.Duration(v.GetInt("stormpath.web.oauth2.client_credentials.accessToken.ttl")) * time.Second
	Config.OAuth2PasswordGrantTypeEnabled = v.GetBool("stormpath.web.oauth2.password.enabled")
	Config.OAuth2PasswordGrantTypeValidationStrategy = v.GetString("stormpath.web.oauth2.password.validationStrategy")
}
func getTransport(config *viper.Viper, gun string, readOnly bool) http.RoundTripper {
	// Attempt to get a root CA from the config file. Nil is the host defaults.
	rootCAFile := config.GetString("remote_server.root_ca")
	if rootCAFile != "" {
		// If we haven't been given an absolute path, we assume it's relative
		// to the configuration directory (~/.notary by default)
		if !filepath.IsAbs(rootCAFile) {
			rootCAFile = filepath.Join(configPath, rootCAFile)
		}
	}

	insecureSkipVerify := false
	if config.IsSet("remote_server.skipTLSVerify") {
		insecureSkipVerify = config.GetBool("remote_server.skipTLSVerify")
	}
	tlsConfig, err := utils.ConfigureClientTLS(&utils.ClientTLSOpts{
		RootCAFile:         rootCAFile,
		InsecureSkipVerify: insecureSkipVerify,
	})
	if err != nil {
		logrus.Fatal("Unable to configure TLS: ", err.Error())
	}

	base := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		Dial: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).Dial,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSClientConfig:     tlsConfig,
		DisableKeepAlives:   true,
	}

	return tokenAuth(config, base, gun, readOnly)
}
func loadEndpointsConfig(v *viper.Viper) {
	// Login
	Config.LoginURI = v.GetString("stormpath.web.login.uri")
	Config.LoginNextURI = v.GetString("stormpath.web.login.nextUri")
	Config.LoginView = v.GetString("stormpath.web.login.view")
	Config.LoginEnabled = v.GetBool("stormpath.web.login.enabled")
	Config.LoginForm = buildForm("login", v)
	// Register
	Config.RegisterURI = v.GetString("stormpath.web.register.uri")
	Config.RegisterView = v.GetString("stormpath.web.register.view")
	Config.RegisterNextURI = v.GetString("stormpath.web.register.nextUri")
	Config.RegisterEnabled = v.GetBool("stormpath.web.register.enabled")
	Config.RegisterAutoLoginEnabled = v.GetBool("stormpath.web.register.autoLogin")
	Config.RegisterForm = buildForm("register", v)
	// Verify
	Config.VerifyURI = v.GetString("stormpath.web.verifyEmail.uri")
	Config.VerifyEnabled = loadBoolPtr("stormpath.web.verifyEmail.enabled", v)
	Config.VerifyView = v.GetString("stormpath.web.verifyEmail.view")
	Config.VerifyNextURI = v.GetString("stormpath.web.verifyEmail.nextUri")
	// Forgot Password
	Config.ForgotPasswordURI = v.GetString("stormpath.web.forgotPassword.uri")
	Config.ForgotPasswordNextURI = v.GetString("stormpath.web.forgotPassword.nextUri")
	Config.ForgotPasswordView = v.GetString("stormpath.web.forgotPassword.view")
	Config.ForgotPasswordEnabled = loadBoolPtr("stormpath.web.forgotPassword.enabled", v)
	// Change Password
	Config.ChangePasswordURI = v.GetString("stormpath.web.changePassword.uri")
	Config.ChangePasswordNextURI = v.GetString("stormpath.web.changePassword.nextUri")
	Config.ChangePasswordView = v.GetString("stormpath.web.changePassword.view")
	Config.ChangePasswordEnabled = loadBoolPtr("stormpath.web.changePassword.enabled", v)
	Config.ChangePasswordAutoLoginEnabled = v.GetBool("stormpath.web.changePassword.autoLogin")
	Config.ChangePasswordErrorURI = v.GetString("stormpath.web.changePassword.errorUri")
	// Logout
	Config.LogoutURI = v.GetString("stormpath.web.logout.uri")
	Config.LogoutNextURI = v.GetString("stormpath.web.logout.nextUri")
	Config.LogoutEnabled = v.GetBool("stormpath.web.logout.enabled")
	// IDSite
	Config.IDSiteEnabled = v.GetBool("stormpath.web.idSite.enabled")
	Config.IDSiteLoginURI = v.GetString("stormpath.web.idSite.loginUri")
	Config.IDSiteForgotURI = v.GetString("stormpath.web.idSite.forgotUri")
	Config.IDSiteRegisterURI = v.GetString("stormpath.web.idSite.registerUri")
	Config.CallbackEnabled = v.GetBool("stormpath.web.callback.enabled")
	Config.CallbackURI = v.GetString("stormpath.web.callback.uri")
	// Me
	Config.MeEnabled = v.GetBool("stormpath.web.me.enabled")
	Config.MeURI = v.GetString("stormpath.web.me.uri")
	Config.MeExpand = v.GetStringMap("stormpath.web.me.expand")
}
func newPbftCore(id uint64, config *viper.Viper, consumer innerStack, etf events.TimerFactory) *pbftCore {
	var err error
	instance := &pbftCore{}
	instance.id = id
	instance.consumer = consumer

	instance.newViewTimer = etf.CreateTimer()
	instance.vcResendTimer = etf.CreateTimer()
	instance.nullRequestTimer = etf.CreateTimer()

	instance.N = config.GetInt("general.N")
	instance.f = config.GetInt("general.f")
	if instance.f*3+1 > instance.N {
		panic(fmt.Sprintf("need at least %d replicas to tolerate %d byzantine faults, but only %d replicas configured", instance.f*3+1, instance.f, instance.N))
	}

	instance.K = uint64(config.GetInt("general.K"))

	instance.logMultiplier = uint64(config.GetInt("general.logmultiplier"))
	if instance.logMultiplier < 2 {
		panic("Log multiplier must be greater than or equal to 2")
	}
	instance.L = instance.logMultiplier * instance.K // log size
	instance.viewChangePeriod = uint64(config.GetInt("general.viewchangeperiod"))

	instance.byzantine = config.GetBool("general.byzantine")

	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.vcResendTimeout, err = time.ParseDuration(config.GetString("general.timeout.resendviewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse viewchange resend timeout: %s", err))
	}
	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
	}
	instance.nullRequestTimeout, err = time.ParseDuration(config.GetString("general.timeout.nullrequest"))
	if err != nil {
		instance.nullRequestTimeout = 0 // a missing or unparsable value disables null requests
	}
	instance.broadcastTimeout, err = time.ParseDuration(config.GetString("general.timeout.broadcast"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse broadcast timeout: %s", err))
	}

	instance.activeView = true
	instance.replicaCount = instance.N

	logger.Infof("PBFT type = %T", instance.consumer)
	logger.Infof("PBFT Max number of validating peers (N) = %v", instance.N)
	logger.Infof("PBFT Max number of failing peers (f) = %v", instance.f)
	logger.Infof("PBFT byzantine flag = %v", instance.byzantine)
	logger.Infof("PBFT request timeout = %v", instance.requestTimeout)
	logger.Infof("PBFT view change timeout = %v", instance.newViewTimeout)
	logger.Infof("PBFT Checkpoint period (K) = %v", instance.K)
	logger.Infof("PBFT broadcast timeout = %v", instance.broadcastTimeout)
	logger.Infof("PBFT Log multiplier = %v", instance.logMultiplier)
	logger.Infof("PBFT log size (L) = %v", instance.L)
	if instance.nullRequestTimeout > 0 {
		logger.Infof("PBFT null requests timeout = %v", instance.nullRequestTimeout)
	} else {
		logger.Infof("PBFT null requests disabled")
	}
	if instance.viewChangePeriod > 0 {
		logger.Infof("PBFT view change period = %v", instance.viewChangePeriod)
	} else {
		logger.Infof("PBFT automatic view change disabled")
	}

	// init the logs
	instance.certStore = make(map[msgID]*msgCert)
	instance.reqBatchStore = make(map[string]*RequestBatch)
	instance.checkpointStore = make(map[Checkpoint]bool)
	instance.chkpts = make(map[uint64]string)
	instance.viewChangeStore = make(map[vcidx]*ViewChange)
	instance.pset = make(map[uint64]*ViewChange_PQ)
	instance.qset = make(map[qidx]*ViewChange_PQ)
	instance.newViewStore = make(map[uint64]*NewView)

	// initialize state transfer
	instance.hChkpts = make(map[uint64]uint64)

	instance.chkpts[0] = "XXX GENESIS"

	instance.lastNewViewTimeout = instance.newViewTimeout
	instance.outstandingReqBatches = make(map[string]*RequestBatch)
	instance.missingReqBatches = make(map[string]bool)

	instance.restoreState()

	instance.viewChangeSeqNo = ^uint64(0) // infinity
	instance.updateViewChangeSeqNo()

	return instance
}
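// For reference, a sketch of the keys this constructor parses; the values
// are illustrative examples, not recommended or default settings:
//
//	general:
//	  N: 4
//	  f: 1
//	  K: 10
//	  logmultiplier: 4
//	  viewchangeperiod: 0   # 0 disables automatic view changes
//	  byzantine: false
//	  timeout:
//	    request: 2s
//	    resendviewchange: 2s
//	    viewchange: 2s
//	    nullrequest: 1s     # omit to disable null requests
//	    broadcast: 1s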
func newPbftCore(id uint64, config *viper.Viper, consumer innerStack, startupID []byte) *pbftCore {
	var err error
	instance := &pbftCore{}
	instance.id = id
	instance.consumer = consumer
	instance.closed = make(chan bool)
	instance.notifyCommit = make(chan bool, 1)

	instance.N = config.GetInt("general.N")
	instance.f = config.GetInt("general.f")
	if instance.f*3+1 > instance.N {
		panic(fmt.Sprintf("need at least %d replicas to tolerate %d byzantine faults, but only %d replicas configured", instance.f*3+1, instance.f, instance.N))
	}

	instance.K = uint64(config.GetInt("general.K"))
	instance.logMultiplier = uint64(config.GetInt("general.logmultiplier"))
	instance.byzantine = config.GetBool("general.byzantine")

	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
	}

	instance.activeView = true
	instance.L = instance.logMultiplier * instance.K // log size
	instance.replicaCount = instance.N

	logger.Info("PBFT type = %T", instance.consumer)
	logger.Info("PBFT Max number of validating peers (N) = %v", instance.N)
	logger.Info("PBFT Max number of failing peers (f) = %v", instance.f)
	logger.Info("PBFT byzantine flag = %v", instance.byzantine)
	logger.Info("PBFT request timeout = %v", instance.requestTimeout)
	logger.Info("PBFT view change timeout = %v", instance.newViewTimeout)
	logger.Info("PBFT Checkpoint period (K) = %v", instance.K)
	logger.Info("PBFT Log multiplier = %v", instance.logMultiplier)
	logger.Info("PBFT log size (L) = %v", instance.L)

	// init the logs
	instance.certStore = make(map[msgID]*msgCert)
	instance.reqStore = make(map[string]*Request)
	instance.checkpointStore = make(map[Checkpoint]bool)
	instance.chkpts = make(map[uint64]string)
	instance.viewChangeStore = make(map[vcidx]*ViewChange)
	instance.pset = make(map[uint64]*ViewChange_PQ)
	instance.qset = make(map[qidx]*ViewChange_PQ)
	instance.newViewStore = make(map[uint64]*NewView)

	// initialize state transfer
	instance.hChkpts = make(map[uint64]uint64)

	instance.chkpts[0] = base64.StdEncoding.EncodeToString(startupID)

	// create non-running timer XXX ugly
	instance.newViewTimer = time.NewTimer(100 * time.Hour)
	instance.newViewTimer.Stop()
	instance.timerResetCount = 1
	instance.lastNewViewTimeout = instance.newViewTimeout
	instance.outstandingReqs = make(map[string]*Request)
	instance.missingReqs = make(map[string]bool)

	go instance.timerHander()

	return instance
}
func newPbftCore(id uint64, config *viper.Viper, consumer innerStack, ledger consensus.LedgerStack) *pbftCore {
	var err error
	instance := &pbftCore{}
	instance.id = id
	instance.consumer = consumer
	instance.ledger = ledger
	instance.closed = make(chan bool)
	instance.notifyCommit = make(chan bool, 1)
	instance.notifyExec = sync.NewCond(&instance.internalLock)

	instance.N = config.GetInt("general.N")
	instance.f = config.GetInt("general.f")
	if instance.f*3+1 > instance.N {
		panic(fmt.Sprintf("need at least %d replicas to tolerate %d byzantine faults, but only %d replicas configured", instance.f*3+1, instance.f, instance.N))
	}

	instance.K = uint64(config.GetInt("general.K"))
	instance.byzantine = config.GetBool("general.byzantine")

	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
	}

	instance.activeView = true
	instance.L = 2 * instance.K // log size
	instance.replicaCount = instance.N

	// init the logs
	instance.certStore = make(map[msgID]*msgCert)
	instance.reqStore = make(map[string]*Request)
	instance.checkpointStore = make(map[Checkpoint]bool)
	instance.chkpts = make(map[uint64]*blockState)
	instance.viewChangeStore = make(map[vcidx]*ViewChange)
	instance.pset = make(map[uint64]*ViewChange_PQ)
	instance.qset = make(map[qidx]*ViewChange_PQ)
	instance.newViewStore = make(map[uint64]*NewView)

	// initialize state transfer
	instance.hChkpts = make(map[uint64]uint64)

	defaultPeerIDs := make([]*protos.PeerID, instance.replicaCount-1)
	if instance.replicaCount > 1 {
		// For some tests, only 1 replica will be present, and defaultPeerIDs makes no sense
		for i := uint64(0); i < uint64(instance.replicaCount); i++ {
			handle, err := getValidatorHandle(i)
			if err != nil {
				panic(fmt.Errorf("Cannot retrieve handle for peer which must exist : %s", err))
			}
			if i < instance.id {
				logger.Debug("Replica %d assigning %v to index %d for replicaCount %d and id %d", instance.id, handle, i, instance.replicaCount, instance.id)
				defaultPeerIDs[i] = handle
			} else if i > instance.id {
				logger.Debug("Replica %d assigning %v to index %d for replicaCount %d and id %d", instance.id, handle, i-1, instance.replicaCount, instance.id)
				defaultPeerIDs[i-1] = handle
			} else {
				// This is our ID, do not add it to the list of default peers
			}
		}
	} else {
		logger.Debug("Replica %d not initializing defaultPeerIDs, as replicaCount is %d", instance.id, instance.replicaCount)
	}

	if myHandle, err := getValidatorHandle(instance.id); err != nil {
		panic("Could not retrieve own handle")
	} else {
		instance.sts = statetransfer.NewStateTransferState(myHandle, config, ledger, defaultPeerIDs)
	}

	listener := struct{ statetransfer.ProtoListener }{}
	listener.CompletedImpl = instance.stateTransferCompleted
	instance.sts.RegisterListener(&listener)

	// load genesis checkpoint
	genesisBlock, err := instance.ledger.GetBlock(0)
	if err != nil {
		panic(fmt.Errorf("Cannot load genesis block: %s", err))
	}
	genesisHash, err := ledger.HashBlock(genesisBlock)
	if err != nil {
		panic(fmt.Errorf("Cannot hash genesis block: %s", err))
	}
	instance.chkpts[0] = &blockState{
		blockNumber: 0,
		blockHash:   base64.StdEncoding.EncodeToString(genesisHash),
	}

	// create non-running timer XXX ugly
	instance.newViewTimer = time.NewTimer(100 * time.Hour)
	instance.newViewTimer.Stop()
	instance.timerResetCount = 1
	instance.lastNewViewTimeout = instance.newViewTimeout
	instance.outstandingReqs = make(map[string]*Request)
	instance.missingReqs = make(map[string]bool)

	go instance.timerHander()
	go instance.executeRoutine()

	return instance
}
func newPbftCore(id uint64, config *viper.Viper, consumer innerStack) *pbftCore {
	var err error
	instance := &pbftCore{}
	instance.id = id
	instance.consumer = consumer
	instance.closed = make(chan struct{})
	instance.incomingChan = make(chan *pbftMessage)
	instance.stateUpdatedChan = make(chan *checkpointMessage)
	instance.stateUpdatingChan = make(chan *checkpointMessage)
	instance.execCompleteChan = make(chan struct{})
	instance.idleChan = make(chan struct{})
	instance.injectChan = make(chan func())

	// TODO Ultimately, the timer factory will be passed in, and the existence of the manager
	// will be hidden from pbftCore, but in the interest of a small PR, leaving it here for now
	instance.manager = newEventManagerImpl(instance)
	etf := newEventTimerFactoryImpl(instance.manager)
	instance.newViewTimer = etf.createTimer()

	instance.N = config.GetInt("general.N")
	instance.f = config.GetInt("general.f")
	if instance.f*3+1 > instance.N {
		panic(fmt.Sprintf("need at least %d replicas to tolerate %d byzantine faults, but only %d replicas configured", instance.f*3+1, instance.f, instance.N))
	}

	instance.K = uint64(config.GetInt("general.K"))

	instance.logMultiplier = uint64(config.GetInt("general.logmultiplier"))
	if instance.logMultiplier < 2 {
		panic("Log multiplier must be greater than or equal to 2")
	}
	instance.L = instance.logMultiplier * instance.K // log size

	instance.byzantine = config.GetBool("general.byzantine")

	instance.requestTimeout, err = time.ParseDuration(config.GetString("general.timeout.request"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse request timeout: %s", err))
	}
	instance.newViewTimeout, err = time.ParseDuration(config.GetString("general.timeout.viewchange"))
	if err != nil {
		panic(fmt.Errorf("Cannot parse new view timeout: %s", err))
	}

	instance.activeView = true
	instance.replicaCount = instance.N

	logger.Info("PBFT type = %T", instance.consumer)
	logger.Info("PBFT Max number of validating peers (N) = %v", instance.N)
	logger.Info("PBFT Max number of failing peers (f) = %v", instance.f)
	logger.Info("PBFT byzantine flag = %v", instance.byzantine)
	logger.Info("PBFT request timeout = %v", instance.requestTimeout)
	logger.Info("PBFT view change timeout = %v", instance.newViewTimeout)
	logger.Info("PBFT Checkpoint period (K) = %v", instance.K)
	logger.Info("PBFT Log multiplier = %v", instance.logMultiplier)
	logger.Info("PBFT log size (L) = %v", instance.L)

	// init the logs
	instance.certStore = make(map[msgID]*msgCert)
	instance.reqStore = make(map[string]*Request)
	instance.checkpointStore = make(map[Checkpoint]bool)
	instance.chkpts = make(map[uint64]string)
	instance.viewChangeStore = make(map[vcidx]*ViewChange)
	instance.pset = make(map[uint64]*ViewChange_PQ)
	instance.qset = make(map[qidx]*ViewChange_PQ)
	instance.newViewStore = make(map[uint64]*NewView)

	// initialize state transfer
	instance.hChkpts = make(map[uint64]uint64)

	instance.chkpts[0] = "XXX GENESIS"

	instance.lastNewViewTimeout = instance.newViewTimeout
	instance.outstandingReqs = make(map[string]*Request)
	instance.missingReqs = make(map[string]bool)

	instance.restoreState()

	return instance
}
func loadConfig(v *viper.Viper) *runnerConf {
	v.SetDefault("check_timeout", 10)
	v.SetDefault("host_window", 60)
	v.SetDefault("host_threshold", 5)
	v.SetDefault("flood_window", 120)
	v.SetDefault("flood_threshold", 100)
	v.SetDefault("flap_window", 1200)
	v.SetDefault("flap_threshold", 5)
	v.SetDefault("alert_threshold", 3)
	v.SetDefault("worker_id", "worker1")
	v.SetDefault("check_key", "canhazstatus")

	conf := &runnerConf{}
	conf.checkTimeout = v.GetDuration("check_timeout")
	conf.hostWindow = int64(v.GetInt("host_window"))
	conf.hostThreshold = v.GetInt("host_threshold")
	conf.floodWindow = int64(v.GetInt("flood_window"))
	conf.floodThreshold = v.GetInt("flood_threshold")
	conf.flapWindow = v.GetInt("flap_window")
	conf.flapThreshold = v.GetInt("flap_threshold")
	conf.alertThreshold = v.GetInt("alert_threshold")
	conf.workerQueue = v.GetString("worker_id")
	conf.checkKey = v.GetString("check_key")

	// twilio config
	v.SetDefault("twilio_enable", false)
	conf.twilioEnabled = v.GetBool("twilio_enable")
	conf.twsid = v.GetString("twiliosid")
	conf.twtoken = v.GetString("twiliotoken")
	conf.twfrom = v.GetString("twiliofrom")
	conf.twdest = v.GetStringSlice("twiliodest")

	// pagerduty config
	v.SetDefault("pagerduty_enable", false)
	conf.pagerDutyEnabled = v.GetBool("pagerduty_enable")
	conf.pagerDutyPriOneKey = v.GetString("pagerduty_priority_one_key")
	conf.pagerDutyPriTwoKey = v.GetString("pagerduty_priority_two_key")
	conf.pagerDutyIncidentKeyPrefix = v.GetString("pagerduty_incident_key_prefix")

	return conf
}
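// One caveat worth knowing about the defaults above (a viper/cast behavior,
// shown as a sketch; assumes fmt and viper imported): GetDuration on a bare
// integer interprets it as nanoseconds, so the check_timeout default of 10
// yields 10ns rather than 10s. A string default avoids the surprise.
func exampleDurationDefault() {
	v := viper.New()
	v.SetDefault("check_timeout", 10)
	fmt.Println(v.GetDuration("check_timeout")) // 10ns
	v.SetDefault("check_timeout", "10s") // replaces the earlier default
	fmt.Println(v.GetDuration("check_timeout")) // 10s
}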