// Start initializes a new verifier based on config and db, or returns an error
// if initialization fails. It then starts the worker goroutine(s).
func Start(cfg *proto.VerifierConfig, db kv.DB, getKey func(string) (crypto.PrivateKey, error)) (*Verifier, error) {
	tls, err := cfg.TLS.Config(getKey)
	if err != nil {
		return nil, err
	}

	sk, err := getKey(cfg.SigningKeyID)
	if err != nil {
		return nil, err
	}

	vr := &Verifier{
		id:            cfg.ID,
		realm:         cfg.Realm,
		signingKey:    sk.(*[ed25519.PrivateKeySize]byte),
		keyserverAddr: cfg.KeyserverAddr,
		auth:          credentials.NewTLS(tls),

		db: db,
	}
	vr.ctx, vr.stop = context.WithCancel(context.Background())

	switch verifierStateBytes, err := db.Get(tableVerifierState); err {
	case vr.db.ErrNotFound():
		vr.vs.KeyserverAuth = &cfg.InitialKeyserverAuth
		vr.vs.NextEpoch = 1
	case nil:
		if err := vr.vs.Unmarshal(verifierStateBytes); err != nil {
			return nil, err
		}
	default:
		return nil, err
	}

	vr.merkletree, err = merkletree.AccessMerkleTree(vr.db, []byte{tableMerkleTreePrefix}, cfg.TreeNonce)
	if err != nil {
		return nil, err
	}

	vr.waitStop.Add(1)
	go func() { vr.run(); vr.waitStop.Done() }()
	return vr, nil
}
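// Illustrative sketch (not part of the verifier package): Start decides between
// first-run initialization and resuming persisted state by switching on the
// error returned from db.Get. The errNotFound sentinel and the get function
// below are hypothetical stand-ins for kv.DB's ErrNotFound() and Get.
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// loadOrInit mirrors the three-way switch used in Start: a missing key means
// first run (initialize defaults), nil means unmarshal the stored bytes, and
// any other error aborts initialization.
func loadOrInit(get func(key []byte) ([]byte, error)) (string, error) {
	switch val, err := get([]byte("verifier_state")); err {
	case errNotFound:
		return "fresh state initialized", nil
	case nil:
		return fmt.Sprintf("resumed from %d persisted bytes", len(val)), nil
	default:
		return "", err
	}
}

func main() {
	msg, _ := loadOrInit(func([]byte) ([]byte, error) { return nil, errNotFound })
	fmt.Println(msg) // fresh state initialized
}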
// AccessMerkleTree opens the Merkle tree stored in the DB. There should never be two different
// MerkleTree objects accessing the same tree.
func AccessMerkleTree(db kv.DB, prefix []byte, treeNonce []byte) (*MerkleTree, error) {
	// read the allocation count out of the DB
	allocCounterKey := append(append([]byte(nil), prefix...), AllocCounterKey...)
	val, err := db.Get(allocCounterKey)
	var allocCount uint64
	if err == db.ErrNotFound() {
		allocCount = 0
	} else if err != nil {
		return nil, err
	} else if len(val) != 8 {
		log.Panicf("bad alloc counter")
	} else {
		allocCount = binary.LittleEndian.Uint64(val)
	}
	return &MerkleTree{
		treeNonce:       treeNonce,
		db:              db,
		nodeKeyPrefix:   append(append([]byte(nil), prefix...), NodePrefix),
		allocCounterKey: allocCounterKey,
		allocCounter:    allocCount,
	}, nil
}
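// Illustrative sketch (not part of the merkletree package): the allocation
// counter read above is expected to be exactly 8 bytes of little-endian
// uint64; any other length is treated as corruption. This round-trip shows
// the encoding assumed by that check.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode a counter value the way AccessMerkleTree expects to decode it.
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, 42)

	// Decoding applies the same length check the function performs before
	// calling binary.LittleEndian.Uint64.
	if len(buf) != 8 {
		panic("bad alloc counter")
	}
	fmt.Println(binary.LittleEndian.Uint64(buf)) // 42
}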
// New initializes a replication.LogReplicator using an already open kv.DB.
func New(db kv.DB, prefix []byte) (replication.LogReplicator, error) {
	nextIndex := uint64(0)
	iter := db.NewIterator(kv.BytesPrefix(prefix))
	if hasEntries := iter.Last(); hasEntries {
		nextIndex = binary.BigEndian.Uint64(iter.Key()[len(prefix):]) + 1
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		return nil, err
	}
	leaderHintSet := make(chan bool, 1)
	leaderHintSet <- true
	return &kvLog{
		db:            db,
		prefix:        prefix,
		nextIndex:     nextIndex,
		propose:       make(chan replication.LogEntry, 100),
		leaderHintSet: leaderHintSet,
		waitCommitted: make(chan replication.LogEntry),
		stop:          make(chan struct{}),
		stopped:       make(chan struct{}),
	}, nil
}
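// Illustrative sketch (not part of the kv log package): New recovers nextIndex
// by seeking to the last key under the prefix and decoding its suffix as a
// big-endian uint64. The entryKey helper below is hypothetical, but it shows
// the key layout that makes this work: big-endian indexes keep lexicographic
// key order equal to numeric order, so iter.Last() lands on the highest index.
package main

import (
	"encoding/binary"
	"fmt"
)

// entryKey builds prefix || bigEndian(index), the layout assumed above.
func entryKey(prefix []byte, index uint64) []byte {
	key := append([]byte(nil), prefix...)
	var idx [8]byte
	binary.BigEndian.PutUint64(idx[:], index)
	return append(key, idx[:]...)
}

func main() {
	prefix := []byte{0x01}
	lastKey := entryKey(prefix, 6)

	// The same recovery step as in New: strip the prefix, decode, add one.
	nextIndex := binary.BigEndian.Uint64(lastKey[len(prefix):]) + 1
	fmt.Println(nextIndex) // 7
}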
// Open initializes a new keyserver based on cfg, reads the persistent state and
// binds to the specified ports. It does not handle input: requests will block.
func Open(cfg *proto.ReplicaConfig, db kv.DB, log replication.LogReplicator, initialAuthorizationPolicy *proto.AuthorizationPolicy, clk clock.Clock, getKey func(string) (crypto.PrivateKey, error), LookupTXT func(string) ([]string, error)) (ks *Keyserver, err error) {
	// Load the signing and VRF keys and the TLS configurations for every listener.
	signingKey, err := getKey(cfg.SigningKeyID)
	if err != nil {
		return nil, err
	}
	vrfKey, err := getKey(cfg.VRFKeyID)
	if err != nil {
		return nil, err
	}
	publicTLS, err := cfg.PublicTLS.Config(getKey)
	if err != nil {
		return nil, err
	}
	verifierTLS, err := cfg.VerifierTLS.Config(getKey)
	if err != nil {
		return nil, err
	}
	hkpTLS, err := cfg.HKPTLS.Config(getKey)
	if err != nil {
		return nil, err
	}
	httpFrontTLS, err := cfg.HTTPFrontTLS.Config(getKey)
	if err != nil {
		return nil, err
	}

	ks = &Keyserver{
		realm:                   cfg.Realm,
		serverID:                cfg.ServerID,
		replicaID:               cfg.ReplicaID,
		serverAuthorized:        initialAuthorizationPolicy,
		sehKey:                  signingKey.(*[ed25519.PrivateKeySize]byte),
		vrfSecret:               vrfKey.(*[vrf.SecretKeySize]byte),
		laggingVerifierScan:     cfg.LaggingVerifierScan,
		clientTimeout:           cfg.ClientTimeout.Duration(),
		minEpochInterval:        cfg.MinEpochInterval.Duration(),
		maxEpochInterval:        cfg.MaxEpochInterval.Duration(),
		retryProposalInterval:   cfg.ProposalRetryInterval.Duration(),
		dkimProofAllowedDomains: make(map[string]struct{}),
		oidcProofConfig:         make([]OIDCConfig, 0),
		samlProofAllowedDomains: make(map[string]struct{}),

		db:                 db,
		log:                log,
		stop:               make(chan struct{}),
		stopped:            make(chan struct{}),
		wr:                 concurrent.NewOneShotPubSub(),
		signatureBroadcast: concurrent.NewPublishSubscribe(),

		leaderHint: true,
		inRotation: true,

		clk:                   clk,
		lookupTXT:             LookupTXT,
		minEpochIntervalTimer: clk.Timer(0),
		maxEpochIntervalTimer: clk.Timer(0),
	}

	// Configure the registration proof mechanisms named in the policy.
	for _, p := range cfg.RegistrationPolicy {
		switch t := p.PolicyType.(type) {
		case *proto.RegistrationPolicy_EmailProofByDKIM:
			for _, d := range t.EmailProofByDKIM.AllowedDomains {
				ks.dkimProofAllowedDomains[d] = struct{}{}
			}
			ks.dkimProofToAddr = t.EmailProofByDKIM.ToAddr
			ks.dkimProofSubjectPrefix = t.EmailProofByDKIM.SubjectPrefix
		case *proto.RegistrationPolicy_EmailProofByOIDC:
			for _, c := range t.EmailProofByOIDC.OIDCConfig {
				o := &oidc.Client{ClientID: c.ClientID, Issuer: c.Issuer, Validity: c.Validity.Duration(), DiscoveryURL: c.DiscoveryURL}
				err := o.FetchPubKeys()
				if err != nil {
					return nil, err
				}
				oc := OIDCConfig{oidcClient: o, scope: c.Scope}
				oc.allowedDomains = make(map[string]struct{})
				for _, d := range c.AllowedDomains {
					oc.allowedDomains[d] = struct{}{}
				}
				ks.oidcProofConfig = append(ks.oidcProofConfig, oc)
			}
		case *proto.RegistrationPolicy_EmailProofBySAML:
			for _, d := range t.EmailProofBySAML.AllowedDomains {
				ks.samlProofAllowedDomains[d] = struct{}{}
			}
			url, cert, err := saml.FetchIDPInfo(t.EmailProofBySAML.IDPMetadataURL)
			if err != nil {
				return nil, err
			}
			ks.samlProofConsumerServiceURL = t.EmailProofBySAML.ConsumerServiceURL
			ks.samlProofIDPSSOURL = url
			ks.samlProofIDPCert = cert
			ks.samlProofValidity = t.EmailProofBySAML.Validity.Duration()
			key, err := getKey(t.EmailProofBySAML.ServiceProviderTLS.Certificates[0].KeyID)
			if err != nil {
				return nil, err
			}
			ks.samlProofSPKey = key
		// TODO remove this before production
		case *proto.RegistrationPolicy_InsecureSkipEmailProof:
			ks.insecureSkipEmailProof = true
		}
	}

	// Load persisted replica state; a missing row means this is a fresh replica.
	switch replicaStateBytes, err := db.Get(tableReplicaState); err {
	case ks.db.ErrNotFound():
		// ReplicaState zero value is valid initialization
	case nil:
		if err := ks.rs.Unmarshal(replicaStateBytes); err != nil {
			return nil, err
		}
	default:
		return nil, err
	}
	ks.leaderHint = true
	ks.resetEpochTimers(ks.rs.LastEpochDelimiter.Timestamp.Time())
	ks.updateEpochProposer()
	ks.sb = concurrent.NewSequenceBroadcast(ks.rs.NextIndexVerifier)

	// Bind the configured listeners; the deferred closes undo partial setup on error.
	ok := false
	if cfg.PublicAddr != "" {
		ks.publicServer = grpc.NewServer(grpc.Creds(credentials.NewTLS(publicTLS)))
		proto.RegisterE2EKSPublicServer(ks.publicServer, ks)
		ks.publicListen, err = net.Listen("tcp", cfg.PublicAddr)
		if err != nil {
			return nil, err
		}
		defer func() {
			if !ok {
				ks.publicListen.Close()
			}
		}()
	}
	if cfg.VerifierAddr != "" {
		ks.verifierServer = grpc.NewServer(grpc.Creds(credentials.NewTLS(verifierTLS)))
		proto.RegisterE2EKSVerificationServer(ks.verifierServer, ks)
		ks.verifierListen, err = net.Listen("tcp", cfg.VerifierAddr)
		if err != nil {
			return nil, err
		}
		defer func() {
			if !ok {
				ks.verifierListen.Close()
			}
		}()
	}
	if cfg.HKPAddr != "" {
		ks.hkpListen, err = tls.Listen("tcp", cfg.HKPAddr, hkpTLS)
		if err != nil {
			return nil, err
		}
		ks.hkpFront = &hkpfront.HKPFront{InsecureSkipVerify: true, Lookup: ks.Lookup, Clk: ks.clk, TLSConfig: hkpTLS}
		defer func() {
			if !ok {
				ks.hkpListen.Close()
			}
		}()
	}
	if cfg.HTTPFrontAddr != "" {
		ks.httpFrontListen, err = tls.Listen("tcp", cfg.HTTPFrontAddr, httpFrontTLS)
		if err != nil {
			return nil, err
		}
		ks.httpFront = &httpfront.HTTPFront{Lookup: ks.Lookup, Update: ks.Update, InRotation: ks.InRotation, IsAuthExpired: isExpired, TLSConfig: httpFrontTLS, SAMLRequest: ks.SAMLRequest, OIDCRequest: ks.OIDCRequest}
		defer func() {
			if !ok {
				ks.httpFrontListen.Close()
			}
		}()
	}
	ks.merkletree, err = merkletree.AccessMerkleTree(ks.db, []byte{tableMerkleTreePrefix}, nil)
	if err != nil {
		return nil, err
	}

	ok = true
	return ks, nil
}
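// Illustrative sketch (not part of the keyserver package): Open uses an ok
// flag plus deferred closes so that listeners bound early are torn down if a
// later initialization step fails, and kept once everything succeeds. The
// loopback addresses below are placeholders for this sketch.
package main

import (
	"fmt"
	"net"
)

func openListeners() (a, b net.Listener, err error) {
	ok := false

	a, err = net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, nil, err
	}
	// If a later step fails before ok is set, this defer closes the listener.
	defer func() {
		if !ok {
			a.Close()
		}
	}()

	b, err = net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, nil, err
	}

	ok = true // all steps succeeded; the deferred closes become no-ops
	return a, b, nil
}

func main() {
	a, b, err := openListeners()
	if err != nil {
		fmt.Println("setup failed:", err)
		return
	}
	defer a.Close()
	defer b.Close()
	fmt.Println("listening on", a.Addr(), "and", b.Addr())
}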
// Open initializes a new keyserver based on cfg, reads the persistent state and
// binds to the specified ports. It does not handle input: requests will block.
func Open(cfg *proto.ReplicaConfig, db kv.DB, log replication.LogReplicator, initialAuthorizationPolicy *proto.AuthorizationPolicy, clk clock.Clock, getKey func(string) (crypto.PrivateKey, error), LookupTXT func(string) ([]string, error)) (ks *Keyserver, err error) {
	signingKey, err := getKey(cfg.SigningKeyID)
	if err != nil {
		return nil, err
	}
	vrfKey, err := getKey(cfg.VRFKeyID)
	if err != nil {
		return nil, err
	}
	publicTLS, err := cfg.PublicTLS.Config(getKey)
	if err != nil {
		return nil, err
	}
	verifierTLS, err := cfg.VerifierTLS.Config(getKey)
	if err != nil {
		return nil, err
	}
	hkpTLS, err := cfg.HKPTLS.Config(getKey)
	if err != nil {
		return nil, err
	}
	httpFrontTLS, err := cfg.HTTPFrontTLS.Config(getKey)
	if err != nil {
		return nil, err
	}

	ks = &Keyserver{
		realm:                    cfg.Realm,
		serverID:                 cfg.ServerID,
		replicaID:                cfg.ReplicaID,
		serverAuthorized:         initialAuthorizationPolicy,
		sehKey:                   signingKey.(*[ed25519.PrivateKeySize]byte),
		vrfSecret:                vrfKey.(*[vrf.SecretKeySize]byte),
		emailProofToAddr:         cfg.EmailProofToAddr,
		emailProofSubjectPrefix:  cfg.EmailProofSubjectPrefix,
		emailProofAllowedDomains: make(map[string]struct{}),
		laggingVerifierScan:      cfg.LaggingVerifierScan,
		clientTimeout:            cfg.ClientTimeout.Duration(),
		minEpochInterval:         cfg.MinEpochInterval.Duration(),
		maxEpochInterval:         cfg.MaxEpochInterval.Duration(),
		retryProposalInterval:    cfg.ProposalRetryInterval.Duration(),

		db:                 db,
		log:                log,
		stop:               make(chan struct{}),
		stopped:            make(chan struct{}),
		wr:                 concurrent.NewOneShotPubSub(),
		signatureBroadcast: concurrent.NewPublishSubscribe(),

		leaderHint: true,

		clk:                   clk,
		lookupTXT:             LookupTXT,
		minEpochIntervalTimer: clk.Timer(0),
		maxEpochIntervalTimer: clk.Timer(0),
	}
	for _, d := range cfg.EmailProofAllowedDomains {
		ks.emailProofAllowedDomains[d] = struct{}{}
	}
	// TODO remove this before production
	if cfg.KeyserverConfig.InsecureSkipEmailProof {
		ks.insecureSkipEmailProof = true
	}

	switch replicaStateBytes, err := db.Get(tableReplicaState); err {
	case ks.db.ErrNotFound():
		// ReplicaState zero value is valid initialization
	case nil:
		if err := ks.rs.Unmarshal(replicaStateBytes); err != nil {
			return nil, err
		}
	default:
		return nil, err
	}
	ks.leaderHint = true
	ks.resetEpochTimers(ks.rs.LastEpochDelimiter.Timestamp.Time())
	ks.updateEpochProposer()
	ks.sb = concurrent.NewSequenceBroadcast(ks.rs.NextIndexVerifier)

	ok := false
	if cfg.PublicAddr != "" {
		ks.publicServer = grpc.NewServer(grpc.Creds(credentials.NewTLS(publicTLS)))
		proto.RegisterE2EKSPublicServer(ks.publicServer, ks)
		ks.publicListen, err = net.Listen("tcp", cfg.PublicAddr)
		if err != nil {
			return nil, err
		}
		defer func() {
			if !ok {
				ks.publicListen.Close()
			}
		}()
	}
	if cfg.VerifierAddr != "" {
		ks.verifierServer = grpc.NewServer(grpc.Creds(credentials.NewTLS(verifierTLS)))
		proto.RegisterE2EKSVerificationServer(ks.verifierServer, ks)
		ks.verifierListen, err = net.Listen("tcp", cfg.VerifierAddr)
		if err != nil {
			return nil, err
		}
		defer func() {
			if !ok {
				ks.verifierListen.Close()
			}
		}()
	}
	if cfg.HKPAddr != "" {
		ks.hkpListen, err = tls.Listen("tcp", cfg.HKPAddr, hkpTLS)
		if err != nil {
			return nil, err
		}
		ks.hkpFront = &hkpfront.HKPFront{InsecureSkipVerify: true, Lookup: ks.Lookup, Clk: ks.clk}
		defer func() {
			if !ok {
				ks.hkpListen.Close()
			}
		}()
	}
	if cfg.HTTPFrontAddr != "" {
		ks.httpFrontListen, err = tls.Listen("tcp", cfg.HTTPFrontAddr, httpFrontTLS)
		if err != nil {
			return nil, err
		}
		ks.httpFront = &httpfront.HTTPFront{Lookup: ks.Lookup, Update: ks.Update}
		defer func() {
			if !ok {
				ks.httpFrontListen.Close()
			}
		}()
	}
	ks.merkletree, err = merkletree.AccessMerkleTree(ks.db, []byte{tableMerkleTreePrefix}, nil)
	if err != nil {
		return nil, err
	}

	ok = true
	return ks, nil
}
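// Illustrative sketch (not part of the keyserver package): this older Open
// reads the email-proof settings straight from cfg and builds the allowed
// domain set as a map used only for membership tests. The domain list and
// address below are illustrative.
package main

import (
	"fmt"
	"strings"
)

func main() {
	allowedDomains := []string{"example.com", "example.org"}

	// Same construction as ks.emailProofAllowedDomains above: struct{} values
	// cost nothing and the map answers "is this domain allowed?" in O(1).
	allowed := make(map[string]struct{}, len(allowedDomains))
	for _, d := range allowedDomains {
		allowed[d] = struct{}{}
	}

	addr := "alice@example.com"
	domain := addr[strings.LastIndex(addr, "@")+1:]
	_, ok := allowed[domain]
	fmt.Println(domain, "allowed:", ok) // example.com allowed: true
}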