func main() {
	configPathPtr := flag.String("config", "config.json", "path to config file")
	flag.Parse()

	// Load the verifier configuration from the JSON config file.
	configReader, err := os.Open(*configPathPtr)
	if err != nil {
		log.Fatalf("Failed to open configuration file: %s", err)
	}
	cfg := &proto.VerifierConfig{}
	err = jsonpb.Unmarshal(configReader, cfg)
	if err != nil {
		log.Fatalf("Failed to parse configuration file: %s", err)
	}

	// Open the verifier's local LevelDB database and wrap it in the kv interface.
	// (Renamed from "leveldb" to avoid shadowing the package name.)
	ldb, err := leveldb.OpenFile(cfg.LevelDBPath, nil)
	if err != nil {
		log.Fatalf("Couldn't open DB in directory %s: %s", cfg.LevelDBPath, err)
	}
	db := leveldbkv.Wrap(ldb)

	// Start the verifier and run until interrupted.
	server, err := verifier.Start(cfg, db, getKey)
	if err != nil {
		panic(err)
	}
	defer server.Stop()

	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	<-ch
}
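// The getKey function passed to verifier.Start above is defined elsewhere in
// this package. The version below is only an illustrative sketch of what such
// a loader could look like, assuming (hypothetically) that the verifier's
// signing key is stored as a raw ed25519 private key in a "keys" directory
// named after the key ID, and that crypto, crypto/ed25519, fmt, os, and
// path/filepath are imported.
func getKeySketch(keyid string) (crypto.PrivateKey, error) {
	keyBytes, err := os.ReadFile(filepath.Join("keys", keyid))
	if err != nil {
		return nil, err
	}
	if len(keyBytes) != ed25519.PrivateKeySize {
		return nil, fmt.Errorf("key %q: want %d bytes, got %d", keyid, ed25519.PrivateKeySize, len(keyBytes))
	}
	// ed25519.PrivateKey implements crypto.Signer, so it satisfies crypto.PrivateKey.
	return ed25519.PrivateKey(keyBytes), nil
}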
// setupRealm initializes nReplicas keyserver replicas and nVerifiers
// verifiers, and then waits until each one of them has signed an epoch.
func setupRealm(t *testing.T, nReplicas, nVerifiers int) (
	kss []*Keyserver, caPool *x509.CertPool, clks []*clock.Mock,
	verifiers []uint64, clientKeyGetter func(string) (crypto.PrivateKey, error),
	clientConfig *proto.Config, teardown func(),
) {
	cfgs, gks, ck, clientConfig, caCert, caPool, caKey, teardown := setupKeyservers(t, nReplicas)
	logs, dbs, clks, _, teardown2 := setupRaftLogCluster(t, nReplicas, 0)
	teardown = chain(teardown2, teardown)

	// Arrange to be notified once each replica has ratified its first epoch.
	var ksDone sync.WaitGroup
	ksDone.Add(nReplicas)
	for i := range dbs {
		var ksDoneOnce sync.Once
		dbs[i] = tracekv.WithSimpleTracing(dbs[i], func(update tracekv.Update) {
			// We are waiting for an epoch to be ratified (in case there are no
			// verifiers, blocking on them does not help).
			if update.IsDeletion || len(update.Key) < 1 || update.Key[0] != tableVerifierLogPrefix {
				return
			}
			ksDoneOnce.Do(func() { ksDone.Done() })
		})
	}

	type doneVerifier struct {
		teardown func()
		id       uint64
	}
	ksBarrier := make(chan struct{})
	doneVerifiers := make(chan doneVerifier, nVerifiers)
	vpks := make([]*proto.PublicKey, nVerifiers)
	for i := 0; i < nVerifiers; i++ {
		vrBarrier := make(chan struct{})
		var verifierTeardown func()
		var vcfg *proto.VerifierConfig
		var doneOnce sync.Once
		dbs[0] = tracekv.WithSimpleTracing(dbs[0], func(update tracekv.Update) {
			// We are waiting for epoch 1 to be ratified by the verifier and to
			// reach the client, because before that lookups requiring this
			// verifier will immediately fail.
			if len(update.Key) < 1 || update.Key[0] != tableRatificationsPrefix {
				return
			}
			<-vrBarrier
			epoch := binary.BigEndian.Uint64(update.Key[1 : 1+8])
			id := binary.BigEndian.Uint64(update.Key[1+8 : 1+8+8])
			if id == vcfg.ID && epoch == 1 {
				doneOnce.Do(func() { doneVerifiers <- doneVerifier{verifierTeardown, vcfg.ID} })
			}
		})
		go func(i int) {
			var vdb kv.DB
			<-ksBarrier
			var getKey func(string) (crypto.PrivateKey, error)
			vcfg, getKey, vdb, vpks[i], verifierTeardown = setupVerifier(t, clientConfig.Realms[0].VerificationPolicy,
				kss[i%nReplicas].verifierListen.Addr().String(), caCert, caPool, caKey)

			vr, err := verifier.Start(vcfg, vdb, getKey)
			if err != nil {
				t.Fatal(err)
			}
			verifierTeardown = chain(vr.Stop, verifierTeardown)
			close(vrBarrier)
		}(i)
	}

	// Start the keyserver replicas.
	for i := range cfgs {
		ks, err := Open(cfgs[i], dbs[i], logs[i], clientConfig.Realms[0].VerificationPolicy, clks[i], gks[i], nil)
		if err != nil {
			t.Fatal(err)
		}
		ks.insecureSkipEmailProof = true
		ks.Start()
		teardown = chain(ks.Stop, teardown)
		kss = append(kss, ks)
	}
	close(ksBarrier)

	// Advance the mock clocks until every replica has ratified an epoch.
	ksDoneCh := make(chan struct{})
	go func() {
		ksDone.Wait()
		close(ksDoneCh)
	}()
loop:
	for {
		select {
		case <-time.After(poll):
			for _, clk := range clks {
				clk.Add(tick)
			}
		case <-ksDoneCh:
			break loop
		}
	}

	// Collect the verifier IDs and extend the verification policy so that the
	// client additionally requires a signature from every verifier.
	for i := 0; i < nVerifiers; i++ {
		v := <-doneVerifiers
		verifiers = append(verifiers, v.id)
		teardown = chain(v.teardown, teardown)
	}
	pol := copyAuthorizationPolicy(clientConfig.Realms[0].VerificationPolicy)
	pol.PolicyType = &proto.AuthorizationPolicy_Quorum{Quorum: &proto.QuorumExpr{
		Subexpressions: []*proto.QuorumExpr{pol.GetQuorum()},
		Threshold:      uint32(1 + nVerifiers),
		Candidates:     verifiers,
	}}
	for i := 0; i < nVerifiers; i++ {
		pol.PublicKeys[proto.KeyID(vpks[i])] = vpks[i]
	}
	clientConfig.Realms[0].VerificationPolicy = pol
	return kss, caPool, clks, verifiers, ck, clientConfig, teardown
}
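// A minimal usage sketch (hypothetical test, not part of the original suite):
// it only checks that setupRealm brings up the requested replicas and
// verifiers and that teardown can be deferred. Real tests in this package
// would go on to dial the keyserver using clientConfig, caPool, and
// clientKeyGetter.
func TestSetupRealmSketch(t *testing.T) {
	kss, _, _, verifiers, _, _, teardown := setupRealm(t, 3, 1)
	defer teardown()
	if len(kss) != 3 || len(verifiers) != 1 {
		t.Fatalf("expected 3 keyservers and 1 verifier, got %d and %d", len(kss), len(verifiers))
	}
}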