Example #1
// setupRealm initializes nReplicas keyserver replicas and nVerifiers
// verifiers, then waits until every replica has ratified an epoch and
// every verifier has signed epoch 1.
func setupRealm(t *testing.T, nReplicas, nVerifiers int) (
	kss []*Keyserver, caPool *x509.CertPool, clks []*clock.Mock, verifiers []uint64,
	clientKeyGetter func(string) (crypto.PrivateKey, error), clientConfig *proto.Config, teardown func(),
) {
	cfgs, gks, ck, clientConfig, caCert, caPool, caKey, teardown := setupKeyservers(t, nReplicas)
	logs, dbs, clks, _, teardown2 := setupRaftLogCluster(t, nReplicas, 0)
	teardown = chain(teardown2, teardown)

	var ksDone sync.WaitGroup
	ksDone.Add(nReplicas)
	for i := range dbs {
		var ksDoneOnce sync.Once
		dbs[i] = tracekv.WithSimpleTracing(dbs[i], func(update tracekv.Update) {
			// Wait for an epoch to be ratified; when there are no
			// verifiers, blocking on them would not help.
			if update.IsDeletion || len(update.Key) < 1 || update.Key[0] != tableVerifierLogPrefix {
				return
			}
			ksDoneOnce.Do(func() { ksDone.Done() })
		})
	}

	type doneVerifier struct {
		teardown func()
		id       uint64
	}
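	// ksBarrier holds the verifier goroutines back until every keyserver
	// below has been opened and started.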
	ksBarrier := make(chan struct{})
	doneVerifiers := make(chan doneVerifier, nVerifiers)
	vpks := make([]*proto.PublicKey, nVerifiers)
	for i := 0; i < nVerifiers; i++ {
		vrBarrier := make(chan struct{})
		var verifierTeardown func()
		var vcfg *proto.VerifierConfig
		var doneOnce sync.Once
		dbs[0] = tracekv.WithSimpleTracing(dbs[0], func(update tracekv.Update) {
			// Wait for epoch 1 to be ratified by this verifier and to reach
			// the client: before that happens, lookups requiring the
			// verifier fail immediately.
			if len(update.Key) < 1 || update.Key[0] != tableRatificationsPrefix {
				return
			}
			<-vrBarrier
			epoch := binary.BigEndian.Uint64(update.Key[1 : 1+8])
			id := binary.BigEndian.Uint64(update.Key[1+8 : 1+8+8])
			if id == vcfg.ID && epoch == 1 {
				doneOnce.Do(func() { doneVerifiers <- doneVerifier{verifierTeardown, vcfg.ID} })
			}
		})
		go func(i int) {
			var vdb kv.DB
			<-ksBarrier
			var getKey func(string) (crypto.PrivateKey, error)
			vcfg, getKey, vdb, vpks[i], verifierTeardown = setupVerifier(t, clientConfig.Realms[0].VerificationPolicy,
				kss[i%nReplicas].verifierListen.Addr().String(), caCert, caPool, caKey)

			vr, err := verifier.Start(vcfg, vdb, getKey)
			if err != nil {
				// t.Fatal must not be called outside the test goroutine;
				// record the failure and unblock the tracing callback.
				t.Error(err)
				close(vrBarrier)
				return
			}
			verifierTeardown = chain(vr.Stop, verifierTeardown)
			close(vrBarrier)
		}(i)
	}
	for i := range cfgs {
		ks, err := Open(cfgs[i], dbs[i], logs[i], clientConfig.Realms[0].VerificationPolicy, clks[i], gks[i], nil)
		if err != nil {
			t.Fatal(err)
		}
		ks.insecureSkipEmailProof = true
		ks.Start()
		teardown = chain(ks.Stop, teardown)
		kss = append(kss, ks)
	}
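	// All keyservers are now serving; release the verifier goroutines.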
	close(ksBarrier)

	ksDoneCh := make(chan struct{})
	go func() {
		ksDone.Wait()
		close(ksDoneCh)
	}()

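	// Drive the mock clocks forward until every replica has ratified an
	// epoch (observed via the verifier-log writes traced above).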
loop:
	for {
		select {
		case <-time.After(poll):
			for _, clk := range clks {
				clk.Add(tick)
			}
		case <-ksDoneCh:
			break loop
		}
	}
	for i := 0; i < nVerifiers; i++ {
		v := <-doneVerifiers
		verifiers = append(verifiers, v.id)
		teardown = chain(v.teardown, teardown)
	}
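	// Tighten the client's verification policy: the original keyserver
	// quorum (one subexpression) plus all nVerifiers must sign.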
	pol := copyAuthorizationPolicy(clientConfig.Realms[0].VerificationPolicy)
	pol.PolicyType = &proto.AuthorizationPolicy_Quorum{Quorum: &proto.QuorumExpr{
		Subexpressions: []*proto.QuorumExpr{pol.GetQuorum()},
		Threshold:      uint32(1 + nVerifiers),
		Candidates:     verifiers,
	}}
	for i := 0; i < nVerifiers; i++ {
		pol.PublicKeys[proto.KeyID(vpks[i])] = vpks[i]
	}
	clientConfig.Realms[0].VerificationPolicy = pol
	return kss, caPool, clks, verifiers, ck, clientConfig, teardown
}
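
A minimal usage sketch of setupRealm (hypothetical; TestRealmSmoke and its assertions are illustrative, not part of the original suite):

func TestRealmSmoke(t *testing.T) {
	// Spin up 3 keyserver replicas and 2 verifiers; teardown stops them all.
	kss, caPool, clks, verifiers, getKey, clientConfig, teardown := setupRealm(t, 3, 2)
	defer teardown()
	_, _, _, _ = caPool, clks, getKey, clientConfig

	if len(kss) != 3 {
		t.Errorf("expected 3 replicas, got %d", len(kss))
	}
	if len(verifiers) != 2 {
		t.Errorf("expected 2 verifier IDs, got %d", len(verifiers))
	}
}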
Example #2
func TestKeyserverStartProgressStop(t *testing.T) {
	pprof()
	nReplicas := 3
	cfgs, gks, _, ccfg, _, _, _, teardown := setupKeyservers(t, nReplicas)
	defer teardown()
	logs, dbs, clks, _, teardown2 := setupRaftLogCluster(t, nReplicas, 0)
	defer teardown2()

	// The db writes are the test's observable output. We wait for epoch 3 to
	// be ratified by all replicas as a primitive progress check.
	kss := []*Keyserver{}
	var done sync.WaitGroup
	done.Add(nReplicas)
	for i := 0; i < nReplicas; i++ {
		var closeOnce sync.Once
		dbs[i] = tracekv.WithSimpleTracing(dbs[i], func(update tracekv.Update) {
			if update.IsDeletion || len(update.Key) < 1 || update.Key[0] != tableRatificationsPrefix {
				return
			}
			epoch := binary.BigEndian.Uint64(update.Key[1 : 1+8])
			if epoch == 3 {
				closeOnce.Do(func() { done.Done() })
			}
		})

		ks, err := Open(cfgs[i], dbs[i], logs[i], ccfg.Realms[0].VerificationPolicy, clks[i], gks[i], nil)
		if err != nil {
			t.Fatal(err)
		}
		ks.insecureSkipEmailProof = true
		ks.Start()
		kss = append(kss, ks)
	}

	progressCh := make(chan struct{})
	go func() {
		done.Wait()
		close(progressCh)
	}()

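	// Advance a randomly chosen replica's mock clock until epoch 3 is
	// ratified everywhere.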
loop:
	for {
		select {
		case <-progressCh:
			break loop
		default:
			i := mathrand.Intn(len(kss))
			clks[i].Add(tick)
		}
	}

	for _, ks := range kss {
		ks.Stop()
	}
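	// The deferred calls above run these again; the teardown helpers are
	// assumed to be idempotent.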
	teardown2()
	teardown()

	if testing.Verbose() {
		time.Sleep(time.Millisecond)
		n := runtime.NumGoroutine()
		// Grow the buffer until the full dump fits, capped at 128 KiB.
		stackBuf := make([]byte, 1024)
		l := runtime.Stack(stackBuf, true)
		for l == len(stackBuf) && l < 128*1024 {
			stackBuf = make([]byte, 2*len(stackBuf))
			l = runtime.Stack(stackBuf, true)
		}
		t.Logf("%d goroutines in existence after Stop:\n%s", n, stackBuf[:l])
	}
}
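
Both examples stack cleanup functions with a chain helper whose definition is not shown here. A plausible minimal implementation (an assumption about the project's helper, not its confirmed source) is:

// chain returns a function that invokes each f in order, so the most
// recently prepended cleanup (e.g. ks.Stop) runs before older ones.
func chain(fs ...func()) func() {
	return func() {
		for _, f := range fs {
			f()
		}
	}
}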