Example 1
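// updateSignatureProposer either starts or stops the proposer for this replica's signature on the last epoch, as necessary.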
func (ks *Keyserver) updateSignatureProposer() {
	// invariant: do not access the db if ThisReplicaNeedsToSignLastEpoch = false
	want := ks.rs.ThisReplicaNeedsToSignLastEpoch
	have := ks.signatureProposer != nil
	if have == want {
		return
	}

	switch want {
	case true:
		tehBytes, err := ks.db.Get(tableEpochHeads(ks.rs.LastEpochDelimiter.EpochNumber))
		if err != nil {
			log.Panicf("ThisReplicaNeedsToSignLastEpoch but no TEH for last epoch in db: %s", err)
		}
		var teh proto.EncodedTimestampedEpochHead
		if err := teh.Unmarshal(tehBytes); err != nil {
			log.Panicf("tableEpochHeads(%d) invalid: %s", ks.rs.LastEpochDelimiter.EpochNumber, err)
		}
		seh := &proto.SignedEpochHead{
			Head:       teh,
			Signatures: map[uint64][]byte{ks.replicaID: ed25519.Sign(ks.sehKey, tehBytes)[:]},
		}
		ks.signatureProposer = StartProposer(ks.log, ks.clk, ks.retryProposalInterval,
			replication.LogEntry{Data: proto.MustMarshal(&proto.KeyserverStep{Type: &proto.KeyserverStep_ReplicaSigned{ReplicaSigned: seh}})})
	case false:
		ks.signatureProposer.Stop()
		ks.signatureProposer = nil
	}
}
Example 2
// PushRatification implements the E2EKSVerification interface from proto/verifier.proto
func (ks *Keyserver) PushRatification(ctx context.Context, r *proto.SignedEpochHead) (*proto.Nothing, error) {
	verifierCertID, err := authenticateVerifier(ctx)
	if err != nil {
		log.Printf("PushRatification: %s", err)
		return nil, fmt.Errorf("PushRatification: %s", err)
	}
	for signerID := range r.Signatures {
		if signerID != verifierCertID {
			return nil, fmt.Errorf("PushRatification: not authorized: authenticated as %x but tried to write %x's signature", verifierCertID, signerID)
		}
	}
	uid := genUID()
	ch := ks.wr.Wait(uid)
	ks.log.Propose(ctx, replication.LogEntry{Data: proto.MustMarshal(&proto.KeyserverStep{
		UID:  uid,
		Type: &proto.KeyserverStep_VerifierSigned{VerifierSigned: r},
	})})
	select {
	case <-ctx.Done():
		ks.wr.Notify(uid, nil)
		return nil, ctx.Err()
	case <-ch:
		return nil, nil
	}
}
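The propose-then-wait pattern above relies on ks.wr: Wait registers interest in a UID before the entry is proposed, and the step handler later calls Notify with the result once the entry commits (Example 12). The waiting-room type itself is not shown in these examples; the following is a minimal sketch of such a helper, with hypothetical names, using only sync from the standard library.

// waitingRoom is a sketch of the ks.wr helper assumed above, not the actual
// coname implementation: Wait registers a UID and returns a channel, Notify
// delivers at most one value to it and forgets the UID.
type waitingRoom struct {
	mu      sync.Mutex
	waiters map[uint64]chan interface{}
}

func newWaitingRoom() *waitingRoom {
	return &waitingRoom{waiters: make(map[uint64]chan interface{})}
}

func (w *waitingRoom) Wait(uid uint64) <-chan interface{} {
	ch := make(chan interface{}, 1) // buffered so Notify never blocks
	w.mu.Lock()
	w.waiters[uid] = ch
	w.mu.Unlock()
	return ch
}

func (w *waitingRoom) Notify(uid uint64, v interface{}) {
	w.mu.Lock()
	ch, ok := w.waiters[uid]
	delete(w.waiters, uid)
	w.mu.Unlock()
	if ok {
		ch <- v
	}
}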
Example 3
// verifierLogAppend appends an entry to the verifier log and prepares the commands to:
// 1) store it to local persistent storage
// 2) advance the verifier log index
// 3) share the new log entry with verifiers
// called from step: no io
func (ks *Keyserver) verifierLogAppend(m *proto.VerifierStep, rs *proto.ReplicaState, wb kv.Batch) func() {
	// m : *mut // RECURSIVE transfer of ownership
	// ks : &const // read-only
	// rs, wb : &mut
	wb.Put(tableVerifierLog(rs.NextIndexVerifier), proto.MustMarshal(m))
	rs.NextIndexVerifier++
	return func() {
		ks.sb.Send(m)
	}
}
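The returned closure is the deferred-IO half of the pattern the run loops depend on: every durable effect goes into wb first, and only after the batch has been committed does the caller run the closure that touches the network. Restating the calling convention used by the run loops in Examples 4 and 7:

// How a caller is expected to use the return value (same discipline as the
// run loops): flush the batch, then perform the network side effects.
deferredIO := ks.verifierLogAppend(m, rs, wb)
wb.Put(tableReplicaState, proto.MustMarshal(rs))
if err := ks.db.Write(wb); err != nil {
	log.Panicf("sync step to db: %s", err)
}
wb.Reset()
if deferredIO != nil {
	deferredIO() // e.g. broadcast m to the verifier streams via ks.sb.Send
}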
Example 4
// run is the CSP-style main loop of the verifier. All code critical for safe
// persistence should be directly in run. All functions called from run should
// either interpret data and modify their mutable arguments OR interact with the
// network and disk, but not both.
func (vr *Verifier) run() {
	keyserverConnection, err := grpc.Dial(vr.keyserverAddr, grpc.WithTransportCredentials(vr.auth))
	if err != nil {
		log.Panicf("dial %s: %s", vr.keyserverAddr, err)
	}
	vr.keyserver = proto.NewE2EKSVerificationClient(keyserverConnection)
	stream, err := vr.keyserver.VerifierStream(vr.ctx, &proto.VerifierStreamRequest{
		Start:    vr.vs.NextIndex,
		PageSize: math.MaxUint64,
	})
	if err != nil {
		keyserverConnection.Close()
		log.Panicf("VerifierStream: %s", err)
	}

	wb := vr.db.NewBatch()
	for !vr.shuttingDown() {
		var step *proto.VerifierStep
		step, err = stream.Recv()
		if err != nil {
			log.Printf("VerifierStream.Recv: %s", err)
			break
		}
		wb.Put(tableVerifierLog(vr.vs.NextIndex), proto.MustMarshal(step))
		deferredIO := vr.step(step, &vr.vs, wb)
		vr.vs.NextIndex++
		wb.Put(tableVerifierState, proto.MustMarshal(&vr.vs))
		if err := vr.db.Write(wb); err != nil {
			log.Panicf("sync step to db: %s", err)
		}
		wb.Reset()
		if deferredIO != nil {
			deferredIO()
		}
	}
}
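vr.shuttingDown() is used as the loop condition but is not shown. A minimal sketch under the assumption that it simply checks whether vr.ctx (used for VerifierStream and PushRatification above) has been cancelled:

// Sketch only; the real implementation may track shutdown differently.
func (vr *Verifier) shuttingDown() bool {
	select {
	case <-vr.ctx.Done():
		return true
	default:
		return false
	}
}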
Example 5
// PushRatification implements the E2EKSVerification interface from proto/verifier.proto
func (ks *Keyserver) PushRatification(ctx context.Context, r *proto.SignedEpochHead) (*proto.Nothing, error) {
	// FIXME: verify the ratifier signature (tricky: where do we keep verifier pk-s?)
	uid := genUID()
	ch := ks.wr.Wait(uid)
	ks.log.Propose(ctx, replication.LogEntry{Data: proto.MustMarshal(&proto.KeyserverStep{
		UID:            uid,
		VerifierSigned: r,
	})})
	select {
	case <-ctx.Done():
		ks.wr.Notify(uid, nil)
		return nil, ctx.Err()
	case <-ch:
		return nil, nil
	}
}
Example 6
func TestKeyserverRejectsMissignedUpdate(t *testing.T) {
	dieOnCtrlC()
	kss, caPool, clks, _, ck, clientConfig, teardown := setupRealm(t, 3, 3)
	defer teardown()
	stop := stoppableSyncedClocks(clks)
	defer close(stop)

	waitForFirstEpoch(kss[0], clientConfig.Realms[0].VerificationPolicy.GetQuorum())

	clientTLS, err := clientConfig.Realms[0].ClientTLS.Config(ck)
	if err != nil {
		t.Fatal(err)
	}
	_, alicePk, aliceEntry, aliceProfile := doRegister(t, kss[0], clientConfig, clientTLS, caPool, clks[0].Now(), alice, 0, proto.Profile{
		Nonce: []byte("noncenoncenonceNONCE"),
		Keys:  map[string][]byte{"abc": []byte{1, 2, 3}, "xyz": []byte("TEST 456")},
	})

	var aliceKeyIdBytes [8]byte
	sha3.ShakeSum256(aliceKeyIdBytes[:], proto.MustMarshal(alicePk))
	aliceKeyid := binary.BigEndian.Uint64(aliceKeyIdBytes[:8])
	_, badSk, _ := ed25519.GenerateKey(rand.Reader)

	conn, err := grpc.Dial(kss[1].publicListen.Addr().String(), grpc.WithTransportCredentials(credentials.NewTLS(clientTLS)))
	if err != nil {
		t.Fatal(err)
	}
	updateC := proto.NewE2EKSPublicClient(conn)
	_, err = updateC.Update(context.Background(), &proto.UpdateRequest{
		Update: &proto.SignedEntryUpdate{
			NewEntry:   *aliceEntry,
			Signatures: map[uint64][]byte{aliceKeyid: ed25519.Sign(badSk, aliceEntry.Encoding)[:]},
		},
		Profile: *aliceProfile,
		LookupParameters: &proto.LookupRequest{
			UserId:            alice,
			QuorumRequirement: clientConfig.Realms[0].VerificationPolicy.GetQuorum(),
		},
	})
	if err == nil {
		t.Fatalf("update went through even though it was signed with the wrong key")
	}
}
Example 7
// run is the CSP-style main loop of the keyserver. All code critical for safe
// persistence should be directly in run. All functions called from run should
// either interpret data and modify their mutable arguments OR interact with the
// network and disk, but not both.
func (ks *Keyserver) run() {
	defer close(ks.stopped)
	var step proto.KeyserverStep
	wb := ks.db.NewBatch()
	for {
		select {
		case <-ks.stop:
			return
		case stepEntry := <-ks.log.WaitCommitted():
			if stepEntry.ConfChange != nil {
				ks.log.ApplyConfChange(stepEntry.ConfChange)
			}
			stepBytes := stepEntry.Data
			if stepBytes == nil {
				continue // allow logs to skip slots for indexing purposes
			}
			if err := step.Unmarshal(stepBytes); err != nil {
				log.Panicf("invalid step pb in replicated log: %s", err)
			}
			// TODO: (for throughput) allow multiple steps per log entry
			// (pipelining). Maybe this would be better implemented at the log level?
			deferredIO := ks.step(&step, &ks.rs, wb)
			ks.rs.NextIndexLog++
			wb.Put(tableReplicaState, proto.MustMarshal(&ks.rs))
			if err := ks.db.Write(wb); err != nil {
				log.Panicf("sync step to db: %s", err)
			}
			wb.Reset()
			step.Reset()
			if deferredIO != nil {
				deferredIO()
			}
		case ks.leaderHint = <-ks.log.LeaderHintSet():
			ks.updateEpochProposer()
		case <-ks.minEpochIntervalTimer.C:
			ks.minEpochIntervalPassed = true
			ks.updateEpochProposer()
		case <-ks.maxEpochIntervalTimer.C:
			ks.maxEpochIntervalPassed = true
			ks.updateEpochProposer()
		}
	}
}
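The run loop only flips leaderHint, minEpochIntervalPassed, and maxEpochIntervalPassed and then calls updateEpochProposer; the decision itself lives in wantEpochProposer (Example 9), which is not shown. One plausible sketch, assuming only the boolean fields visible here and rs.PendingUpdates from Example 12: the leader proposes an epoch delimiter once the minimum interval has passed and there are updates to seal, or unconditionally once the maximum interval has passed.

// Sketch only; the real predicate is not included in these examples.
func (ks *Keyserver) wantEpochProposer() bool {
	return ks.leaderHint &&
		(ks.maxEpochIntervalPassed || (ks.minEpochIntervalPassed && ks.rs.PendingUpdates))
}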
Example 8
// Update implements proto.E2EKS.UpdateServer
func (ks *Keyserver) Update(ctx context.Context, req *proto.UpdateRequest) (*proto.LookupProof, error) {
	ctx, cancel := context.WithTimeout(ctx, ks.clientTimeout)
	defer cancel()
	if err := ks.verifyUpdateEdge(req); err != nil {
		return nil, err
	}

	uid := genUID()
	ch := ks.wr.Wait(uid)
	ks.log.Propose(ctx, replication.LogEntry{Data: proto.MustMarshal(&proto.KeyserverStep{
		UID:    uid,
		Update: req,
	})})
	select {
	case <-ctx.Done():
		ks.wr.Notify(uid, nil)
		return nil, ctx.Err()
	case v := <-ch:
		out := v.(updateOutput)
		if out.Error != nil {
			return nil, out.Error
		}
		return ks.blockingLookup(ctx, req.LookupParameters, out.Epoch)
	}
}
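The value delivered through the waiting room is asserted to updateOutput, whose declaration is not included here; the fields used above and in the step handler (Example 12) imply something like the following (inferred, not the verbatim definition):

type updateOutput struct {
	Epoch uint64
	Error error
}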
Example 9
// updateEpochProposer either starts or stops the epoch delimiter proposer as necessary.
func (ks *Keyserver) updateEpochProposer() {
	want := ks.wantEpochProposer()
	have := ks.epochProposer != nil
	if have == want {
		return
	}

	switch want {
	case true:
		ks.epochProposer = StartProposer(ks.log, ks.clk, ks.retryProposalInterval,
			replication.LogEntry{
				Data: proto.MustMarshal(&proto.KeyserverStep{Type: &proto.KeyserverStep_EpochDelimiter{EpochDelimiter: &proto.EpochDelimiter{
					EpochNumber: ks.rs.LastEpochDelimiter.EpochNumber + 1,
					Timestamp:   proto.Time(ks.clk.Now()),
				}}}),
				ConfChange: &replication.ConfChange{
					Operation: replication.ConfChangeNOP,
				},
			})
	case false:
		ks.epochProposer.Stop()
		ks.epochProposer = nil
	}
}
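StartProposer and the proposer's Stop method are used in Examples 1 and 9 but not defined there. A minimal sketch of a retrying proposer consistent with that usage; the real StartProposer also takes a clock (ks.clk) so tests can control time, which this sketch replaces with a plain time.Timer, and logProposer is a hypothetical minimal interface covering the one method the sketch needs.

// Sketch only, not the actual coname implementation.
type logProposer interface {
	Propose(ctx context.Context, entry replication.LogEntry)
}

type proposer struct {
	log      logProposer
	interval time.Duration
	entry    replication.LogEntry
	stop     chan struct{}
	done     chan struct{}
}

func StartProposer(log logProposer, interval time.Duration, entry replication.LogEntry) *proposer {
	p := &proposer{
		log:      log,
		interval: interval,
		entry:    entry,
		stop:     make(chan struct{}),
		done:     make(chan struct{}),
	}
	go p.run()
	return p
}

func (p *proposer) run() {
	defer close(p.done)
	t := time.NewTimer(p.interval)
	defer t.Stop()
	for {
		// Re-propose until stopped; duplicate log entries are tolerated because
		// the step handlers deduplicate them (see Example 12).
		p.log.Propose(context.Background(), p.entry)
		select {
		case <-p.stop:
			return
		case <-t.C:
			t.Reset(p.interval)
		}
	}
}

func (p *proposer) Stop() {
	close(p.stop)
	<-p.done
}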
Example 10
func doUpdate(
	t *testing.T, ks *Keyserver, clientConfig *proto.Config, clientTLS *tls.Config, caPool *x509.CertPool, now time.Time,
	name string, sk *[ed25519.PrivateKeySize]byte, pk *proto.PublicKey, version uint64, profileContents proto.Profile,
) (*proto.EncodedEntry, *proto.EncodedProfile) {
	conn, err := grpc.Dial(ks.publicListen.Addr().String(), grpc.WithTransportCredentials(credentials.NewTLS(clientTLS)))
	if err != nil {
		t.Fatal(err)
	}
	publicC := proto.NewE2EKSPublicClient(conn)

	// First, do a lookup to retrieve the index
	lookup, err := publicC.Lookup(context.Background(), &proto.LookupRequest{
		UserId: name,
		// We don't care about any signatures here; the server just needs to tell us the index.
		QuorumRequirement: &proto.QuorumExpr{
			Threshold:      0,
			Candidates:     []uint64{},
			Subexpressions: []*proto.QuorumExpr{},
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	index := lookup.Index

	// Do the update
	var keyidBytes [8]byte
	sha3.ShakeSum256(keyidBytes[:], proto.MustMarshal(pk))
	keyid := binary.BigEndian.Uint64(keyidBytes[:8])

	profile := proto.EncodedProfile{
		Profile: profileContents,
	}
	profile.UpdateEncoding()
	var commitment [64]byte
	sha3.ShakeSum256(commitment[:], profile.Encoding)
	entry := proto.EncodedEntry{
		Entry: proto.Entry{
			Index:   index,
			Version: version,
			UpdatePolicy: &proto.AuthorizationPolicy{
				PublicKeys: map[uint64]*proto.PublicKey{keyid: pk},
				PolicyType: &proto.AuthorizationPolicy_Quorum{Quorum: &proto.QuorumExpr{
					Threshold:      1,
					Candidates:     []uint64{keyid},
					Subexpressions: []*proto.QuorumExpr{},
				}},
			},
			ProfileCommitment: commitment[:],
		},
	}
	entry.UpdateEncoding()
	proof, err := publicC.Update(context.Background(), &proto.UpdateRequest{
		Update: &proto.SignedEntryUpdate{
			NewEntry:   entry,
			Signatures: map[uint64][]byte{keyid: ed25519.Sign(sk, entry.Encoding)[:]},
		},
		Profile: profile,
		LookupParameters: &proto.LookupRequest{
			UserId:            name,
			QuorumRequirement: clientConfig.Realms[0].VerificationPolicy.GetQuorum(),
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	if got, want := proof.Profile.Encoding, profile.Encoding; !bytes.Equal(got, want) {
		t.Errorf("updated profile didn't roundtrip: %x != %x", got, want)
	}
	_, err = coname.VerifyLookup(clientConfig, name, proof, now)
	if err != nil {
		t.Fatal(err)
	}
	return &entry, &profile
}
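Both doUpdate and the missigned-update test (Example 6) derive the 64-bit key ID the same way: SHAKE256 over the marshaled public key, first 8 bytes interpreted big-endian. A small helper capturing that derivation (hypothetical name, not present in the examples):

// keyIDFromPublicKey mirrors the inline derivation above: SHAKE256 over the
// marshaled key, truncated to 8 bytes, read big-endian.
func keyIDFromPublicKey(pk *proto.PublicKey) uint64 {
	var h [8]byte
	sha3.ShakeSum256(h[:], proto.MustMarshal(pk))
	return binary.BigEndian.Uint64(h[:])
}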
Example 11
// step is called by run and changes the in-memory state. No i/o allowed.
func (vr *Verifier) step(step *proto.VerifierStep, vs *proto.VerifierState, wb kv.Batch) (deferredIO func()) {
	// vr: &const
	// step, vs, wb: &mut
	switch step.Type.(type) {
	case *proto.VerifierStep_Update:
		index := step.GetUpdate().NewEntry.Index
		prevEntry, err := vr.getEntry(index, vs.NextEpoch)
		if err != nil {
			log.Panicf("%d: getEntry(%x, %d): %s", vs.NextIndex, index, vs.NextEpoch, err)
		}
		if err := coname.VerifyUpdate(prevEntry, step.GetUpdate()); err != nil {
			// the keyserver should filter all bad updates
			log.Panicf("%d: bad update %v: %s", vs.NextIndex, *step, err)
		}
		var entryHash [32]byte
		sha3.ShakeSum256(entryHash[:], step.GetUpdate().NewEntry.Encoding)
		latestTree := vr.merkletree.GetSnapshot(vs.LatestTreeSnapshot)
		newTree, err := latestTree.BeginModification()
		if err != nil {
			log.Panicf("%d: BeginModification(): %s", vs.NextIndex, err)
		}
		if err := newTree.Set(index, entryHash[:]); err != nil {
			log.Panicf("%d: Set(%x,%x): %s", vs.NextIndex, index, entryHash[:], err)
		}
		vs.LatestTreeSnapshot = newTree.Flush(wb).Nr
		wb.Put(tableEntries(index, vs.NextEpoch), step.GetUpdate().NewEntry.Encoding)

	case *proto.VerifierStep_Epoch:
		ok := coname.VerifyPolicy(vr.vs.KeyserverAuth, step.GetEpoch().Head.Encoding, step.GetEpoch().Signatures)
		// the bad steps here will not get persisted to disk right now. do we want them to?
		if !ok {
			log.Panicf("%d: keyserver signature verification failed: %#v", vs.NextIndex, *step)
		}
		r := step.GetEpoch().Head
		if r.Head.Realm != vr.realm {
			log.Panicf("%d: seh for realm %q, expected %q: %#v", vs.NextEpoch, r.Head.Realm, vr.realm, *step)
		}
		if r.Head.Epoch != vs.NextEpoch {
			log.Panicf("%d: got epoch %d instead: %#v", vs.NextEpoch, r.Head.Epoch, *step)
		}
		s := r.Head
		if !bytes.Equal(s.PreviousSummaryHash, vs.PreviousSummaryHash) {
			log.Panicf("%d: seh with previous summary hash %q, expected %q: %#v", vs.NextEpoch, s.PreviousSummaryHash, vs.PreviousSummaryHash, *step)
		}
		latestTree := vr.merkletree.GetSnapshot(vs.LatestTreeSnapshot)
		rootHash, err := latestTree.GetRootHash()
		if err != nil {
			log.Panicf("GetRootHash() failed: %s", err)
		}
		if !bytes.Equal(s.RootHash, rootHash) {
			log.Panicf("%d: seh with root hash %q, expected %q: %#v", vs.NextEpoch, s.RootHash, rootHash, *step)
		}
		seh := &proto.SignedEpochHead{
			Head: proto.EncodedTimestampedEpochHead{TimestampedEpochHead: proto.TimestampedEpochHead{
				Head:      s,
				Timestamp: proto.Time(time.Now()),
			}, Encoding: nil},
			Signatures: make(map[uint64][]byte, 1),
		}
		if vs.PreviousSummaryHash == nil {
			vs.PreviousSummaryHash = make([]byte, 64)
		}
		sha3.ShakeSum256(vs.PreviousSummaryHash[:], seh.Head.Head.Encoding)
		seh.Head.UpdateEncoding()
		seh.Signatures[vr.id] = ed25519.Sign(vr.signingKey, proto.MustMarshal(&seh.Head))[:]
		wb.Put(tableRatifications(vs.NextEpoch, vr.id), proto.MustMarshal(seh))
		vs.NextEpoch++
		return func() {
			_, err := vr.keyserver.PushRatification(vr.ctx, seh)
			if err != nil {
				log.Printf("PushRatification: %s", err)
			}
		}
	default:
		log.Panicf("%d: unknown step: %#v", vs.NextIndex, *step)
	}
	return
}
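Both the verifier above and the keyserver's epoch delimiter handling (Example 12) chain epochs by storing a 64-byte SHAKE256 of the previous epoch head's encoding in PreviousSummaryHash. A sketch of the corresponding check over two consecutive heads, as an auditor might write it (hypothetical helper, assuming the promoted EpochHead fields used elsewhere in these examples):

// checkEpochChain verifies that next's PreviousSummaryHash commits to prev.
func checkEpochChain(prev, next *proto.EncodedEpochHead) error {
	var want [64]byte
	sha3.ShakeSum256(want[:], prev.Encoding)
	if !bytes.Equal(next.PreviousSummaryHash, want[:]) {
		return fmt.Errorf("epoch %d does not chain to epoch %d", next.Epoch, prev.Epoch)
	}
	return nil
}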
Example 12
// step is called by run and changes the in-memory state. No i/o allowed.
func (ks *Keyserver) step(step *proto.KeyserverStep, rs *proto.ReplicaState, wb kv.Batch) (deferredIO func()) {
	// ks: &const
	// step, rs, wb: &mut
	switch step.Type.(type) {
	case *proto.KeyserverStep_Update:
		index := step.GetUpdate().Update.NewEntry.Index
		prevUpdate, err := ks.getUpdate(index, math.MaxUint64)
		if err != nil {
			log.Printf("getUpdate: %s", err)
			ks.wr.Notify(step.UID, updateOutput{Error: fmt.Errorf("internal error")})
			return
		}
		if err := ks.verifyUpdateDeterministic(prevUpdate, step.GetUpdate()); err != nil {
			ks.wr.Notify(step.UID, updateOutput{Error: err})
			return
		}
		latestTree := ks.merkletree.GetSnapshot(rs.LatestTreeSnapshot)

		// sanity check: compare previous version in Merkle tree vs in updates table
		prevEntryHashTree, _, err := latestTree.Lookup(index)
		if err != nil {
			ks.wr.Notify(step.UID, updateOutput{Error: fmt.Errorf("internal error")})
			return
		}
		var prevEntryHash []byte
		if prevUpdate != nil {
			prevEntryHash = make([]byte, 32)
			sha3.ShakeSum256(prevEntryHash, prevUpdate.Update.NewEntry.Encoding)
		}
		if !bytes.Equal(prevEntryHashTree, prevEntryHash) {
			log.Fatalf("ERROR: merkle tree and DB inconsistent for index %x: %x vs %x", index, prevEntryHashTree, prevEntryHash)
		}

		var entryHash [32]byte
		sha3.ShakeSum256(entryHash[:], step.GetUpdate().Update.NewEntry.Encoding)
		newTree, err := latestTree.BeginModification()
		if err != nil {
			ks.wr.Notify(step.UID, updateOutput{Error: fmt.Errorf("internal error")})
			return
		}
		if err := newTree.Set(index, entryHash[:]); err != nil {
			log.Printf("setting index '%x' gave error: %s", index, err)
			ks.wr.Notify(step.UID, updateOutput{Error: fmt.Errorf("internal error")})
			return
		}
		rs.LatestTreeSnapshot = newTree.Flush(wb).Nr
		epochNr := rs.LastEpochDelimiter.EpochNumber + 1
		wb.Put(tableUpdateRequests(index, epochNr), proto.MustMarshal(step.GetUpdate()))
		ks.wr.Notify(step.UID, updateOutput{Epoch: epochNr})

		rs.PendingUpdates = true
		ks.updateEpochProposer()

		if rs.LastEpochNeedsRatification {
			// We need to wait for the last epoch to appear in the verifier log before
			// inserting this update.
			wb.Put(tableUpdatesPendingRatification(rs.NextIndexLog), proto.MustMarshal(step.GetUpdate().Update))
		} else {
			// We can deliver the update to verifiers right away.
			return ks.verifierLogAppend(&proto.VerifierStep{Type: &proto.VerifierStep_Update{Update: step.GetUpdate().Update}}, rs, wb)
		}

	case *proto.KeyserverStep_EpochDelimiter:
		if step.GetEpochDelimiter().EpochNumber <= rs.LastEpochDelimiter.EpochNumber {
			return // a duplicate of this step has already been handled
		}
		rs.LastEpochDelimiter = *step.GetEpochDelimiter()
		log.Printf("epoch %d", step.GetEpochDelimiter().EpochNumber)

		rs.PendingUpdates = false
		ks.resetEpochTimers(rs.LastEpochDelimiter.Timestamp.Time())
		// rs.ThisReplicaNeedsToSignLastEpoch might already be true, if a majority
		// signed that did not include us. This will make us skip signing the last
		// epoch, but that's fine.
		rs.ThisReplicaNeedsToSignLastEpoch = true
		// However, it's not okay to see a new epoch delimiter before the previous
		// epoch has been ratified.
		if rs.LastEpochNeedsRatification {
			log.Panicf("new epoch delimiter but last epoch not ratified")
		}
		rs.LastEpochNeedsRatification = true
		ks.updateEpochProposer()
		deferredIO = ks.updateSignatureProposer

		snapshotNumberBytes := make([]byte, 8)
		binary.BigEndian.PutUint64(snapshotNumberBytes, rs.LatestTreeSnapshot)
		wb.Put(tableMerkleTreeSnapshot(step.GetEpochDelimiter().EpochNumber), snapshotNumberBytes)

		latestTree := ks.merkletree.GetSnapshot(rs.LatestTreeSnapshot)
		rootHash, err := latestTree.GetRootHash()
		if err != nil {
			log.Panicf("ks.latestTree.GetRootHash() failed: %s", err)
		}
		teh := &proto.EncodedTimestampedEpochHead{TimestampedEpochHead: proto.TimestampedEpochHead{
			Head: proto.EncodedEpochHead{EpochHead: proto.EpochHead{
				RootHash:            rootHash,
				PreviousSummaryHash: rs.PreviousSummaryHash,
				Realm:               ks.realm,
				Epoch:               step.GetEpochDelimiter().EpochNumber,
				IssueTime:           step.GetEpochDelimiter().Timestamp,
			}, Encoding: nil},
			Timestamp: step.GetEpochDelimiter().Timestamp,
		}, Encoding: nil}
		teh.Head.UpdateEncoding()
		teh.UpdateEncoding()
		if rs.PreviousSummaryHash == nil {
			rs.PreviousSummaryHash = make([]byte, 64)
		}
		sha3.ShakeSum256(rs.PreviousSummaryHash[:], teh.Head.Encoding)

		wb.Put(tableEpochHeads(step.GetEpochDelimiter().EpochNumber), proto.MustMarshal(teh))

	case *proto.KeyserverStep_ReplicaSigned:
		newSEH := step.GetReplicaSigned()
		epochNr := newSEH.Head.Head.Epoch
		// get epoch head
		tehBytes, err := ks.db.Get(tableEpochHeads(epochNr))
		if err != nil {
			log.Panicf("get tableEpochHeads(%d): %s", epochNr, err)
		}
		// compare epoch head to signed epoch head
		if got, want := tehBytes, newSEH.Head.Encoding; !bytes.Equal(got, want) {
			log.Panicf("replica signed different head: wanted %x, got %x", want, got)
		}

		// insert all the new signatures into the ratifications table (there should
		// actually only be one)
		newSehBytes := proto.MustMarshal(newSEH)
		for id := range newSEH.Signatures {
			// the entry might already exist in the DB (if the proposals got
			// duplicated), but it doesn't matter
			wb.Put(tableRatifications(epochNr, id), newSehBytes)
		}

		deferredIO = func() {
			// First write to DB, *then* notify subscribers. That way, if subscribers
			// start listening before searching the DB, they're guaranteed to see the
			// signature: either it's already in the DB, or they'll get notified. If
			// the order was reversed, they could miss the notification but still not
			// see anything in the DB.
			ks.signatureBroadcast.Publish(epochNr, newSEH)
		}

		if epochNr != rs.LastEpochDelimiter.EpochNumber {
			break
		}
		if rs.ThisReplicaNeedsToSignLastEpoch && newSEH.Signatures[ks.replicaID] != nil {
			rs.ThisReplicaNeedsToSignLastEpoch = false
			ks.updateEpochProposer()
			// updateSignatureProposer should in general be called after writes
			// have been flushed to db, but given ThisReplicaNeedsToSignLastEpoch =
			// false we know that updateSignatureProposer will not access the db.
			ks.updateSignatureProposer()
		}
		// get all existing ratifications for this epoch
		allSignatures := make(map[uint64][]byte)
		existingRatifications, err := ks.allRatificationsForEpoch(epochNr)
		if err != nil {
			log.Panicf("allRatificationsForEpoch(%d): %s", epochNr, err)
		}
		for _, seh := range existingRatifications {
			for id, sig := range seh.Signatures {
				allSignatures[id] = sig
			}
		}
		// check whether the epoch was already ratified
		wasRatified := coname.VerifyPolicy(ks.serverAuthorized, tehBytes, allSignatures)
		if wasRatified {
			break
		}
		for id, sig := range newSEH.Signatures {
			allSignatures[id] = sig
		}
		// check whether the epoch has now become ratified
		nowRatified := coname.VerifyPolicy(ks.serverAuthorized, tehBytes, allSignatures)
		if !nowRatified {
			break
		}
		if !rs.LastEpochNeedsRatification {
			log.Panicf("%x: thought last epoch was not already ratified, but it was", ks.replicaID)
		}
		rs.LastEpochNeedsRatification = false
		ks.updateEpochProposer()
		var teh proto.EncodedTimestampedEpochHead
		err = teh.Unmarshal(tehBytes)
		if err != nil {
			log.Panicf("invalid epoch head %d (%x): %s", epochNr, tehBytes, err)
		}
		allSignaturesSEH := &proto.SignedEpochHead{
			Head:       teh,
			Signatures: allSignatures,
		}
		oldDeferredIO := deferredIO
		deferredSendEpoch := ks.verifierLogAppend(&proto.VerifierStep{Type: &proto.VerifierStep_Epoch{Epoch: allSignaturesSEH}}, rs, wb)
		deferredSendUpdates := []func(){}
		iter := ks.db.NewIterator(kv.BytesPrefix([]byte{tableUpdatesPendingRatificationPrefix}))
		defer iter.Release()
		for iter.Next() {
			update := &proto.SignedEntryUpdate{}
			err := update.Unmarshal(iter.Value())
			if err != nil {
				log.Panicf("invalid pending update %x: %s", iter.Value(), err)
			}
			deferredSendUpdates = append(deferredSendUpdates, ks.verifierLogAppend(&proto.VerifierStep{Type: &proto.VerifierStep_Update{Update: update}}, rs, wb))
			wb.Delete(iter.Key())
		}
		deferredIO = func() {
			oldDeferredIO()
			// First, send the ratified epoch to verifiers
			deferredSendEpoch()
			// Then send updates that were waiting for that epoch to go out
			for _, f := range deferredSendUpdates {
				f()
			}
		}

	case *proto.KeyserverStep_VerifierSigned:
		rNew := step.GetVerifierSigned()
		for id := range rNew.Signatures {
			// Note: The signature *must* have been authenticated before being inserted
			// into the log, or else verifiers could just trample over everyone else's
			// signatures, including our own.
			dbkey := tableRatifications(rNew.Head.Head.Epoch, id)
			wb.Put(dbkey, proto.MustMarshal(rNew))
		}
		ks.wr.Notify(step.UID, nil)
		return func() {
			// As above, first write to DB, *then* notify subscribers.
			ks.signatureBroadcast.Publish(rNew.Head.Head.Epoch, rNew)
		}
	default:
		log.Panicf("unknown step pb in replicated log: %#v", step)
	}
	return
}
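The ReplicaSigned branch above reduces to one question: do the stored signatures plus the newly arrived ones satisfy the keyserver quorum policy when they did not before? A condensed sketch of that transition test, assuming ks.serverAuthorized is the *proto.AuthorizationPolicy that coname.VerifyPolicy expects (its declaration is not shown in these examples):

// becameRatified reports whether adding the incoming signatures is what pushes
// the epoch head over the quorum threshold. Hypothetical helper restating the
// wasRatified/nowRatified logic above.
func becameRatified(policy *proto.AuthorizationPolicy, tehBytes []byte, existing, incoming map[uint64][]byte) bool {
	all := make(map[uint64][]byte, len(existing)+len(incoming))
	for id, sig := range existing {
		all[id] = sig
	}
	if coname.VerifyPolicy(policy, tehBytes, all) {
		return false // already ratified before these signatures arrived
	}
	for id, sig := range incoming {
		all[id] = sig
	}
	return coname.VerifyPolicy(policy, tehBytes, all)
}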