Code example #1
File: verify.go  Project: Liamsi/coname
// VerifierStream implements the E2EKSVerification interface from proto/verifier.proto
func (ks *Keyserver) VerifierStream(rq *proto.VerifierStreamRequest, stream proto.E2EKSVerification_VerifierStreamServer) error {
	var step proto.VerifierStep // stack-allocate db read buffer
	for start, limit := rq.Start, saturatingAdd(rq.Start, rq.PageSize); start < limit; {
		// Try a kv.Range range scan first because it is the fastest. If this
		// does not satisfy the entire request (because the future entries have
		// not been generated yet), use ks.sb to wait for new entries, falling
		// back to range scans when this thread fails to meet the timing
		// constraints of ks.sb. Both methods of accessing verifier log
		// entries are surfaced here due to flow control and memory allocation
		// constraints: we cannot allow allocation of an unbounded queue.
		iter := ks.db.NewIterator(&kv.Range{Start: tableVerifierLog(start), Limit: tableVerifierLog(limit)})
		for ; iter.Next() && start < limit; start++ {
			select {
			case <-stream.Context().Done():
				iter.Release()
				return stream.Context().Err()
			default:
			}
			dbIdx := binary.BigEndian.Uint64(iter.Key()[1:])
			if dbIdx != start {
				log.Printf("ERROR: non-consecutive entries in verifier log (wanted %d, got %d)", start, dbIdx)
				iter.Release()
				return fmt.Errorf("internal error")
			}
			if err := step.Unmarshal(iter.Value()); err != nil {
				log.Printf("ERROR: invalid protobuf entry in verifier log (index %d)", start)
				iter.Release()
				return fmt.Errorf("internal error")
			}
			if err := stream.Send(&step); err != nil {
				iter.Release()
				return err
			}
			step.Reset()
		}
		iter.Release()
		if err := iter.Error(); err != nil {
			log.Printf("ERROR: range [tableVerifierLog(%d), tableVerifierLog(%d)) ended at %d (not included) with error %s", rq.Start, limit, start, err)
			return fmt.Errorf("internal error")
		}

		// the requested entries are not in the db yet, so let's try to collect
		// them from the sb. ch=nil -> the desired log entry was sent after we
		// did the db range scan but before we called Receive -> it's in db now.
	sbLoop:
		for ch := ks.sb.Receive(start, limit); ch != nil && start < limit; start++ {
			select {
			case <-stream.Context().Done():
				return stream.Context().Err()
			case sbStep, ok := <-ch: // declares new variable, a &const
				if !ok {
					// sb closed the connection. This must be because this
					// client was slow and sb does not wait for laggards.
					// This is okay though: if sb does not have the step
					// anymore, the db must: let's get it from there.
					break sbLoop
				}
				if err := stream.Send(sbStep.(*proto.VerifierStep)); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
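
The paging bound limit is computed with saturatingAdd, which is not part of this excerpt. A minimal sketch, under the assumption that the helper simply clamps rq.Start + rq.PageSize at the largest uint64 instead of letting the sum wrap around below rq.Start:

func saturatingAdd(a, b uint64) uint64 {
	// assumed behavior: on overflow, return the maximum representable value
	if sum := a + b; sum >= a {
		return sum
	}
	return ^uint64(0)
}

On the consuming side, the stream would typically be read with the grpc-go generated stubs. The constructor and stream type below follow the usual codegen naming conventions and are assumptions, not code taken from the project:

// hypothetical client of the VerifierStream endpoint; conn is an established *grpc.ClientConn
client := proto.NewE2EKSVerificationClient(conn)
stream, err := client.VerifierStream(ctx, &proto.VerifierStreamRequest{Start: 0, PageSize: 1000})
if err != nil {
	log.Fatal(err)
}
for {
	step, err := stream.Recv()
	if err == io.EOF {
		break // the server returned nil after sending PageSize entries
	}
	if err != nil {
		log.Fatal(err)
	}
	handleStep(step) // handleStep is a placeholder for application logic
}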
Code example #2
File: verifier.go  Project: yahoo/coname
// step is called by run and changes the in-memory state. No i/o allowed.
func (vr *Verifier) step(step *proto.VerifierStep, vs *proto.VerifierState, wb kv.Batch) (deferredIO func()) {
	// vr: &const
	// step, vs, wb: &mut
	switch step.Type.(type) {
	case *proto.VerifierStep_Update:
		index := step.GetUpdate().NewEntry.Index
		prevEntry, err := vr.getEntry(index, vs.NextEpoch)
		if err != nil {
			// a lookup failure would otherwise be silently treated as "no previous entry"
			log.Panicf("%d: getEntry(%x, %d): %s", vs.NextIndex, index, vs.NextEpoch, err)
		}
		if err := coname.VerifyUpdate(prevEntry, step.GetUpdate()); err != nil {
			// the keyserver should filter all bad updates
			log.Panicf("%d: bad update %v: %s", vs.NextIndex, *step, err)
		}
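		// hash the new entry and record it in a fresh snapshot of the Merkle tree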
		var entryHash [32]byte
		sha3.ShakeSum256(entryHash[:], step.GetUpdate().NewEntry.Encoding)
		latestTree := vr.merkletree.GetSnapshot(vs.LatestTreeSnapshot)
		newTree, err := latestTree.BeginModification()
		if err != nil {
			log.Panicf("%d: BeginModification(): %s", vs.NextIndex, err)
		}
		if err := newTree.Set(index, entryHash[:]); err != nil {
			log.Panicf("%d: Set(%x,%x): %s", vs.NextIndex, index, entryHash[:], err)
		}
		vs.LatestTreeSnapshot = newTree.Flush(wb).Nr
		wb.Put(tableEntries(index, vs.NextEpoch), step.GetUpdate().NewEntry.Encoding)

	case *proto.VerifierStep_Epoch:
		ok := coname.VerifyPolicy(vr.vs.KeyserverAuth, step.GetEpoch().Head.Encoding, step.GetEpoch().Signatures)
		// the bad steps here will not get persisted to disk right now. do we want them to?
		if !ok {
			log.Panicf("%d: keyserver signature verification failed: %#v", vs.NextIndex, *step)
		}
		r := step.GetEpoch().Head
		if r.Head.Realm != vr.realm {
			log.Panicf("%d: seh for realm %q, expected %q: %#v", vs.NextEpoch, r.Head.Realm, vr.realm, *step)
		}
		if r.Head.Epoch != vs.NextEpoch {
			log.Panicf("%d: got epoch %d instead: %#v", vs.NextEpoch, r.Head.Epoch, *step)
		}
		s := r.Head
		if !bytes.Equal(s.PreviousSummaryHash, vs.PreviousSummaryHash) {
			log.Panicf("%d: seh with previous summary hash %q, expected %q: %#v", vs.NextEpoch, s.PreviousSummaryHash, vs.PreviousSummaryHash, *step)
		}
		latestTree := vr.merkletree.GetSnapshot(vs.LatestTreeSnapshot)
		rootHash, err := latestTree.GetRootHash()
		if err != nil {
			log.Panicf("GetRootHash() failed: %s", err)
		}
		if !bytes.Equal(s.RootHash, rootHash) {
			log.Panicf("%d: seh with root hash %q, expected %q: %#v", vs.NextEpoch, s.RootHash, rootHash, *step)
		}
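		// the epoch head checked out: timestamp it, sign it, and persist our own ratification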
		seh := &proto.SignedEpochHead{
			Head: proto.EncodedTimestampedEpochHead{TimestampedEpochHead: proto.TimestampedEpochHead{
				Head:      s,
				Timestamp: proto.Time(time.Now()),
			}, Encoding: nil},
			Signatures: make(map[uint64][]byte, 1),
		}
		if vs.PreviousSummaryHash == nil {
			vs.PreviousSummaryHash = make([]byte, 64)
		}
		sha3.ShakeSum256(vs.PreviousSummaryHash[:], seh.Head.Head.Encoding)
		seh.Head.UpdateEncoding()
		seh.Signatures[vr.id] = ed25519.Sign(vr.signingKey, proto.MustMarshal(&seh.Head))[:]
		wb.Put(tableRatifications(vs.NextEpoch, vr.id), proto.MustMarshal(seh))
		vs.NextEpoch++
		return func() {
			_, err := vr.keyserver.PushRatification(vr.ctx, seh)
			if err != nil {
				log.Printf("PushRatification: %s", err)
			}
		}
	default:
		log.Panicf("%d: unknown step: %#v", vs.NextIndex, *step)
	}
	return
}
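
The deferredIO return value makes the contract explicit: step only touches in-memory state and the write batch, and the one piece of network I/O (PushRatification) is handed back to the caller to run after the batch has been persisted. A hypothetical, much-simplified driver illustrating that contract; runStep, the state key, and the vr.db / vr.vs fields used here are assumptions for illustration, not the project's actual run loop:

// runStep is a sketch: apply the transition in memory, commit the batch, then do the deferred I/O.
func (vr *Verifier) runStep(verifierStep *proto.VerifierStep) {
	wb := vr.db.NewBatch()                          // assumed kv.DB batch constructor
	deferredIO := vr.step(verifierStep, &vr.vs, wb) // pure in-memory transition, no I/O
	vr.vs.NextIndex++
	wb.Put([]byte("verifierstate"), proto.MustMarshal(&vr.vs)) // placeholder key for the serialized state
	if err := vr.db.Write(wb); err != nil {                    // commit atomically before any network I/O
		log.Panicf("failed to persist verifier step: %s", err)
	}
	if deferredIO != nil {
		deferredIO() // e.g. push the freshly signed epoch head to the keyserver
	}
}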