// ProposeVote votes on the current proposed configuration. Only an accepting
// vote is signed and sent; a refusal returns immediately.
func (i *Identity) ProposeVote(accept bool) onet.ClientError {
	log.Lvl3("Voting proposal")
	if i.Proposed == nil {
		return onet.NewClientErrorCode(ErrorConfigMissing, "No proposed config")
	}
	log.Lvlf3("Voting %t on %s", accept, i.Proposed.Device)
	if !accept {
		return nil
	}
	hash, err := i.Proposed.Hash()
	if err != nil {
		return onet.NewClientErrorCode(ErrorOnet, err.Error())
	}
	sig, err := crypto.SignSchnorr(network.Suite, i.Private, hash)
	if err != nil {
		return onet.NewClientErrorCode(ErrorOnet, err.Error())
	}
	pvr := &ProposeVoteReply{}
	cerr := i.Client.SendProtobuf(i.Cothority.RandomServerIdentity(), &ProposeVote{
		ID:        i.ID,
		Signer:    i.DeviceName,
		Signature: &sig,
	}, pvr)
	if cerr != nil {
		return cerr
	}
	if pvr.Data != nil {
		log.Lvl2("Threshold reached and signed")
		i.Config = i.Proposed
		i.Proposed = nil
	} else {
		log.Lvl2("Threshold not reached")
	}
	return nil
}
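// The vote itself is just a Schnorr signature over the hash of the proposed
// configuration. Below is a minimal standalone sketch of that pattern; it
// substitutes the standard library's ed25519 for crypto.SignSchnorr over
// network.Suite, so everything in it is a stand-in, not part of the service.
package main

import (
	"crypto/ed25519"
	"crypto/sha256"
	"fmt"
)

func main() {
	pub, priv, _ := ed25519.GenerateKey(nil) // nil falls back to crypto/rand
	// A device votes by signing the hash of the proposed configuration.
	hash := sha256.Sum256([]byte("proposed-config"))
	sig := ed25519.Sign(priv, hash[:])
	// The service counts the vote only if the signature verifies.
	fmt.Println("vote valid:", ed25519.Verify(pub, hash[:], sig))
}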
// CreateIdentity registers a new skipchain and adds it to our list of
// managed identities.
func (s *Service) CreateIdentity(ai *CreateIdentity) (network.Body, onet.ClientError) {
	log.Lvlf3("%s Creating new identity with config %+v", s, ai.Config)
	ids := &Storage{
		Latest: ai.Config,
	}
	log.Lvl3("Creating Root-skipchain")
	var cerr onet.ClientError
	ids.Root, cerr = s.skipchain.CreateRoster(ai.Roster, 2, 10, skipchain.VerifyNone, nil)
	if cerr != nil {
		return nil, cerr
	}
	log.Lvl3("Creating Data-skipchain")
	ids.Root, ids.Data, cerr = s.skipchain.CreateData(ids.Root, 2, 10, skipchain.VerifyNone, ai.Config)
	if cerr != nil {
		return nil, cerr
	}
	roster := ids.Root.Roster
	replies, err := s.propagateIdentity(roster, &PropagateIdentity{ids}, propagateTimeout)
	if err != nil {
		return nil, onet.NewClientErrorCode(ErrorOnet, err.Error())
	}
	if replies != len(roster.List) {
		log.Warn("Only got", replies, "replies out of", len(roster.List))
	}
	log.Lvlf2("New chain is\n%x", []byte(ids.Data.Hash))
	s.save()
	return &CreateIdentityReply{
		Root: ids.Root,
		Data: ids.Data,
	}, nil
}
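// A hypothetical, stripped-down sketch of the structure CreateIdentity
// builds: a root chain is created first, then a data chain attached to it,
// and both are kept together in the service's storage. All types below are
// stand-ins, not the real skipchain API.
package main

import "fmt"

type chain struct {
	name   string
	parent *chain
}

type identityStorage struct {
	root, data *chain
}

func createIdentity() *identityStorage {
	root := &chain{name: "root"}
	data := &chain{name: "data", parent: root}
	return &identityStorage{root: root, data: data}
}

func main() {
	ids := createIdentity()
	fmt.Println(ids.data.parent.name) // root
}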
// startChallenge starts the challenge phase. Typically called by the Root ;)
func (c *CoSi) startChallenge() error {
	challenge, err := c.cosi.CreateChallenge(c.Message)
	if err != nil {
		return err
	}
	out := &Challenge{
		Chall: challenge,
	}
	log.Lvlf3("%s Starting Chal=%+v (message = %x)", c.Name(), challenge, c.Message)
	return c.handleChallenge(out)
}
// PropagateSkipBlock will save a new SkipBlock
func (s *Service) PropagateSkipBlock(msg network.Body) {
	sb, ok := msg.(*SkipBlock)
	if !ok {
		log.Error("Couldn't convert to SkipBlock")
		return
	}
	if err := sb.VerifySignatures(); err != nil {
		log.Error(err)
		return
	}
	s.storeSkipBlock(sb)
	log.Lvlf3("Stored skip block %+v in %x", *sb, s.Context.ServerIdentity().ID[0:8])
}
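// Propagation handlers receive an untyped network.Body and must type-assert
// before acting on it. A standalone sketch of that dispatch pattern, with
// hypothetical stand-in types:
package main

import "fmt"

type skipBlock struct{ hash string }

type service struct{ blocks map[string]*skipBlock }

func (s *service) propagate(msg interface{}) {
	sb, ok := msg.(*skipBlock)
	if !ok {
		fmt.Println("couldn't convert to skipBlock")
		return
	}
	s.blocks[sb.hash] = sb
}

func main() {
	s := &service{blocks: map[string]*skipBlock{}}
	s.propagate(&skipBlock{hash: "abc"})
	s.propagate("not a block") // rejected by the type assertion
	fmt.Println(len(s.blocks)) // 1
}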
// handleAnnouncement passes the message to the round and sends back the
// output. If in == nil, we are the root and we start the round.
func (c *CoSi) handleAnnouncement(in *Announcement) error {
	log.Lvlf3("Message: %x", c.Message)
	// If we have a hook on announcement, call it.
	if c.announcementHook != nil {
		return c.announcementHook()
	}
	// If we are a leaf, go on to the commitment phase.
	if c.IsLeaf() {
		return c.handleCommitment(nil)
	}
	// Otherwise forward the announcement to our children.
	return c.SendToChildren(in)
}
// handleChallenge dispatches the challenge to the round and then passes it
// down the tree.
func (c *CoSi) handleChallenge(in *Challenge) error {
	log.Lvlf3("%s chal=%+v", c.Name(), in.Chall)
	c.cosi.Challenge(in.Chall)
	if c.challengeHook != nil {
		c.challengeHook(in.Chall)
	}
	// If we are a leaf, go on to the response phase.
	if c.IsLeaf() {
		return c.handleResponse(nil)
	}
	// Otherwise send the challenge to our children.
	return c.SendToChildren(in)
}
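// handleAnnouncement and handleChallenge share one traversal pattern: the
// message travels from the root down the tree, and each leaf turns around
// into the next phase. A standalone simulation of that pattern (all names
// hypothetical, not part of the protocol):
package main

import "fmt"

type node struct {
	name     string
	children []*node
}

func (n *node) isLeaf() bool { return len(n.children) == 0 }

// announce mimics the dispatch: inner nodes forward the message to their
// children; leaves switch to the next phase.
func announce(n *node) {
	if n.isLeaf() {
		fmt.Println(n.name, "-> next phase")
		return
	}
	for _, c := range n.children {
		announce(c)
	}
}

func main() {
	root := &node{name: "root", children: []*node{
		{name: "leaf1"}, {name: "leaf2"},
	}}
	announce(root)
}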
// startPropagation notifies the other services about new or updated
// skipblocks.
func (s *Service) startPropagation(blocks []*SkipBlock) onet.ClientError {
	log.Lvlf3("Starting to propagate for service %x", s.Context.ServerIdentity().ID[0:8])
	for _, block := range blocks {
		roster := block.Roster
		if roster == nil {
			// Presumably a data skipblock, so fetch the roster from its
			// parent block.
			sb, ok := s.getSkipBlockByID(block.ParentBlockID)
			if !ok {
				return onet.NewClientErrorCode(ErrorBlockNoParent,
					"Didn't find Roster nor parent")
			}
			roster = sb.Roster
		}
		replies, e := s.Propagate(roster, block, propagateTimeout)
		if e != nil {
			return onet.NewClientErrorCode(ErrorOnet, e.Error())
		}
		if replies != len(roster.List) {
			log.Warn("Only got", replies, "replies out of", len(roster.List))
		}
	}
	return nil
}
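// A data skipblock carries no roster of its own, so startPropagation falls
// back to the roster of its parent block. A standalone sketch of that lookup
// with hypothetical stand-in types:
package main

import (
	"errors"
	"fmt"
)

type block struct {
	roster   string // empty for data blocks
	parentID string
}

func rosterOf(b *block, byID map[string]*block) (string, error) {
	if b.roster != "" {
		return b.roster, nil
	}
	parent, ok := byID[b.parentID]
	if !ok {
		return "", errors.New("didn't find roster nor parent")
	}
	return parent.roster, nil
}

func main() {
	byID := map[string]*block{"root": {roster: "roster-A"}}
	r, err := rosterOf(&block{parentID: "root"}, byID)
	fmt.Println(r, err) // roster-A <nil>
}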
// handleChallengeCommit verifies the prepare-phase signature and checks that
// fewer than the threshold of participants refused to sign.
func (bft *ProtocolBFTCoSi) handleChallengeCommit(msg challengeCommitChan) error {
	if bft.isClosing() {
		return nil
	}
	ch := msg.ChallengeCommit
	if !bft.IsRoot() {
		bft.commit.Challenge(ch.Challenge)
	}

	// Verify that the signature is correct.
	data := sha512.Sum512(ch.Signature.Msg)
	bftPrepareSig := &BFTSignature{
		Sig:        ch.Signature.Sig,
		Msg:        data[:],
		Exceptions: ch.Signature.Exceptions,
	}
	if err := bftPrepareSig.Verify(bft.Suite(), bft.Roster().Publics()); err != nil {
		log.Lvl3(bft.Name(), "Verification of the signature failed:", err)
		bft.signRefusal = true
	}

	// Check that fewer than the threshold of nodes refused to sign.
	if len(ch.Signature.Exceptions) >= int(bft.threshold) {
		log.Lvlf3("%s: at least threshold (%d/%d) refused to sign - aborting.",
			bft.Roster(), len(ch.Signature.Exceptions), len(bft.Roster().List))
		bft.signRefusal = true
	}

	// Store the exceptions for later use.
	bft.tempExceptions = ch.Signature.Exceptions

	if bft.IsLeaf() {
		return bft.handleResponseCommit(nil)
	}
	return bft.SendToChildrenInParallel(&ch)
}
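// The abort rule above is purely arithmetic: the round fails as soon as the
// number of exceptions (nodes refusing to sign) reaches the threshold. A
// one-function standalone sketch of that check, with hypothetical names:
package main

import "fmt"

// signRefused mirrors len(ch.Signature.Exceptions) >= int(bft.threshold).
func signRefused(numExceptions, threshold int) bool {
	return numExceptions >= threshold
}

func main() {
	fmt.Println(signRefused(2, 3)) // false: the commit round continues
	fmt.Println(signRefused(3, 3)) // true: abort, as in handleChallengeCommit
}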
// setIdentityStorage saves an IdentityStorage
func (s *Service) setIdentityStorage(id ID, is *Storage) {
	s.identitiesMutex.Lock()
	defer s.identitiesMutex.Unlock()
	log.Lvlf3("%s %x %v", s.Context.ServerIdentity(), id[0:8], is.Latest.Device)
	s.Identities[string(id)] = is
}
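// setIdentityStorage is a classic mutex-guarded map write. A standalone
// sketch of the same pattern, safe under concurrent callers (all names
// hypothetical):
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu    sync.Mutex
	items map[string]int
}

func (r *registry) set(id string, v int) {
	r.mu.Lock()
	defer r.mu.Unlock() // released even if the body panics
	r.items[id] = v
}

func main() {
	r := &registry{items: map[string]int{}}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r.set(fmt.Sprint(i), i)
		}(i)
	}
	wg.Wait()
	fmt.Println(len(r.items)) // 4
}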
// handleR1 processes an R1 reply: it verifies the message, records the
// encrypted shares, and, once every group has enough reconstructable
// secrets, chooses the secrets that contribute to the collective randomness
// and sends out the I2 messages.
func (rh *RandHound) handleR1(r1 WR1) error {
	msg := &r1.R1

	idx := r1.RosterIndex
	grp := rh.ServerIdxToGroupNum[idx]
	pos := rh.ServerIdxToGroupIdx[idx]
	rh.mutex.Lock()
	defer rh.mutex.Unlock()

	// Verify R1 message signature
	if err := verifySchnorr(rh.Suite(), rh.key[grp][pos], msg); err != nil {
		return err
	}

	// Verify that the server replied to the correct I1 message
	if err := verifyMessage(rh.Suite(), rh.i1s[grp], msg.HI1); err != nil {
		return err
	}

	// Record R1 message
	rh.r1s[idx] = msg

	// Prepare data for recovery of polynomial commits and verification of shares
	n := len(msg.EncShare)
	poly := make([][]byte, n)
	index := make([]int, n)
	encShare := make([]abstract.Point, n)
	encProof := make([]ProofCore, n)
	for i := 0; i < n; i++ {
		poly[i] = msg.CommitPoly
		index[i] = msg.EncShare[i].Pos
		encShare[i] = msg.EncShare[i].Val
		encProof[i] = msg.EncShare[i].Proof
	}

	// Init PVSS and recover polynomial commits
	H, _ := rh.Suite().Point().Pick(nil, rh.Suite().Cipher(rh.sid))
	pvss := NewPVSS(rh.Suite(), H, rh.threshold[grp])
	polyCommit, err := pvss.Commits(poly, index)
	if err != nil {
		return err
	}

	// Record polynomial commits
	rh.polyCommit[idx] = polyCommit

	// Return if we already committed to secrets previously
	if len(rh.chosenSecret) > 0 {
		return nil
	}

	// Verify encrypted shares
	good, _, err := pvss.Verify(H, rh.key[grp], polyCommit, encShare, encProof)
	if err != nil {
		return err
	}

	// Record valid encrypted shares per secret/server
	for _, g := range good {
		if _, ok := rh.secret[idx]; !ok {
			rh.secret[idx] = make([]int, 0)
		}
		rh.secret[idx] = append(rh.secret[idx], msg.EncShare[g].Target)
	}

	// Check if there is at least a threshold number of reconstructable
	// secrets in each group. If so, proceed to the next phase. Note the
	// double usage of the threshold: it determines both whether enough valid
	// shares for a single secret are available and whether enough secrets
	// for a given group are available.
	goodSecret := make(map[int][]int)
	for i, group := range rh.server {
		var secret []int
		for _, server := range group {
			j := server.RosterIndex
			if share, ok := rh.secret[j]; ok && rh.threshold[i] <= len(share) {
				secret = append(secret, j)
			}
		}
		if rh.threshold[i] <= len(secret) {
			goodSecret[i] = secret
		}
	}

	// Proceed if there are enough good secrets
	if len(goodSecret) == rh.groups {
		// Reset secret for the next phase (see handleR2)
		rh.secret = make(map[int][]int)

		// Choose secrets that contribute to the collective randomness
		for i := range rh.server {
			// Randomly remove secrets until only a threshold of them remains
			rand := random.Bytes(rh.Suite().Hash().Size(), random.Stream)
			prng := rh.Suite().Cipher(rand)
			secret := goodSecret[i]
			for j := 0; j < len(secret)-rh.threshold[i]; j++ {
				k := int(random.Uint32(prng) % uint32(len(secret)))
				secret = append(secret[:k], secret[k+1:]...)
			}
			rh.chosenSecret[i] = secret
		}

		log.Lvlf3("Grouping: %v", rh.group)
		log.Lvlf3("ChosenSecret: %v", rh.chosenSecret)

		// Transform the chosen-secret indices from int to uint32 to avoid
		// protobuf errors
		var chosenSecret = make([][]uint32, len(rh.chosenSecret))
		for i := range rh.chosenSecret {
			var l []uint32
			for j := range rh.chosenSecret[i] {
				l = append(l, uint32(rh.chosenSecret[i][j]))
			}
			chosenSecret[i] = l
		}

		// Prepare a message for each server of a group and send it
		for i, group := range rh.server {
			for j, server := range group {
				// Among the good secrets chosen previously, collect all
				// valid shares, proofs, and polynomial commits intended for
				// the target server
				var encShare []Share
				var polyCommit []abstract.Point
				for _, k := range rh.chosenSecret[i] {
					r1 := rh.r1s[k]
					pc := rh.polyCommit[k]
					encShare = append(encShare, r1.EncShare[j])
					polyCommit = append(polyCommit, pc[j])
				}
				i2 := &I2{
					Sig:          crypto.SchnorrSig{},
					SID:          rh.sid,
					ChosenSecret: chosenSecret,
					EncShare:     encShare,
					PolyCommit:   polyCommit,
				}
				if err := signSchnorr(rh.Suite(), rh.Private(), i2); err != nil {
					return err
				}
				rh.i2s[server.RosterIndex] = i2
				if err := rh.SendTo(server, i2); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
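// The secret-selection step above keeps the PRNG deterministic per run but
// is, at its core, "randomly drop elements until exactly threshold remain".
// A standalone sketch of that loop, using math/rand in place of the suite's
// cipher stream (names are hypothetical):
package main

import (
	"fmt"
	"math/rand"
)

// prune mirrors secret = append(secret[:k], secret[k+1:]...) from handleR1.
func prune(secret []int, threshold int) []int {
	for len(secret) > threshold {
		k := rand.Intn(len(secret))
		secret = append(secret[:k], secret[k+1:]...)
	}
	return secret
}

func main() {
	fmt.Println(prune([]int{10, 11, 12, 13, 14}, 3)) // e.g. [10 12 14]
}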