func (n *TreeNodeInstance) dispatchMsgReader() {
	for {
		n.msgDispatchQueueMutex.Lock()
		if n.closing {
			log.Lvl3("Closing reader")
			n.msgDispatchQueueMutex.Unlock()
			return
		}
		if len(n.msgDispatchQueue) > 0 {
			log.Lvl4(n.Info(), "Read message and dispatching it", len(n.msgDispatchQueue))
			msg := n.msgDispatchQueue[0]
			n.msgDispatchQueue = n.msgDispatchQueue[1:]
			n.msgDispatchQueueMutex.Unlock()
			err := n.dispatchMsgToProtocol(msg)
			if err != nil {
				log.Error("Error while dispatching message:", err)
			}
		} else {
			n.msgDispatchQueueMutex.Unlock()
			log.Lvl4(n.Info(), "Waiting for message")
			<-n.msgDispatchQueueWait
		}
	}
}
// listen starts listening for messages coming from any host that tries to
// contact this host. If 'wait' is true, it will try to connect to itself before
// returning.
func (h *Host) listen(wait bool) {
	log.Lvl3(h.ServerIdentity.First(), "starts to listen")
	fn := func(c network.SecureConn) {
		log.Lvl3(h.workingAddress, "Accepted Connection from", c.Remote())
		// register the connection once we know it's ok
		h.registerConnection(c)
		h.handleConn(c)
	}
	go func() {
		log.Lvl4("Host listens on:", h.workingAddress)
		err := h.host.Listen(fn)
		if err != nil {
			log.Fatal("Couldn't listen on", h.workingAddress, ":", err)
		}
	}()
	if wait {
		for {
			log.Lvl4(h.ServerIdentity.First(), "checking if listener is up")
			_, err := h.Connect(h.ServerIdentity)
			if err == nil {
				log.Lvl4(h.ServerIdentity.First(), "managed to connect to itself")
				break
			}
			time.Sleep(network.WaitRetry)
		}
	}
}
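// The wait-branch above is a plain retry loop: keep dialing until the
// listener answers. waitForListener is a standalone sketch of the same idea;
// the function name, probing via net.Dial and the delay are illustrative only
// and not part of the sda package.
func waitForListener(addr string, attempts int) error {
	for i := 0; i < attempts; i++ {
		conn, err := net.Dial("tcp", addr)
		if err == nil {
			// the dial only probes that the listener is up
			conn.Close()
			return nil
		}
		time.Sleep(50 * time.Millisecond)
	}
	return fmt.Errorf("listener on %s never came up", addr)
}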
// handleCommit receives commit messages and signals the end once it has
// received enough of them.
func (p *Protocol) handleCommit(com *Commit) {
	if p.state != stateCommit {
		// log.Lvl3(p.Name(), "STORE handle commit packet")
		p.tempCommitMsg = append(p.tempCommitMsg, com)
		return
	}
	// finish after threshold of Commit msgs
	p.commitMsgCount++
	log.Lvl4(p.Name(), "----------------\nWe got", p.commitMsgCount,
		"COMMIT msgs and threshold is", p.threshold)
	if p.IsRoot() {
		log.Lvl4("Leader got", p.commitMsgCount)
	}
	if p.commitMsgCount >= p.threshold {
		p.state = stateFinished
		// reset counter
		p.commitMsgCount = 0
		log.Lvl3(p.Name(), "Threshold reached: we are done... CONSENSUS")
		if p.IsRoot() && p.onDoneCB != nil {
			log.Lvl3(p.Name(), "We are root and threshold reached: return to the simulation.")
			p.onDoneCB()
			p.finish()
		}
		return
	}
}
// ProcessProtocolMsg takes a message and puts it into a queue for later
// processing. This allows a protocol to have a backlog of messages.
func (n *TreeNodeInstance) ProcessProtocolMsg(msg *ProtocolMsg) {
	log.Lvl4(n.Info(), "Received message")
	n.msgDispatchQueueMutex.Lock()
	n.msgDispatchQueue = append(n.msgDispatchQueue, msg)
	log.Lvl4(n.Info(), "DispatchQueue-length is", len(n.msgDispatchQueue))
	if len(n.msgDispatchQueue) == 1 && len(n.msgDispatchQueueWait) == 0 {
		n.msgDispatchQueueWait <- true
	}
	n.msgDispatchQueueMutex.Unlock()
}
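// ProcessProtocolMsg and dispatchMsgReader form a producer/consumer pair: a
// mutex-protected slice as the queue, plus a wake-up channel with capacity 1
// so a producer can rouse a sleeping reader without ever blocking itself.
// Below is a minimal standalone sketch of that pattern; every name is
// illustrative and none of it comes from the sda package.
type sketchQueue struct {
	mu    sync.Mutex
	queue []string
	wake  chan bool // must be buffered with capacity 1
}

// push mirrors ProcessProtocolMsg: append under the lock, and signal only
// when the reader might be asleep (queue was empty, no pending wake-up).
func (s *sketchQueue) push(msg string) {
	s.mu.Lock()
	s.queue = append(s.queue, msg)
	if len(s.queue) == 1 && len(s.wake) == 0 {
		s.wake <- true
	}
	s.mu.Unlock()
}

// pop mirrors one turn of dispatchMsgReader's loop: take the head if there
// is one, otherwise sleep until a producer signals.
func (s *sketchQueue) pop() string {
	for {
		s.mu.Lock()
		if len(s.queue) > 0 {
			msg := s.queue[0]
			s.queue = s.queue[1:]
			s.mu.Unlock()
			return msg
		}
		s.mu.Unlock()
		<-s.wake
	}
}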
// closeConnections only shuts down the network connections - used mainly
// for testing.
func (h *Host) closeConnections() error {
	h.networkLock.Lock()
	defer h.networkLock.Unlock()
	for _, c := range h.connections {
		log.Lvl4(h.ServerIdentity.First(), "Closing connection", c, c.Remote(), c.Local())
		err := c.Close()
		if err != nil {
			log.Error(h.ServerIdentity.First(), "Couldn't close connection", c)
			return err
		}
	}
	log.Lvl4(h.ServerIdentity.First(), "Closing tcpHost")
	h.connections = make(map[network.ServerIdentityID]network.SecureConn)
	return h.host.Close()
}
// Dispatch runs the propagation, aborting with a fatal error if a round
// takes longer than the configured timeout.
func (p *Propagate) Dispatch() error {
	process := true
	log.Lvl4(p.ServerIdentity())
	for process {
		p.Lock()
		timeout := time.Millisecond * time.Duration(p.sd.Msec)
		p.Unlock()
		select {
		case msg := <-p.ChannelSD:
			log.Lvl3(p.ServerIdentity(), "Got data from", msg.ServerIdentity)
			if p.onData != nil {
				_, netMsg, err := network.UnmarshalRegistered(msg.Data)
				if err == nil {
					p.onData(netMsg)
				}
			}
			if !p.IsRoot() {
				log.Lvl3(p.ServerIdentity(), "Sending to parent")
				p.SendToParent(&PropagateReply{})
			}
			if p.IsLeaf() {
				process = false
			} else {
				log.Lvl3(p.ServerIdentity(), "Sending to children")
				p.SendToChildren(&msg.PropagateSendData)
			}
		case <-p.ChannelReply:
			p.received++
			log.Lvl4(p.ServerIdentity(), "received:", p.received, p.subtree)
			if !p.IsRoot() {
				p.SendToParent(&PropagateReply{})
			}
			if p.received == p.subtree {
				process = false
			}
		case <-time.After(timeout):
			log.Fatal("Timeout")
			process = false
		}
	}
	if p.IsRoot() {
		if p.onDoneCb != nil {
			p.onDoneCb(p.received + 1)
		}
	}
	p.Done()
	return nil
}
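// Each turn of the loop above is bounded by the time.After case of the
// select: whichever channel fires first wins, and a silent round aborts the
// protocol. recvWithTimeout is a standalone sketch of that pattern; the
// function name and message type are illustrative, not from this codebase.
func recvWithTimeout(msgs <-chan string, d time.Duration) (string, error) {
	select {
	case m := <-msgs:
		return m, nil
	case <-time.After(d):
		return "", fmt.Errorf("no message within %v", d)
	}
}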
// aggregate stores the message for a protocol instance so that the protocol
// instance gets all of its children's messages at once. n is the node the
// host represents in this Tree, and sdaMsg is the message being analyzed.
func (n *TreeNodeInstance) aggregate(sdaMsg *ProtocolMsg) (network.MessageTypeID, []*ProtocolMsg, bool) {
	mt := sdaMsg.MsgType
	fromParent := !n.IsRoot() && sdaMsg.From.TreeNodeID.Equal(n.Parent().ID)
	if fromParent || !n.HasFlag(mt, AggregateMessages) {
		return mt, []*ProtocolMsg{sdaMsg}, true
	}
	// store the msg according to its type
	if _, ok := n.msgQueue[mt]; !ok {
		n.msgQueue[mt] = make([]*ProtocolMsg, 0)
	}
	msgs := append(n.msgQueue[mt], sdaMsg)
	n.msgQueue[mt] = msgs
	log.Lvl4(n.ServerIdentity().Addresses, "received", len(msgs), "of", len(n.Children()), "messages")
	// do we have all the children's messages yet?
	if len(msgs) == len(n.Children()) {
		// yes - deliver them all at once and erase the queue
		delete(n.msgQueue, mt)
		return mt, msgs, true
	}
	// no, we still have to wait
	return mt, nil, false
}
func (bft *ProtocolBFTCoSi) handleResponsePrepare(r *Response) error {
	// check if we have enough responses
	bft.tprMut.Lock()
	defer bft.tprMut.Unlock()
	bft.tempPrepareResponse = append(bft.tempPrepareResponse, r.Response)
	if len(bft.tempPrepareResponse) < len(bft.Children()) {
		return nil
	}
	// wait for verification
	bzrReturn, ok := bft.waitResponseVerification()
	if ok {
		// append response
		resp, err := bft.prepare.Response(bft.tempPrepareResponse)
		if err != nil {
			return err
		}
		bzrReturn.Response = resp
	}
	log.Lvl4("BFTCoSi Handle Response PREPARE")
	if bft.IsRoot() {
		// Notify the 'commit'-round as we're root
		if err := bft.startChallengeCommit(); err != nil {
			log.Error(err)
		}
		return nil
	}
	return bft.SendTo(bft.Parent(), bzrReturn)
}
// Start will execute one cothority-binary for each server
// configured
func (d *Localhost) Start(args ...string) error {
	if err := os.Chdir(d.runDir); err != nil {
		return err
	}
	log.Lvl4("Localhost: chdir into", d.runDir)
	ex := d.runDir + "/" + d.Simulation
	d.running = true
	log.Lvl1("Starting", d.servers, "applications of", ex)
	for index := 0; index < d.servers; index++ {
		d.wgRun.Add(1)
		log.Lvl3("Starting", index)
		host := "localhost" + strconv.Itoa(index)
		cmdArgs := []string{"-address", host,
			"-monitor", "localhost:" + strconv.Itoa(d.monitorPort),
			"-simul", d.Simulation,
			"-debug", strconv.Itoa(log.DebugVisible()),
		}
		cmdArgs = append(args, cmdArgs...)
		log.Lvl3("CmdArgs are", cmdArgs)
		cmd := exec.Command(ex, cmdArgs...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		go func(i int, h string) {
			log.Lvl3("Localhost: will start host", h)
			err := cmd.Run()
			if err != nil {
				log.Error("Error running localhost", h, ":", err)
				d.errChan <- err
			}
			d.wgRun.Done()
			log.Lvl3("host (index", i, ")", h, "done")
		}(index, host)
	}
	return nil
}
// Equal verifies if the given tree is equal to this one.
func (t *Tree) Equal(t2 *Tree) bool {
	if t.ID != t2.ID || t.Roster.ID != t2.Roster.ID {
		log.Lvl4("Ids of trees don't match")
		return false
	}
	return t.Root.Equal(t2.Root)
}
// handleChallengeCommit verifies the signature and checks that no more than
// 1/3 of the participants refused to sign.
func (bft *ProtocolBFTCoSi) handleChallengeCommit(ch *ChallengeCommit) error {
	ch.Challenge = bft.commit.Challenge(ch.Challenge)
	hash := bft.Suite().Hash()
	hash.Write(bft.Msg)
	h := hash.Sum(nil)
	// verify if the signature is correct
	if err := cosi.VerifyCosiSignatureWithException(bft.suite,
		bft.AggregatedPublic, h, ch.Signature, ch.Exceptions); err != nil {
		log.Error(bft.Name(), "Verification of the signature failed:", err)
		bft.signRefusal = true
	}
	// check that we have no more than 1/3 failed nodes
	if len(ch.Exceptions) > int(bft.threshold) {
		log.Errorf("More than 1/3 (%d/%d) refused to sign! Aborting.",
			len(ch.Exceptions), len(bft.Roster().List))
		bft.signRefusal = true
	}
	// store the exceptions for later usage
	bft.tempExceptions = ch.Exceptions
	log.Lvl4("BFTCoSi handle Challenge COMMIT")
	if bft.IsLeaf() {
		return bft.startResponseCommit()
	}
	if err := bft.SendToChildrenInParallel(ch); err != nil {
		log.Error(err)
	}
	return nil
}
// CreateRoster creates a Roster with the host-names in 'addresses'.
// It creates 's.Hosts' entries, starting from 'port' for each round through
// 'addresses'.
func (s *SimulationBFTree) CreateRoster(sc *SimulationConfig, addresses []string, port int) {
	start := time.Now()
	nbrAddr := len(addresses)
	if sc.PrivateKeys == nil {
		sc.PrivateKeys = make(map[string]abstract.Scalar)
	}
	hosts := s.Hosts
	if s.SingleHost {
		// If we want to work with a single host, we only make one
		// host per server
		log.Fatal("Not supported yet")
		hosts = nbrAddr
		if hosts > s.Hosts {
			hosts = s.Hosts
		}
	}
	localhosts := false
	listeners := make([]net.Listener, hosts)
	if strings.Contains(addresses[0], "localhost") {
		localhosts = true
	}
	entities := make([]*network.ServerIdentity, hosts)
	log.Lvl3("Doing", hosts, "hosts")
	key := config.NewKeyPair(network.Suite)
	for c := 0; c < hosts; c++ {
		key.Secret.Add(key.Secret, key.Suite.Scalar().One())
		key.Public.Add(key.Public, key.Suite.Point().Base())
		address := addresses[c%nbrAddr] + ":"
		if localhosts {
			// If we have localhosts, we have to search for a free port
			var err error
			listeners[c], err = net.Listen("tcp", ":0")
			if err != nil {
				log.Fatal("Couldn't search for empty port:", err)
			}
			_, p, _ := net.SplitHostPort(listeners[c].Addr().String())
			address += p
			log.Lvl4("Found free port", address)
		} else {
			address += strconv.Itoa(port + c/nbrAddr)
		}
		entities[c] = network.NewServerIdentity(key.Public, address)
		sc.PrivateKeys[entities[c].Addresses[0]] = key.Secret
	}
	// And close all our listeners
	if localhosts {
		for _, l := range listeners {
			err := l.Close()
			if err != nil {
				log.Fatal("Couldn't close port:", l, err)
			}
		}
	}
	sc.Roster = NewRoster(entities)
	log.Lvl3("Creating entity list took:", time.Since(start))
}
// Close calls all nodes, deletes them from the list and closes them.
func (o *Overlay) Close() {
	o.instancesLock.Lock()
	defer o.instancesLock.Unlock()
	for _, tni := range o.instances {
		log.Lvl4(o.host.workingAddress, "Closing TNI", tni.TokenID())
		o.nodeDelete(tni.Token())
	}
}
// Equal tests if the node is equal to the given node.
func (t *TreeNode) Equal(t2 *TreeNode) bool {
	if t.ID != t2.ID || t.ServerIdentity.ID != t2.ServerIdentity.ID {
		log.Lvl4("TreeNode: ids are not equal")
		return false
	}
	if len(t.Children) != len(t2.Children) {
		log.Lvl4("TreeNode: number of children are not equal")
		return false
	}
	for i, c := range t.Children {
		if !c.Equal(t2.Children[i]) {
			log.Lvl4("TreeNode: children are not equal")
			return false
		}
	}
	return true
}
// nodeDone is either called by the end of EndProtocol or by the end of the
// response phase of the commit round.
func (bft *ProtocolBFTCoSi) nodeDone() bool {
	log.Lvl4(bft.Name(), "nodeDone()")
	bft.doneProcessing <- true
	if bft.onDoneCallback != nil {
		// only true for the root
		bft.onDoneCallback()
	}
	return true
}
// startAnnouncementCommit creates the announcement for the commit phase and
// sends it down the tree.
func (bft *ProtocolBFTCoSi) startAnnouncementCommit() error {
	ann := bft.commit.CreateAnnouncement()
	a := &Announce{
		TYPE:         RoundCommit,
		Announcement: ann,
	}
	log.Lvl4(bft.Name(), "BFTCoSi Start Announcement (COMMIT)")
	return bft.sendAnnouncement(a)
}
// startAnnouncementPrepare creates the announcement for the prepare round and
// sends it down the tree.
func (bft *ProtocolBFTCoSi) startAnnouncementPrepare() error {
	ann := bft.prepare.CreateAnnouncement()
	a := &Announce{
		TYPE:         RoundPrepare,
		Announcement: ann,
	}
	log.Lvl4("BFTCoSi Start Announcement (PREPARE)")
	return bft.sendAnnouncement(a)
}
// SendToTreeNode sends a message to a TreeNode.
func (o *Overlay) SendToTreeNode(from *Token, to *TreeNode, msg network.Body) error {
	sda := &ProtocolMsg{
		Msg:  msg,
		From: from,
		To:   from.ChangeTreeNodeID(to.ID),
	}
	log.Lvl4("Sending to entity", to.ServerIdentity.Addresses)
	return o.host.sendSDAData(to.ServerIdentity, sda)
}
// SendISM takes the message and sends it to the corresponding service.
func (p *ServiceProcessor) SendISM(e *network.ServerIdentity, msg network.Body) error {
	sName := ServiceFactory.Name(p.Context.ServiceID())
	sm, err := CreateServiceMessage(sName, msg)
	if err != nil {
		return err
	}
	log.Lvl4("Raw-sending to", e)
	return p.SendRaw(e, sm)
}
// handleCommit handles the arrival of a commitment.
func (bft *ProtocolBFTCoSi) handleCommit(comm Commitment) error {
	typedCommitment := &Commitment{}
	var commitment *cosi.Commitment
	// store it and check if we have enough commitments
	switch comm.TYPE {
	case RoundPrepare:
		bft.tpcMut.Lock()
		bft.tempPrepareCommit = append(bft.tempPrepareCommit, comm.Commitment)
		if len(bft.tempPrepareCommit) < len(bft.Children()) {
			bft.tpcMut.Unlock()
			return nil
		}
		commitment = bft.prepare.Commit(bft.tempPrepareCommit)
		bft.tpcMut.Unlock()
		if bft.IsRoot() {
			return bft.startChallengePrepare()
		}
		log.Lvl4(bft.Name(), "BFTCoSi handle Commit PREPARE")
	case RoundCommit:
		bft.tccMut.Lock()
		bft.tempCommitCommit = append(bft.tempCommitCommit, comm.Commitment)
		if len(bft.tempCommitCommit) < len(bft.Children()) {
			bft.tccMut.Unlock()
			return nil
		}
		commitment = bft.commit.Commit(bft.tempCommitCommit)
		bft.tccMut.Unlock()
		if bft.IsRoot() {
			// do nothing: stop the processing of this round and wait for the
			// end of the "prepare" round, which calls startChallengeCommit
			return nil
		}
		log.Lvl4(bft.Name(), "BFTCoSi handle Commit COMMIT")
	}
	// set the same RoundType as the received commitment
	typedCommitment.TYPE = comm.TYPE
	typedCommitment.Commitment = commitment
	return bft.SendToParent(typedCommitment)
}
// ProtocolRegisterName is a convenience function to automatically generate
// a UUID out of the name.
func ProtocolRegisterName(name string, protocol NewProtocol) ProtocolID {
	u := ProtocolNameToID(name)
	if protocols == nil {
		protocols = make(map[ProtocolID]NewProtocol)
		protocolNames = make(map[ProtocolID]string)
	}
	protocolNames[u] = name
	protocols[u] = protocol
	log.Lvl4("Registered", name, "to", u)
	return u
}
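// A typical use of ProtocolRegisterName is to register a protocol constructor
// at package-initialization time. This is only a sketch: "Example" and
// NewExampleProtocol are hypothetical names, and it assumes NewProtocol has
// the usual signature func(*TreeNodeInstance) (ProtocolInstance, error).
func init() {
	ProtocolRegisterName("Example", func(n *TreeNodeInstance) (ProtocolInstance, error) {
		return NewExampleProtocol(n)
	})
}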
// waitResponseVerification waits until the end of the verification and returns
// the Response along with a flag:
//   true  => no exception, the verification is correct
//   false => exception, the verification is NOT correct
func (bft *ProtocolBFTCoSi) waitResponseVerification() (*Response, bool) {
	log.Lvl4("Waiting for response verification:", bft.Name())
	r := &Response{
		TYPE: RoundPrepare,
	}
	// wait for the verification
	verified := <-bft.verifyChan
	if !verified {
		// append our exception
		r.Exceptions = append(r.Exceptions, cosi.Exception{
			Public:     bft.Public(),
			Commitment: bft.prepare.GetCommitment(),
		})
		log.Lvl4("Response verification: failed", bft.Name())
		return r, false
	}
	log.Lvl4("Response verification: OK", bft.Name())
	return r, true
}
// closeConnection closes a connection and removes it from the connections-map.
// It takes and releases h.networkLock itself, so the caller must not hold it.
func (h *Host) closeConnection(c network.SecureConn) error {
	h.networkLock.Lock()
	defer h.networkLock.Unlock()
	log.Lvl4(h.ServerIdentity.First(), "Closing connection", c, c.Remote(), c.Local())
	err := c.Close()
	if err != nil {
		return err
	}
	delete(h.connections, c.ServerIdentity().ID)
	return nil
}
// dispatchHandler calls the handler registered for the message type: once
// with a slice of all messages if AggregateMessages is set, otherwise once
// per message.
func (n *TreeNodeInstance) dispatchHandler(msgSlice []*ProtocolMsg) error {
	mt := msgSlice[0].MsgType
	to := reflect.TypeOf(n.handlers[mt]).In(0)
	f := reflect.ValueOf(n.handlers[mt])
	if n.HasFlag(mt, AggregateMessages) {
		msgs := reflect.MakeSlice(to, len(msgSlice), len(msgSlice))
		for i, msg := range msgSlice {
			msgs.Index(i).Set(n.reflectCreate(to.Elem(), msg))
		}
		log.Lvl4("Dispatching aggregation to", n.ServerIdentity().Addresses)
		f.Call([]reflect.Value{msgs})
	} else {
		for _, msg := range msgSlice {
			log.Lvl4("Dispatching to", n.ServerIdentity().Addresses)
			m := n.reflectCreate(to, msg)
			f.Call([]reflect.Value{m})
		}
	}
	return nil
}
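// dispatchHandler leans on three reflect calls: TypeOf(...).In(0) to learn
// the handler's parameter type, MakeSlice to build the aggregated argument,
// and Call to invoke the handler. sketchDispatch is a standalone sketch of
// that pattern with a concrete handler; all names are illustrative and not
// part of the sda package.
type sketchMsg struct{ Name string }

func sketchDispatch() {
	// A handler stored as an opaque interface{}, as in n.handlers[mt].
	var handler interface{} = func(msgs []sketchMsg) {
		for _, m := range msgs {
			fmt.Println("hello,", m.Name)
		}
	}
	to := reflect.TypeOf(handler).In(0) // []sketchMsg
	f := reflect.ValueOf(handler)
	// Build the slice argument dynamically, as done for aggregated messages.
	names := []string{"alice", "bob"}
	msgs := reflect.MakeSlice(to, len(names), len(names))
	for i, name := range names {
		msgs.Index(i).Set(reflect.ValueOf(sketchMsg{Name: name}))
	}
	f.Call([]reflect.Value{msgs}) // prints "hello, alice" and "hello, bob"
}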
// registerConnection registers a ServerIdentity for a new connection, mapped
// with the real physical address of the connection and the connection itself.
// It locks (and unlocks when done) h.networkLock.
func (h *Host) registerConnection(c network.SecureConn) {
	log.Lvl4(h.ServerIdentity.First(), "registers", c.ServerIdentity().First())
	h.networkLock.Lock()
	defer h.networkLock.Unlock()
	id := c.ServerIdentity()
	_, okc := h.connections[id.ID]
	if okc {
		// TODO - we should catch this in some way
		log.Lvl3("Connection already registered", okc)
	}
	h.connections[id.ID] = c
}
// handleAnnouncement passes the announcement to the right CoSi struct.
func (bft *ProtocolBFTCoSi) handleAnnouncement(ann Announce) error {
	announcement := &Announce{
		Announcement: bft.prepare.Announce(ann.Announcement),
	}
	switch ann.TYPE {
	case RoundPrepare:
		log.Lvl4(bft.Name(), "BFTCoSi Handle Announcement PREPARE")
		if bft.IsLeaf() {
			return bft.startCommitmentPrepare()
		}
		announcement.TYPE = RoundPrepare
	case RoundCommit:
		log.Lvl4(bft.Name(), "BFTCoSi Handle Announcement COMMIT")
		if bft.IsLeaf() {
			return bft.startCommitmentCommit()
		}
		announcement.TYPE = RoundCommit
	}
	return bft.SendToChildrenInParallel(announcement)
}
// SSHRunStdout runs a command on the remote host and redirects stdout and
// stderr of the ssh-command to os.Stdout and os.Stderr.
func SSHRunStdout(username, host, command string) error {
	addr := host
	if username != "" {
		addr = username + "@" + addr
	}
	log.Lvl4("Going to ssh to", addr, command)
	cmd := exec.Command("ssh", "-o", "StrictHostKeyChecking=no", addr,
		"eval '"+command+"'")
	cmd.Stderr = os.Stderr
	cmd.Stdout = os.Stdout
	return cmd.Run()
}
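// A short usage sketch of SSHRunStdout; the user, host and command below are
// purely illustrative and not taken from the codebase.
func ExampleSSHRunStdout() {
	// Run `uptime` as user "ubuntu" on a hypothetical host, streaming its
	// output to our own stdout/stderr.
	if err := SSHRunStdout("ubuntu", "10.0.0.5", "uptime"); err != nil {
		log.Error("remote command failed:", err)
	}
}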
// GenLocalHosts will create n hosts, with the first one being connected to
// each of the other nodes if connect is true.
func GenLocalHosts(n int, connect bool, processMessages bool) []*Host {
	hosts := make([]*Host, n)
	for i := 0; i < n; i++ {
		host := NewLocalHost(2000 + i*10)
		hosts[i] = host
	}
	root := hosts[0]
	for _, host := range hosts {
		host.ListenAndBind()
		log.Lvlf3("Listening on %s %x", host.ServerIdentity.First(), host.ServerIdentity.ID)
		if processMessages {
			host.StartProcessMessages()
		}
		if connect && root != host {
			log.Lvl4("Connecting", host.ServerIdentity.First(), host.ServerIdentity.ID,
				"to", root.ServerIdentity.First(), root.ServerIdentity.ID)
			if _, err := host.Connect(root.ServerIdentity); err != nil {
				log.Fatal(host.ServerIdentity.Addresses, "Could not connect hosts",
					root.ServerIdentity.Addresses, err)
			}
			// Wait for the connection to be accepted by the root
			connected := false
			for !connected {
				time.Sleep(time.Millisecond * 10)
				root.networkLock.Lock()
				for id := range root.connections {
					if id.Equal(host.ServerIdentity.ID) {
						connected = true
						break
					}
				}
				root.networkLock.Unlock()
			}
			log.Lvl4(host.ServerIdentity.First(), "is connected to root")
		}
	}
	return hosts
}
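// Sketch of a test setup using GenLocalHosts: three hosts, all connected to
// hosts[0] and processing messages. The h.Close() cleanup call is an
// assumption about the Host API, not confirmed by this section.
func ExampleGenLocalHosts() {
	hosts := GenLocalHosts(3, true, true)
	defer func() {
		for _, h := range hosts {
			h.Close()
		}
	}()
	// ... run the actual test against hosts ...
}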
// Build builds the golang packages in `path` and stores the result in `out`.
// Besides specifying the environment variables GOOS and GOARCH you can pass
// any additional argument using the buildArgs argument. The command which
// will be executed is of the following form:
// $ go build -v buildArgs... -o out path
func Build(path, out, goarch, goos string, buildArgs ...string) (string, error) {
	var b bytes.Buffer
	buildBuffer := bufio.NewWriter(&b)
	wd, _ := os.Getwd()
	log.Lvl4("In directory", wd)
	var args []string
	args = append(args, "build", "-v")
	args = append(args, buildArgs...)
	args = append(args, "-o", out, path)
	cmd := exec.Command("go", args...)
	log.Lvl4("Building", cmd.Args, "in", path)
	cmd.Stdout = buildBuffer
	cmd.Stderr = buildBuffer
	cmd.Env = append([]string{"GOOS=" + goos, "GOARCH=" + goarch}, os.Environ()...)
	log.Lvl4("Command:", cmd.Args)
	err := cmd.Run()
	// flush the buffered writer so the build output is complete before we
	// read it back
	buildBuffer.Flush()
	log.Lvl4(b.String())
	return b.String(), err
}
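// Sketch of cross-compiling a binary with Build; the package and output
// paths are illustrative, and this assumes a working go toolchain on the
// machine running it.
func ExampleBuild() {
	out, err := Build("./simul", "/tmp/simul-linux-amd64", "amd64", "linux")
	if err != nil {
		log.Fatal("build failed:", err, out)
	}
}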
// nodeDelete needs to be separated from nodeDone, as it is also called from
// Close, but due to locking-issues here we don't lock.
func (o *Overlay) nodeDelete(tok *Token) {
	tni, ok := o.instances[tok.ID()]
	if !ok {
		log.Lvl2("Node", tok.ID(), "already gone")
		return
	}
	log.Lvl4("Closing node", tok.ID())
	err := tni.Close()
	if err != nil {
		log.Error("Error while closing node:", err)
	}
	delete(o.instances, tok.ID())
	// mark it as done
	o.instancesInfo[tok.ID()] = true
}