func (s *Server) LogReRun(nextRole string, curRole string) {
	if nextRole == "root" {
		var messg = s.Name() + " became root"
		if curRole == "root" {
			messg = s.Name() + " remained root"
		}

		go s.ConnectToLogger()

		log.WithFields(log.Fields{
			"file": logutils.File(),
			"type": "role_change",
		}).Infoln(messg)
		// dbg.Lvl4("role change: %p", s)

	} else {
		var messg = s.Name() + " remained regular"
		if curRole == "root" {
			messg = s.Name() + " became regular"
		}

		if curRole == "root" {
			log.WithFields(log.Fields{
				"file": logutils.File(),
				"type": "role_change",
			}).Infoln(messg)
			dbg.Lvl4("role change: %p", s)
		}
	}
}
// Used for goconns.
// Should only be used if clients are created in batch.
func (s *Server) ListenToClients() {
	// dbg.Lvl4("LISTENING TO CLIENTS: %p", s, s.Clients)
	for _, c := range s.Clients {
		go func(c coconet.Conn) {
			for {
				tsm := TimeStampMessage{}
				err := c.Get(&tsm)
				if err == coconet.ErrClosed {
					log.Errorf("%p failed to get from client: %v", s, err)
					s.Close()
					return
				}
				if err != nil {
					log.WithFields(log.Fields{
						"file": logutils.File(),
					}).Errorf("%p failed to get message: %v", s, err)
				}
				switch tsm.Type {
				default:
					log.Errorln("Message of unknown type")
				case StampRequestType:
					// dbg.Lvl4("STAMP REQUEST")
					s.mux.Lock()
					READING := s.READING
					s.Queue[READING] = append(s.Queue[READING],
						MustReplyMessage{Tsm: tsm, To: c.Name()})
					s.mux.Unlock()
				}
			}
		}(c)
	}
}
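// The READING index in ListenToClients suggests a double-buffered queue that a
// separate round-processing goroutine drains. The sketch below is an assumption
// about that consumer side and is not part of the original sources: the
// function name drainQueue and the drain strategy are hypothetical.
func (s *Server) drainQueue() []MustReplyMessage {
	s.mux.Lock()
	defer s.mux.Unlock()
	msgs := s.Queue[s.READING]
	// hand the listener goroutines a fresh slice so they keep appending safely
	s.Queue[s.READING] = nil
	return msgs
}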
func (sn *Node) logTotalTime(totalTime time.Duration) {
	log.WithFields(log.Fields{
		"file":  logutils.File(),
		"type":  "root_challenge total",
		"round": sn.nRounds,
		"time":  totalTime,
	}).Info("done with root challenge round " + strconv.Itoa(sn.nRounds))
}
func (sn *Node) logSecondPhase(secondRoundTime time.Duration) {
	log.WithFields(log.Fields{
		"file":  logutils.File(),
		"type":  "root_challenge second",
		"round": sn.nRounds,
		"time":  secondRoundTime,
	}).Info("done with root challenge round " + strconv.Itoa(sn.nRounds))
}
func (sn *Node) logFirstPhase(firstRoundTime time.Duration) {
	log.WithFields(log.Fields{
		"file":  logutils.File(),
		"type":  "root_announce",
		"round": sn.nRounds,
		"time":  firstRoundTime,
	}).Info("done with root announce round " + strconv.Itoa(sn.nRounds))
}
func AggregateStats(buck, roundsAfter, times []int64) string {
	muStats.Lock()
	log.WithFields(log.Fields{
		"file":        logutils.File(),
		"type":        "client_msg_stats",
		"buck":        removeTrailingZeroes(buck),
		"roundsAfter": removeTrailingZeroes(roundsAfter),
		"times":       removeTrailingZeroes(times),
	}).Info("")
	muStats.Unlock()
	return "Client Finished Aggregating Statistics"
}
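// A minimal usage sketch (assumption, mirroring the slices RunClient builds
// below): once a run is over, the collected histograms can be flushed through
// AggregateStats. The enclosing function reportStats is hypothetical.
func reportStats(buck, roundsAfter, times []int64) {
	msg := AggregateStats(buck, roundsAfter, times)
	dbg.Lvl1(msg) // e.g. "Client Finished Aggregating Statistics"
}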
func (s *Server) runAsRegular() string {
	select {
	case <-s.closeChan:
		log.WithFields(log.Fields{
			"file": logutils.File(),
			"type": "close",
		}).Infoln("server " + s.Name() + " has closed")
		return ""

	case nextRole := <-s.ViewChangeCh():
		return nextRole
	}
}
func (sn *Node) TryRootFailure(view, Round int) bool {
	if sn.IsRoot(view) && sn.FailAsRootEvery != 0 {
		if sn.RoundsAsRoot != 0 && sn.RoundsAsRoot%sn.FailAsRootEvery == 0 {
			log.Errorln(sn.Name() + " was imposed root failure on round " + strconv.Itoa(Round))
			log.WithFields(log.Fields{
				"file":  logutils.File(),
				"type":  "root_failure",
				"round": Round,
			}).Info(sn.Name() + " root imposed failure")
			// It doesn't make sense to try a view change twice:
			// what we essentially end up doing is double-setting sn.ViewChanged.
			// It is up to our followers to time us out and go to the next leader.
			// sn.TryViewChange(view + 1)
			return true
		}
	}

	return false
}
func (sn *Node) TryFailure(view, Round int) error {
	if sn.TryRootFailure(view, Round) {
		return ErrImposedFailure
	}

	if !sn.IsRoot(view) && sn.FailAsFollowerEvery != 0 && Round%sn.FailAsFollowerEvery == 0 {
		// when a failure rate is given, fail with that probability
		if (sn.FailureRate > 0 && sn.ShouldIFail("")) || (sn.FailureRate == 0) {
			log.WithFields(log.Fields{
				"file":  logutils.File(),
				"type":  "follower_failure",
				"round": Round,
			}).Info(sn.Name() + " follower imposed failure")
			return errors.New(sn.Name() + " was imposed follower failure on round " + strconv.Itoa(Round))
		}
	}

	// doing this before announcing children to avoid major drama
	if !sn.IsRoot(view) && sn.ShouldIFail("commit") {
		log.Warn(sn.Name(), "not announcing or committing for round", Round)
		return ErrImposedFailure
	}
	return nil
}
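// A hedged sketch (not from the original sources) of where TryFailure is meant
// to be consulted: at the top of a round, so that a test-imposed failure aborts
// the round before any announcement goes out. The function name startRound and
// its body are assumptions.
func (sn *Node) startRound(view, round int) error {
	if err := sn.TryFailure(view, round); err != nil {
		// ErrImposedFailure (or a follower-failure error) ends the round early
		return err
	}
	// ... the normal announcement / commit phases would follow here ...
	return nil
}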
func main() {
	deter, err := deploy.ReadConfig()
	if err != nil {
		log.Fatal("Couldn't load config-file in forkexec:", err)
	}
	conf = deter.Config
	dbg.DebugVisible = conf.Debug

	flag.Parse()

	// connect with the logging server
	if logger != "" {
		// blocks until we can connect to the logger
		lh, err := logutils.NewLoggerHook(logger, physaddr, conf.App)
		if err != nil {
			log.WithFields(log.Fields{
				"file": logutils.File(),
			}).Fatalln("Error setting up logging server:", err)
		}
		log.AddHook(lh)
	}

	setup_deter()

	i := 0
	var wg sync.WaitGroup
	virts := physToServer[physaddr]
	if len(virts) > 0 {
		dbg.Lvl3("starting timestampers for", len(virts), "client(s)", virts)
		i = (i + 1) % len(loggerports)
		for _, name := range virts {
			dbg.Lvl4("Starting", name, "on", physaddr)
			wg.Add(1)
			go func(nameport string) {
				dbg.Lvl3("Running on", physaddr, "starting", nameport)
				defer wg.Done()

				args := []string{
					"-hostname=" + nameport,
					"-logger=" + logger,
					"-physaddr=" + physaddr,
					"-amroot=" + strconv.FormatBool(nameport == rootname),
					"-test_connect=" + strconv.FormatBool(testConnect),
					"-mode=server",
					"-app=" + conf.App,
				}

				dbg.Lvl3("Starting on", physaddr, "with args", args)
				cmdApp := exec.Command("./app", args...)
				//cmd.Stdout = log.StandardLogger().Writer()
				//cmd.Stderr = log.StandardLogger().Writer()
				cmdApp.Stdout = os.Stdout
				cmdApp.Stderr = os.Stderr
				dbg.Lvl3("fork-exec is running command:", args)
				// shadow the outer err so each goroutine keeps its own error
				err := cmdApp.Run()
				if err != nil {
					dbg.Lvl2("cmd run:", err)
				}

				// get CPU usage stats
				st := cmdApp.ProcessState.SystemTime()
				ut := cmdApp.ProcessState.UserTime()
				log.WithFields(log.Fields{
					"file":     logutils.File(),
					"type":     "forkexec",
					"systime":  st,
					"usertime": ut,
				}).Info("")

				dbg.Lvl2("Finished with Timestamper", physaddr)
			}(name)
		}
		dbg.Lvl3(physaddr, "Finished starting timestampers")
		wg.Wait()
	} else {
		dbg.Lvl2("No timestampers for", physaddr)
	}
	dbg.Lvl2(physaddr, "timestampers exited")
}
func (s *Server) runAsRoot(nRounds int) string {
	// every 5 seconds start a new round
	ticker := time.Tick(ROUND_TIME)
	if s.LastRound()+1 > nRounds {
		log.Errorln(s.Name(), "runAsRoot called with too large a round number")
		return "close"
	}

	dbg.Lvl3(s.Name(), "running as root", s.LastRound(), int64(nRounds))
	for {
		select {
		case nextRole := <-s.ViewChangeCh():
			dbg.Lvl4(s.Name(), "assuming next role")
			return nextRole
			// s.reRunWith(nextRole, nRounds, true)
		case <-ticker:
			start := time.Now()
			dbg.Lvl4(s.Name(), "is STAMP SERVER STARTING SIGNING ROUND FOR:", s.LastRound()+1, "of", nRounds)

			var err error
			if s.App == "vote" {
				vote := &sign.Vote{
					Type: sign.AddVT,
					Av: &sign.AddVote{
						Parent: s.Name(),
						Name:   "test-add-node"}}
				err = s.StartVotingRound(vote)
			} else {
				err = s.StartSigningRound()
			}

			if err == sign.ChangingViewError {
				// report the view change and continue with the select
				log.WithFields(log.Fields{
					"file": logutils.File(),
					"type": "view_change",
				}).Info("Tried to start signing round on " + s.Name() + " but it reports a view change in progress")
				// skip the failed round
				time.Sleep(1 * time.Second)
				break
			} else if err != nil {
				log.Errorln(err)
				time.Sleep(1 * time.Second)
				break
			}

			if s.LastRound()+1 >= nRounds {
				log.Infoln(s.Name(), "reports exceeded the max round: terminating", s.LastRound()+1, ">=", nRounds)
				return "close"
			}

			elapsed := time.Since(start)
			log.WithFields(log.Fields{
				"file":  logutils.File(),
				"type":  "root_round",
				"round": s.LastRound(),
				"time":  elapsed,
			}).Info("root round")
		}
	}
}
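// A minimal sketch (assumption) of the outer role loop that runAsRoot and
// runAsRegular imply: the string they return names the next role, "close"
// (or an empty string after a close) terminates the server, and LogReRun
// records the transition. The function name runRoleLoop and the initial role
// are hypothetical.
func (s *Server) runRoleLoop(nRounds int) {
	role := "regular"
	for {
		var next string
		if role == "root" {
			next = s.runAsRoot(nRounds)
		} else {
			next = s.runAsRegular()
		}
		if next == "close" || next == "" {
			return
		}
		s.LogReRun(next, role)
		role = next
	}
}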
func RunClient(conf *deploy.Config, hc *config.HostConfig) {
	buck := make([]int64, 300)
	roundsAfter := make([]int64, MAX_N_ROUNDS)
	times := make([]int64, MAX_N_SECONDS*1000) // maximum number of milliseconds (maximum rate > 1 per millisecond)

	dbg.Lvl1("Going to run client and asking servers to print")
	time.Sleep(3 * time.Second)
	hc.SNodes[0].RegisterDoneFunc(RoundDone)
	start := time.Now()
	tFirst := time.Now()

	for i := 0; i < conf.Rounds; i++ {
		time.Sleep(time.Second)
		//fmt.Println("ANNOUNCING")
		hc.SNodes[0].LogTest = []byte("Hello World")
		dbg.Lvl3("Going to launch announcement", hc.SNodes[0].Name())
		start = time.Now()
		t0 := time.Now()

		err := hc.SNodes[0].StartSigningRound()
		if err != nil {
			dbg.Lvl1(err)
		}

		select {
		case msg := <-done:
			dbg.Lvl3("Received reply from children", msg)
		case <-time.After(10 * ROUND_TIME):
			dbg.Lvl3("client timed out while waiting for a response")
			continue
		}

		t := time.Since(t0)
		elapsed := time.Since(start)
		secToTimeStamp := t.Seconds()
		secSinceFirst := time.Since(tFirst).Seconds()
		atomic.AddInt64(&buck[int(secSinceFirst)], 1)
		index := int(secToTimeStamp) / int(ROUND_TIME/time.Second)
		atomic.AddInt64(&roundsAfter[index], 1)
		atomic.AddInt64(&times[i], t.Nanoseconds())

		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "root_announce",
			"round": i,
			"time":  elapsed,
		}).Info("")

		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "root_round",
			"round": i,
			"time":  elapsed,
		}).Info("root round")
	}

	log.WithFields(log.Fields{
		"file":        logutils.File(),
		"type":        "client_msg_stats",
		"buck":        removeTrailingZeroes(buck),
		"roundsAfter": removeTrailingZeroes(roundsAfter),
		"times":       removeTrailingZeroes(times),
	}).Info("")

	// And tell everybody to quit
	err := hc.SNodes[0].CloseAll(hc.SNodes[0].Round)
	if err != nil {
		log.Fatal("Couldn't close:", err)
	}
}
func RunServer(hosts *config.HostsConfig, app *config.AppConfig, depl *deploy.Config) {
	s := config.GetSuite(depl.Suite)
	poly.SUITE = s
	poly.SECURITY = poly.MODERATE
	n := len(hosts.Hosts)

	info := poly.PolyInfo{
		N: n,
		R: n,
		T: n,
	}

	indexPeer := -1
	for i, h := range hosts.Hosts {
		if h == app.Hostname {
			indexPeer = i
			break
		}
	}
	if indexPeer == -1 {
		log.Fatal("Peer", app.Hostname, "(", app.PhysAddr, ") did not find any match for its name. Abort")
	}

	start := time.Now()
	dbg.Lvl1("Creating new peer", app.Hostname, "(", app.PhysAddr, ") ...")
	// indexPeer == 0 <==> peer is root
	p := NewPeer(indexPeer, app.Hostname, info, indexPeer == 0)

	// make it listen
	dbg.Lvl2("Peer", app.Hostname, "is now listening for incoming connections")
	go p.Listen()

	// then connect it to its successors in the list
	for _, h := range hosts.Hosts[indexPeer+1:] {
		dbg.Lvl2("Peer", app.Hostname, "will connect to", h)
		// will connect and SYN with the remote peer
		p.ConnectTo(h)
	}
	// Wait until this peer is connected / SYN'd with each other peer
	p.WaitSYNs()

	if p.IsRoot() {
		delta := time.Since(start)
		dbg.Lvl2(p.String(), "connections accomplished in", delta)
		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "schnorr_connect",
			"round": 0,
			"time":  delta,
		}).Info("")
	}

	// start to record
	start = time.Now()

	// Set up the Schnorr system amongst peers
	p.SetupDistributedSchnorr()
	p.SendACKs()
	p.WaitACKs()
	dbg.Lvl1(p.String(), "completed Schnorr setup")

	// send the setup time if we're root
	if p.IsRoot() {
		delta := time.Since(start)
		dbg.Lvl2(p.String(), "setup accomplished in", delta)
		log.WithFields(log.Fields{
			"file":  logutils.File(),
			"type":  "schnorr_setup",
			"round": 0,
			"time":  delta,
		}).Info("")
	}

	for round := 0; round < depl.Rounds; round++ {
		if p.IsRoot() {
			dbg.Lvl2("Starting round", round)
		}

		// Then issue a signature!
		start = time.Now()
		msg := "hello world"

		// Only the root verifies the signature and sends a log message
		if p.IsRoot() {
			sig := p.SchnorrSigRoot([]byte(msg))
			err := p.VerifySchnorrSig(sig, []byte(msg))
			if err != nil {
				dbg.Fatal(p.String(), "could not verify schnorr signature:", err)
			}
			dbg.Lvl2(p.String(), "verified the schnorr sig!")
			// record time
			delta := time.Since(start)
			dbg.Lvl2(p.String(), "signature done in", delta)
			log.WithFields(log.Fields{
				"file":  logutils.File(),
				"type":  "schnorr_round",
				"round": round,
				"time":  delta,
			}).Info("")
		} else {
			// Compute the partial sig and send it to the root
			p.SchnorrSigPeer([]byte(msg))
		}
	}

	p.WaitFins()
	dbg.Lvl1(p.String(), "is leaving ...")

	if p.IsRoot() {
		log.WithFields(log.Fields{
			"file": logutils.File(),
			"type": "schnorr_end",
		}).Info("")
	}
}
func main() {
	deter, err := deploy.ReadConfig()
	if err != nil {
		log.Fatal("Couldn't load config-file in exec")
	}
	conf = deter.Config
	dbg.DebugVisible = conf.Debug

	flag.Parse()

	dbg.Lvl3("Running", appConf.App, appConf.Hostname, "with logger at", appConf.Logger)
	defer func() {
		log.Errorln("Terminating host", appConf.Hostname)
	}()

	// connect with the logging server
	if appConf.Logger != "" && (appConf.AmRoot || conf.Debug > 0) {
		// blocks until we can connect to the appConf.Logger
		dbg.Lvl3(appConf.Hostname, "Connecting to Logger")
		lh, err := logutils.NewLoggerHook(appConf.Logger, appConf.Hostname, conf.App)
		if err != nil {
			log.WithFields(log.Fields{
				"file": logutils.File(),
			}).Fatalln("Error setting up logging server:", err)
		}
		log.AddHook(lh)
		//log.SetOutput(ioutil.Discard)
		//fmt.Println("exiting appConf.Logger block")
		dbg.Lvl4(appConf.Hostname, "Done setting up hook")
	}

	if appConf.Mode == "server" {
		if appConf.PhysAddr == "" {
			h, _, err := net.SplitHostPort(appConf.Hostname)
			if err != nil {
				log.Fatal(appConf.Hostname, "improperly formatted hostname", os.Args)
			}
			appConf.PhysAddr = h
		}

		// run an http server to serve the cpu and memory profiles
		go func() {
			_, port, err := net.SplitHostPort(appConf.Hostname)
			if err != nil {
				log.Fatal(appConf.Hostname, "improperly formatted hostname: should be host:port")
			}
			p, _ := strconv.Atoi(port)
			// uncomment if more fine-grained memory debugging is needed
			//runtime.MemProfileRate = 1
			dbg.Lvl3(http.ListenAndServe(net.JoinHostPort(appConf.PhysAddr, strconv.Itoa(p+2)), nil))
		}()
	}

	dbg.Lvl3("Running timestamp with rFail and fFail:", conf.RFail, conf.FFail)

	switch appConf.App {
	case "coll_sign":
		coll_sign.Run(&appConf, conf)
	case "coll_stamp":
		coll_stamp.Run(&appConf, conf)
	case "schnorr_sign":
		schnorr_sign.Run(&appConf, conf)
	}
}